1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HAL_API_H_
20 #define _HAL_API_H_
21 
22 #include "qdf_types.h"
23 #include "qdf_util.h"
24 #include "qdf_atomic.h"
25 #include "hal_internal.h"
26 #include "hif.h"
27 #include "hif_io32.h"
28 #include "qdf_platform.h"
29 
30 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
31 #include "hal_hw_headers.h"
32 #endif
33 
34 /* Ring index for WBM2SW2 release ring */
35 #define HAL_IPA_TX_COMP_RING_IDX 2
36 
37 /* calculate the register address offset from bar0 of shadow register x */
38 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
39     defined(QCA_WIFI_WCN7850)
40 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x000008FC
41 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
42 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
43 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
44 #elif defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCN9000)
45 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00003024
46 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
47 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
48 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
49 #elif defined(QCA_WIFI_QCA6750)
50 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00000504
51 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
52 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
53 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
54 #else
55 #define SHADOW_REGISTER(x) 0
56 #endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 || QCA_WIFI_QCA6750 */
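/*
 * Example of the mapping above (arithmetic only; the start offset depends on
 * the target selected at build time): assuming a start offset of 0x000008FC,
 * SHADOW_REGISTER(3) = 0x8FC + (4 * 3) = 0x908 from BAR0, and the shadow
 * block ends at SHADOW_REGISTER_START_ADDRESS_OFFSET + 4 * MAX_SHADOW_REGISTERS.
 */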
57 
58 /*
59  * BAR + 4K is always accessible; any access outside this
60  * space requires the force wake procedure.
61  * OFFSET = 4K - 32 bytes = 0xFE0
62  */
63 #define MAPPED_REF_OFF 0xFE0
64 
65 #define HAL_OFFSET(block, field) block ## _ ## field ## _OFFSET
66 
67 #ifdef ENABLE_VERBOSE_DEBUG
68 static inline void
69 hal_set_verbose_debug(bool flag)
70 {
71 	is_hal_verbose_debug_enabled = flag;
72 }
73 #endif
74 
75 #ifdef ENABLE_HAL_SOC_STATS
76 #define HAL_STATS_INC(_handle, _field, _delta) \
77 { \
78 	if (likely(_handle)) \
79 		_handle->stats._field += _delta; \
80 }
81 #else
82 #define HAL_STATS_INC(_handle, _field, _delta)
83 #endif
84 
85 #ifdef ENABLE_HAL_REG_WR_HISTORY
86 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
87 	hal_reg_wr_fail_history_add(hal_soc, offset, wr_val, rd_val)
88 
89 void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
90 				 uint32_t offset,
91 				 uint32_t wr_val,
92 				 uint32_t rd_val);
93 
94 static inline int hal_history_get_next_index(qdf_atomic_t *table_index,
95 					     int array_size)
96 {
97 	int record_index = qdf_atomic_inc_return(table_index);
98 
99 	return record_index & (array_size - 1);
100 }
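/*
 * Note: the masking above is a cheap modulo and therefore assumes
 * @array_size is a power of two. A minimal caller-side sketch (the record
 * structure and table size below are hypothetical):
 *
 *	#define WR_HIST_SIZE 128
 *	struct reg_wr_rec hist[WR_HIST_SIZE];
 *	qdf_atomic_t hist_idx;
 *
 *	int idx = hal_history_get_next_index(&hist_idx, WR_HIST_SIZE);
 *	hist[idx].offset = offset;	// idx always lands in [0, 127]
 */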
101 #else
102 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
103 	hal_err("write failed at reg offset 0x%x, write 0x%x read 0x%x\n", \
104 		offset,	\
105 		wr_val,	\
106 		rd_val)
107 #endif
108 
109 /**
110  * hal_reg_write_result_check() - check register writing result
111  * @hal_soc: HAL soc handle
112  * @offset: register offset to read
113  * @exp_val: the expected value of register
115  *
116  * Return: none
117  */
118 static inline void hal_reg_write_result_check(struct hal_soc *hal_soc,
119 					      uint32_t offset,
120 					      uint32_t exp_val)
121 {
122 	uint32_t value;
123 
124 	value = qdf_ioread32(hal_soc->dev_base_addr + offset);
125 	if (exp_val != value) {
126 		HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, exp_val, value);
127 		HAL_STATS_INC(hal_soc, reg_write_fail, 1);
128 	}
129 }
130 
131 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
132     !defined(QCA_WIFI_WCN7850)
133 static inline void hal_lock_reg_access(struct hal_soc *soc,
134 				       unsigned long *flags)
135 {
136 	qdf_spin_lock_irqsave(&soc->register_access_lock);
137 }
138 
139 static inline void hal_unlock_reg_access(struct hal_soc *soc,
140 					 unsigned long *flags)
141 {
142 	qdf_spin_unlock_irqrestore(&soc->register_access_lock);
143 }
144 #else
145 static inline void hal_lock_reg_access(struct hal_soc *soc,
146 				       unsigned long *flags)
147 {
148 	pld_lock_reg_window(soc->qdf_dev->dev, flags);
149 }
150 
151 static inline void hal_unlock_reg_access(struct hal_soc *soc,
152 					 unsigned long *flags)
153 {
154 	pld_unlock_reg_window(soc->qdf_dev->dev, flags);
155 }
156 #endif
157 
158 #ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE
159 /**
160  * hal_select_window_confirm() - write remap window register and
161  *				 check writing result
162  *
163  */
164 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
165 					     uint32_t offset)
166 {
167 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
168 
169 	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
170 		      WINDOW_ENABLE_BIT | window);
171 	hal_soc->register_window = window;
172 
173 	hal_reg_write_result_check(hal_soc, WINDOW_REG_ADDRESS,
174 				   WINDOW_ENABLE_BIT | window);
175 }
176 #else
177 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
178 					     uint32_t offset)
179 {
180 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
181 
182 	if (window != hal_soc->register_window) {
183 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
184 			      WINDOW_ENABLE_BIT | window);
185 		hal_soc->register_window = window;
186 
187 		hal_reg_write_result_check(
188 					hal_soc,
189 					WINDOW_REG_ADDRESS,
190 					WINDOW_ENABLE_BIT | window);
191 	}
192 }
193 #endif
194 
195 static inline qdf_iomem_t hal_get_window_address(struct hal_soc *hal_soc,
196 						 qdf_iomem_t addr)
197 {
198 	return hal_soc->ops->hal_get_window_address(hal_soc, addr);
199 }
200 
201 static inline void hal_tx_init_cmd_credit_ring(hal_soc_handle_t hal_soc_hdl,
202 					       hal_ring_handle_t hal_ring_hdl)
203 {
204 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
205 
206 	return hal_soc->ops->hal_tx_init_cmd_credit_ring(hal_soc_hdl,
207 							 hal_ring_hdl);
208 }
209 
210 /**
211  * hal_write32_mb() - Access registers to update configuration
212  * @hal_soc: hal soc handle
213  * @offset: offset address from the BAR
214  * @value: value to write
215  *
216  * Return: None
217  *
218  * Description: Register address space is split below:
219  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
220  *  |--------------------|-------------------|------------------|
221  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
222  *
223  * 1. Any access to the shadow region doesn't need force wake
224  *    or windowing logic.
225  * 2. Any access beyond BAR + 4K:
226  *    If init_phase enabled, no force wake is needed and access
227  *    should be based on windowed or unwindowed access.
228  *    If init_phase disabled, force wake is needed and access
229  *    should be based on windowed or unwindowed access.
230  *
231  * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1
232  * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
233  * note3: WINDOW_VALUE_MASK = big enough that trying to write past
234  *                            that window would be a bug
235  */
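/*
 * Worked example of the windowing arithmetic described above (the constants
 * WINDOW_SHIFT, WINDOW_VALUE_MASK, WINDOW_START and MAX_UNWINDOWED_ADDRESS
 * are target specific and defined in hal_internal.h; the numbers here are
 * only illustrative):
 *
 *	// Assume WINDOW_SHIFT = 19, so WINDOW_RANGE_MASK = 0x7FFFF.
 *	uint32_t offset = 0x00A40054;
 *	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK; // 0x14
 *
 *	// The window register is programmed with (WINDOW_ENABLE_BIT | window)
 *	// and the access itself then lands at:
 *	//	dev_base_addr + WINDOW_START + (offset & WINDOW_RANGE_MASK)
 *	// i.e. WINDOW_START + 0x40054 for this offset.
 */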
236 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
237     !defined(QCA_WIFI_QCA6750) && !defined(QCA_WIFI_WCN7850)
238 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
239 				  uint32_t value)
240 {
241 	unsigned long flags;
242 	qdf_iomem_t new_addr;
243 
244 	if (!hal_soc->use_register_windowing ||
245 	    offset < MAX_UNWINDOWED_ADDRESS) {
246 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
247 	} else if (hal_soc->static_window_map) {
248 		new_addr = hal_get_window_address(hal_soc,
249 				hal_soc->dev_base_addr + offset);
250 		qdf_iowrite32(new_addr, value);
251 	} else {
252 		hal_lock_reg_access(hal_soc, &flags);
253 		hal_select_window_confirm(hal_soc, offset);
254 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
255 			  (offset & WINDOW_RANGE_MASK), value);
256 		hal_unlock_reg_access(hal_soc, &flags);
257 	}
258 }
259 
260 #define hal_write32_mb_confirm(_hal_soc, _offset, _value) \
261 		hal_write32_mb(_hal_soc, _offset, _value)
262 
263 #define hal_write32_mb_cmem(_hal_soc, _offset, _value)
264 #else
265 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
266 				  uint32_t value)
267 {
268 	int ret;
269 	unsigned long flags;
270 	qdf_iomem_t new_addr;
271 
272 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
273 					hal_soc->hif_handle))) {
274 		hal_err_rl("target access is not allowed");
275 		return;
276 	}
277 
278 	/* Region < BAR + 4K can be directly accessed */
279 	if (offset < MAPPED_REF_OFF) {
280 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
281 		return;
282 	}
283 
284 	/* Region greater than BAR + 4K */
285 	if (!hal_soc->init_phase) {
286 		ret = hif_force_wake_request(hal_soc->hif_handle);
287 		if (ret) {
288 			hal_err_rl("Wake up request failed");
289 			qdf_check_state_before_panic(__func__, __LINE__);
290 			return;
291 		}
292 	}
293 
294 	if (!hal_soc->use_register_windowing ||
295 	    offset < MAX_UNWINDOWED_ADDRESS) {
296 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
297 	} else if (hal_soc->static_window_map) {
298 		new_addr = hal_get_window_address(
299 					hal_soc,
300 					hal_soc->dev_base_addr + offset);
301 		qdf_iowrite32(new_addr, value);
302 	} else {
303 		hal_lock_reg_access(hal_soc, &flags);
304 		hal_select_window_confirm(hal_soc, offset);
305 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
306 			  (offset & WINDOW_RANGE_MASK), value);
307 		hal_unlock_reg_access(hal_soc, &flags);
308 	}
309 
310 	if (!hal_soc->init_phase) {
311 		ret = hif_force_wake_release(hal_soc->hif_handle);
312 		if (ret) {
313 			hal_err("Wake up release failed");
314 			qdf_check_state_before_panic(__func__, __LINE__);
315 			return;
316 		}
317 	}
318 }
319 
320 /**
321  * hal_write32_mb_confirm() - write register and check writing result
322  *
323  */
324 static inline void hal_write32_mb_confirm(struct hal_soc *hal_soc,
325 					  uint32_t offset,
326 					  uint32_t value)
327 {
328 	int ret;
329 	unsigned long flags;
330 	qdf_iomem_t new_addr;
331 
332 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
333 					hal_soc->hif_handle))) {
334 		hal_err_rl("target access is not allowed");
335 		return;
336 	}
337 
338 	/* Region < BAR + 4K can be directly accessed */
339 	if (offset < MAPPED_REF_OFF) {
340 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
341 		return;
342 	}
343 
344 	/* Region greater than BAR + 4K */
345 	if (!hal_soc->init_phase) {
346 		ret = hif_force_wake_request(hal_soc->hif_handle);
347 		if (ret) {
348 			hal_err("Wake up request failed");
349 			qdf_check_state_before_panic(__func__, __LINE__);
350 			return;
351 		}
352 	}
353 
354 	if (!hal_soc->use_register_windowing ||
355 	    offset < MAX_UNWINDOWED_ADDRESS) {
356 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
357 		hal_reg_write_result_check(hal_soc, offset,
358 					   value);
359 	} else if (hal_soc->static_window_map) {
360 		new_addr = hal_get_window_address(
361 					hal_soc,
362 					hal_soc->dev_base_addr + offset);
363 		qdf_iowrite32(new_addr, value);
364 		hal_reg_write_result_check(hal_soc,
365 					   new_addr - hal_soc->dev_base_addr,
366 					   value);
367 	} else {
368 		hal_lock_reg_access(hal_soc, &flags);
369 		hal_select_window_confirm(hal_soc, offset);
370 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
371 			  (offset & WINDOW_RANGE_MASK), value);
372 
373 		hal_reg_write_result_check(
374 				hal_soc,
375 				WINDOW_START + (offset & WINDOW_RANGE_MASK),
376 				value);
377 		hal_unlock_reg_access(hal_soc, &flags);
378 	}
379 
380 	if (!hal_soc->init_phase) {
381 		ret = hif_force_wake_release(hal_soc->hif_handle);
382 		if (ret) {
383 			hal_err("Wake up release failed");
384 			qdf_check_state_before_panic(__func__, __LINE__);
385 			return;
386 		}
387 	}
388 }
389 
390 static inline void hal_write32_mb_cmem(struct hal_soc *hal_soc, uint32_t offset,
391 				       uint32_t value)
392 {
393 	unsigned long flags;
394 	qdf_iomem_t new_addr;
395 
396 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
397 					hal_soc->hif_handle))) {
398 		hal_err_rl("%s: target access is not allowed", __func__);
399 		return;
400 	}
401 
402 	if (!hal_soc->use_register_windowing ||
403 	    offset < MAX_UNWINDOWED_ADDRESS) {
404 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
405 	} else if (hal_soc->static_window_map) {
406 		new_addr = hal_get_window_address(
407 					hal_soc,
408 					hal_soc->dev_base_addr + offset);
409 		qdf_iowrite32(new_addr, value);
410 	} else {
411 		hal_lock_reg_access(hal_soc, &flags);
412 		hal_select_window_confirm(hal_soc, offset);
413 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
414 			  (offset & WINDOW_RANGE_MASK), value);
415 		hal_unlock_reg_access(hal_soc, &flags);
416 	}
417 }
418 #endif
419 
420 /**
421  * hal_write_address_32_mb - write a value to a register
422  *
423  */
424 static inline
425 void hal_write_address_32_mb(struct hal_soc *hal_soc,
426 			     qdf_iomem_t addr, uint32_t value, bool wr_confirm)
427 {
428 	uint32_t offset;
429 
430 	if (!hal_soc->use_register_windowing)
431 		return qdf_iowrite32(addr, value);
432 
433 	offset = addr - hal_soc->dev_base_addr;
434 
435 	if (qdf_unlikely(wr_confirm))
436 		hal_write32_mb_confirm(hal_soc, offset, value);
437 	else
438 		hal_write32_mb(hal_soc, offset, value);
439 }
440 
441 
442 #ifdef DP_HAL_MULTIWINDOW_DIRECT_ACCESS
443 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
444 						struct hal_srng *srng,
445 						void __iomem *addr,
446 						uint32_t value)
447 {
448 	qdf_iowrite32(addr, value);
449 }
450 #elif defined(FEATURE_HAL_DELAYED_REG_WRITE) || \
451 	defined(FEATURE_HAL_DELAYED_REG_WRITE_V2)
452 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
453 						struct hal_srng *srng,
454 						void __iomem *addr,
455 						uint32_t value)
456 {
457 	hal_delayed_reg_write(hal_soc, srng, addr, value);
458 }
459 #else
460 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
461 						struct hal_srng *srng,
462 						void __iomem *addr,
463 						uint32_t value)
464 {
465 	hal_write_address_32_mb(hal_soc, addr, value, false);
466 }
467 #endif
468 
469 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
470     !defined(QCA_WIFI_QCA6750) && !defined(QCA_WIFI_WCN7850)
471 /**
472  * hal_read32_mb() - Access registers to read configuration
473  * @hal_soc: hal soc handle
474  * @offset: offset address from the BAR
476  *
477  * Description: Register address space is split below:
478  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
479  *  |--------------------|-------------------|------------------|
480  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
481  *
482  * 1. Any access to the shadow region doesn't need force wake
483  *    or windowing logic.
484  * 2. Any access beyond BAR + 4K:
485  *    If init_phase enabled, no force wake is needed and access
486  *    should be based on windowed or unwindowed access.
487  *    If init_phase disabled, force wake is needed and access
488  *    should be based on windowed or unwindowed access.
489  *
490  * Return: register value read from the given offset
491  */
492 static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
493 {
494 	uint32_t ret;
495 	unsigned long flags;
496 	qdf_iomem_t new_addr;
497 
498 	if (!hal_soc->use_register_windowing ||
499 	    offset < MAX_UNWINDOWED_ADDRESS) {
500 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
501 	} else if (hal_soc->static_window_map) {
502 		new_addr = hal_get_window_address(hal_soc, hal_soc->dev_base_addr + offset);
503 		return qdf_ioread32(new_addr);
504 	}
505 
506 	hal_lock_reg_access(hal_soc, &flags);
507 	hal_select_window_confirm(hal_soc, offset);
508 	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
509 		       (offset & WINDOW_RANGE_MASK));
510 	hal_unlock_reg_access(hal_soc, &flags);
511 
512 	return ret;
513 }
514 
515 #define hal_read32_mb_cmem(_hal_soc, _offset)
516 #else
517 static
518 uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
519 {
520 	uint32_t ret;
521 	unsigned long flags;
522 	qdf_iomem_t new_addr;
523 
524 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
525 					hal_soc->hif_handle))) {
526 		hal_err_rl("target access is not allowed");
527 		return 0;
528 	}
529 
530 	/* Region < BAR + 4K can be directly accessed */
531 	if (offset < MAPPED_REF_OFF)
532 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
533 
534 	if ((!hal_soc->init_phase) &&
535 	    hif_force_wake_request(hal_soc->hif_handle)) {
536 		hal_err("Wake up request failed");
537 		qdf_check_state_before_panic(__func__, __LINE__);
538 		return 0;
539 	}
540 
541 	if (!hal_soc->use_register_windowing ||
542 	    offset < MAX_UNWINDOWED_ADDRESS) {
543 		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
544 	} else if (hal_soc->static_window_map) {
545 		new_addr = hal_get_window_address(
546 					hal_soc,
547 					hal_soc->dev_base_addr + offset);
548 		ret = qdf_ioread32(new_addr);
549 	} else {
550 		hal_lock_reg_access(hal_soc, &flags);
551 		hal_select_window_confirm(hal_soc, offset);
552 		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
553 			       (offset & WINDOW_RANGE_MASK));
554 		hal_unlock_reg_access(hal_soc, &flags);
555 	}
556 
557 	if ((!hal_soc->init_phase) &&
558 	    hif_force_wake_release(hal_soc->hif_handle)) {
559 		hal_err("Wake up release failed");
560 		qdf_check_state_before_panic(__func__, __LINE__);
561 		return 0;
562 	}
563 
564 	return ret;
565 }
566 
567 static inline
568 uint32_t hal_read32_mb_cmem(struct hal_soc *hal_soc, uint32_t offset)
569 {
570 	uint32_t ret;
571 	unsigned long flags;
572 	qdf_iomem_t new_addr;
573 
574 	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
575 					hal_soc->hif_handle))) {
576 		hal_err_rl("%s: target access is not allowed", __func__);
577 		return 0;
578 	}
579 
580 	if (!hal_soc->use_register_windowing ||
581 	    offset < MAX_UNWINDOWED_ADDRESS) {
582 		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
583 	} else if (hal_soc->static_window_map) {
584 		new_addr = hal_get_window_address(
585 					hal_soc,
586 					hal_soc->dev_base_addr + offset);
587 		ret = qdf_ioread32(new_addr);
588 	} else {
589 		hal_lock_reg_access(hal_soc, &flags);
590 		hal_select_window_confirm(hal_soc, offset);
591 		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
592 			       (offset & WINDOW_RANGE_MASK));
593 		hal_unlock_reg_access(hal_soc, &flags);
594 	}
595 	return ret;
596 }
597 #endif
598 
599 /* Maximum number of retries allowed for a register write */
600 #define HAL_REG_WRITE_RETRY_MAX		5
601 /* Delay in milliseconds between retries */
602 #define HAL_REG_WRITE_RETRY_DELAY	1
603 
604 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
605 /* Upper bound (exclusive) used to check shadow config indices 0..31 */
606 #define HAL_SHADOW_REG_INDEX_LOW 32
607 /* Upper bound (exclusive) used to check shadow config indices 32..39 */
608 #define HAL_SHADOW_REG_INDEX_HIGH 40
609 /* Dirty bit reg offsets corresponding to shadow config index */
610 #define HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET 0x30C8
611 #define HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET 0x30C4
612 /* PCIE_PCIE_TOP base addr offset */
613 #define HAL_PCIE_PCIE_TOP_WRAPPER 0x01E00000
614 /* Max retry attempts to read the dirty bit reg */
615 #ifdef HAL_CONFIG_SLUB_DEBUG_ON
616 #define HAL_SHADOW_DIRTY_BIT_POLL_MAX 10000
617 #else
618 #define HAL_SHADOW_DIRTY_BIT_POLL_MAX 2000
619 #endif
620 /* Delay in usecs for polling dirty bit reg */
621 #define HAL_SHADOW_DIRTY_BIT_POLL_DELAY 5
622 
623 /**
624  * hal_poll_dirty_bit_reg() - Poll dirty register bit to confirm
625  * write was successful
626  * @hal_soc: hal soc handle
627  * @shadow_config_index: index of shadow reg used to confirm
628  * write
629  *
630  * Return: QDF_STATUS_SUCCESS on success
631  */
632 static inline QDF_STATUS hal_poll_dirty_bit_reg(struct hal_soc *hal,
633 						int shadow_config_index)
634 {
635 	uint32_t read_value = 0;
636 	int retry_cnt = 0;
637 	uint32_t reg_offset = 0;
638 
639 	if (shadow_config_index > 0 &&
640 	    shadow_config_index < HAL_SHADOW_REG_INDEX_LOW) {
641 		reg_offset =
642 			HAL_SHADOW_REG_DIRTY_BIT_DATA_LOW_OFFSET;
643 	} else if (shadow_config_index >= HAL_SHADOW_REG_INDEX_LOW &&
644 		   shadow_config_index < HAL_SHADOW_REG_INDEX_HIGH) {
645 		reg_offset =
646 			HAL_SHADOW_REG_DIRTY_BIT_DATA_HIGH_OFFSET;
647 	} else {
648 		hal_err("Invalid shadow_config_index = %d",
649 			shadow_config_index);
650 		return QDF_STATUS_E_INVAL;
651 	}
652 	while (retry_cnt < HAL_SHADOW_DIRTY_BIT_POLL_MAX) {
653 		read_value = hal_read32_mb(
654 				hal, HAL_PCIE_PCIE_TOP_WRAPPER + reg_offset);
655 		/* Check if dirty bit corresponding to shadow_index is set */
656 		if (read_value & BIT(shadow_config_index)) {
657 			/* Dirty reg bit not reset */
658 			qdf_udelay(HAL_SHADOW_DIRTY_BIT_POLL_DELAY);
659 			retry_cnt++;
660 		} else {
661 			hal_debug("Shadow write: offset 0x%x read val 0x%x",
662 				  reg_offset, read_value);
663 			return QDF_STATUS_SUCCESS;
664 		}
665 	}
666 	return QDF_STATUS_E_TIMEOUT;
667 }
668 
669 /**
670  * hal_write32_mb_shadow_confirm() - write to shadow reg and
671  * poll dirty register bit to confirm write
672  * @hal_soc: hal soc handle
673  * @reg_offset: target reg offset address from BAR
674  * @value: value to write
675  *
676  * Return: QDF_STATUS_SUCCESS on success
677  */
678 static inline QDF_STATUS hal_write32_mb_shadow_confirm(
679 	struct hal_soc *hal,
680 	uint32_t reg_offset,
681 	uint32_t value)
682 {
683 	int i;
684 	QDF_STATUS ret;
685 	uint32_t shadow_reg_offset;
686 	int shadow_config_index;
687 	bool is_reg_offset_present = false;
688 
689 	for (i = 0; i < MAX_GENERIC_SHADOW_REG; i++) {
690 		/* Found the shadow config for the reg_offset */
691 		struct shadow_reg_config *hal_shadow_reg_list =
692 			&hal->list_shadow_reg_config[i];
693 		if (hal_shadow_reg_list->target_register ==
694 			reg_offset) {
695 			shadow_config_index =
696 				hal_shadow_reg_list->shadow_config_index;
697 			shadow_reg_offset =
698 				SHADOW_REGISTER(shadow_config_index);
699 			hal_write32_mb_confirm(
700 				hal, shadow_reg_offset, value);
701 			is_reg_offset_present = true;
702 			break;
703 		}
704 		ret = QDF_STATUS_E_FAILURE;
705 	}
706 	if (is_reg_offset_present) {
707 		ret = hal_poll_dirty_bit_reg(hal, shadow_config_index);
708 		hal_info("Shadow write:reg 0x%x val 0x%x ret %d",
709 			 reg_offset, value, ret);
710 		if (QDF_IS_STATUS_ERROR(ret)) {
711 			HAL_STATS_INC(hal, shadow_reg_write_fail, 1);
712 			return ret;
713 		}
714 		HAL_STATS_INC(hal, shadow_reg_write_succ, 1);
715 	}
716 	return ret;
717 }
718 
719 /**
720  * hal_write32_mb_confirm_retry() - write register with confirmation and
721  *				     do retry/recovery if writing failed
722  * @hal_soc: hal soc handle
723  * @offset: offset address from the BAR
724  * @value: value to write
725  * @recovery: is recovery needed or not.
726  *
727  * Write the register value with confirmation and read it back; if the
728  * read-back value is not as expected, retry the write. If the retries
729  * hit the maximum allowed count but the write still fails, trigger
730  * recovery if requested.
731  *
732  * Return: None
733  */
734 static inline void hal_write32_mb_confirm_retry(struct hal_soc *hal_soc,
735 						uint32_t offset,
736 						uint32_t value,
737 						bool recovery)
738 {
739 	QDF_STATUS ret;
740 
741 	ret = hal_write32_mb_shadow_confirm(hal_soc, offset, value);
742 	if (QDF_IS_STATUS_ERROR(ret) && recovery)
743 		qdf_trigger_self_recovery(NULL, QDF_HAL_REG_WRITE_FAILURE);
744 }
745 #else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
746 
747 static inline void hal_write32_mb_confirm_retry(struct hal_soc *hal_soc,
748 						uint32_t offset,
749 						uint32_t value,
750 						bool recovery)
751 {
752 	uint8_t retry_cnt = 0;
753 	uint32_t read_value;
754 
755 	while (retry_cnt <= HAL_REG_WRITE_RETRY_MAX) {
756 		hal_write32_mb_confirm(hal_soc, offset, value);
757 		read_value = hal_read32_mb(hal_soc, offset);
758 		if (qdf_likely(read_value == value))
759 			break;
760 
761 		/* write failed, do retry */
762 		hal_warn("Retry reg offset 0x%x, value 0x%x, read value 0x%x",
763 			 offset, value, read_value);
764 		qdf_mdelay(HAL_REG_WRITE_RETRY_DELAY);
765 		retry_cnt++;
766 	}
767 
768 	if (retry_cnt > HAL_REG_WRITE_RETRY_MAX && recovery)
769 		qdf_trigger_self_recovery(NULL, QDF_HAL_REG_WRITE_FAILURE);
770 }
771 #endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
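/*
 * Usage sketch for the confirm/retry helper above (the register offset is a
 * placeholder, not a real target register): callers that must guarantee a
 * write took effect use this variant and let it trigger self recovery on
 * persistent failure.
 *
 *	#define HYPOTHETICAL_CTRL_REG_OFFSET 0x00A00010
 *
 *	hal_write32_mb_confirm_retry(hal_soc, HYPOTHETICAL_CTRL_REG_OFFSET,
 *				     value, true);
 */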
772 
773 #if defined(FEATURE_HAL_DELAYED_REG_WRITE) || \
774 	defined(FEATURE_HAL_DELAYED_REG_WRITE_V2)
775 /**
776  * hal_dump_reg_write_srng_stats() - dump SRNG reg write stats
777  * @hal_soc: HAL soc handle
778  *
779  * Return: none
780  */
781 void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl);
782 
783 /**
784  * hal_dump_reg_write_stats() - dump reg write stats
785  * @hal_soc: HAL soc handle
786  *
787  * Return: none
788  */
789 void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl);
790 
791 /**
792  * hal_get_reg_write_pending_work() - get the number of entries
793  *		pending in the workqueue to be processed.
794  * @hal_soc: HAL soc handle
795  *
796  * Returns: the number of entries pending to be processed
797  */
798 int hal_get_reg_write_pending_work(void *hal_soc);
799 
800 #else
801 static inline void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
802 {
803 }
804 
805 static inline void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
806 {
807 }
808 
809 static inline int hal_get_reg_write_pending_work(void *hal_soc)
810 {
811 	return 0;
812 }
813 #endif
814 
815 /**
816  * hal_read_address_32_mb() - Read 32-bit value from the register
817  * @soc: soc handle
818  * @addr: register address to read
819  *
820  * Return: 32-bit value
821  */
822 static inline
823 uint32_t hal_read_address_32_mb(struct hal_soc *soc,
824 				qdf_iomem_t addr)
825 {
826 	uint32_t offset;
827 	uint32_t ret;
828 
829 	if (!soc->use_register_windowing)
830 		return qdf_ioread32(addr);
831 
832 	offset = addr - soc->dev_base_addr;
833 	ret = hal_read32_mb(soc, offset);
834 	return ret;
835 }
836 
837 /**
838  * hal_attach - Initialize HAL layer
839  * @hif_handle: Opaque HIF handle
840  * @qdf_dev: QDF device
841  *
842  * Return: Opaque HAL SOC handle
843  *		 NULL on failure
844  *
845  * This function should be called as part of HIF initialization (for accessing
846  * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
847  */
848 void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev);
849 
850 /**
851  * hal_detach - Detach HAL layer
852  * @hal_soc: HAL SOC handle
853  *
854  * This function should be called as part of HIF detach
855  *
856  */
857 extern void hal_detach(void *hal_soc);
858 
859 #define HAL_SRNG_LMAC_RING 0x80000000
860 /* SRNG flags passed in hal_srng_params.flags */
861 #define HAL_SRNG_MSI_SWAP				0x00000008
862 #define HAL_SRNG_RING_PTR_SWAP			0x00000010
863 #define HAL_SRNG_DATA_TLV_SWAP			0x00000020
864 #define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
865 #define HAL_SRNG_MSI_INTR				0x00020000
866 #define HAL_SRNG_CACHED_DESC		0x00040000
867 
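/*
 * These flags are OR-ed into hal_srng_params.flags (see the structure further
 * below) before hal_srng_setup() is called; an illustrative combination for
 * an MSI driven source ring with low-threshold interrupts:
 *
 *	ring_params.flags |= HAL_SRNG_MSI_INTR |
 *			     HAL_SRNG_LOW_THRES_INTR_ENABLE;
 */
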
868 #if defined(QCA_WIFI_QCA6490)  || defined(QCA_WIFI_WCN7850)
869 #define HAL_SRNG_PREFETCH_TIMER 1
870 #else
871 #define HAL_SRNG_PREFETCH_TIMER 0
872 #endif
873 
874 #define PN_SIZE_24 0
875 #define PN_SIZE_48 1
876 #define PN_SIZE_128 2
877 
878 #ifdef FORCE_WAKE
879 /**
880  * hal_set_init_phase() - Indicate initialization of
881  *                        datapath rings
882  * @soc: hal_soc handle
883  * @init_phase: flag to indicate datapath rings
884  *              initialization status
885  *
886  * Return: None
887  */
888 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase);
889 #else
890 static inline
891 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
892 {
893 }
894 #endif /* FORCE_WAKE */
895 
896 /**
897  * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
898  * used by callers for calculating the size of memory to be allocated before
899  * calling hal_srng_setup to setup the ring
900  *
901  * @hal_soc: Opaque HAL SOC handle
902  * @ring_type: one of the types from hal_ring_type
903  *
904  */
905 extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
906 
907 /**
908  * hal_srng_max_entries - Returns maximum possible number of ring entries
909  * @hal_soc: Opaque HAL SOC handle
910  * @ring_type: one of the types from hal_ring_type
911  *
912  * Return: Maximum number of entries for the given ring_type
913  */
914 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
915 
916 void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
917 				 uint32_t low_threshold);
918 
919 /**
920  * hal_srng_dump - Dump ring status
921  * @srng: hal srng pointer
922  */
923 void hal_srng_dump(struct hal_srng *srng);
924 
925 /**
926  * hal_srng_get_dir - Returns the direction of the ring
927  * @hal_soc: Opaque HAL SOC handle
928  * @ring_type: one of the types from hal_ring_type
929  *
930  * Return: Ring direction
931  */
932 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
933 
934 /* HAL memory information */
935 struct hal_mem_info {
936 	/* dev base virtual addr */
937 	void *dev_base_addr;
938 	/* dev base physical addr */
939 	void *dev_base_paddr;
940 	/* dev base ce virtual addr - applicable only for qca5018 */
941 	/* In qca5018 CE registers are outside the wcss block, */
942 	/* so a separate address space is used to access CE registers */
943 	void *dev_base_addr_ce;
944 	/* dev base ce physical addr */
945 	void *dev_base_paddr_ce;
946 	/* Remote virtual pointer memory for HW/FW updates */
947 	void *shadow_rdptr_mem_vaddr;
948 	/* Remote physical pointer memory for HW/FW updates */
949 	void *shadow_rdptr_mem_paddr;
950 	/* Shared memory for ring pointer updates from host to FW */
951 	void *shadow_wrptr_mem_vaddr;
952 	/* Shared physical memory for ring pointer updates from host to FW */
953 	void *shadow_wrptr_mem_paddr;
954 	/* lmac srng start id */
955 	uint8_t lmac_srng_start_id;
956 };
957 
958 /* SRNG parameters to be passed to hal_srng_setup */
959 struct hal_srng_params {
960 	/* Physical base address of the ring */
961 	qdf_dma_addr_t ring_base_paddr;
962 	/* Virtual base address of the ring */
963 	void *ring_base_vaddr;
964 	/* Number of entries in ring */
965 	uint32_t num_entries;
966 	/* max transfer length */
967 	uint16_t max_buffer_length;
968 	/* MSI Address */
969 	qdf_dma_addr_t msi_addr;
970 	/* MSI data */
971 	uint32_t msi_data;
972 	/* Interrupt timer threshold – in micro seconds */
973 	uint32_t intr_timer_thres_us;
974 	/* Interrupt batch counter threshold – in number of ring entries */
975 	uint32_t intr_batch_cntr_thres_entries;
976 	/* Low threshold – in number of ring entries
977 	 * (valid for src rings only)
978 	 */
979 	uint32_t low_threshold;
980 	/* Misc flags */
981 	uint32_t flags;
982 	/* Unique ring id */
983 	uint8_t ring_id;
984 	/* Source or Destination ring */
985 	enum hal_srng_dir ring_dir;
986 	/* Size of ring entry */
987 	uint32_t entry_size;
988 	/* hw register base address */
989 	void *hwreg_base[MAX_SRNG_REG_GROUPS];
990 	/* prefetch timer config - in micro seconds */
991 	uint32_t prefetch_timer;
992 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
993 	/* Near full IRQ support flag */
994 	uint32_t nf_irq_support;
995 	/* MSI2 Address */
996 	qdf_dma_addr_t msi2_addr;
997 	/* MSI2 data */
998 	uint32_t msi2_data;
999 	/* Critical threshold */
1000 	uint16_t crit_thresh;
1001 	/* High threshold */
1002 	uint16_t high_thresh;
1003 	/* Safe threshold */
1004 	uint16_t safe_thresh;
1005 #endif
1006 };
1007 
1008 /** hal_construct_srng_shadow_regs() - initialize the shadow
1009  * registers for srngs
1010  * @hal_soc: hal handle
1011  *
1012  * Return: QDF_STATUS_SUCCESS on success
1013  */
1014 QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc);
1015 
1016 /** hal_set_one_shadow_config() - add a config for the specified ring
1017  * @hal_soc: hal handle
1018  * @ring_type: ring type
1019  * @ring_num: ring num
1020  *
1021  * The ring type and ring num uniquely specify the ring.  After this call,
1022  * the hp/tp will be added as the next entry in the shadow register
1023  * configuration table.  The hal code will use the shadow register address
1024  * in place of the hp/tp address.
1025  *
1026  * This function is exposed so that the CE module can skip configuring shadow
1027  * registers for unused rings and rings assigned to the firmware.
1028  *
1029  * Return: QDF_STATUS_SUCCESS on success
1030  */
1031 QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
1032 				     int ring_num);
1033 /**
1034  * hal_get_shadow_config() - retrieve the config table
1035  * @hal_soc: hal handle
1036  * @shadow_config: will point to the shadow config table on return
1037  * @num_shadow_registers_configured: will contain the number of valid entries
1038  */
1039 extern void hal_get_shadow_config(void *hal_soc,
1040 				  struct pld_shadow_reg_v2_cfg **shadow_config,
1041 				  int *num_shadow_registers_configured);
1042 
1043 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1044 /**
1045  * hal_srng_is_near_full_irq_supported() - Check if srng supports near full irq
1046  * @hal_soc: HAL SoC handle [To be validated by caller]
1047  * @ring_type: srng type
1048  * @ring_num: The index of the srng (of the same type)
1049  *
1050  * Return: true, if srng support near full irq trigger
1051  *	false, if the srng does not support near full irq support.
1052  */
1053 bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
1054 					 int ring_type, int ring_num);
1055 #else
1056 static inline
1057 bool hal_srng_is_near_full_irq_supported(hal_soc_handle_t hal_soc,
1058 					 int ring_type, int ring_num)
1059 {
1060 	return false;
1061 }
1062 #endif
1063 
1064 /**
1065  * hal_srng_setup - Initialize HW SRNG ring.
1066  *
1067  * @hal_soc: Opaque HAL SOC handle
1068  * @ring_type: one of the types from hal_ring_type
1069  * @ring_num: Ring number if there are multiple rings of
1070  *		same type (starting from 0)
1071  * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
1072  * @ring_params: SRNG ring params in hal_srng_params structure.
1073  *
1074  * Callers are expected to allocate contiguous ring memory of size
1075  * 'num_entries * entry_size' bytes and pass the physical and virtual base
1076  * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
1077  * structure. Ring base address should be 8 byte aligned and size of each ring
1078  * entry should be queried using the API hal_srng_get_entrysize
1079  *
1080  * Return: Opaque pointer to ring on success
1081  *		 NULL on failure (if given ring is not available)
1082  */
1083 extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
1084 	int mac_id, struct hal_srng_params *ring_params);
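/*
 * Minimal setup sketch based on the description above (illustrative only:
 * REO_DST is used as an example ring type, alignment checks and error
 * handling are omitted):
 *
 *	struct hal_srng_params ring_params = {0};
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
 *	uint32_t num_entries = hal_srng_max_entries(hal_soc, REO_DST);
 *
 *	ring_params.ring_base_vaddr = qdf_mem_alloc_consistent(
 *			qdf_dev, qdf_dev->dev, num_entries * entry_size,
 *			&ring_params.ring_base_paddr);
 *	ring_params.num_entries = num_entries;
 *
 *	void *reo_dst_srng = hal_srng_setup(hal_soc, REO_DST, 0, 0,
 *					    &ring_params);
 */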
1085 
1086 /* Remapping ids of REO rings */
1087 #define REO_REMAP_TCL 0
1088 #define REO_REMAP_SW1 1
1089 #define REO_REMAP_SW2 2
1090 #define REO_REMAP_SW3 3
1091 #define REO_REMAP_SW4 4
1092 #define REO_REMAP_RELEASE 5
1093 #define REO_REMAP_FW 6
1094 /*
1095  * In Beryllium: the 4-bit REO destination ring value is defined as: 0: TCL
1096  * 1:SW1  2:SW2  3:SW3  4:SW4  5:Release  6:FW(WIFI)  7:SW5
1097  * 8:SW6 9:SW7  10:SW8  11: NOT_USED.
1098  *
1099  */
1100 #define REO_REMAP_SW5 7
1101 #define REO_REMAP_SW6 8
1102 #define REO_REMAP_SW7 9
1103 #define REO_REMAP_SW8 10
1104 
1105 /*
1106  * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_0
1107  * to map destination to rings
1108  */
1109 #define HAL_REO_ERR_REMAP_IX0(_VALUE, _OFFSET) \
1110 	((_VALUE) << \
1111 	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0_ERROR_ ## \
1112 	  DESTINATION_RING_ ## _OFFSET ## _SHFT))
1113 
1114 /*
1115  * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_1
1116  * to map destination to rings
1117  */
1118 #define HAL_REO_ERR_REMAP_IX1(_VALUE, _OFFSET) \
1119 	((_VALUE) << \
1120 	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_1_ERROR_ ## \
1121 	  DESTINATION_RING_ ## _OFFSET ## _SHFT))
1122 
1123 /*
1124  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
1125  * to map destination to rings
1126  */
1127 #define HAL_REO_REMAP_IX0(_VALUE, _OFFSET) \
1128 	((_VALUE) << \
1129 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
1130 	  _OFFSET ## _SHFT))
1131 
1132 /*
1133  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1
1134  * to map destination to rings
1135  */
1136 #define HAL_REO_REMAP_IX2(_VALUE, _OFFSET) \
1137 	((_VALUE) << \
1138 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_DEST_RING_MAPPING_ ## \
1139 	  _OFFSET ## _SHFT))
1140 
1141 /*
1142  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3
1143  * to map destination to rings
1144  */
1145 #define HAL_REO_REMAP_IX3(_VALUE, _OFFSET) \
1146 	((_VALUE) << \
1147 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_DEST_RING_MAPPING_ ## \
1148 	  _OFFSET ## _SHFT))
1149 
1150 /**
1151  * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
1152  * @hal_soc_hdl: HAL SOC handle
1153  * @read: boolean value to indicate if read or write
1154  * @ix0: pointer to store IX0 reg value
1155  * @ix1: pointer to store IX1 reg value
1156  * @ix2: pointer to store IX2 reg value
1157  * @ix3: pointer to store IX3 reg value
1158  */
1159 void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
1160 				uint32_t *ix0, uint32_t *ix1,
1161 				uint32_t *ix2, uint32_t *ix3);
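/*
 * Illustrative use of the remap macros with hal_reo_read_write_ctrl_ix()
 * (the destination-ring choices and the IX2 field indices 16..19 are only
 * examples; NULL is passed for the registers that are not being updated):
 *
 *	uint32_t ix2 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
 *		       HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) |
 *		       HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) |
 *		       HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19);
 *
 *	hal_reo_read_write_ctrl_ix(hal_soc_hdl, false, NULL, NULL, &ix2, NULL);
 */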
1162 
1163 /**
1164  * hal_srng_dst_set_hp_paddr_confirm() - Set physical address to dest SRNG
1165  *  head pointer and confirm that write went through by reading back the value
1166  * @sring: srng pointer
1167  * @paddr: physical address
1168  *
1169  * Return: None
1170  */
1171 extern void hal_srng_dst_set_hp_paddr_confirm(struct hal_srng *sring,
1172 					      uint64_t paddr);
1173 
1174 /**
1175  * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
1176  * @hal_soc: hal_soc handle
1177  * @srng: sring pointer
1178  * @vaddr: virtual address
1179  */
1180 void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc,
1181 			  struct hal_srng *srng,
1182 			  uint32_t *vaddr);
1183 
1184 /**
1185  * hal_srng_cleanup - Deinitialize HW SRNG ring.
1186  * @hal_soc: Opaque HAL SOC handle
1187  * @hal_srng: Opaque HAL SRNG pointer
1188  */
1189 void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl);
1190 
1191 static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl)
1192 {
1193 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1194 
1195 	return !!srng->initialized;
1196 }
1197 
1198 /**
1199  * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
1200  * @hal_soc: Opaque HAL SOC handle
1201  * @hal_ring_hdl: Destination ring pointer
1202  *
1203  * Caller takes responsibility for any locking needs.
1204  *
1205  * Return: Opaque pointer for next ring entry; NULL on failure
1206  */
1207 static inline
1208 void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl,
1209 			hal_ring_handle_t hal_ring_hdl)
1210 {
1211 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1212 
1213 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
1214 		return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);
1215 
1216 	return NULL;
1217 }
1218 
1219 
1220 /**
1221  * hal_mem_dma_cache_sync - Cache sync the specified virtual address Range
1222  * @hal_soc: HAL soc handle
1223  * @desc: desc start address
1224  * @entry_size: size of memory to sync
1225  *
1226  * Return: void
1227  */
1228 #if defined(__LINUX_MIPS32_ARCH__) || defined(__LINUX_MIPS64_ARCH__)
1229 static inline void hal_mem_dma_cache_sync(struct hal_soc *soc, uint32_t *desc,
1230 					  uint32_t entry_size)
1231 {
1232 	qdf_nbuf_dma_inv_range((void *)desc, (void *)(desc + entry_size));
1233 }
1234 #else
1235 static inline void hal_mem_dma_cache_sync(struct hal_soc *soc, uint32_t *desc,
1236 					  uint32_t entry_size)
1237 {
1238 	qdf_mem_dma_cache_sync(soc->qdf_dev, qdf_mem_virt_to_phys(desc),
1239 			       QDF_DMA_FROM_DEVICE,
1240 			       (entry_size * sizeof(uint32_t)));
1241 }
1242 #endif
1243 
1244 /**
1245  * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
1246  * hal_srng_access_start if locked access is required
1247  *
1248  * @hal_soc: Opaque HAL SOC handle
1249  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1250  *
1251  * This API doesn't implement any byte-order conversion on reading hp/tp.
1252  * So, use this API only for those srngs for which the target writes hp/tp
1253  * values to the DDR in the Host order.
1254  *
1255  * Return: 0 on success; error on failure
1256  */
1257 static inline int
1258 hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl,
1259 			       hal_ring_handle_t hal_ring_hdl)
1260 {
1261 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1262 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1263 	uint32_t *desc;
1264 
1265 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
1266 		srng->u.src_ring.cached_tp =
1267 			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
1268 	else {
1269 		srng->u.dst_ring.cached_hp =
1270 			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1271 
1272 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
1273 			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
1274 			if (qdf_likely(desc)) {
1275 				hal_mem_dma_cache_sync(soc, desc,
1276 						       srng->entry_size);
1277 				qdf_prefetch(desc);
1278 			}
1279 		}
1280 	}
1281 
1282 	return 0;
1283 }
1284 
1285 /**
1286  * hal_le_srng_access_start_unlocked_in_cpu_order - Start ring access
1287  * (unlocked) with endianness correction.
1288  * @hal_soc: Opaque HAL SOC handle
1289  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1290  *
1291  * This API provides the same functionality as hal_srng_access_start_unlocked()
1292  * except that it converts the little-endian formatted hp/tp values to
1293  * Host order on reading them. So, this API should only be used for those srngs
1294  * for which the target always writes hp/tp values in little-endian order
1295  * regardless of Host order.
1296  *
1297  * Also, this API doesn't take the lock. For locked access, use
1298  * hal_srng_access_start/hal_le_srng_access_start_in_cpu_order.
1299  *
1300  * Return: 0 on success; error on failure
1301  */
1302 static inline int
1303 hal_le_srng_access_start_unlocked_in_cpu_order(
1304 	hal_soc_handle_t hal_soc_hdl,
1305 	hal_ring_handle_t hal_ring_hdl)
1306 {
1307 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1308 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
1309 	uint32_t *desc;
1310 
1311 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
1312 		srng->u.src_ring.cached_tp =
1313 			qdf_le32_to_cpu(*(volatile uint32_t *)
1314 					(srng->u.src_ring.tp_addr));
1315 	else {
1316 		srng->u.dst_ring.cached_hp =
1317 			qdf_le32_to_cpu(*(volatile uint32_t *)
1318 					(srng->u.dst_ring.hp_addr));
1319 
1320 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
1321 			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
1322 			if (qdf_likely(desc)) {
1323 				hal_mem_dma_cache_sync(soc, desc,
1324 						       srng->entry_size);
1325 				qdf_prefetch(desc);
1326 			}
1327 		}
1328 	}
1329 
1330 	return 0;
1331 }
1332 
1333 /**
1334  * hal_srng_try_access_start - Try to start (locked) ring access
1335  *
1336  * @hal_soc: Opaque HAL SOC handle
1337  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1338  *
1339  * Return: 0 on success; error on failure
1340  */
1341 static inline int hal_srng_try_access_start(hal_soc_handle_t hal_soc_hdl,
1342 					    hal_ring_handle_t hal_ring_hdl)
1343 {
1344 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1345 
1346 	if (qdf_unlikely(!hal_ring_hdl)) {
1347 		qdf_print("Error: Invalid hal_ring\n");
1348 		return -EINVAL;
1349 	}
1350 
1351 	if (!SRNG_TRY_LOCK(&(srng->lock)))
1352 		return -EINVAL;
1353 
1354 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
1355 }
1356 
1357 /**
1358  * hal_srng_access_start - Start (locked) ring access
1359  *
1360  * @hal_soc: Opaque HAL SOC handle
1361  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1362  *
1363  * This API doesn't implement any byte-order conversion on reading hp/tp.
1364  * So, use this API only for those srngs for which the target writes hp/tp
1365  * values to the DDR in the Host order.
1366  *
1367  * Return: 0 on success; error on failure
1368  */
1369 static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl,
1370 					hal_ring_handle_t hal_ring_hdl)
1371 {
1372 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1373 
1374 	if (qdf_unlikely(!hal_ring_hdl)) {
1375 		qdf_print("Error: Invalid hal_ring\n");
1376 		return -EINVAL;
1377 	}
1378 
1379 	SRNG_LOCK(&(srng->lock));
1380 
1381 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
1382 }
1383 
1384 /**
1385  * hal_le_srng_access_start_in_cpu_order - Start (locked) ring access with
1386  * endianness correction
1387  * @hal_soc: Opaque HAL SOC handle
1388  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1389  *
1390  * This API provides the same functionality as hal_srng_access_start()
1391  * except that it converts the little-endian formatted hp/tp values to
1392  * Host order on reading them. So, this API should only be used for those srngs
1393  * for which the target always writes hp/tp values in little-endian order
1394  * regardless of Host order.
1395  *
1396  * Return: 0 on success; error on failure
1397  */
1398 static inline int
1399 hal_le_srng_access_start_in_cpu_order(
1400 	hal_soc_handle_t hal_soc_hdl,
1401 	hal_ring_handle_t hal_ring_hdl)
1402 {
1403 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1404 
1405 	if (qdf_unlikely(!hal_ring_hdl)) {
1406 		qdf_print("Error: Invalid hal_ring\n");
1407 		return -EINVAL;
1408 	}
1409 
1410 	SRNG_LOCK(&(srng->lock));
1411 
1412 	return hal_le_srng_access_start_unlocked_in_cpu_order(
1413 			hal_soc_hdl, hal_ring_hdl);
1414 }
1415 
1416 /**
1417  * hal_srng_dst_get_next - Get next entry from a destination ring
1418  * @hal_soc: Opaque HAL SOC handle
1419  * @hal_ring_hdl: Destination ring pointer
1420  *
1421  * Return: Opaque pointer for next ring entry; NULL on failure
1422  */
1423 static inline
1424 void *hal_srng_dst_get_next(void *hal_soc,
1425 			    hal_ring_handle_t hal_ring_hdl)
1426 {
1427 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1428 	uint32_t *desc;
1429 
1430 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1431 		return NULL;
1432 
1433 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1434 	/* TODO: Using % is expensive, but we have to do this since
1435 	 * size of some SRNG rings is not power of 2 (due to descriptor
1436 	 * sizes). Need to create separate API for rings used
1437 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1438 	 * SW2RXDMA and CE rings)
1439 	 */
1440 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1441 	if (srng->u.dst_ring.tp == srng->ring_size)
1442 		srng->u.dst_ring.tp = 0;
1443 
1444 	if (srng->flags & HAL_SRNG_CACHED_DESC) {
1445 		struct hal_soc *soc = (struct hal_soc *)hal_soc;
1446 		uint32_t *desc_next;
1447 		uint32_t tp;
1448 
1449 		tp = srng->u.dst_ring.tp;
1450 		desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1451 		hal_mem_dma_cache_sync(soc, desc_next, srng->entry_size);
1452 		qdf_prefetch(desc_next);
1453 	}
1454 
1455 	return (void *)desc;
1456 }
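/*
 * Typical destination-ring processing loop built from the APIs above (sketch
 * only: process_entry() is hypothetical, error handling is omitted and
 * hal_srng_access_end() is the matching release call defined further down in
 * this header):
 *
 *	if (hal_srng_access_start(hal_soc_hdl, hal_ring_hdl))
 *		return;
 *
 *	while ((desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl)))
 *		process_entry(desc);
 *
 *	hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
 */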
1457 
1458 /**
1459  * hal_srng_dst_get_next_cached - Get cached next entry
1460  * @hal_soc: Opaque HAL SOC handle
1461  * @hal_ring_hdl: Destination ring pointer
1462  *
1463  * Get next entry from a destination ring and move cached tail pointer
1464  *
1465  * Return: Opaque pointer for next ring entry; NULL on failure
1466  */
1467 static inline
1468 void *hal_srng_dst_get_next_cached(void *hal_soc,
1469 				   hal_ring_handle_t hal_ring_hdl)
1470 {
1471 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1472 	uint32_t *desc;
1473 	uint32_t *desc_next;
1474 
1475 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1476 		return NULL;
1477 
1478 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1479 	/* TODO: Using % is expensive, but we have to do this since
1480 	 * size of some SRNG rings is not power of 2 (due to descriptor
1481 	 * sizes). Need to create separate API for rings used
1482 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1483 	 * SW2RXDMA and CE rings)
1484 	 */
1485 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1486 	if (srng->u.dst_ring.tp == srng->ring_size)
1487 		srng->u.dst_ring.tp = 0;
1488 
1489 	desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1490 	qdf_prefetch(desc_next);
1491 	return (void *)desc;
1492 }
1493 
1494 static inline int hal_srng_lock(hal_ring_handle_t hal_ring_hdl)
1495 {
1496 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1497 
1498 	if (qdf_unlikely(!hal_ring_hdl)) {
1499 		qdf_print("error: invalid hal_ring\n");
1500 		return -EINVAL;
1501 	}
1502 
1503 	SRNG_LOCK(&(srng->lock));
1504 	return 0;
1505 }
1506 
1507 static inline int hal_srng_unlock(hal_ring_handle_t hal_ring_hdl)
1508 {
1509 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1510 
1511 	if (qdf_unlikely(!hal_ring_hdl)) {
1512 		qdf_print("error: invalid hal_ring\n");
1513 		return -EINVAL;
1514 	}
1515 
1516 	SRNG_UNLOCK(&(srng->lock));
1517 	return 0;
1518 }
1519 
1520 /**
1521  * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
1522  * cached head pointer
1523  *
1524  * @hal_soc: Opaque HAL SOC handle
1525  * @hal_ring_hdl: Destination ring pointer
1526  *
1527  * Return: Opaque pointer for next ring entry; NULL on failure
1528  */
1529 static inline void *
1530 hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl,
1531 			 hal_ring_handle_t hal_ring_hdl)
1532 {
1533 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1534 	uint32_t *desc;
1535 	/* TODO: Using % is expensive, but we have to do this since
1536 	 * size of some SRNG rings is not power of 2 (due to descriptor
1537 	 * sizes). Need to create separate API for rings used
1538 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1539 	 * SW2RXDMA and CE rings)
1540 	 */
1541 	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
1542 		srng->ring_size;
1543 
1544 	if (next_hp != srng->u.dst_ring.tp) {
1545 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
1546 		srng->u.dst_ring.cached_hp = next_hp;
1547 		return (void *)desc;
1548 	}
1549 
1550 	return NULL;
1551 }
1552 
1553 /**
1554  * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
1555  * @hal_soc: Opaque HAL SOC handle
1556  * @hal_ring_hdl: Destination ring pointer
1557  *
1558  * Sync cached head pointer with HW.
1559  * Caller takes responsibility for any locking needs.
1560  *
1561  * Return: Opaque pointer for next ring entry; NULL on failure
1562  */
1563 static inline
1564 void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl,
1565 			     hal_ring_handle_t hal_ring_hdl)
1566 {
1567 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1568 
1569 	srng->u.dst_ring.cached_hp =
1570 		*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1571 
1572 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
1573 		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
1574 
1575 	return NULL;
1576 }
1577 
1578 /**
1579  * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring
1580  * @hal_soc: Opaque HAL SOC handle
1581  * @hal_ring_hdl: Destination ring pointer
1582  *
1583  * Sync cached head pointer with HW.
1584  * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
1585  *
1586  * Return: Opaque pointer for next ring entry; NULL on failure
1587  */
1588 static inline
1589 void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl,
1590 				    hal_ring_handle_t hal_ring_hdl)
1591 {
1592 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1593 	void *ring_desc_ptr = NULL;
1594 
1595 	if (qdf_unlikely(!hal_ring_hdl)) {
1596 		qdf_print("Error: Invalid hal_ring\n");
1597 		return  NULL;
1598 	}
1599 
1600 	SRNG_LOCK(&srng->lock);
1601 
1602 	ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl);
1603 
1604 	SRNG_UNLOCK(&srng->lock);
1605 
1606 	return ring_desc_ptr;
1607 }
1608 
1609 #define hal_srng_dst_num_valid_nolock(hal_soc, hal_ring_hdl, sync_hw_ptr) \
1610 		hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr)
1611 
1612 /**
1613  * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
1614  * by SW) in destination ring
1615  *
1616  * @hal_soc: Opaque HAL SOC handle
1617  * @hal_ring_hdl: Destination ring pointer
1618  * @sync_hw_ptr: Sync cached head pointer with HW
1619  *
1620  */
1621 static inline
1622 uint32_t hal_srng_dst_num_valid(void *hal_soc,
1623 				hal_ring_handle_t hal_ring_hdl,
1624 				int sync_hw_ptr)
1625 {
1626 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1627 	uint32_t hp;
1628 	uint32_t tp = srng->u.dst_ring.tp;
1629 
1630 	if (sync_hw_ptr) {
1631 		hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1632 		srng->u.dst_ring.cached_hp = hp;
1633 	} else {
1634 		hp = srng->u.dst_ring.cached_hp;
1635 	}
1636 
1637 	if (hp >= tp)
1638 		return (hp - tp) / srng->entry_size;
1639 
1640 	return (srng->ring_size - tp + hp) / srng->entry_size;
1641 }
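/*
 * Worked example for the wrap-around case above (numbers are illustrative):
 * with ring_size = 1024 words, entry_size = 8 words, tp = 1000 and hp = 40,
 * the count is (1024 - 1000 + 40) / 8 = 8 valid entries.
 */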
1642 
1643 /**
1644  * hal_srng_dst_inv_cached_descs - API to invalidate descriptors in batch mode
1645  * @hal_soc: Opaque HAL SOC handle
1646  * @hal_ring_hdl: Destination ring pointer
1647  * @entry_count: Number of descriptors to be invalidated
1648  *
1649  * Invalidates 'entry_count' cached descriptors starting from the
1650  * current tail pointer
1651  *
1652  * Return: None
1653  */
1654 static inline void hal_srng_dst_inv_cached_descs(void *hal_soc,
1655 						 hal_ring_handle_t hal_ring_hdl,
1656 						 uint32_t entry_count)
1657 {
1658 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1659 	uint32_t hp = srng->u.dst_ring.cached_hp;
1660 	uint32_t tp = srng->u.dst_ring.tp;
1661 	uint32_t sync_p = 0;
1662 
1663 	/*
1664 	 * If SRNG does not have cached descriptors this
1665 	 * API call should be a no op
1666 	 */
1667 	if (!(srng->flags & HAL_SRNG_CACHED_DESC))
1668 		return;
1669 
1670 	if (qdf_unlikely(entry_count == 0))
1671 		return;
1672 
1673 	sync_p = (entry_count - 1) * srng->entry_size;
1674 
1675 	if (hp > tp) {
1676 		qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[tp],
1677 				       &srng->ring_base_vaddr[tp + sync_p]
1678 				       + (srng->entry_size * sizeof(uint32_t)));
1679 	} else {
1680 		/*
1681 		 * We have wrapped around
1682 		 */
1683 		uint32_t wrap_cnt = ((srng->ring_size - tp) / srng->entry_size);
1684 
1685 		if (entry_count <= wrap_cnt) {
1686 			qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[tp],
1687 					       &srng->ring_base_vaddr[tp + sync_p] +
1688 					       (srng->entry_size * sizeof(uint32_t)));
1689 			return;
1690 		}
1691 
1692 		entry_count -= wrap_cnt;
1693 		sync_p = (entry_count - 1) * srng->entry_size;
1694 
1695 		qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[tp],
1696 				       &srng->ring_base_vaddr[srng->ring_size - srng->entry_size] +
1697 				       (srng->entry_size * sizeof(uint32_t)));
1698 
1699 		qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[0],
1700 				       &srng->ring_base_vaddr[sync_p]
1701 				       + (srng->entry_size * sizeof(uint32_t)));
1702 	}
1703 }
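
/*
 * Illustrative usage sketch (editor's note, not part of the HAL API): for
 * rings configured with HAL_SRNG_CACHED_DESC, the pending entries are
 * typically invalidated right after the HW head pointer is synced and before
 * the descriptors are read:
 *
 *	num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 1);
 *	if (num_pending)
 *		hal_srng_dst_inv_cached_descs(hal_soc, hal_ring_hdl,
 *					      num_pending);
 *	read and process the now-coherent descriptors
 */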
1704 
1705 /**
1706  * hal_srng_dst_num_valid_locked - Returns num valid entries to be processed
1707  *
1708  * @hal_soc: Opaque HAL SOC handle
1709  * @hal_ring_hdl: Destination ring pointer
1710  * @sync_hw_ptr: Sync cached head pointer with HW
1711  *
1712  * Returns number of valid entries to be processed by the host driver. The
1713  * function takes up SRNG lock.
1714  *
1715  * Return: Number of valid destination entries
1716  */
1717 static inline uint32_t
1718 hal_srng_dst_num_valid_locked(hal_soc_handle_t hal_soc,
1719 			      hal_ring_handle_t hal_ring_hdl,
1720 			      int sync_hw_ptr)
1721 {
1722 	uint32_t num_valid;
1723 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1724 
1725 	SRNG_LOCK(&srng->lock);
1726 	num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr);
1727 	SRNG_UNLOCK(&srng->lock);
1728 
1729 	return num_valid;
1730 }
1731 
1732 /**
1733  * hal_srng_sync_cachedhp - sync cachehp pointer from hw hp
 * hal_srng_sync_cachedhp - Sync the cached head pointer with the HW head pointer
1735  * @hal_soc: Opaque HAL SOC handle
1736  * @hal_ring_hdl: Destination ring pointer
1737  *
1738  */
1739 static inline
1740 void hal_srng_sync_cachedhp(void *hal_soc,
1741 				hal_ring_handle_t hal_ring_hdl)
1742 {
1743 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1744 	uint32_t hp;
1745 
1746 	hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1747 	srng->u.dst_ring.cached_hp = hp;
1748 }
1749 
1750 /**
1751  * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
1752  * pointer. This can be used to release any buffers associated with completed
1753  * ring entries. Note that this should not be used for posting new descriptor
1754  * entries. Posting of new entries should be done only using
1755  * hal_srng_src_get_next_reaped when this function is used for reaping.
1756  *
1757  * @hal_soc: Opaque HAL SOC handle
1758  * @hal_ring_hdl: Source ring pointer
1759  *
 * Return: Opaque pointer for next ring entry; NULL on failure
1761  */
1762 static inline void *
1763 hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1764 {
1765 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1766 	uint32_t *desc;
1767 
1768 	/* TODO: Using % is expensive, but we have to do this since
1769 	 * size of some SRNG rings is not power of 2 (due to descriptor
1770 	 * sizes). Need to create separate API for rings used
1771 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1772 	 * SW2RXDMA and CE rings)
1773 	 */
1774 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1775 		srng->ring_size;
1776 
1777 	if (next_reap_hp != srng->u.src_ring.cached_tp) {
1778 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1779 		srng->u.src_ring.reap_hp = next_reap_hp;
1780 		return (void *)desc;
1781 	}
1782 
1783 	return NULL;
1784 }
1785 
1786 /**
1787  * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
1788  * already reaped using hal_srng_src_reap_next, for posting new entries to
1789  * the ring
1790  *
1791  * @hal_soc: Opaque HAL SOC handle
1792  * @hal_ring_hdl: Source ring pointer
1793  *
 * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
1795  */
1796 static inline void *
1797 hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1798 {
1799 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1800 	uint32_t *desc;
1801 
1802 	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
1803 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1804 		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
1805 			srng->ring_size;
1806 
1807 		return (void *)desc;
1808 	}
1809 
1810 	return NULL;
1811 }
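
/*
 * Illustrative reap/repost sketch (editor's note, not part of the HAL API;
 * the buffer handling shown is an assumption). Completed source ring entries
 * are first reaped so their buffers can be released, and new descriptors are
 * later posted only into the already-reaped slots:
 *
 *	while ((desc = hal_srng_src_reap_next(hal_soc, hal_ring_hdl)))
 *		release the buffer referenced by desc
 *
 *	while ((desc = hal_srng_src_get_next_reaped(hal_soc, hal_ring_hdl)))
 *		fill desc with a freshly allocated buffer
 */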
1812 
1813 /**
1814  * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
1815  * move reap pointer. This API is used in detach path to release any buffers
1816  * associated with ring entries which are pending reap.
1817  *
1818  * @hal_soc: Opaque HAL SOC handle
1819  * @hal_ring_hdl: Source ring pointer
1820  *
 * Return: Opaque pointer for next ring entry; NULL on failure
1822  */
1823 static inline void *
1824 hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1825 {
1826 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1827 	uint32_t *desc;
1828 
1829 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1830 		srng->ring_size;
1831 
1832 	if (next_reap_hp != srng->u.src_ring.hp) {
1833 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1834 		srng->u.src_ring.reap_hp = next_reap_hp;
1835 		return (void *)desc;
1836 	}
1837 
1838 	return NULL;
1839 }
1840 
1841 /**
 * hal_srng_src_done_val - Get the number of source ring entries already
 * consumed (done) by HW and pending reap by SW
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Number of ring entries pending reap
1848  */
1849 static inline uint32_t
1850 hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1851 {
1852 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1853 	/* TODO: Using % is expensive, but we have to do this since
1854 	 * size of some SRNG rings is not power of 2 (due to descriptor
1855 	 * sizes). Need to create separate API for rings used
1856 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1857 	 * SW2RXDMA and CE rings)
1858 	 */
1859 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1860 		srng->ring_size;
1861 
1862 	if (next_reap_hp == srng->u.src_ring.cached_tp)
1863 		return 0;
1864 
1865 	if (srng->u.src_ring.cached_tp > next_reap_hp)
1866 		return (srng->u.src_ring.cached_tp - next_reap_hp) /
1867 			srng->entry_size;
1868 	else
1869 		return ((srng->ring_size - next_reap_hp) +
1870 			srng->u.src_ring.cached_tp) / srng->entry_size;
1871 }
1872 
1873 /**
1874  * hal_get_entrysize_from_srng() - Retrieve ring entry size
1875  * @hal_ring_hdl: Source ring pointer
1876  *
 * srng->entry_size is expressed in 4-byte dwords, so it is left-shifted
 * by 2 to return the entry size in bytes.
1879  *
1880  * Return: uint8_t
1881  */
1882 static inline
1883 uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl)
1884 {
1885 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1886 
1887 	return srng->entry_size << 2;
1888 }
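
/*
 * For example (illustrative numbers): a ring whose entries are 8 dwords wide
 * (srng->entry_size == 8) makes hal_get_entrysize_from_srng() return
 * 8 << 2 = 32 bytes.
 */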
1889 
1890 /**
1891  * hal_get_sw_hptp - Get SW head and tail pointer location for any ring
1892  * @hal_soc: Opaque HAL SOC handle
1893  * @hal_ring_hdl: Source ring pointer
1894  * @tailp: Tail Pointer
1895  * @headp: Head Pointer
1896  *
 * Return: None. Head and tail pointer values are updated through the arguments.
1898  */
1899 static inline
1900 void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
1901 		     uint32_t *tailp, uint32_t *headp)
1902 {
1903 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1904 
1905 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1906 		*headp = srng->u.src_ring.hp;
1907 		*tailp = *srng->u.src_ring.tp_addr;
1908 	} else {
1909 		*tailp = srng->u.dst_ring.tp;
1910 		*headp = *srng->u.dst_ring.hp_addr;
1911 	}
1912 }
1913 
1914 #if defined(CLEAR_SW2TCL_CONSUMED_DESC)
1915 /**
1916  * hal_srng_src_get_next_consumed - Get the next desc if consumed by HW
1917  *
1918  * @hal_soc: Opaque HAL SOC handle
1919  * @hal_ring_hdl: Source ring pointer
1920  *
1921  * Return: pointer to descriptor if consumed by HW, else NULL
1922  */
1923 static inline
1924 void *hal_srng_src_get_next_consumed(void *hal_soc,
1925 				     hal_ring_handle_t hal_ring_hdl)
1926 {
1927 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1928 	uint32_t *desc = NULL;
1929 	/* TODO: Using % is expensive, but we have to do this since
1930 	 * size of some SRNG rings is not power of 2 (due to descriptor
1931 	 * sizes). Need to create separate API for rings used
1932 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1933 	 * SW2RXDMA and CE rings)
1934 	 */
1935 	uint32_t next_entry = (srng->last_desc_cleared + srng->entry_size) %
1936 			      srng->ring_size;
1937 
1938 	if (next_entry != srng->u.src_ring.cached_tp) {
1939 		desc = &srng->ring_base_vaddr[next_entry];
1940 		srng->last_desc_cleared = next_entry;
1941 	}
1942 
1943 	return desc;
1944 }
1945 
1946 #else
1947 static inline
1948 void *hal_srng_src_get_next_consumed(void *hal_soc,
1949 				     hal_ring_handle_t hal_ring_hdl)
1950 {
1951 	return NULL;
1952 }
1953 #endif /* CLEAR_SW2TCL_CONSUMED_DESC */
1954 
1955 /**
 * hal_srng_src_get_next - Get next entry from a source ring and move cached head pointer
1957  *
1958  * @hal_soc: Opaque HAL SOC handle
1959  * @hal_ring_hdl: Source ring pointer
1960  *
 * Return: Opaque pointer for next ring entry; NULL on failure
1962  */
1963 static inline
1964 void *hal_srng_src_get_next(void *hal_soc,
1965 			    hal_ring_handle_t hal_ring_hdl)
1966 {
1967 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1968 	uint32_t *desc;
1969 	/* TODO: Using % is expensive, but we have to do this since
1970 	 * size of some SRNG rings is not power of 2 (due to descriptor
1971 	 * sizes). Need to create separate API for rings used
1972 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1973 	 * SW2RXDMA and CE rings)
1974 	 */
1975 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
1976 		srng->ring_size;
1977 
1978 	if (next_hp != srng->u.src_ring.cached_tp) {
1979 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1980 		srng->u.src_ring.hp = next_hp;
1981 		/* TODO: Since reap function is not used by all rings, we can
1982 		 * remove the following update of reap_hp in this function
1983 		 * if we can ensure that only hal_srng_src_get_next_reaped
1984 		 * is used for the rings requiring reap functionality
1985 		 */
1986 		srng->u.src_ring.reap_hp = next_hp;
1987 		return (void *)desc;
1988 	}
1989 
1990 	return NULL;
1991 }
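
/*
 * Illustrative producer-side sketch (editor's note, not part of the HAL API;
 * the error handling shown is an assumption). A new entry is claimed and
 * published under the SRNG lock taken by hal_srng_access_start():
 *
 *	if (hal_srng_access_start(hal_soc_hdl, hal_ring_hdl))
 *		return QDF_STATUS_E_FAILURE;
 *	desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
 *	if (!desc)
 *		ring is full, bail out after hal_srng_access_end()
 *	populate desc
 *	hal_srng_access_end(hal_soc, hal_ring_hdl);
 */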
1992 
1993 /**
1994  * hal_srng_src_peek_n_get_next - Get next entry from a ring without
1995  * moving head pointer.
1996  * hal_srng_src_get_next should be called subsequently to move the head pointer
1997  *
1998  * @hal_soc: Opaque HAL SOC handle
1999  * @hal_ring_hdl: Source ring pointer
2000  *
 * Return: Opaque pointer for next ring entry; NULL on failure
2002  */
2003 static inline
2004 void *hal_srng_src_peek_n_get_next(hal_soc_handle_t hal_soc_hdl,
2005 				   hal_ring_handle_t hal_ring_hdl)
2006 {
2007 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2008 	uint32_t *desc;
2009 
2010 	/* TODO: Using % is expensive, but we have to do this since
2011 	 * size of some SRNG rings is not power of 2 (due to descriptor
2012 	 * sizes). Need to create separate API for rings used
2013 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2014 	 * SW2RXDMA and CE rings)
2015 	 */
2016 	if (((srng->u.src_ring.hp + srng->entry_size) %
2017 		srng->ring_size) != srng->u.src_ring.cached_tp) {
2018 		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
2019 						srng->entry_size) %
2020 						srng->ring_size]);
2021 		return (void *)desc;
2022 	}
2023 
2024 	return NULL;
2025 }
2026 
2027 /**
 * hal_srng_src_peek_n_get_next_next - Get next to next, i.e. HP + 2 entry
2029  * from a ring without moving head pointer.
2030  *
2031  * @hal_soc: Opaque HAL SOC handle
2032  * @hal_ring_hdl: Source ring pointer
2033  *
 * Return: Opaque pointer for next to next ring entry; NULL on failure
2035  */
2036 static inline
2037 void *hal_srng_src_peek_n_get_next_next(hal_soc_handle_t hal_soc_hdl,
2038 					hal_ring_handle_t hal_ring_hdl)
2039 {
2040 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2041 	uint32_t *desc;
2042 
2043 	/* TODO: Using % is expensive, but we have to do this since
2044 	 * size of some SRNG rings is not power of 2 (due to descriptor
2045 	 * sizes). Need to create separate API for rings used
2046 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
2047 	 * SW2RXDMA and CE rings)
2048 	 */
2049 	if ((((srng->u.src_ring.hp + (srng->entry_size)) %
2050 		srng->ring_size) != srng->u.src_ring.cached_tp) &&
2051 	    (((srng->u.src_ring.hp + (srng->entry_size * 2)) %
2052 		srng->ring_size) != srng->u.src_ring.cached_tp)) {
2053 		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
2054 						(srng->entry_size * 2)) %
2055 						srng->ring_size]);
2056 		return (void *)desc;
2057 	}
2058 
2059 	return NULL;
2060 }
2061 
2062 /**
 * hal_srng_src_get_cur_hp_n_move_next() - Get the current HP and move HP
 * to the next entry in the src ring
2065  *
2066  * Usage: This API should only be used at init time replenish.
2067  *
2068  * @hal_soc_hdl: HAL soc handle
2069  * @hal_ring_hdl: Source ring pointer
2070  *
2071  */
2072 static inline void *
2073 hal_srng_src_get_cur_hp_n_move_next(hal_soc_handle_t hal_soc_hdl,
2074 				    hal_ring_handle_t hal_ring_hdl)
2075 {
2076 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2077 	uint32_t *cur_desc = NULL;
2078 	uint32_t next_hp;
2079 
2080 	cur_desc = &srng->ring_base_vaddr[(srng->u.src_ring.hp)];
2081 
2082 	next_hp = (srng->u.src_ring.hp + srng->entry_size) %
2083 		srng->ring_size;
2084 
2085 	if (next_hp != srng->u.src_ring.cached_tp)
2086 		srng->u.src_ring.hp = next_hp;
2087 
2088 	return (void *)cur_desc;
2089 }
2090 
2091 /**
2092  * hal_srng_src_num_avail - Returns number of available entries in src ring
2093  *
2094  * @hal_soc: Opaque HAL SOC handle
2095  * @hal_ring_hdl: Source ring pointer
2096  * @sync_hw_ptr: Sync cached tail pointer with HW
2097  *
2098  */
2099 static inline uint32_t
2100 hal_srng_src_num_avail(void *hal_soc,
2101 		       hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr)
2102 {
2103 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2104 	uint32_t tp;
2105 	uint32_t hp = srng->u.src_ring.hp;
2106 
2107 	if (sync_hw_ptr) {
2108 		tp = *(srng->u.src_ring.tp_addr);
2109 		srng->u.src_ring.cached_tp = tp;
2110 	} else {
2111 		tp = srng->u.src_ring.cached_tp;
2112 	}
2113 
2114 	if (tp > hp)
2115 		return ((tp - hp) / srng->entry_size) - 1;
2116 	else
2117 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
2118 }
2119 
2120 /**
2121  * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
2122  * ring head/tail pointers to HW.
2123  *
2124  * @hal_soc: Opaque HAL SOC handle
2125  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2126  *
2127  * The target expects cached head/tail pointer to be updated to the
 * shared location in little-endian order; this API ensures that.
 * This API should be used only if hal_srng_access_start_unlocked was used to
 * start ring access.
2131  *
2132  * Return: None
2133  */
2134 static inline void
2135 hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2136 {
2137 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2138 
2139 	/* TODO: See if we need a write memory barrier here */
2140 	if (srng->flags & HAL_SRNG_LMAC_RING) {
2141 		/* For LMAC rings, ring pointer updates are done through FW and
2142 		 * hence written to a shared memory location that is read by FW
2143 		 */
2144 		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2145 			*srng->u.src_ring.hp_addr =
2146 				qdf_cpu_to_le32(srng->u.src_ring.hp);
2147 		} else {
2148 			*srng->u.dst_ring.tp_addr =
2149 				qdf_cpu_to_le32(srng->u.dst_ring.tp);
2150 		}
2151 	} else {
2152 		if (srng->ring_dir == HAL_SRNG_SRC_RING)
2153 			hal_srng_write_address_32_mb(hal_soc,
2154 						     srng,
2155 						     srng->u.src_ring.hp_addr,
2156 						     srng->u.src_ring.hp);
2157 		else
2158 			hal_srng_write_address_32_mb(hal_soc,
2159 						     srng,
2160 						     srng->u.dst_ring.tp_addr,
2161 						     srng->u.dst_ring.tp);
2162 	}
2163 }
2164 
2165 /* hal_srng_access_end_unlocked already handles endianness conversion,
2166  * use the same.
2167  */
2168 #define hal_le_srng_access_end_unlocked_in_cpu_order \
2169 	hal_srng_access_end_unlocked
2170 
2171 /**
2172  * hal_srng_access_end - Unlock ring access and update cached ring head/tail
2173  * pointers to HW
2174  *
2175  * @hal_soc: Opaque HAL SOC handle
2176  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2177  *
2178  * The target expects cached head/tail pointer to be updated to the
 * shared location in little-endian order; this API ensures that.
 * This API should be used only if hal_srng_access_start was used to
 * start ring access.
 *
 * Return: None
2184  */
2185 static inline void
2186 hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2187 {
2188 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2189 
2190 	if (qdf_unlikely(!hal_ring_hdl)) {
2191 		qdf_print("Error: Invalid hal_ring\n");
2192 		return;
2193 	}
2194 
2195 	hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl);
2196 	SRNG_UNLOCK(&(srng->lock));
2197 }
2198 
2199 /* hal_srng_access_end already handles endianness conversion, so use the same */
2200 #define hal_le_srng_access_end_in_cpu_order \
2201 	hal_srng_access_end
2202 
2203 /**
2204  * hal_srng_access_end_reap - Unlock ring access
 * This should be used only if hal_srng_access_start was used to start ring
 * access, and only while reaping SRC ring completions
2207  *
2208  * @hal_soc: Opaque HAL SOC handle
2209  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2210  *
 * Return: None
2212  */
2213 static inline void
2214 hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2215 {
2216 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2217 
2218 	SRNG_UNLOCK(&(srng->lock));
2219 }
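
/*
 * Illustrative reap-only sketch (editor's note, not part of the HAL API).
 * When only reaping SRC ring completions, the access is closed with
 * hal_srng_access_end_reap() so that no head/tail pointer update is pushed
 * to HW:
 *
 *	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
 *	while ((desc = hal_srng_src_reap_next(hal_soc, hal_ring_hdl)))
 *		release the buffer referenced by desc
 *	hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
 */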
2220 
/* TODO: Check if the following definitions are available in HW headers */
2222 #define WBM_IDLE_SCATTER_BUF_SIZE 32704
2223 #define NUM_MPDUS_PER_LINK_DESC 6
2224 #define NUM_MSDUS_PER_LINK_DESC 7
2225 #define REO_QUEUE_DESC_ALIGN 128
2226 
2227 #define LINK_DESC_ALIGN 128
2228 
2229 #define ADDRESS_MATCH_TAG_VAL 0x5
/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
 * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here.
2232  */
2233 #define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
2234 
2235 /* TODO: Check with HW team on the scatter buffer size supported. As per WBM
2236  * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
2237  * should be specified in 16 word units. But the number of bits defined for
2238  * this field in HW header files is 5.
2239  */
2240 #define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
2241 
2242 
2243 /**
2244  * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
2245  * in an idle list
2246  *
2247  * @hal_soc: Opaque HAL SOC handle
2248  *
2249  */
2250 static inline
2251 uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl)
2252 {
2253 	return WBM_IDLE_SCATTER_BUF_SIZE;
2254 }
2255 
2256 /**
2257  * hal_get_link_desc_size - Get the size of each link descriptor
2258  *
2259  * @hal_soc: Opaque HAL SOC handle
2260  *
2261  */
2262 static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl)
2263 {
2264 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2265 
2266 	if (!hal_soc || !hal_soc->ops) {
2267 		qdf_print("Error: Invalid ops\n");
2268 		QDF_BUG(0);
2269 		return -EINVAL;
2270 	}
2271 	if (!hal_soc->ops->hal_get_link_desc_size) {
2272 		qdf_print("Error: Invalid function pointer\n");
2273 		QDF_BUG(0);
2274 		return -EINVAL;
2275 	}
2276 	return hal_soc->ops->hal_get_link_desc_size();
2277 }
2278 
2279 /**
2280  * hal_get_link_desc_align - Get the required start address alignment for
2281  * link descriptors
2282  *
2283  * @hal_soc: Opaque HAL SOC handle
2284  *
2285  */
2286 static inline
2287 uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl)
2288 {
2289 	return LINK_DESC_ALIGN;
2290 }
2291 
2292 /**
2293  * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
2294  *
2295  * @hal_soc: Opaque HAL SOC handle
2296  *
2297  */
2298 static inline
2299 uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
2300 {
2301 	return NUM_MPDUS_PER_LINK_DESC;
2302 }
2303 
2304 /**
2305  * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
2306  *
2307  * @hal_soc: Opaque HAL SOC handle
2308  *
2309  */
2310 static inline
2311 uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
2312 {
2313 	return NUM_MSDUS_PER_LINK_DESC;
2314 }
2315 
2316 /**
2317  * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
2318  * descriptor can hold
2319  *
2320  * @hal_soc: Opaque HAL SOC handle
2321  *
2322  */
2323 static inline
2324 uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl)
2325 {
2326 	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
2327 }
2328 
2329 /**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that can be held in a scatter buffer of the given size
2332  *
2333  * @hal_soc: Opaque HAL SOC handle
2334  * @scatter_buf_size: Size of scatter buffer
2335  *
2336  */
2337 static inline
2338 uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl,
2339 					  uint32_t scatter_buf_size)
2340 {
2341 	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
2342 		hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK);
2343 }
2344 
2345 /**
 * hal_idle_list_num_scatter_bufs - Get the number of scatter buffers of the
 * given size needed to hold the given total memory
2348  *
2349  * @hal_soc: Opaque HAL SOC handle
2350  * @total_mem: size of memory to be scattered
2351  * @scatter_buf_size: Size of scatter buffer
2352  *
2353  */
2354 static inline
2355 uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl,
2356 					uint32_t total_mem,
2357 					uint32_t scatter_buf_size)
2358 {
2359 	uint8_t rem = (total_mem % (scatter_buf_size -
2360 			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
2361 
2362 	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
2363 				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
2364 
2365 	return num_scatter_bufs;
2366 }
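
/*
 * Illustrative sketch (editor's note, not part of the HAL API) of how the
 * idle-list helpers above are typically combined when carving up the WBM
 * idle link descriptor memory:
 *
 *	scatter_buf_size = hal_idle_list_scatter_buf_size(hal_soc_hdl);
 *	num_scatter_bufs = hal_idle_list_num_scatter_bufs(hal_soc_hdl,
 *							  total_mem,
 *							  scatter_buf_size);
 *	entries_per_buf = hal_idle_scatter_buf_num_entries(hal_soc_hdl,
 *							   scatter_buf_size);
 *	allocate num_scatter_bufs buffers of scatter_buf_size bytes each and
 *	pass them to hal_setup_link_idle_list()
 */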
2367 
2368 enum hal_pn_type {
2369 	HAL_PN_NONE,
2370 	HAL_PN_WPA,
2371 	HAL_PN_WAPI_EVEN,
2372 	HAL_PN_WAPI_UNEVEN,
2373 };
2374 
2375 #define HAL_RX_MAX_BA_WINDOW 256
2376 
2377 /**
2378  * hal_get_reo_qdesc_align - Get start address alignment for reo
2379  * queue descriptors
2380  *
2381  * @hal_soc: Opaque HAL SOC handle
2382  *
2383  */
2384 static inline
2385 uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl)
2386 {
2387 	return REO_QUEUE_DESC_ALIGN;
2388 }
2389 
2390 /**
2391  * hal_srng_get_hp_addr - Get head pointer physical address
2392  *
2393  * @hal_soc: Opaque HAL SOC handle
2394  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2395  *
2396  */
2397 static inline qdf_dma_addr_t
2398 hal_srng_get_hp_addr(void *hal_soc,
2399 		     hal_ring_handle_t hal_ring_hdl)
2400 {
2401 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2402 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
2403 
2404 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2405 		return hal->shadow_wrptr_mem_paddr +
2406 		  ((unsigned long)(srng->u.src_ring.hp_addr) -
2407 		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
2408 	} else {
2409 		return hal->shadow_rdptr_mem_paddr +
2410 		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
2411 		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
2412 	}
2413 }
2414 
2415 /**
2416  * hal_srng_get_tp_addr - Get tail pointer physical address
2417  *
2418  * @hal_soc: Opaque HAL SOC handle
2419  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2420  *
2421  */
2422 static inline qdf_dma_addr_t
2423 hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
2424 {
2425 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2426 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
2427 
2428 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2429 		return hal->shadow_rdptr_mem_paddr +
2430 			((unsigned long)(srng->u.src_ring.tp_addr) -
2431 			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
2432 	} else {
2433 		return hal->shadow_wrptr_mem_paddr +
2434 			((unsigned long)(srng->u.dst_ring.tp_addr) -
2435 			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
2436 	}
2437 }
2438 
2439 /**
2440  * hal_srng_get_num_entries - Get total entries in the HAL Srng
2441  *
2442  * @hal_soc: Opaque HAL SOC handle
2443  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2444  *
2445  * Return: total number of entries in hal ring
2446  */
2447 static inline
2448 uint32_t hal_srng_get_num_entries(hal_soc_handle_t hal_soc_hdl,
2449 				  hal_ring_handle_t hal_ring_hdl)
2450 {
2451 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2452 
2453 	return srng->num_entries;
2454 }
2455 
2456 /**
2457  * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
2458  *
2459  * @hal_soc: Opaque HAL SOC handle
2460  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2461  * @ring_params: SRNG parameters will be returned through this structure
2462  */
2463 void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
2464 			 hal_ring_handle_t hal_ring_hdl,
2465 			 struct hal_srng_params *ring_params);
2466 
2467 /**
 * hal_get_meminfo - Retrieve hal memory base address
2469  *
2470  * @hal_soc: Opaque HAL SOC handle
2471  * @mem: pointer to structure to be updated with hal mem info
2472  */
2473 void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem);
2474 
2475 /**
2476  * hal_get_target_type - Return target type
2477  *
2478  * @hal_soc: Opaque HAL SOC handle
2479  */
2480 uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl);
2481 
2482 /**
2483  * hal_srng_dst_hw_init - Private function to initialize SRNG
2484  * destination ring HW
2485  * @hal_soc: HAL SOC handle
2486  * @srng: SRNG ring pointer
2487  */
2488 static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
2489 	struct hal_srng *srng)
2490 {
2491 	hal->ops->hal_srng_dst_hw_init(hal, srng);
2492 }
2493 
2494 /**
2495  * hal_srng_src_hw_init - Private function to initialize SRNG
2496  * source ring HW
2497  * @hal_soc: HAL SOC handle
2498  * @srng: SRNG ring pointer
2499  */
2500 static inline void hal_srng_src_hw_init(struct hal_soc *hal,
2501 	struct hal_srng *srng)
2502 {
2503 	hal->ops->hal_srng_src_hw_init(hal, srng);
2504 }
2505 
2506 /**
2507  * hal_get_hw_hptp()  - Get HW head and tail pointer value for any ring
2508  * @hal_soc: Opaque HAL SOC handle
2509  * @hal_ring_hdl: Source ring pointer
2510  * @headp: Head Pointer
2511  * @tailp: Tail Pointer
 * @ring_type: Ring type
2513  *
 * Return: None. Head and tail pointer values are updated through the arguments.
2515  */
2516 static inline
2517 void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl,
2518 		     hal_ring_handle_t hal_ring_hdl,
2519 		     uint32_t *headp, uint32_t *tailp,
2520 		     uint8_t ring_type)
2521 {
2522 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2523 
2524 	hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl,
2525 			headp, tailp, ring_type);
2526 }
2527 
2528 /**
2529  * hal_reo_setup - Initialize HW REO block
2530  *
2531  * @hal_soc: Opaque HAL SOC handle
2532  * @reo_params: parameters needed by HAL for REO config
2533  */
2534 static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl,
2535 				 void *reoparams)
2536 {
2537 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2538 
2539 	hal_soc->ops->hal_reo_setup(hal_soc, reoparams);
2540 }
2541 
2542 static inline
2543 void hal_compute_reo_remap_ix2_ix3(hal_soc_handle_t hal_soc_hdl,
2544 				   uint32_t *ring, uint32_t num_rings,
2545 				   uint32_t *remap1, uint32_t *remap2)
2546 {
2547 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2548 
2549 	return hal_soc->ops->hal_compute_reo_remap_ix2_ix3(ring,
2550 					num_rings, remap1, remap2);
2551 }
2552 
2553 /**
2554  * hal_setup_link_idle_list - Setup scattered idle list using the
2555  * buffer list provided
2556  *
2557  * @hal_soc: Opaque HAL SOC handle
2558  * @scatter_bufs_base_paddr: Array of physical base addresses
2559  * @scatter_bufs_base_vaddr: Array of virtual base addresses
2560  * @num_scatter_bufs: Number of scatter buffers in the above lists
2561  * @scatter_buf_size: Size of each scatter buffer
2562  * @last_buf_end_offset: Offset to the last entry
2563  * @num_entries: Total entries of all scatter bufs
2564  *
2565  */
2566 static inline
2567 void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
2568 			      qdf_dma_addr_t scatter_bufs_base_paddr[],
2569 			      void *scatter_bufs_base_vaddr[],
2570 			      uint32_t num_scatter_bufs,
2571 			      uint32_t scatter_buf_size,
2572 			      uint32_t last_buf_end_offset,
2573 			      uint32_t num_entries)
2574 {
2575 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2576 
2577 	hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr,
2578 			scatter_bufs_base_vaddr, num_scatter_bufs,
2579 			scatter_buf_size, last_buf_end_offset,
2580 			num_entries);
2581 
2582 }
2583 
2584 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
2585 /**
2586  * hal_dump_rx_reo_queue_desc() - Dump reo queue descriptor fields
2587  * @hw_qdesc_vaddr_aligned: Pointer to hw reo queue desc virtual addr
2588  *
2589  * Use the virtual addr pointer to reo h/w queue desc to read
2590  * the values from ddr and log them.
2591  *
2592  * Return: none
2593  */
2594 static inline void hal_dump_rx_reo_queue_desc(
2595 	void *hw_qdesc_vaddr_aligned)
2596 {
2597 	struct rx_reo_queue *hw_qdesc =
2598 		(struct rx_reo_queue *)hw_qdesc_vaddr_aligned;
2599 
2600 	if (!hw_qdesc)
2601 		return;
2602 
2603 	hal_info("receive_queue_number %u vld %u window_jump_2k %u"
2604 		 " hole_count %u ba_window_size %u ignore_ampdu_flag %u"
2605 		 " svld %u ssn %u current_index %u"
2606 		 " disable_duplicate_detection %u soft_reorder_enable %u"
2607 		 " chk_2k_mode %u oor_mode %u mpdu_frames_processed_count %u"
2608 		 " msdu_frames_processed_count %u total_processed_byte_count %u"
2609 		 " late_receive_mpdu_count %u seq_2k_error_detected_flag %u"
2610 		 " pn_error_detected_flag %u current_mpdu_count %u"
2611 		 " current_msdu_count %u timeout_count %u"
2612 		 " forward_due_to_bar_count %u duplicate_count %u"
2613 		 " frames_in_order_count %u bar_received_count %u"
2614 		 " pn_check_needed %u pn_shall_be_even %u"
2615 		 " pn_shall_be_uneven %u pn_size %u",
2616 		 hw_qdesc->receive_queue_number,
2617 		 hw_qdesc->vld,
2618 		 hw_qdesc->window_jump_2k,
2619 		 hw_qdesc->hole_count,
2620 		 hw_qdesc->ba_window_size,
2621 		 hw_qdesc->ignore_ampdu_flag,
2622 		 hw_qdesc->svld,
2623 		 hw_qdesc->ssn,
2624 		 hw_qdesc->current_index,
2625 		 hw_qdesc->disable_duplicate_detection,
2626 		 hw_qdesc->soft_reorder_enable,
2627 		 hw_qdesc->chk_2k_mode,
2628 		 hw_qdesc->oor_mode,
2629 		 hw_qdesc->mpdu_frames_processed_count,
2630 		 hw_qdesc->msdu_frames_processed_count,
2631 		 hw_qdesc->total_processed_byte_count,
2632 		 hw_qdesc->late_receive_mpdu_count,
2633 		 hw_qdesc->seq_2k_error_detected_flag,
2634 		 hw_qdesc->pn_error_detected_flag,
2635 		 hw_qdesc->current_mpdu_count,
2636 		 hw_qdesc->current_msdu_count,
2637 		 hw_qdesc->timeout_count,
2638 		 hw_qdesc->forward_due_to_bar_count,
2639 		 hw_qdesc->duplicate_count,
2640 		 hw_qdesc->frames_in_order_count,
2641 		 hw_qdesc->bar_received_count,
2642 		 hw_qdesc->pn_check_needed,
2643 		 hw_qdesc->pn_shall_be_even,
2644 		 hw_qdesc->pn_shall_be_uneven,
2645 		 hw_qdesc->pn_size);
2646 }
2647 
2648 #else /* DUMP_REO_QUEUE_INFO_IN_DDR */
2649 
2650 static inline void hal_dump_rx_reo_queue_desc(
2651 	void *hw_qdesc_vaddr_aligned)
2652 {
2653 }
2654 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
2655 
2656 /**
2657  * hal_srng_dump_ring_desc() - Dump ring descriptor info
2658  *
2659  * @hal_soc: Opaque HAL SOC handle
2660  * @hal_ring_hdl: Source ring pointer
2661  * @ring_desc: Opaque ring descriptor handle
2662  */
2663 static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl,
2664 					   hal_ring_handle_t hal_ring_hdl,
2665 					   hal_ring_desc_t ring_desc)
2666 {
2667 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2668 
2669 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2670 			   ring_desc, (srng->entry_size << 2));
2671 }
2672 
2673 /**
2674  * hal_srng_dump_ring() - Dump last 128 descs of the ring
2675  *
2676  * @hal_soc: Opaque HAL SOC handle
2677  * @hal_ring_hdl: Source ring pointer
2678  */
2679 static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl,
2680 				      hal_ring_handle_t hal_ring_hdl)
2681 {
2682 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2683 	uint32_t *desc;
2684 	uint32_t tp, i;
2685 
2686 	tp = srng->u.dst_ring.tp;
2687 
2688 	for (i = 0; i < 128; i++) {
2689 		if (!tp)
2690 			tp = srng->ring_size;
2691 
2692 		desc = &srng->ring_base_vaddr[tp - srng->entry_size];
2693 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
2694 				   QDF_TRACE_LEVEL_DEBUG,
2695 				   desc, (srng->entry_size << 2));
2696 
2697 		tp -= srng->entry_size;
2698 	}
2699 }
2700 
/**
 * hal_rxdma_desc_to_hal_ring_desc - API to convert rxdma ring desc
 * to opaque dp_ring desc type
 * @ring_desc: rxdma ring desc
 *
 * Return: hal_ring_desc_t type
2707  */
2708 static inline
2709 hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc)
2710 {
2711 	return (hal_ring_desc_t)ring_desc;
2712 }
2713 
2714 /**
2715  * hal_srng_set_event() - Set hal_srng event
2716  * @hal_ring_hdl: Source ring pointer
2717  * @event: SRNG ring event
2718  *
2719  * Return: None
2720  */
2721 static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event)
2722 {
2723 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2724 
2725 	qdf_atomic_set_bit(event, &srng->srng_event);
2726 }
2727 
2728 /**
2729  * hal_srng_clear_event() - Clear hal_srng event
2730  * @hal_ring_hdl: Source ring pointer
2731  * @event: SRNG ring event
2732  *
2733  * Return: None
2734  */
2735 static inline
2736 void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
2737 {
2738 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2739 
2740 	qdf_atomic_clear_bit(event, &srng->srng_event);
2741 }
2742 
2743 /**
2744  * hal_srng_get_clear_event() - Clear srng event and return old value
2745  * @hal_ring_hdl: Source ring pointer
2746  * @event: SRNG ring event
2747  *
2748  * Return: Return old event value
2749  */
2750 static inline
2751 int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
2752 {
2753 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2754 
2755 	return qdf_atomic_test_and_clear_bit(event, &srng->srng_event);
2756 }
2757 
2758 /**
2759  * hal_srng_set_flush_last_ts() - Record last flush time stamp
2760  * @hal_ring_hdl: Source ring pointer
2761  *
2762  * Return: None
2763  */
2764 static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl)
2765 {
2766 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2767 
2768 	srng->last_flush_ts = qdf_get_log_timestamp();
2769 }
2770 
2771 /**
2772  * hal_srng_inc_flush_cnt() - Increment flush counter
2773  * @hal_ring_hdl: Source ring pointer
2774  *
2775  * Return: None
2776  */
2777 static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl)
2778 {
2779 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2780 
2781 	srng->flush_count++;
2782 }
2783 
2784 /**
 * hal_rx_sw_mon_desc_info_get() - Get SW monitor desc info
2786  *
2787  * @hal: Core HAL soc handle
2788  * @ring_desc: Mon dest ring descriptor
2789  * @desc_info: Desc info to be populated
2790  *
 * Return: None
2792  */
2793 static inline void
2794 hal_rx_sw_mon_desc_info_get(struct hal_soc *hal,
2795 			    hal_ring_desc_t ring_desc,
2796 			    hal_rx_mon_desc_info_t desc_info)
2797 {
2798 	return hal->ops->hal_rx_sw_mon_desc_info_get(ring_desc, desc_info);
2799 }
2800 
2801 /**
2802  * hal_reo_set_err_dst_remap() - Set REO error destination ring remap
2803  *				 register value.
2804  *
2805  * @hal_soc_hdl: Opaque HAL soc handle
2806  *
2807  * Return: None
2808  */
2809 static inline void hal_reo_set_err_dst_remap(hal_soc_handle_t hal_soc_hdl)
2810 {
2811 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2812 
2813 	if (hal_soc->ops->hal_reo_set_err_dst_remap)
2814 		hal_soc->ops->hal_reo_set_err_dst_remap(hal_soc);
2815 }
2816 
2817 /**
2818  * hal_reo_enable_pn_in_dest() - Subscribe for previous PN for 2k-jump or
2819  *			OOR error frames
2820  * @hal_soc_hdl: Opaque HAL soc handle
2821  *
2822  * Return: true if feature is enabled,
 *	false otherwise.
2824  */
2825 static inline uint8_t
2826 hal_reo_enable_pn_in_dest(hal_soc_handle_t hal_soc_hdl)
2827 {
2828 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2829 
2830 	if (hal_soc->ops->hal_reo_enable_pn_in_dest)
2831 		return hal_soc->ops->hal_reo_enable_pn_in_dest(hal_soc);
2832 
2833 	return 0;
2834 }
2835 
2836 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
2837 
2838 /**
2839  * hal_set_one_target_reg_config() - Populate the target reg
2840  * offset in hal_soc for one non srng related register at the
2841  * given list index
2842  * @hal_soc: hal handle
2843  * @target_reg_offset: target register offset
2844  * @list_index: index in hal list for shadow regs
2845  *
2846  * Return: none
2847  */
2848 void hal_set_one_target_reg_config(struct hal_soc *hal,
2849 				   uint32_t target_reg_offset,
2850 				   int list_index);
2851 
2852 /**
2853  * hal_set_shadow_regs() - Populate register offset for
2854  * registers that need to be populated in list_shadow_reg_config
2855  * in order to be sent to FW. These reg offsets will be mapped
2856  * to shadow registers.
2857  * @hal_soc: hal handle
2858  *
2859  * Return: QDF_STATUS_OK on success
2860  */
2861 QDF_STATUS hal_set_shadow_regs(void *hal_soc);
2862 
2863 /**
2864  * hal_construct_shadow_regs() - initialize the shadow registers
2865  * for non-srng related register configs
2866  * @hal_soc: hal handle
2867  *
2868  * Return: QDF_STATUS_OK on success
2869  */
2870 QDF_STATUS hal_construct_shadow_regs(void *hal_soc);
2871 
2872 #else /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
2873 static inline void hal_set_one_target_reg_config(
2874 	struct hal_soc *hal,
2875 	uint32_t target_reg_offset,
2876 	int list_index)
2877 {
2878 }
2879 
2880 static inline QDF_STATUS hal_set_shadow_regs(void *hal_soc)
2881 {
2882 	return QDF_STATUS_SUCCESS;
2883 }
2884 
2885 static inline QDF_STATUS hal_construct_shadow_regs(void *hal_soc)
2886 {
2887 	return QDF_STATUS_SUCCESS;
2888 }
2889 #endif /* GENERIC_SHADOW_REGISTER_ACCESS_ENABLE */
2890 
2891 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
2892 /**
2893  * hal_flush_reg_write_work() - flush all writes from register write queue
 * @hal_handle: hal_soc handle
2895  *
2896  * Return: None
2897  */
2898 void hal_flush_reg_write_work(hal_soc_handle_t hal_handle);
2899 
2900 #else
2901 static inline void hal_flush_reg_write_work(hal_soc_handle_t hal_handle) { }
2902 #endif
2903 
2904 /**
2905  * hal_get_ring_usage - Calculate the ring usage percentage
2906  * @hal_ring_hdl: Ring pointer
2907  * @ring_type: Ring type
2908  * @headp: pointer to head value
2909  * @tailp: pointer to tail value
2910  *
2911  * Calculate the ring usage percentage for src and dest rings
2912  *
2913  * Return: Ring usage percentage
2914  */
2915 static inline
2916 uint32_t hal_get_ring_usage(
2917 	hal_ring_handle_t hal_ring_hdl,
2918 	enum hal_ring_type ring_type, uint32_t *headp, uint32_t *tailp)
2919 {
2920 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2921 	uint32_t num_avail, num_valid = 0;
2922 	uint32_t ring_usage;
2923 
2924 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
2925 		if (*tailp > *headp)
2926 			num_avail =  ((*tailp - *headp) / srng->entry_size) - 1;
2927 		else
2928 			num_avail = ((srng->ring_size - *headp + *tailp) /
2929 				     srng->entry_size) - 1;
2930 		if (ring_type == WBM_IDLE_LINK)
2931 			num_valid = num_avail;
2932 		else
2933 			num_valid = srng->num_entries - num_avail;
2934 	} else {
2935 		if (*headp >= *tailp)
2936 			num_valid = ((*headp - *tailp) / srng->entry_size);
2937 		else
2938 			num_valid = ((srng->ring_size - *tailp + *headp) /
2939 				     srng->entry_size);
2940 	}
2941 	ring_usage = (100 * num_valid) / srng->num_entries;
2942 	return ring_usage;
2943 }
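
/*
 * Worked example (illustrative numbers only): for a destination ring with
 * entry_size = 8 dwords and num_entries = 128 (ring_size = 1024), a head of
 * *headp = 400 and a tail of *tailp = 80 give
 * num_valid = (400 - 80) / 8 = 40 entries, so the reported usage is
 * (100 * 40) / 128 = 31 (percent).
 */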
2944 
2945 /**
2946  * hal_cmem_write() - function for CMEM buffer writing
2947  * @hal_soc_hdl: HAL SOC handle
2948  * @offset: CMEM address
2949  * @value: value to write
2950  *
2951  * Return: None.
2952  */
2953 static inline void
2954 hal_cmem_write(hal_soc_handle_t hal_soc_hdl, uint32_t offset,
2955 	       uint32_t value)
2956 {
2957 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2958 
2959 	if (hal_soc->ops->hal_cmem_write)
2960 		hal_soc->ops->hal_cmem_write(hal_soc_hdl, offset, value);
2961 
2962 	return;
2963 }
#endif /* _HAL_API_H_ */
2965