/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _HAL_API_H_
#define _HAL_API_H_

#include "qdf_types.h"
#include "qdf_util.h"
#include "qdf_atomic.h"
#include "hal_internal.h"
#include "hif.h"
#include "hif_io32.h"
#include "qdf_platform.h"

/* calculate the register address offset from bar0 of shadow register x */
#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
#define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x000008FC
#define SHADOW_REGISTER_END_ADDRESS_OFFSET \
	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
#define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
#elif defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCN9000)
#define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00003024
#define SHADOW_REGISTER_END_ADDRESS_OFFSET \
	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
#define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
#elif defined(QCA_WIFI_QCA6750)
#define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00000504
#define SHADOW_REGISTER_END_ADDRESS_OFFSET \
	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
#define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
#else
#define SHADOW_REGISTER(x) 0
#endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 || QCA_WIFI_QCA6750 */

#define MAX_UNWINDOWED_ADDRESS 0x80000
#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
    defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6750)
#define WINDOW_ENABLE_BIT 0x40000000
#else
#define WINDOW_ENABLE_BIT 0x80000000
#endif
#define WINDOW_REG_ADDRESS 0x310C
#define WINDOW_SHIFT 19
#define WINDOW_VALUE_MASK 0x3F
#define WINDOW_START MAX_UNWINDOWED_ADDRESS
#define WINDOW_RANGE_MASK 0x7FFFF
/*
 * BAR + 4K is always accessible, any access outside this
 * space requires force wake procedure.
 * OFFSET = 4K - 32 bytes = 0xFE0
 */
#define MAPPED_REF_OFF 0xFE0
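
/*
 * Worked example of the windowing scheme above (values are only
 * illustrative): with WINDOW_SHIFT = 19 and WINDOW_VALUE_MASK = 0x3F,
 * an offset of 0xA0010 selects window (0xA0010 >> 19) & 0x3F = 1 and
 * an in-window offset of 0xA0010 & WINDOW_RANGE_MASK = 0x20010; the
 * access then goes to WINDOW_START + 0x20010 once WINDOW_REG_ADDRESS
 * has been programmed with (WINDOW_ENABLE_BIT | 1).
 */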

#ifdef ENABLE_VERBOSE_DEBUG
static inline void
hal_set_verbose_debug(bool flag)
{
	is_hal_verbose_debug_enabled = flag;
}
#endif

#ifdef ENABLE_HAL_SOC_STATS
#define HAL_STATS_INC(_handle, _field, _delta) \
{ \
	if (likely(_handle)) \
		_handle->stats._field += _delta; \
}
#else
#define HAL_STATS_INC(_handle, _field, _delta)
#endif

#ifdef ENABLE_HAL_REG_WR_HISTORY
#define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
	hal_reg_wr_fail_history_add(hal_soc, offset, wr_val, rd_val)

void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
				 uint32_t offset,
				 uint32_t wr_val,
				 uint32_t rd_val);

static inline int hal_history_get_next_index(qdf_atomic_t *table_index,
					     int array_size)
{
	int record_index = qdf_atomic_inc_return(table_index);

	return record_index & (array_size - 1);
}
#else
#define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
	hal_err("write failed at reg offset 0x%x, write 0x%x read 0x%x\n", \
		offset,	\
		wr_val,	\
		rd_val)
#endif
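
/*
 * A minimal usage sketch for the history helpers above (assumes
 * ENABLE_HAL_REG_WR_HISTORY; note the masking in
 * hal_history_get_next_index() wraps correctly only when the table
 * size is a power of two):
 *
 *	qdf_atomic_t idx;	// e.g. embedded in a history struct
 *	int slot;
 *
 *	qdf_atomic_init(&idx);
 *	slot = hal_history_get_next_index(&idx, 32);
 *	// successive calls yield 1, 2, ..., 31, 0, 1, ...
 */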

/**
 * hal_reg_write_result_check() - check register writing result
 * @hal_soc: HAL soc handle
 * @offset: register offset to read
 * @exp_val: the expected value of register
 *
 * Return: none
 */
static inline void hal_reg_write_result_check(struct hal_soc *hal_soc,
					      uint32_t offset,
					      uint32_t exp_val)
{
	uint32_t value;

	value = qdf_ioread32(hal_soc->dev_base_addr + offset);
	if (exp_val != value) {
		HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, exp_val, value);
		HAL_STATS_INC(hal_soc, reg_write_fail, 1);
	}
}

#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
    !defined(QCA_WIFI_QCA6750)
static inline void hal_lock_reg_access(struct hal_soc *soc,
				       unsigned long *flags)
{
	qdf_spin_lock_irqsave(&soc->register_access_lock);
}

static inline void hal_unlock_reg_access(struct hal_soc *soc,
					 unsigned long *flags)
{
	qdf_spin_unlock_irqrestore(&soc->register_access_lock);
}
#else
static inline void hal_lock_reg_access(struct hal_soc *soc,
				       unsigned long *flags)
{
	pld_lock_reg_window(soc->qdf_dev->dev, flags);
}

static inline void hal_unlock_reg_access(struct hal_soc *soc,
					 unsigned long *flags)
{
	pld_unlock_reg_window(soc->qdf_dev->dev, flags);
}
#endif

#ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE
/**
 * hal_select_window_confirm() - write remap window register and
 *				 check writing result
 * @hal_soc: HAL soc handle
 * @offset: register offset to access
 *
 * Return: none
 */
static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
					     uint32_t offset)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
		      WINDOW_ENABLE_BIT | window);
	hal_soc->register_window = window;

	hal_reg_write_result_check(hal_soc, WINDOW_REG_ADDRESS,
				   WINDOW_ENABLE_BIT | window);
}
#else
static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
					     uint32_t offset)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	if (window != hal_soc->register_window) {
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
			      WINDOW_ENABLE_BIT | window);
		hal_soc->register_window = window;

		hal_reg_write_result_check(
					hal_soc,
					WINDOW_REG_ADDRESS,
					WINDOW_ENABLE_BIT | window);
	}
}
#endif

static inline qdf_iomem_t hal_get_window_address(struct hal_soc *hal_soc,
						 qdf_iomem_t addr)
{
	return hal_soc->ops->hal_get_window_address(hal_soc, addr);
}

static inline void hal_tx_init_cmd_credit_ring(hal_soc_handle_t hal_soc_hdl,
					       hal_ring_handle_t hal_ring_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	return hal_soc->ops->hal_tx_init_cmd_credit_ring(hal_soc_hdl,
							 hal_ring_hdl);
}

/**
 * hal_write32_mb() - Access registers to update configuration
 * @hal_soc: hal soc handle
 * @offset: offset address from the BAR
 * @value: value to write
 *
 * Return: None
 *
 * Description: Register address space is split below:
 *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
 *  |--------------------|-------------------|------------------|
 * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
 *
 * 1. Any access to the shadow region doesn't need force wake
 *    or windowing logic.
 * 2. Any access beyond BAR + 4K:
 *    If init_phase is enabled, no force wake is needed and access
 *    should be based on windowed or unwindowed access.
 *    If init_phase is disabled, force wake is needed and access
 *    should be based on windowed or unwindowed access.
 *
 * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) - 1
 * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
 * note3: WINDOW_VALUE_MASK = big enough that trying to write past
 *                            that window would be a bug
 */
#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
    !defined(QCA_WIFI_QCA6750)
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value)
{
	unsigned long flags;
	qdf_iomem_t new_addr;

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
	} else if (hal_soc->static_window_map) {
		new_addr = hal_get_window_address(hal_soc,
				hal_soc->dev_base_addr + offset);
		qdf_iowrite32(new_addr, value);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window_confirm(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			  (offset & WINDOW_RANGE_MASK), value);
		hal_unlock_reg_access(hal_soc, &flags);
	}
}

#define hal_write32_mb_confirm(_hal_soc, _offset, _value) \
		hal_write32_mb(_hal_soc, _offset, _value)
#else
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value)
{
	int ret;
	unsigned long flags;
	qdf_iomem_t new_addr;

	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
					hal_soc->hif_handle))) {
		hal_err_rl("%s: target access is not allowed", __func__);
		return;
	}

	/* Region < BAR + 4K can be directly accessed */
	if (offset < MAPPED_REF_OFF) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
		return;
	}

	/* Region greater than BAR + 4K */
	if (!hal_soc->init_phase) {
		ret = hif_force_wake_request(hal_soc->hif_handle);
		if (ret) {
			hal_err("Wake up request failed");
			qdf_check_state_before_panic();
			return;
		}
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
	} else if (hal_soc->static_window_map) {
		new_addr = hal_get_window_address(
					hal_soc,
					hal_soc->dev_base_addr + offset);
		qdf_iowrite32(new_addr, value);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window_confirm(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			  (offset & WINDOW_RANGE_MASK), value);
		hal_unlock_reg_access(hal_soc, &flags);
	}

	if (!hal_soc->init_phase) {
		ret = hif_force_wake_release(hal_soc->hif_handle);
		if (ret) {
			hal_err("Wake up release failed");
			qdf_check_state_before_panic();
			return;
		}
	}
}

/**
 * hal_write32_mb_confirm() - write register and check writing result
 * @hal_soc: hal soc handle
 * @offset: offset address from the BAR
 * @value: value to write
 *
 * Return: None
 */
static inline void hal_write32_mb_confirm(struct hal_soc *hal_soc,
					  uint32_t offset,
					  uint32_t value)
{
	int ret;
	unsigned long flags;
	qdf_iomem_t new_addr;

	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
					hal_soc->hif_handle))) {
		hal_err_rl("%s: target access is not allowed", __func__);
		return;
	}

	/* Region < BAR + 4K can be directly accessed */
	if (offset < MAPPED_REF_OFF) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
		return;
	}

	/* Region greater than BAR + 4K */
	if (!hal_soc->init_phase) {
		ret = hif_force_wake_request(hal_soc->hif_handle);
		if (ret) {
			hal_err("Wake up request failed");
			qdf_check_state_before_panic();
			return;
		}
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
		hal_reg_write_result_check(hal_soc, offset,
					   value);
	} else if (hal_soc->static_window_map) {
		new_addr = hal_get_window_address(
					hal_soc,
					hal_soc->dev_base_addr + offset);
		qdf_iowrite32(new_addr, value);
		hal_reg_write_result_check(hal_soc,
					   new_addr - hal_soc->dev_base_addr,
					   value);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window_confirm(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			  (offset & WINDOW_RANGE_MASK), value);

		hal_reg_write_result_check(
				hal_soc,
				WINDOW_START + (offset & WINDOW_RANGE_MASK),
				value);
		hal_unlock_reg_access(hal_soc, &flags);
	}

	if (!hal_soc->init_phase) {
		ret = hif_force_wake_release(hal_soc->hif_handle);
		if (ret) {
			hal_err("Wake up release failed");
			qdf_check_state_before_panic();
			return;
		}
	}
}
#endif
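
/*
 * Example register write (hypothetical caller; reg_offset and val are
 * illustrative and would normally come from the target's HW headers):
 *
 *	hal_write32_mb(hal_soc, reg_offset, val);
 *	// when the write must be verified by reading the value back:
 *	hal_write32_mb_confirm(hal_soc, reg_offset, val);
 *
 * On QCA6390/QCA6490/QCA6750 both calls handle force wake and register
 * windowing internally; on other targets hal_write32_mb_confirm() is an
 * alias of hal_write32_mb().
 */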

/**
 * hal_write_address_32_mb() - write a value to a register
 * @hal_soc: hal soc handle
 * @addr: mapped register address to write to
 * @value: value to write
 * @wr_confirm: set when the write should be confirmed by reading back
 *
 * Return: None
 */
static inline
void hal_write_address_32_mb(struct hal_soc *hal_soc,
			     qdf_iomem_t addr, uint32_t value, bool wr_confirm)
{
	uint32_t offset;

	if (!hal_soc->use_register_windowing)
		return qdf_iowrite32(addr, value);

	offset = addr - hal_soc->dev_base_addr;

	if (qdf_unlikely(wr_confirm))
		hal_write32_mb_confirm(hal_soc, offset, value);
	else
		hal_write32_mb(hal_soc, offset, value);
}


#ifdef DP_HAL_MULTIWINDOW_DIRECT_ACCESS
static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
						struct hal_srng *srng,
						void __iomem *addr,
						uint32_t value)
{
	qdf_iowrite32(addr, value);
}
#elif defined(FEATURE_HAL_DELAYED_REG_WRITE)
static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
						struct hal_srng *srng,
						void __iomem *addr,
						uint32_t value)
{
	hal_delayed_reg_write(hal_soc, srng, addr, value);
}
#else
static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
						struct hal_srng *srng,
						void __iomem *addr,
						uint32_t value)
{
	hal_write_address_32_mb(hal_soc, addr, value, false);
}
#endif

#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
    !defined(QCA_WIFI_QCA6750)
/**
 * hal_read32_mb() - Access registers to read configuration
 * @hal_soc: hal soc handle
 * @offset: offset address from the BAR
 *
 * Description: Register address space is split below:
 *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
 *  |--------------------|-------------------|------------------|
 * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
 *
 * 1. Any access to the shadow region doesn't need force wake
 *    or windowing logic.
 * 2. Any access beyond BAR + 4K:
 *    If init_phase is enabled, no force wake is needed and access
 *    should be based on windowed or unwindowed access.
 *    If init_phase is disabled, force wake is needed and access
 *    should be based on windowed or unwindowed access.
 *
 * Return: register value
 */
static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t ret;
	unsigned long flags;
	qdf_iomem_t new_addr;

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		return qdf_ioread32(hal_soc->dev_base_addr + offset);
	} else if (hal_soc->static_window_map) {
		new_addr = hal_get_window_address(hal_soc,
				hal_soc->dev_base_addr + offset);
		return qdf_ioread32(new_addr);
	}

	hal_lock_reg_access(hal_soc, &flags);
	hal_select_window_confirm(hal_soc, offset);
	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
		       (offset & WINDOW_RANGE_MASK));
	hal_unlock_reg_access(hal_soc, &flags);

	return ret;
}
#else
static inline
uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t ret;
	unsigned long flags;
	qdf_iomem_t new_addr;

	if (!TARGET_ACCESS_ALLOWED(HIF_GET_SOFTC(
					hal_soc->hif_handle))) {
		hal_err_rl("%s: target access is not allowed", __func__);
		return 0;
	}

	/* Region < BAR + 4K can be directly accessed */
	if (offset < MAPPED_REF_OFF)
		return qdf_ioread32(hal_soc->dev_base_addr + offset);

	if ((!hal_soc->init_phase) &&
	    hif_force_wake_request(hal_soc->hif_handle)) {
		hal_err("Wake up request failed");
		qdf_check_state_before_panic();
		return 0;
	}

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
	} else if (hal_soc->static_window_map) {
		new_addr = hal_get_window_address(
					hal_soc,
					hal_soc->dev_base_addr + offset);
		ret = qdf_ioread32(new_addr);
	} else {
		hal_lock_reg_access(hal_soc, &flags);
		hal_select_window_confirm(hal_soc, offset);
		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
			       (offset & WINDOW_RANGE_MASK));
		hal_unlock_reg_access(hal_soc, &flags);
	}

	if ((!hal_soc->init_phase) &&
	    hif_force_wake_release(hal_soc->hif_handle)) {
		hal_err("Wake up release failed");
		qdf_check_state_before_panic();
		return 0;
	}

	return ret;
}
#endif
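
/*
 * A read-modify-write sketch built on the accessors above (hypothetical
 * caller; reg_offset, FIELD_MASK and FIELD_VAL are illustrative):
 *
 *	uint32_t val;
 *
 *	val = hal_read32_mb(hal_soc, reg_offset);
 *	val &= ~FIELD_MASK;
 *	val |= FIELD_VAL;
 *	hal_write32_mb(hal_soc, reg_offset, val);
 *
 * Note that on force wake targets each accessor requests and releases
 * force wake on its own, so a sequence like this may wake the target
 * more than once.
 */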

#ifdef FEATURE_HAL_DELAYED_REG_WRITE
/**
 * hal_dump_reg_write_srng_stats() - dump SRNG reg write stats
 * @hal_soc_hdl: HAL soc handle
 *
 * Return: none
 */
void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl);

/**
 * hal_dump_reg_write_stats() - dump reg write stats
 * @hal_soc_hdl: HAL soc handle
 *
 * Return: none
 */
void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl);

/**
 * hal_get_reg_write_pending_work() - get the number of entries
 *		pending in the workqueue to be processed.
 * @hal_soc: HAL soc handle
 *
 * Returns: the number of entries pending to be processed
 */
int hal_get_reg_write_pending_work(void *hal_soc);

#else
static inline void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
{
}

static inline void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
{
}

static inline int hal_get_reg_write_pending_work(void *hal_soc)
{
	return 0;
}
#endif

/**
 * hal_read_address_32_mb() - Read 32-bit value from the register
 * @soc: soc handle
 * @addr: register address to read
 *
 * Return: 32-bit value
 */
static inline
uint32_t hal_read_address_32_mb(struct hal_soc *soc,
				qdf_iomem_t addr)
{
	uint32_t offset;
	uint32_t ret;

	if (!soc->use_register_windowing)
		return qdf_ioread32(addr);

	offset = addr - soc->dev_base_addr;
	ret = hal_read32_mb(soc, offset);
	return ret;
}

/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle; NULL on failure
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF detach
 *
 */
extern void hal_detach(void *hal_soc);

/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	REO_DST = 0,
	REO_EXCEPTION = 1,
	REO_REINJECT = 2,
	REO_CMD = 3,
	REO_STATUS = 4,
	TCL_DATA = 5,
	TCL_CMD_CREDIT = 6,
	TCL_STATUS = 7,
	CE_SRC = 8,
	CE_DST = 9,
	CE_DST_STATUS = 10,
	WBM_IDLE_LINK = 11,
	SW2WBM_RELEASE = 12,
	WBM2SW_RELEASE = 13,
	RXDMA_BUF = 14,
	RXDMA_DST = 15,
	RXDMA_MONITOR_BUF = 16,
	RXDMA_MONITOR_STATUS = 17,
	RXDMA_MONITOR_DST = 18,
	RXDMA_MONITOR_DESC = 19,
	DIR_BUF_RX_DMA_SRC = 20,
#ifdef WLAN_FEATURE_CIF_CFR
	WIFI_POS_SRC,
#endif
	MAX_RING_TYPES
};

#define HAL_SRNG_LMAC_RING 0x80000000
/* SRNG flags passed in hal_srng_params.flags */
#define HAL_SRNG_MSI_SWAP				0x00000008
#define HAL_SRNG_RING_PTR_SWAP			0x00000010
#define HAL_SRNG_DATA_TLV_SWAP			0x00000020
#define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
#define HAL_SRNG_MSI_INTR				0x00020000
#define HAL_SRNG_CACHED_DESC		0x00040000

#ifdef QCA_WIFI_QCA6490
#define HAL_SRNG_PREFETCH_TIMER 1
#else
#define HAL_SRNG_PREFETCH_TIMER 0
#endif

#define PN_SIZE_24 0
#define PN_SIZE_48 1
#define PN_SIZE_128 2

#ifdef FORCE_WAKE
/**
 * hal_set_init_phase() - Indicate initialization of
 *                        datapath rings
 * @soc: hal_soc handle
 * @init_phase: flag to indicate datapath rings
 *              initialization status
 *
 * Return: None
 */
void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase);
#else
static inline
void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
{
}
#endif /* FORCE_WAKE */

/**
 * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
 * used by callers for calculating the size of memory to be allocated before
 * calling hal_srng_setup to setup the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: ring entry size in bytes
 */
extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
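
/*
 * Typical sizing computation before calling hal_srng_setup() (a sketch;
 * REO_DST and num_entries are illustrative):
 *
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
 *	uint32_t ring_bytes = num_entries * entry_size;
 *
 * ring_bytes of 8 byte aligned contiguous memory is then allocated and
 * passed to hal_srng_setup() through hal_srng_params (declared below).
 */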

/**
 * hal_srng_max_entries - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);

void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
				 uint32_t low_threshold);

/**
 * hal_srng_dump - Dump ring status
 * @srng: hal srng pointer
 */
void hal_srng_dump(struct hal_srng *srng);

/**
 * hal_srng_get_dir - Returns the direction of the ring
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Ring direction
 */
enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);

/* HAL memory information */
struct hal_mem_info {
	/* dev base virtual addr */
	void *dev_base_addr;
	/* dev base physical addr */
	void *dev_base_paddr;
	/* dev base ce virtual addr - applicable only for qca5018  */
	/* In qca5018 CE registers are outside wcss block */
	/* using a separate address space to access CE registers */
	void *dev_base_addr_ce;
	/* dev base ce physical addr */
	void *dev_base_paddr_ce;
	/* Remote virtual pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_vaddr;
	/* Remote physical pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_paddr;
	/* Shared memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_vaddr;
	/* Shared physical memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_paddr;
};

/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold – in micro seconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold – in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold – in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
	/* Source or Destination ring */
	enum hal_srng_dir ring_dir;
	/* Size of ring entry */
	uint32_t entry_size;
	/* hw register base address */
	void *hwreg_base[MAX_SRNG_REG_GROUPS];
	/* prefetch timer config - in micro seconds */
	uint32_t prefetch_timer;
};

/**
 * hal_construct_shadow_config() - initialize the shadow registers for dp rings
 * @hal_soc: hal handle
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);

/**
 * hal_set_one_shadow_config() - add a config for the specified ring
 * @hal_soc: hal handle
 * @ring_type: ring type
 * @ring_num: ring num
 *
 * The ring type and ring num uniquely specify the ring.  After this call,
 * the hp/tp will be added as the next entry in the shadow register
 * configuration table.  The hal code will use the shadow register address
 * in place of the hp/tp address.
 *
 * This function is exposed, so that the CE module can skip configuring shadow
 * registers for unused rings and rings assigned to the firmware.
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
					    int ring_num);
/**
 * hal_get_shadow_config() - retrieve the config table
 * @hal_soc: hal handle
 * @shadow_config: will point to the table after the call
 * @num_shadow_registers_configured: will contain the number of valid entries
 */
extern void hal_get_shadow_config(void *hal_soc,
				  struct pld_shadow_reg_v2_cfg **shadow_config,
				  int *num_shadow_registers_configured);
/**
 * hal_srng_setup - Initialize HW SRNG ring.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of
 *		same type (starting from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
 * structure. Ring base address should be 8 byte aligned and size of each ring
 * entry should be queried using the API hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params);
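
/*
 * A minimal setup sketch (hypothetical caller; ring memory allocation
 * is elided and the interrupt thresholds are illustrative):
 *
 *	struct hal_srng_params params = {0};
 *	void *hal_ring;
 *
 *	params.ring_base_paddr = ring_paddr;	// 8 byte aligned
 *	params.ring_base_vaddr = ring_vaddr;
 *	params.num_entries = num_entries;
 *	params.intr_timer_thres_us = 8;
 *	params.intr_batch_cntr_thres_entries = 1;
 *
 *	hal_ring = hal_srng_setup(hal_soc, REO_DST, 0, 0, &params);
 *	if (!hal_ring)
 *		return QDF_STATUS_E_FAILURE;
 */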

/* Remapping ids of REO rings */
#define REO_REMAP_TCL 0
#define REO_REMAP_SW1 1
#define REO_REMAP_SW2 2
#define REO_REMAP_SW3 3
#define REO_REMAP_SW4 4
#define REO_REMAP_RELEASE 5
#define REO_REMAP_FW 6
#define REO_REMAP_UNUSED 7

/*
 * Macro to access HWIO_REO_R0_ERROR_DESTINATION_RING_CTRL_IX_0
 * to map destination to rings
 */
#define HAL_REO_ERR_REMAP_IX0(_VALUE, _OFFSET) \
	((_VALUE) << \
	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0_ERROR_ ## \
	  DESTINATION_RING_ ## _OFFSET ## _SHFT))

/*
 * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
 * to map destination to rings
 */
#define HAL_REO_REMAP_IX0(_VALUE, _OFFSET) \
	((_VALUE) << \
	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
	  _OFFSET ## _SHFT))

/*
 * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2
 * to map destination to rings
 */
#define HAL_REO_REMAP_IX2(_VALUE, _OFFSET) \
	((_VALUE) << \
	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_DEST_RING_MAPPING_ ## \
	  _OFFSET ## _SHFT))

/*
 * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3
 * to map destination to rings
 */
#define HAL_REO_REMAP_IX3(_VALUE, _OFFSET) \
	((_VALUE) << \
	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_DEST_RING_MAPPING_ ## \
	  _OFFSET ## _SHFT))
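
/*
 * Example of composing a destination remap value with the macros above
 * (a sketch; the SW1..SW4 spread across offsets 16..19 is illustrative):
 *
 *	uint32_t ix2;
 *
 *	ix2 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
 *	      HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) |
 *	      HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) |
 *	      HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19);
 *
 * The composed value can then be programmed through
 * hal_reo_read_write_ctrl_ix() declared below.
 */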

/**
 * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
 * @hal_soc_hdl: HAL SOC handle
 * @read: boolean value to indicate if read or write
 * @ix0: pointer to store IX0 reg value
 * @ix1: pointer to store IX1 reg value
 * @ix2: pointer to store IX2 reg value
 * @ix3: pointer to store IX3 reg value
 */
void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
				uint32_t *ix0, uint32_t *ix1,
				uint32_t *ix2, uint32_t *ix3);

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest SRNG head pointer
 * @sring: srng pointer
 * @paddr: physical address
 */
extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);

/**
 * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
 * @srng: srng pointer
 * @vaddr: virtual address
 */
extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr);

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Opaque HAL SRNG pointer
 */
void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl);

static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	return !!srng->initialized;
}

/**
 * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Caller takes responsibility for any locking needs.
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl,
			hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
		return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);

	return NULL;
}

/**
 * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
 * hal_srng_access_start if locked access is required
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int
hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl,
			       hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *desc;

	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		srng->u.src_ring.cached_tp =
			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
	else {
		srng->u.dst_ring.cached_hp =
			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

		if (srng->flags & HAL_SRNG_CACHED_DESC) {
			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
			if (qdf_likely(desc)) {
				qdf_mem_dma_cache_sync(soc->qdf_dev,
						       qdf_mem_virt_to_phys
						       (desc),
						       QDF_DMA_FROM_DEVICE,
						       (srng->entry_size *
							sizeof(uint32_t)));
				qdf_prefetch(desc);
			}
		}
	}

	return 0;
}

/**
 * hal_srng_access_start - Start (locked) ring access
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl,
					hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	if (qdf_unlikely(!hal_ring_hdl)) {
		qdf_print("Error: Invalid hal_ring\n");
		return -EINVAL;
	}

	SRNG_LOCK(&(srng->lock));

	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
}

/**
 * hal_srng_dst_get_next - Get next entry from a destination ring
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_get_next(void *hal_soc,
			    hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;

	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
		return NULL;

	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
	if (srng->u.dst_ring.tp == srng->ring_size)
		srng->u.dst_ring.tp = 0;

	if (srng->flags & HAL_SRNG_CACHED_DESC) {
		struct hal_soc *soc = (struct hal_soc *)hal_soc;
		uint32_t *desc_next;
		uint32_t tp;

		tp = srng->u.dst_ring.tp;
		desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
		qdf_mem_dma_cache_sync(soc->qdf_dev,
				       qdf_mem_virt_to_phys(desc_next),
				       QDF_DMA_FROM_DEVICE,
				       (srng->entry_size *
					sizeof(uint32_t)));
		qdf_prefetch(desc_next);
	}

	return (void *)desc;
}
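
/*
 * Canonical destination-ring processing loop built on the access API
 * above (hypothetical caller; dp_process_entry() is an illustrative
 * helper and both handles are assumed valid):
 *
 *	void *desc;
 *
 *	if (hal_srng_access_start(hal_soc_hdl, hal_ring_hdl))
 *		return;
 *
 *	while ((desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl)))
 *		dp_process_entry(desc);
 *
 *	hal_srng_access_end(hal_soc, hal_ring_hdl);
 */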

/**
 * hal_srng_dst_get_next_cached - Get cached next entry
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Get next entry from a destination ring and move cached tail pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_get_next_cached(void *hal_soc,
				   hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;
	uint32_t *desc_next;

	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
		return NULL;

	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
	if (srng->u.dst_ring.tp == srng->ring_size)
		srng->u.dst_ring.tp = 0;

	desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
	qdf_prefetch(desc_next);
	return (void *)desc;
}

/**
 * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
 * cached head pointer
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *
hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.dst_ring.tp) {
		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
		srng->u.dst_ring.cached_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Sync cached head pointer with HW.
 * Caller takes responsibility for any locking needs.
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl,
			     hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	srng->u.dst_ring.cached_hp =
		*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));

	return NULL;
}

/**
 * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 * Sync cached head pointer with HW.
 * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl,
				    hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	void *ring_desc_ptr = NULL;

	if (qdf_unlikely(!hal_ring_hdl)) {
		qdf_print("Error: Invalid hal_ring\n");
		return NULL;
	}

	SRNG_LOCK(&srng->lock);

	ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl);

	SRNG_UNLOCK(&srng->lock);

	return ring_desc_ptr;
}

/**
 * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
 * by SW) in destination ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 * @sync_hw_ptr: Sync cached head pointer with HW
 *
 * Return: number of valid entries
 */
static inline
uint32_t hal_srng_dst_num_valid(void *hal_soc,
				hal_ring_handle_t hal_ring_hdl,
				int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t hp;
	uint32_t tp = srng->u.dst_ring.tp;

	if (sync_hw_ptr) {
		hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
		srng->u.dst_ring.cached_hp = hp;
	} else {
		hp = srng->u.dst_ring.cached_hp;
	}

	if (hp >= tp)
		return (hp - tp) / srng->entry_size;

	return (srng->ring_size - tp + hp) / srng->entry_size;
}
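
/*
 * Worked example of the arithmetic above: with ring_size = 16 (in
 * 4-byte words), entry_size = 2, hp = 4 and tp = 12, the ring has
 * wrapped (hp < tp), so the valid count is (16 - 12 + 4) / 2 = 4
 * entries.
 */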

/**
 * hal_srng_dst_inv_cached_descs - API to invalidate descriptors in batch mode
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 * @entry_count: Number of descriptors to be invalidated
 *
 * Invalidates a set of cached descriptors starting from tail to
 * provided count worth
 *
 * Return: None
 */
static inline void hal_srng_dst_inv_cached_descs(void *hal_soc,
						 hal_ring_handle_t hal_ring_hdl,
						 uint32_t entry_count)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t hp = srng->u.dst_ring.cached_hp;
	uint32_t tp = srng->u.dst_ring.tp;
	uint32_t sync_p = 0;

	/*
	 * If SRNG does not have cached descriptors this
	 * API call should be a no op
	 */
	if (!(srng->flags & HAL_SRNG_CACHED_DESC))
		return;

	if (qdf_unlikely(entry_count == 0))
		return;

	sync_p = (entry_count - 1) * srng->entry_size;

	if (hp > tp) {
		qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[tp],
				       &srng->ring_base_vaddr[tp + sync_p]
				       + (srng->entry_size * sizeof(uint32_t)));
	} else {
		/*
		 * We have wrapped around
		 */
		uint32_t wrap_cnt = ((srng->ring_size - tp) / srng->entry_size);

		if (entry_count <= wrap_cnt) {
			qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[tp],
					       &srng->ring_base_vaddr[tp + sync_p] +
					       (srng->entry_size * sizeof(uint32_t)));
			return;
		}

		entry_count -= wrap_cnt;
		sync_p = (entry_count - 1) * srng->entry_size;

		qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[tp],
				       &srng->ring_base_vaddr[srng->ring_size - srng->entry_size] +
				       (srng->entry_size * sizeof(uint32_t)));

		qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[0],
				       &srng->ring_base_vaddr[sync_p]
				       + (srng->entry_size * sizeof(uint32_t)));
	}
}
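
/*
 * Worked wraparound example for the invalidation above: with
 * ring_size = 16, entry_size = 2, tp = 12, hp = 4 and entry_count = 4,
 * wrap_cnt = (16 - 12) / 2 = 2 entries fit before the wrap, so the
 * first qdf_nbuf_dma_inv_range() call covers the two entries at the
 * end of the ring and the second covers the remaining two entries
 * starting at index 0 (sync_p = (2 - 1) * 2 = 2).
 */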

/**
 * hal_srng_dst_num_valid_locked - Returns num valid entries to be processed
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 * @sync_hw_ptr: Sync cached head pointer with HW
 *
 * Returns number of valid entries to be processed by the host driver. The
 * function takes up SRNG lock.
 *
 * Return: Number of valid destination entries
 */
static inline uint32_t
hal_srng_dst_num_valid_locked(hal_soc_handle_t hal_soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int sync_hw_ptr)
{
	uint32_t num_valid;
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	SRNG_LOCK(&srng->lock);
	num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr);
	SRNG_UNLOCK(&srng->lock);

	return num_valid;
}

/**
 * hal_srng_sync_cachedhp - sync cached head pointer from the HW head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Destination ring pointer
 *
 */
static inline
void hal_srng_sync_cachedhp(void *hal_soc,
				hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t hp;

	hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
	srng->u.dst_ring.cached_hp = hp;
}

/**
 * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
 * pointer. This can be used to release any buffers associated with completed
 * ring entries. Note that this should not be used for posting new descriptor
 * entries. Posting of new entries should be done only using
 * hal_srng_src_get_next_reaped when this function is used for reaping.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *
hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
 * already reaped using hal_srng_src_reap_next, for posting new entries to
 * the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
 */
static inline void *
hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;

	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
			srng->ring_size;

		return (void *)desc;
	}

	return NULL;
}
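
/*
 * Sketch of the reap/post split described above (hypothetical caller;
 * release_buffer() and fill_entry() are illustrative helpers):
 *
 *	void *desc;
 *
 *	// completion path: release buffers without moving hp
 *	while ((desc = hal_srng_src_reap_next(hal_soc, hal_ring_hdl)))
 *		release_buffer(desc);
 *
 *	// replenish path: post new entries over the reaped ones
 *	while ((desc = hal_srng_src_get_next_reaped(hal_soc,
 *						    hal_ring_hdl)))
 *		fill_entry(desc);
 */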

/**
 * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
 * move reap pointer. This API is used in detach path to release any buffers
 * associated with ring entries which are pending reap.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *
hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;

	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp != srng->u.src_ring.hp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_done_val - Returns the number of entries consumed by the
 * target that are yet to be reaped
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Number of consumed entries pending reap
 */
static inline uint32_t
hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp == srng->u.src_ring.cached_tp)
		return 0;

	if (srng->u.src_ring.cached_tp > next_reap_hp)
		return (srng->u.src_ring.cached_tp - next_reap_hp) /
			srng->entry_size;
	else
		return ((srng->ring_size - next_reap_hp) +
			srng->u.src_ring.cached_tp) / srng->entry_size;
}

/**
 * hal_get_entrysize_from_srng() - Retrieve ring entry size
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: ring entry size in 4-byte words
 */
static inline
uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	return srng->entry_size;
}

/**
 * hal_get_sw_hptp - Get SW head and tail pointer location for any ring
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 * @tailp: Tail Pointer
 * @headp: Head Pointer
 *
 * Return: none; the tail and head pointer values are returned through
 * @tailp and @headp
 */
static inline
void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
		     uint32_t *tailp, uint32_t *headp)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		*headp = srng->u.src_ring.hp;
		*tailp = *srng->u.src_ring.tp_addr;
	} else {
		*tailp = srng->u.dst_ring.tp;
		*headp = *srng->u.dst_ring.hp_addr;
	}
}

/**
 * hal_srng_src_get_next - Get next entry from a source ring and move cached
 * tail pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_src_get_next(void *hal_soc,
			    hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = next_hp;
		/* TODO: Since reap function is not used by all rings, we can
		 * remove the following update of reap_hp in this function
		 * if we can ensure that only hal_srng_src_get_next_reaped
		 * is used for the rings requiring reap functionality
		 */
		srng->u.src_ring.reap_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_peek_n_get_next - Get next entry from a ring without
 * moving head pointer.
 * hal_srng_src_get_next should be called subsequently to move the head pointer
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline
void *hal_srng_src_peek_n_get_next(hal_soc_handle_t hal_soc_hdl,
				   hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	if (((srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size) != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
						srng->entry_size) %
						srng->ring_size]);
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_get_cur_hp_n_move_next() - API returns current hp
 * and moves hp to the next entry in the src ring
 *
 * Usage: This API should only be used at init time replenish.
 *
 * @hal_soc_hdl: HAL soc handle
 * @hal_ring_hdl: Source ring pointer
 *
 * Return: Opaque pointer to the current ring entry
 */
static inline void *
hal_srng_src_get_cur_hp_n_move_next(hal_soc_handle_t hal_soc_hdl,
				    hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t *cur_desc = NULL;
	uint32_t next_hp;

	cur_desc = &srng->ring_base_vaddr[(srng->u.src_ring.hp)];

	next_hp = (srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.src_ring.cached_tp)
		srng->u.src_ring.hp = next_hp;

	return (void *)cur_desc;
}

/**
 * hal_srng_src_num_avail - Returns number of available entries in src ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Source ring pointer
 * @sync_hw_ptr: Sync cached tail pointer with HW
 *
 * Return: number of available entries
 */
static inline uint32_t
hal_srng_src_num_avail(void *hal_soc,
		       hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	uint32_t tp;
	uint32_t hp = srng->u.src_ring.hp;

	if (sync_hw_ptr) {
		tp = *(srng->u.src_ring.tp_addr);
		srng->u.src_ring.cached_tp = tp;
	} else {
		tp = srng->u.src_ring.cached_tp;
	}

	if (tp > hp)
		return ((tp - hp) / srng->entry_size) - 1;
	else
		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}
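
/*
 * Typical source-ring posting sequence built on the APIs above
 * (hypothetical caller; fill_desc() is an illustrative helper):
 *
 *	void *desc;
 *	uint32_t avail;
 *
 *	if (hal_srng_access_start(hal_soc_hdl, hal_ring_hdl))
 *		return;
 *
 *	avail = hal_srng_src_num_avail(hal_soc, hal_ring_hdl, 0);
 *	while (avail-- &&
 *	       (desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl)))
 *		fill_desc(desc);
 *
 *	hal_srng_access_end(hal_soc, hal_ring_hdl);
 */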
1609 
1610 /**
1611  * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
1612  * ring head/tail pointers to HW.
1613  * This should be used only if hal_srng_access_start_unlocked to start ring
1614  * access
1615  *
1616  * @hal_soc: Opaque HAL SOC handle
1617  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1618  *
1619  * Return: 0 on success; error on failire
1620  */
1621 static inline void
1622 hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1623 {
1624 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1625 
1626 	/* TODO: See if we need a write memory barrier here */
1627 	if (srng->flags & HAL_SRNG_LMAC_RING) {
1628 		/* For LMAC rings, ring pointer updates are done through FW and
1629 		 * hence written to a shared memory location that is read by FW
1630 		 */
1631 		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1632 			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
1633 		} else {
1634 			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
1635 		}
1636 	} else {
1637 		if (srng->ring_dir == HAL_SRNG_SRC_RING)
1638 			hal_srng_write_address_32_mb(hal_soc,
1639 						     srng,
1640 						     srng->u.src_ring.hp_addr,
1641 						     srng->u.src_ring.hp);
1642 		else
1643 			hal_srng_write_address_32_mb(hal_soc,
1644 						     srng,
1645 						     srng->u.dst_ring.tp_addr,
1646 						     srng->u.dst_ring.tp);
1647 	}
1648 }
1649 
1650 /**
1651  * hal_srng_access_end - Unlock ring access and update cached ring head/tail
1652  * pointers to HW
1653  * This should be used only if hal_srng_access_start to start ring access
1654  *
1655  * @hal_soc: Opaque HAL SOC handle
1656  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1657  *
1658  * Return: 0 on success; error on failire
1659  */
1660 static inline void
1661 hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1662 {
1663 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1664 
1665 	if (qdf_unlikely(!hal_ring_hdl)) {
1666 		qdf_print("Error: Invalid hal_ring\n");
1667 		return;
1668 	}
1669 
1670 	hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl);
1671 	SRNG_UNLOCK(&(srng->lock));
1672 }
1673 
1674 /**
1675  * hal_srng_access_end_reap - Unlock ring access
1676  * This should be used only if hal_srng_access_start to start ring access
1677  * and should be used only while reaping SRC ring completions
1678  *
1679  * @hal_soc: Opaque HAL SOC handle
1680  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1681  *
1682  * Return: 0 on success; error on failire
1683  */
1684 static inline void
1685 hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1686 {
1687 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1688 
1689 	SRNG_UNLOCK(&(srng->lock));
1690 }
1691 
1692 /* TODO: Check if the following definitions is available in HW headers */
1693 #define WBM_IDLE_SCATTER_BUF_SIZE 32704
1694 #define NUM_MPDUS_PER_LINK_DESC 6
1695 #define NUM_MSDUS_PER_LINK_DESC 7
1696 #define REO_QUEUE_DESC_ALIGN 128
1697 
1698 #define LINK_DESC_ALIGN 128
1699 
1700 #define ADDRESS_MATCH_TAG_VAL 0x5
1701 /* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
1702  * of TX_MPDU_QUEUE_EXT. We are defining a common average count here
1703  */
1704 #define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
1705 
1706 /* TODO: Check with HW team on the scatter buffer size supported. As per WBM
1707  * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
1708  * should be specified in 16 word units. But the number of bits defined for
1709  * this field in HW header files is 5.
1710  */
1711 #define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
1712 
1713 
1714 /**
1715  * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
1716  * in an idle list
1717  *
1718  * @hal_soc: Opaque HAL SOC handle
1719  *
1720  */
1721 static inline
1722 uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl)
1723 {
1724 	return WBM_IDLE_SCATTER_BUF_SIZE;
1725 }
1726 
1727 /**
1728  * hal_get_link_desc_size - Get the size of each link descriptor
1729  *
1730  * @hal_soc: Opaque HAL SOC handle
1731  *
1732  */
1733 static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl)
1734 {
1735 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1736 
1737 	if (!hal_soc || !hal_soc->ops) {
1738 		qdf_print("Error: Invalid ops\n");
1739 		QDF_BUG(0);
1740 		return -EINVAL;
1741 	}
1742 	if (!hal_soc->ops->hal_get_link_desc_size) {
1743 		qdf_print("Error: Invalid function pointer\n");
1744 		QDF_BUG(0);
1745 		return -EINVAL;
1746 	}
1747 	return hal_soc->ops->hal_get_link_desc_size();
1748 }
1749 
1750 /**
1751  * hal_get_link_desc_align - Get the required start address alignment for
1752  * link descriptors
1753  *
1754  * @hal_soc: Opaque HAL SOC handle
1755  *
1756  */
1757 static inline
1758 uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl)
1759 {
1760 	return LINK_DESC_ALIGN;
1761 }
1762 
1763 /**
1764  * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
1765  *
1766  * @hal_soc: Opaque HAL SOC handle
1767  *
1768  */
1769 static inline
1770 uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
1771 {
1772 	return NUM_MPDUS_PER_LINK_DESC;
1773 }
1774 
1775 /**
1776  * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
1777  *
1778  * @hal_soc: Opaque HAL SOC handle
1779  *
1780  */
1781 static inline
1782 uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
1783 {
1784 	return NUM_MSDUS_PER_LINK_DESC;
1785 }
1786 
1787 /**
1788  * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
1789  * descriptor can hold
1790  *
1791  * @hal_soc: Opaque HAL SOC handle
1792  *
1793  */
1794 static inline
1795 uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl)
1796 {
1797 	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
1798 }
1799 
1800 /**
1801  * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
1802  * that can fit in the given buffer size
1803  *
1804  * @hal_soc: Opaque HAL SOC handle
1805  * @scatter_buf_size: Size of scatter buffer
1806  *
1807  */
1808 static inline
1809 uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl,
1810 					  uint32_t scatter_buf_size)
1811 {
1812 	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
1813 		hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK);
1814 }
1815 
1816 /**
1817  * hal_idle_list_num_scatter_bufs - Get the number of scatter buffers
1818  * needed for the given total memory and scatter buffer size
1819  *
1820  * @hal_soc: Opaque HAL SOC handle
1821  * @total_mem: size of memory to be scattered
1822  * @scatter_buf_size: Size of scatter buffer
1823  *
1824  */
1825 static inline
1826 uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl,
1827 					uint32_t total_mem,
1828 					uint32_t scatter_buf_size)
1829 {
1830 	uint8_t rem = (total_mem % (scatter_buf_size -
1831 			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
1832 
1833 	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
1834 				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
1835 
1836 	return num_scatter_bufs;
1837 }
1838 
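/*
 * Worked example for the two helpers above (illustrative only; assumes
 * the WBM_IDLE_LINK ring entry size is 8 bytes): with
 * scatter_buf_size = WBM_IDLE_SCATTER_BUF_SIZE = 32704, the usable space
 * per buffer is 32704 - 8 = 32696 bytes, i.e. 32696 / 8 = 4087 link desc
 * entries per buffer. Scattering total_mem = 1000000 bytes then takes
 * 1000000 / 32696 = 30 buffers plus one more for the remainder, so
 * num_scatter_bufs = 31, matching the "+ rem" term in the code above.
 */
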
1839 enum hal_pn_type {
1840 	HAL_PN_NONE,
1841 	HAL_PN_WPA,
1842 	HAL_PN_WAPI_EVEN,
1843 	HAL_PN_WAPI_UNEVEN,
1844 };
1845 
1846 #define HAL_RX_MAX_BA_WINDOW 256
1847 
1848 /**
1849  * hal_get_reo_qdesc_align - Get start address alignment for reo
1850  * queue descriptors
1851  *
1852  * @hal_soc: Opaque HAL SOC handle
1853  *
1854  */
1855 static inline
1856 uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl)
1857 {
1858 	return REO_QUEUE_DESC_ALIGN;
1859 }
1860 
1861 /**
1862  * hal_reo_qdesc_setup - Setup HW REO queue descriptor
1863  *
1864  * @hal_soc: Opaque HAL SOC handle
 * @tid: TID of the flow for which the queue descriptor is being set up
1865  * @ba_window_size: BlockAck window size
1866  * @start_seq: Starting sequence number
1867  * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
1868  * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
1869  * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
1870  *
1871  */
1872 void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl,
1873 			 int tid, uint32_t ba_window_size,
1874 			 uint32_t start_seq, void *hw_qdesc_vaddr,
1875 			 qdf_dma_addr_t hw_qdesc_paddr,
1876 			 int pn_type);
1877 
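/*
 * A hedged usage sketch for hal_reo_qdesc_setup(); the unaligned
 * vaddr/paddr bookkeeping below is illustrative caller-side glue, not
 * part of this API. Descriptor memory must honor the alignment returned
 * by hal_get_reo_qdesc_align():
 *
 *	uint32_t align = hal_get_reo_qdesc_align(hal_soc_hdl);
 *	unsigned long addr = (unsigned long)vaddr_unaligned;
 *	void *qdesc_vaddr = (void *)qdf_align(addr, align);
 *	qdf_dma_addr_t qdesc_paddr = paddr_unaligned +
 *		((unsigned long)qdesc_vaddr - addr);
 *
 *	hal_reo_qdesc_setup(hal_soc_hdl, tid, ba_window_size, start_seq,
 *			    qdesc_vaddr, qdesc_paddr, HAL_PN_NONE);
 */
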
1878 /**
1879  * hal_srng_get_hp_addr - Get head pointer physical address
1880  *
1881  * @hal_soc: Opaque HAL SOC handle
1882  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1883  *
1884  */
1885 static inline qdf_dma_addr_t
1886 hal_srng_get_hp_addr(void *hal_soc,
1887 		     hal_ring_handle_t hal_ring_hdl)
1888 {
1889 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1890 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1891 
1892 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1893 		return hal->shadow_wrptr_mem_paddr +
1894 		  ((unsigned long)(srng->u.src_ring.hp_addr) -
1895 		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
1896 	} else {
1897 		return hal->shadow_rdptr_mem_paddr +
1898 		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
1899 		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
1900 	}
1901 }
1902 
1903 /**
1904  * hal_srng_get_tp_addr - Get tail pointer physical address
1905  *
1906  * @hal_soc: Opaque HAL SOC handle
1907  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1908  *
1909  */
1910 static inline qdf_dma_addr_t
1911 hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1912 {
1913 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1914 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1915 
1916 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1917 		return hal->shadow_rdptr_mem_paddr +
1918 			((unsigned long)(srng->u.src_ring.tp_addr) -
1919 			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
1920 	} else {
1921 		return hal->shadow_wrptr_mem_paddr +
1922 			((unsigned long)(srng->u.dst_ring.tp_addr) -
1923 			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
1924 	}
1925 }
1926 
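/*
 * Note on the two address getters above: the physical address is derived
 * by taking the cached pointer's offset within the shadow pointer area
 * (pointer vaddr minus area base vaddr) and adding it to that area's
 * physical base. Source rings keep their HP in the write-pointer area
 * and their TP in the read-pointer area; destination rings use the
 * mirror arrangement.
 */
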
1927 /**
1928  * hal_srng_get_num_entries - Get total entries in the HAL Srng
1929  *
1930  * @hal_soc: Opaque HAL SOC handle
1931  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1932  *
1933  * Return: total number of entries in hal ring
1934  */
1935 static inline
1936 uint32_t hal_srng_get_num_entries(hal_soc_handle_t hal_soc_hdl,
1937 				  hal_ring_handle_t hal_ring_hdl)
1938 {
1939 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1940 
1941 	return srng->num_entries;
1942 }
1943 
1944 /**
1945  * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
1946  *
1947  * @hal_soc: Opaque HAL SOC handle
1948  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1949  * @ring_params: SRNG parameters will be returned through this structure
1950  */
1951 void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
1952 			 hal_ring_handle_t hal_ring_hdl,
1953 			 struct hal_srng_params *ring_params);
1954 
1955 /**
1956  * hal_get_meminfo - Retrieve hal memory base address
1957  *
1958  * @hal_soc: Opaque HAL SOC handle
1959  * @mem: pointer to structure to be updated with hal mem info
1960  */
1961 void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem);
1962 
1963 /**
1964  * hal_get_target_type - Return target type
1965  *
1966  * @hal_soc: Opaque HAL SOC handle
1967  */
1968 uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl);
1969 
1970 /**
1971  * hal_get_ba_aging_timeout - Retrieve BA aging timeout
1972  *
1973  * @hal_soc: Opaque HAL SOC handle
1974  * @ac: Access category
1975  * @value: timeout duration in millisec
1976  */
1977 void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
1978 			      uint32_t *value);
1979 /**
1980  * hal_set_ba_aging_timeout - Set BA aging timeout
1981  *
1982  * @hal_soc: Opaque HAL SOC handle
1983  * @ac: Access category
1984  * @value: timeout duration in millisec
1985  */
1986 void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
1987 			      uint32_t value);
1988 /**
1989  * hal_srng_dst_hw_init - Private function to initialize SRNG
1990  * destination ring HW
1991  * @hal_soc: HAL SOC handle
1992  * @srng: SRNG ring pointer
1993  */
1994 static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
1995 	struct hal_srng *srng)
1996 {
1997 	hal->ops->hal_srng_dst_hw_init(hal, srng);
1998 }
1999 
2000 /**
2001  * hal_srng_src_hw_init - Private function to initialize SRNG
2002  * source ring HW
2003  * @hal_soc: HAL SOC handle
2004  * @srng: SRNG ring pointer
2005  */
2006 static inline void hal_srng_src_hw_init(struct hal_soc *hal,
2007 	struct hal_srng *srng)
2008 {
2009 	hal->ops->hal_srng_src_hw_init(hal, srng);
2010 }
2011 
2012 /**
2013  * hal_get_hw_hptp()  - Get HW head and tail pointer value for any ring
2014  * @hal_soc: Opaque HAL SOC handle
2015  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
2016  * @headp: Head Pointer
2017  * @tailp: Tail Pointer
2018  * @ring_type: Ring type
2019  *
2020  * Return: None (head and tail pointer values are returned via arguments)
2021  */
2022 static inline
2023 void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl,
2024 		     hal_ring_handle_t hal_ring_hdl,
2025 		     uint32_t *headp, uint32_t *tailp,
2026 		     uint8_t ring_type)
2027 {
2028 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2029 
2030 	hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl,
2031 			headp, tailp, ring_type);
2032 }
2033 
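/*
 * A minimal usage sketch; REO_DST is used here as an example
 * hal_ring_type value passed by callers of this API:
 *
 *	uint32_t headp, tailp;
 *
 *	hal_get_hw_hptp(hal_soc_hdl, hal_ring_hdl, &headp, &tailp, REO_DST);
 */
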
2034 /**
2035  * hal_reo_setup - Initialize HW REO block
2036  *
2037  * @hal_soc: Opaque HAL SOC handle
2038  * @reo_params: parameters needed by HAL for REO config
2039  */
2040 static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl,
2041 				 void *reoparams)
2042 {
2043 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2044 
2045 	hal_soc->ops->hal_reo_setup(hal_soc, reoparams);
2046 }
2047 
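/**
 * hal_compute_reo_remap_ix2_ix3 - Compute REO destination remap register
 * values (the IX2/IX3 naming follows the function name; the exact
 * register mapping is defined by the per-target ops implementation)
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ring: Array of REO destination ring IDs to spread traffic across
 * @num_rings: Number of entries in the ring array
 * @remap1: Output for the first computed remap register value
 * @remap2: Output for the second computed remap register value
 */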
2048 static inline
2049 void hal_compute_reo_remap_ix2_ix3(hal_soc_handle_t hal_soc_hdl,
2050 				   uint32_t *ring, uint32_t num_rings,
2051 				   uint32_t *remap1, uint32_t *remap2)
2052 {
2053 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2054 
2055 	hal_soc->ops->hal_compute_reo_remap_ix2_ix3(ring,
2056 					num_rings, remap1, remap2);
2057 }
2058 
2059 /**
2060  * hal_setup_link_idle_list - Setup scattered idle list using the
2061  * buffer list provided
2062  *
2063  * @hal_soc: Opaque HAL SOC handle
2064  * @scatter_bufs_base_paddr: Array of physical base addresses
2065  * @scatter_bufs_base_vaddr: Array of virtual base addresses
2066  * @num_scatter_bufs: Number of scatter buffers in the above lists
2067  * @scatter_buf_size: Size of each scatter buffer
2068  * @last_buf_end_offset: Offset to the last entry
2069  * @num_entries: Total entries of all scatter bufs
2070  *
2071  */
2072 static inline
2073 void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
2074 			      qdf_dma_addr_t scatter_bufs_base_paddr[],
2075 			      void *scatter_bufs_base_vaddr[],
2076 			      uint32_t num_scatter_bufs,
2077 			      uint32_t scatter_buf_size,
2078 			      uint32_t last_buf_end_offset,
2079 			      uint32_t num_entries)
2080 {
2081 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2082 
2083 	hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr,
2084 			scatter_bufs_base_vaddr, num_scatter_bufs,
2085 			scatter_buf_size, last_buf_end_offset,
2086 			num_entries);
2087 
2088 }
2089 
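/*
 * A hedged sketch tying the idle-list helpers above to this call; the
 * buffer allocation loop and the last_buf_end_offset/num_entries
 * bookkeeping are illustrative placeholders handled by the caller:
 *
 *	uint32_t buf_size = hal_idle_list_scatter_buf_size(hal_soc_hdl);
 *	uint32_t num_bufs = hal_idle_list_num_scatter_bufs(hal_soc_hdl,
 *							   total_mem,
 *							   buf_size);
 *
 *	// allocate num_bufs buffers of buf_size bytes, filling the
 *	// base_paddr[] / base_vaddr[] arrays, then:
 *	hal_setup_link_idle_list(hal_soc_hdl, base_paddr, base_vaddr,
 *				 num_bufs, buf_size, last_buf_end_offset,
 *				 num_entries);
 */
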
2090 /**
2091  * hal_srng_dump_ring_desc() - Dump ring descriptor info
2092  *
2093  * @hal_soc: Opaque HAL SOC handle
2094  * @hal_ring_hdl: Source ring pointer
2095  * @ring_desc: Opaque ring descriptor handle
2096  */
2097 static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl,
2098 					   hal_ring_handle_t hal_ring_hdl,
2099 					   hal_ring_desc_t ring_desc)
2100 {
2101 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2102 
2103 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2104 			   ring_desc, (srng->entry_size << 2));
2105 }
2106 
2107 /**
2108  * hal_srng_dump_ring() - Dump last 128 descs of the ring
2109  *
2110  * @hal_soc: Opaque HAL SOC handle
2111  * @hal_ring_hdl: Source ring pointer
2112  */
2113 static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl,
2114 				      hal_ring_handle_t hal_ring_hdl)
2115 {
2116 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2117 	uint32_t *desc;
2118 	uint32_t tp, i;
2119 
2120 	tp = srng->u.dst_ring.tp;
2121 
2122 	for (i = 0; i < 128; i++) {
2123 		if (!tp)
2124 			tp = srng->ring_size;
2125 
2126 		desc = &srng->ring_base_vaddr[tp - srng->entry_size];
2127 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
2128 				   QDF_TRACE_LEVEL_DEBUG,
2129 				   desc, (srng->entry_size << 2));
2130 
2131 		tp -= srng->entry_size;
2132 	}
2133 }
2134 
2135 /**
2136  * hal_rxdma_desc_to_hal_ring_desc - API to convert rxdma ring desc
2137  * to opaque dp_ring desc type
2138  * @ring_desc: rxdma ring desc
2139  *
2140  * Return: hal_ring_desc_t type
2141  */
2142 static inline
2143 hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc)
2144 {
2145 	return (hal_ring_desc_t)ring_desc;
2146 }
2147 
2148 /**
2149  * hal_srng_set_event() - Set hal_srng event
2150  * @hal_ring_hdl: Source ring pointer
2151  * @event: SRNG ring event
2152  *
2153  * Return: None
2154  */
2155 static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event)
2156 {
2157 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2158 
2159 	qdf_atomic_set_bit(event, &srng->srng_event);
2160 }
2161 
2162 /**
2163  * hal_srng_clear_event() - Clear hal_srng event
2164  * @hal_ring_hdl: Source ring pointer
2165  * @event: SRNG ring event
2166  *
2167  * Return: None
2168  */
2169 static inline
2170 void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
2171 {
2172 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2173 
2174 	qdf_atomic_clear_bit(event, &srng->srng_event);
2175 }
2176 
2177 /**
2178  * hal_srng_get_clear_event() - Clear srng event and return old value
2179  * @hal_ring_hdl: Source ring pointer
2180  * @event: SRNG ring event
2181  *
2182  * Return: Return old event value
2183  */
2184 static inline
2185 int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
2186 {
2187 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2188 
2189 	return qdf_atomic_test_and_clear_bit(event, &srng->srng_event);
2190 }
2191 
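/*
 * A minimal sketch of the event-bit pattern served by the three helpers
 * above; HAL_SRNG_FLUSH_EVENT is assumed here as the example event:
 *
 *	hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
 *	...
 *	// later, consume the event exactly once:
 *	if (hal_srng_get_clear_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT))
 *		hal_srng_inc_flush_cnt(hal_ring_hdl);
 */
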
2192 /**
2193  * hal_srng_set_flush_last_ts() - Record last flush time stamp
2194  * @hal_ring_hdl: Source ring pointer
2195  *
2196  * Return: None
2197  */
2198 static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl)
2199 {
2200 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2201 
2202 	srng->last_flush_ts = qdf_get_log_timestamp();
2203 }
2204 
2205 /**
2206  * hal_srng_inc_flush_cnt() - Increment flush counter
2207  * @hal_ring_hdl: Source ring pointer
2208  *
2209  * Return: None
2210  */
2211 static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl)
2212 {
2213 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2214 
2215 	srng->flush_count++;
2216 }
2217 
2218 /**
2219  * hal_rx_sw_mon_desc_info_get() - Get SW monitor desc info
2220  *
2221  * @hal: Core HAL soc handle
2222  * @ring_desc: Mon dest ring descriptor
2223  * @desc_info: Desc info to be populated
2224  *
2225  * Return: None
2226  */
2227 static inline void
2228 hal_rx_sw_mon_desc_info_get(struct hal_soc *hal,
2229 			    hal_ring_desc_t ring_desc,
2230 			    hal_rx_mon_desc_info_t desc_info)
2231 {
2232 	hal->ops->hal_rx_sw_mon_desc_info_get(ring_desc, desc_info);
2233 }
2234 
2235 /**
2236  * hal_reo_set_err_dst_remap() - Set REO error destination ring remap
2237  *				 register value.
2238  *
2239  * @hal_soc_hdl: Opaque HAL soc handle
2240  *
2241  * Return: None
2242  */
2243 static inline void hal_reo_set_err_dst_remap(hal_soc_handle_t hal_soc_hdl)
2244 {
2245 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2246 
2247 	if (hal_soc->ops->hal_reo_set_err_dst_remap)
2248 		hal_soc->ops->hal_reo_set_err_dst_remap(hal_soc);
2249 }
2250 #endif /* _HAL_API_H_ */
2251