xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/hal_api.h (revision a86b23ee68a2491aede2e03991f3fb37046f4e41)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HAL_API_H_
20 #define _HAL_API_H_
21 
22 #include "qdf_types.h"
23 #include "qdf_util.h"
24 #include "qdf_atomic.h"
25 #include "hal_internal.h"
26 #include "hif.h"
27 #include "hif_io32.h"
28 #include "qdf_platform.h"
29 
30 /* calculate the register address offset from bar0 of shadow register x */
31 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
32 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x000008FC
33 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
34 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
35 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
36 #elif defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCN9000)
37 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00003024
38 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
39 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
40 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
41 #elif defined(QCA_WIFI_QCA6750)
42 #define SHADOW_REGISTER_START_ADDRESS_OFFSET 0x00000504
43 #define SHADOW_REGISTER_END_ADDRESS_OFFSET \
44 	((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (MAX_SHADOW_REGISTERS)))
45 #define SHADOW_REGISTER(x) ((SHADOW_REGISTER_START_ADDRESS_OFFSET) + (4 * (x)))
46 #else
47 #define SHADOW_REGISTER(x) 0
48 #endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 || QCA_WIFI_QCA6290 || QCA_WIFI_QCN9000 || QCA_WIFI_QCA6750 */
49 
50 #define MAX_UNWINDOWED_ADDRESS 0x80000
51 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
52     defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6750)
53 #define WINDOW_ENABLE_BIT 0x40000000
54 #else
55 #define WINDOW_ENABLE_BIT 0x80000000
56 #endif
57 #define WINDOW_REG_ADDRESS 0x310C
58 #define WINDOW_SHIFT 19
59 #define WINDOW_VALUE_MASK 0x3F
60 #define WINDOW_START MAX_UNWINDOWED_ADDRESS
61 #define WINDOW_RANGE_MASK 0x7FFFF
62 /*
63  * BAR + 4K is always accessible, any access outside this
64  * space requires force wake procedure.
65  * OFFSET = 4K - 32 bytes = 0xFE0
66  */
67 #define MAPPED_REF_OFF 0xFE0
68 
69 #ifdef ENABLE_VERBOSE_DEBUG
70 static inline void
71 hal_set_verbose_debug(bool flag)
72 {
73 	is_hal_verbose_debug_enabled = flag;
74 }
75 #endif
76 
77 #ifdef ENABLE_HAL_SOC_STATS
78 #define HAL_STATS_INC(_handle, _field, _delta) \
79 { \
80 	if (likely(_handle)) \
81 		_handle->stats._field += _delta; \
82 }
83 #else
84 #define HAL_STATS_INC(_handle, _field, _delta)
85 #endif
86 
87 #ifdef ENABLE_HAL_REG_WR_HISTORY
88 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
89 	hal_reg_wr_fail_history_add(hal_soc, offset, wr_val, rd_val)
90 
91 void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
92 				 uint32_t offset,
93 				 uint32_t wr_val,
94 				 uint32_t rd_val);
95 
96 static inline int hal_history_get_next_index(qdf_atomic_t *table_index,
97 					     int array_size)
98 {
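	/*
	 * Note (added): the atomic increment hands each caller a unique
	 * slot; masking with (array_size - 1) wraps correctly only when
	 * array_size is a power of two.
	 */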
99 	int record_index = qdf_atomic_inc_return(table_index);
100 
101 	return record_index & (array_size - 1);
102 }
103 #else
104 #define HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, wr_val, rd_val) \
105 	hal_err("write failed at reg offset 0x%x, write 0x%x read 0x%x\n", \
106 		offset,	\
107 		wr_val,	\
108 		rd_val)
109 #endif
110 
111 /**
112  * hal_reg_write_result_check() - check register writing result
113  * @hal_soc: HAL soc handle
114  * @offset: register offset to read
115  * @exp_val: the expected value of register
117  *
118  * Return: none
119  */
120 static inline void hal_reg_write_result_check(struct hal_soc *hal_soc,
121 					      uint32_t offset,
122 					      uint32_t exp_val)
123 {
124 	uint32_t value;
125 
126 	value = qdf_ioread32(hal_soc->dev_base_addr + offset);
127 	if (exp_val != value) {
128 		HAL_REG_WRITE_FAIL_HIST_ADD(hal_soc, offset, exp_val, value);
129 		HAL_STATS_INC(hal_soc, reg_write_fail, 1);
130 	}
131 }
132 
133 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
134     !defined(QCA_WIFI_QCA6750)
135 static inline void hal_lock_reg_access(struct hal_soc *soc,
136 				       unsigned long *flags)
137 {
138 	qdf_spin_lock_irqsave(&soc->register_access_lock);
139 }
140 
141 static inline void hal_unlock_reg_access(struct hal_soc *soc,
142 					 unsigned long *flags)
143 {
144 	qdf_spin_unlock_irqrestore(&soc->register_access_lock);
145 }
146 #else
147 static inline void hal_lock_reg_access(struct hal_soc *soc,
148 				       unsigned long *flags)
149 {
150 	pld_lock_reg_window(soc->qdf_dev->dev, flags);
151 }
152 
153 static inline void hal_unlock_reg_access(struct hal_soc *soc,
154 					 unsigned long *flags)
155 {
156 	pld_unlock_reg_window(soc->qdf_dev->dev, flags);
157 }
158 #endif
159 
160 #ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE
161 /**
162  * hal_select_window_confirm() - write remap window register and
163  *				   check the write result
164  *
165  */
166 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
167 					     uint32_t offset)
168 {
169 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
170 
171 	qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
172 		      WINDOW_ENABLE_BIT | window);
173 	hal_soc->register_window = window;
174 
175 	hal_reg_write_result_check(hal_soc, WINDOW_REG_ADDRESS,
176 				   WINDOW_ENABLE_BIT | window);
177 }
178 #else
179 static inline void hal_select_window_confirm(struct hal_soc *hal_soc,
180 					     uint32_t offset)
181 {
182 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
183 
184 	if (window != hal_soc->register_window) {
185 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
186 			      WINDOW_ENABLE_BIT | window);
187 		hal_soc->register_window = window;
188 
189 		hal_reg_write_result_check(
190 					hal_soc,
191 					WINDOW_REG_ADDRESS,
192 					WINDOW_ENABLE_BIT | window);
193 	}
194 }
195 #endif
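/*
 * Worked example (illustrative, added): with WINDOW_SHIFT = 19 and
 * WINDOW_RANGE_MASK = 0x7FFFF, an access to offset 0xA0000 selects
 * window = 0xA0000 >> 19 = 1 (written to WINDOW_REG_ADDRESS along with
 * WINDOW_ENABLE_BIT), and the register is then accessed at
 * WINDOW_START + (0xA0000 & 0x7FFFF) = 0x80000 + 0x20000 within BAR0.
 */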
196 
197 static inline qdf_iomem_t hal_get_window_address(struct hal_soc *hal_soc,
198 						 qdf_iomem_t addr)
199 {
200 	return hal_soc->ops->hal_get_window_address(hal_soc, addr);
201 }
202 
203 static inline void hal_tx_init_cmd_credit_ring(hal_soc_handle_t hal_soc_hdl,
204 					       hal_ring_handle_t hal_ring_hdl)
205 {
206 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
207 
208 	return hal_soc->ops->hal_tx_init_cmd_credit_ring(hal_soc_hdl,
209 							 hal_ring_hdl);
210 }
211 
212 /**
213  * hal_write32_mb() - Access registers to update configuration
214  * @hal_soc: hal soc handle
215  * @offset: offset address from the BAR
216  * @value: value to write
217  *
218  * Return: None
219  *
220  * Description: Register address space is split below:
221  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
222  *  |--------------------|-------------------|------------------|
223  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
224  *
225  * 1. Any access to the shadow region needs neither force wake
226  *    nor windowing logic.
227  * 2. Any access beyond BAR + 4K:
228  *    If init_phase enabled, no force wake is needed and access
229  *    should be based on windowed or unwindowed access.
230  *    If init_phase disabled, force wake is needed and access
231  *    should be based on windowed or unwindowed access.
232  *
233  * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1
234  * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
235  * note3: WINDOW_VALUE_MASK = big enough that trying to write past
236  *                            that window would be a bug
237  */
238 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
239     !defined(QCA_WIFI_QCA6750)
240 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
241 				  uint32_t value)
242 {
243 	unsigned long flags;
244 	qdf_iomem_t new_addr;
245 
246 	if (!hal_soc->use_register_windowing ||
247 	    offset < MAX_UNWINDOWED_ADDRESS) {
248 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
249 	} else if (hal_soc->static_window_map) {
250 		new_addr = hal_get_window_address(hal_soc,
251 				hal_soc->dev_base_addr + offset);
252 		qdf_iowrite32(new_addr, value);
253 	} else {
254 		hal_lock_reg_access(hal_soc, &flags);
255 		hal_select_window_confirm(hal_soc, offset);
256 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
257 			  (offset & WINDOW_RANGE_MASK), value);
258 		hal_unlock_reg_access(hal_soc, &flags);
259 	}
260 }
261 
262 #define hal_write32_mb_confirm(_hal_soc, _offset, _value) \
263 		hal_write32_mb(_hal_soc, _offset, _value)
264 #else
265 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
266 				  uint32_t value)
267 {
268 	int ret;
269 	unsigned long flags;
270 	qdf_iomem_t new_addr;
271 
272 	/* Region < BAR + 4K can be directly accessed */
273 	if (offset < MAPPED_REF_OFF) {
274 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
275 		return;
276 	}
277 
278 	/* Region greater than BAR + 4K */
279 	if (!hal_soc->init_phase) {
280 		ret = hif_force_wake_request(hal_soc->hif_handle);
281 		if (ret) {
282 			hal_err("Wake up request failed");
283 			qdf_check_state_before_panic();
284 			return;
285 		}
286 	}
287 
288 	if (!hal_soc->use_register_windowing ||
289 	    offset < MAX_UNWINDOWED_ADDRESS) {
290 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
291 	} else if (hal_soc->static_window_map) {
292 		new_addr = hal_get_window_address(
293 					hal_soc,
294 					hal_soc->dev_base_addr + offset);
295 		qdf_iowrite32(new_addr, value);
296 	} else {
297 		hal_lock_reg_access(hal_soc, &flags);
298 		hal_select_window_confirm(hal_soc, offset);
299 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
300 			  (offset & WINDOW_RANGE_MASK), value);
301 		hal_unlock_reg_access(hal_soc, &flags);
302 	}
303 
304 	if (!hal_soc->init_phase) {
305 		ret = hif_force_wake_release(hal_soc->hif_handle);
306 		if (ret) {
307 			hal_err("Wake up release failed");
308 			qdf_check_state_before_panic();
309 			return;
310 		}
311 	}
312 }
313 
314 /**
315  * hal_write32_mb_confirm() - write register and check the write result
316  *
317  */
318 static inline void hal_write32_mb_confirm(struct hal_soc *hal_soc,
319 					  uint32_t offset,
320 					  uint32_t value)
321 {
322 	int ret;
323 	unsigned long flags;
324 	qdf_iomem_t new_addr;
325 
326 	/* Region < BAR + 4K can be directly accessed */
327 	if (offset < MAPPED_REF_OFF) {
328 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
329 		return;
330 	}
331 
332 	/* Region greater than BAR + 4K */
333 	if (!hal_soc->init_phase) {
334 		ret = hif_force_wake_request(hal_soc->hif_handle);
335 		if (ret) {
336 			hal_err("Wake up request failed");
337 			qdf_check_state_before_panic();
338 			return;
339 		}
340 	}
341 
342 	if (!hal_soc->use_register_windowing ||
343 	    offset < MAX_UNWINDOWED_ADDRESS) {
344 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
345 		hal_reg_write_result_check(hal_soc, offset,
346 					   value);
347 	} else if (hal_soc->static_window_map) {
348 		new_addr = hal_get_window_address(
349 					hal_soc,
350 					hal_soc->dev_base_addr + offset);
351 		qdf_iowrite32(new_addr, value);
352 		hal_reg_write_result_check(hal_soc,
353 					   new_addr - hal_soc->dev_base_addr,
354 					   value);
355 	} else {
356 		hal_lock_reg_access(hal_soc, &flags);
357 		hal_select_window_confirm(hal_soc, offset);
358 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
359 			  (offset & WINDOW_RANGE_MASK), value);
360 
361 		hal_reg_write_result_check(
362 				hal_soc,
363 				WINDOW_START + (offset & WINDOW_RANGE_MASK),
364 				value);
365 		hal_unlock_reg_access(hal_soc, &flags);
366 	}
367 
368 	if (!hal_soc->init_phase) {
369 		ret = hif_force_wake_release(hal_soc->hif_handle);
370 		if (ret) {
371 			hal_err("Wake up release failed");
372 			qdf_check_state_before_panic();
373 			return;
374 		}
375 	}
376 }
377 #endif
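/*
 * Illustrative sketch (added, not part of this API): how a caller might use
 * the two write variants above. HAL_EXAMPLE_CTRL_REG_OFFSET and the
 * HAL_API_USAGE_EXAMPLES guard are hypothetical names used only for this
 * example, which stays out of real builds.
 */
#ifdef HAL_API_USAGE_EXAMPLES
#define HAL_EXAMPLE_CTRL_REG_OFFSET 0x00A01234

static inline void hal_example_program_ctrl(struct hal_soc *hal_soc,
					    uint32_t value)
{
	/* Fire-and-forget write: force wake (where applicable) and window
	 * selection are handled inside hal_write32_mb()
	 */
	hal_write32_mb(hal_soc, HAL_EXAMPLE_CTRL_REG_OFFSET, value);

	/* Write-and-verify: reads the register back and records a mismatch
	 * via hal_reg_write_result_check()
	 */
	hal_write32_mb_confirm(hal_soc, HAL_EXAMPLE_CTRL_REG_OFFSET, value);
}
#endif /* HAL_API_USAGE_EXAMPLES */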
378 
379 /**
380  * hal_write_address_32_mb - write a value to a register
381  *
382  */
383 static inline
384 void hal_write_address_32_mb(struct hal_soc *hal_soc,
385 			     qdf_iomem_t addr, uint32_t value, bool wr_confirm)
386 {
387 	uint32_t offset;
388 
389 	if (!hal_soc->use_register_windowing)
390 		return qdf_iowrite32(addr, value);
391 
392 	offset = addr - hal_soc->dev_base_addr;
393 
394 	if (qdf_unlikely(wr_confirm))
395 		hal_write32_mb_confirm(hal_soc, offset, value);
396 	else
397 		hal_write32_mb(hal_soc, offset, value);
398 }
399 
400 
401 #ifdef DP_HAL_MULTIWINDOW_DIRECT_ACCESS
402 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
403 						struct hal_srng *srng,
404 						void __iomem *addr,
405 						uint32_t value)
406 {
407 	qdf_iowrite32(addr, value);
408 }
409 #elif defined(FEATURE_HAL_DELAYED_REG_WRITE)
410 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
411 						struct hal_srng *srng,
412 						void __iomem *addr,
413 						uint32_t value)
414 {
415 	hal_delayed_reg_write(hal_soc, srng, addr, value);
416 }
417 #else
418 static inline void hal_srng_write_address_32_mb(struct hal_soc *hal_soc,
419 						struct hal_srng *srng,
420 						void __iomem *addr,
421 						uint32_t value)
422 {
423 	hal_write_address_32_mb(hal_soc, addr, value, false);
424 }
425 #endif
426 
427 #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
428     !defined(QCA_WIFI_QCA6750)
429 /**
430  * hal_read32_mb() - Access registers to read configuration
431  * @hal_soc: hal soc handle
432  * @offset: offset address from the BAR
434  *
435  * Description: Register address space is split below:
436  *     SHADOW REGION       UNWINDOWED REGION    WINDOWED REGION
437  *  |--------------------|-------------------|------------------|
438  * BAR  NO FORCE WAKE  BAR+4K  FORCE WAKE  BAR+512K  FORCE WAKE
439  *
440  * 1. Any access to the shadow region, doesn't need force wake
441  *    and windowing logic to access.
442  * 2. Any access beyond BAR + 4K:
443  *    If init_phase enabled, no force wake is needed and access
444  *    should be based on windowed or unwindowed access.
445  *    If init_phase disabled, force wake is needed and access
446  *    should be based on windowed or unwindowed access.
447  *
448  * Return: value read from the given register offset
449  */
450 static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
451 {
452 	uint32_t ret;
453 	unsigned long flags;
454 	qdf_iomem_t new_addr;
455 
456 	if (!hal_soc->use_register_windowing ||
457 	    offset < MAX_UNWINDOWED_ADDRESS) {
458 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
459 	} else if (hal_soc->static_window_map) {
460 		new_addr = hal_get_window_address(hal_soc, hal_soc->dev_base_addr + offset);
461 		return qdf_ioread32(new_addr);
462 	}
463 
464 	hal_lock_reg_access(hal_soc, &flags);
465 	hal_select_window_confirm(hal_soc, offset);
466 	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
467 		       (offset & WINDOW_RANGE_MASK));
468 	hal_unlock_reg_access(hal_soc, &flags);
469 
470 	return ret;
471 }
472 #else
473 static inline
474 uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
475 {
476 	uint32_t ret;
477 	unsigned long flags;
478 	qdf_iomem_t new_addr;
479 
480 	/* Region < BAR + 4K can be directly accessed */
481 	if (offset < MAPPED_REF_OFF)
482 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
483 
484 	if ((!hal_soc->init_phase) &&
485 	    hif_force_wake_request(hal_soc->hif_handle)) {
486 		hal_err("Wake up request failed");
487 		qdf_check_state_before_panic();
488 		return 0;
489 	}
490 
491 	if (!hal_soc->use_register_windowing ||
492 	    offset < MAX_UNWINDOWED_ADDRESS) {
493 		ret = qdf_ioread32(hal_soc->dev_base_addr + offset);
494 	} else if (hal_soc->static_window_map) {
495 		new_addr = hal_get_window_address(
496 					hal_soc,
497 					hal_soc->dev_base_addr + offset);
498 		ret = qdf_ioread32(new_addr);
499 	} else {
500 		hal_lock_reg_access(hal_soc, &flags);
501 		hal_select_window_confirm(hal_soc, offset);
502 		ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
503 			       (offset & WINDOW_RANGE_MASK));
504 		hal_unlock_reg_access(hal_soc, &flags);
505 	}
506 
507 	if ((!hal_soc->init_phase) &&
508 	    hif_force_wake_release(hal_soc->hif_handle)) {
509 		hal_err("Wake up release failed");
510 		qdf_check_state_before_panic();
511 		return 0;
512 	}
513 
514 	return ret;
515 }
516 #endif
517 
518 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
519 /**
520  * hal_dump_reg_write_srng_stats() - dump SRNG reg write stats
521  * @hal_soc: HAL soc handle
522  *
523  * Return: none
524  */
525 void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl);
526 
527 /**
528  * hal_dump_reg_write_stats() - dump reg write stats
529  * @hal_soc: HAL soc handle
530  *
531  * Return: none
532  */
533 void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl);
534 
535 /**
536  * hal_get_reg_write_pending_work() - get the number of entries
537  *		pending in the workqueue to be processed.
538  * @hal_soc: HAL soc handle
539  *
540  * Returns: the number of entries pending to be processed
541  */
542 int hal_get_reg_write_pending_work(void *hal_soc);
543 
544 #else
545 static inline void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
546 {
547 }
548 
549 static inline void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
550 {
551 }
552 
553 static inline int hal_get_reg_write_pending_work(void *hal_soc)
554 {
555 	return 0;
556 }
557 #endif
558 
559 /**
560  * hal_read_address_32_mb() - Read 32-bit value from the register
561  * @soc: soc handle
562  * @addr: register address to read
563  *
564  * Return: 32-bit value
565  */
566 static inline
567 uint32_t hal_read_address_32_mb(struct hal_soc *soc,
568 				qdf_iomem_t addr)
569 {
570 	uint32_t offset;
571 	uint32_t ret;
572 
573 	if (!soc->use_register_windowing)
574 		return qdf_ioread32(addr);
575 
576 	offset = addr - soc->dev_base_addr;
577 	ret = hal_read32_mb(soc, offset);
578 	return ret;
579 }
580 
581 /**
582  * hal_attach - Initialize HAL layer
583  * @hif_handle: Opaque HIF handle
584  * @qdf_dev: QDF device
585  *
586  * Return: Opaque HAL SOC handle
587  *		 NULL on failure
588  *
589  * This function should be called as part of HIF initialization (for accessing
590  * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
591  */
592 void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev);
593 
594 /**
595  * hal_detach - Detach HAL layer
596  * @hal_soc: HAL SOC handle
597  *
598  * This function should be called as part of HIF detach
599  *
600  */
601 extern void hal_detach(void *hal_soc);
602 
603 /* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
604 enum hal_ring_type {
605 	REO_DST = 0,
606 	REO_EXCEPTION = 1,
607 	REO_REINJECT = 2,
608 	REO_CMD = 3,
609 	REO_STATUS = 4,
610 	TCL_DATA = 5,
611 	TCL_CMD_CREDIT = 6,
612 	TCL_STATUS = 7,
613 	CE_SRC = 8,
614 	CE_DST = 9,
615 	CE_DST_STATUS = 10,
616 	WBM_IDLE_LINK = 11,
617 	SW2WBM_RELEASE = 12,
618 	WBM2SW_RELEASE = 13,
619 	RXDMA_BUF = 14,
620 	RXDMA_DST = 15,
621 	RXDMA_MONITOR_BUF = 16,
622 	RXDMA_MONITOR_STATUS = 17,
623 	RXDMA_MONITOR_DST = 18,
624 	RXDMA_MONITOR_DESC = 19,
625 	DIR_BUF_RX_DMA_SRC = 20,
626 #ifdef WLAN_FEATURE_CIF_CFR
627 	WIFI_POS_SRC,
628 #endif
629 	MAX_RING_TYPES
630 };
631 
632 #define HAL_SRNG_LMAC_RING 0x80000000
633 /* SRNG flags passed in hal_srng_params.flags */
634 #define HAL_SRNG_MSI_SWAP			0x00000008
635 #define HAL_SRNG_RING_PTR_SWAP			0x00000010
636 #define HAL_SRNG_DATA_TLV_SWAP			0x00000020
637 #define HAL_SRNG_LOW_THRES_INTR_ENABLE		0x00010000
638 #define HAL_SRNG_MSI_INTR			0x00020000
639 #define HAL_SRNG_CACHED_DESC			0x00040000
640 
641 #ifdef QCA_WIFI_QCA6490
642 #define HAL_SRNG_PREFETCH_TIMER 1
643 #else
644 #define HAL_SRNG_PREFETCH_TIMER 0
645 #endif
646 
647 #define PN_SIZE_24 0
648 #define PN_SIZE_48 1
649 #define PN_SIZE_128 2
650 
651 #ifdef FORCE_WAKE
652 /**
653  * hal_set_init_phase() - Indicate initialization of
654  *                        datapath rings
655  * @soc: hal_soc handle
656  * @init_phase: flag to indicate datapath rings
657  *              initialization status
658  *
659  * Return: None
660  */
661 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase);
662 #else
663 static inline
664 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
665 {
666 }
667 #endif /* FORCE_WAKE */
668 
669 /**
670  * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
671  * used by callers for calculating the size of memory to be allocated before
672  * calling hal_srng_setup to setup the ring
673  *
674  * @hal_soc: Opaque HAL SOC handle
675  * @ring_type: one of the types from hal_ring_type
676  *
677  */
678 extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
679 
680 /**
681  * hal_srng_max_entries - Returns maximum possible number of ring entries
682  * @hal_soc: Opaque HAL SOC handle
683  * @ring_type: one of the types from hal_ring_type
684  *
685  * Return: Maximum number of entries for the given ring_type
686  */
687 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
688 
689 void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
690 				 uint32_t low_threshold);
691 
692 /**
693  * hal_srng_dump - Dump ring status
694  * @srng: hal srng pointer
695  */
696 void hal_srng_dump(struct hal_srng *srng);
697 
698 /**
699  * hal_srng_get_dir - Returns the direction of the ring
700  * @hal_soc: Opaque HAL SOC handle
701  * @ring_type: one of the types from hal_ring_type
702  *
703  * Return: Ring direction
704  */
705 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
706 
707 /* HAL memory information */
708 struct hal_mem_info {
709 	/* dev base virtual addr */
710 	void *dev_base_addr;
711 	/* dev base physical addr */
712 	void *dev_base_paddr;
713 	/* dev base ce virtual addr - applicable only for qca5018 */
714 	/* In qca5018 CE register are outside wcss block */
715 	/* using a separate address space to access CE registers */
716 	void *dev_base_addr_ce;
717 	/* dev base ce physical addr */
718 	void *dev_base_paddr_ce;
719 	/* Remote virtual pointer memory for HW/FW updates */
720 	void *shadow_rdptr_mem_vaddr;
721 	/* Remote physical pointer memory for HW/FW updates */
722 	void *shadow_rdptr_mem_paddr;
723 	/* Shared memory for ring pointer updates from host to FW */
724 	void *shadow_wrptr_mem_vaddr;
725 	/* Shared physical memory for ring pointer updates from host to FW */
726 	void *shadow_wrptr_mem_paddr;
727 };
728 
729 /* SRNG parameters to be passed to hal_srng_setup */
730 struct hal_srng_params {
731 	/* Physical base address of the ring */
732 	qdf_dma_addr_t ring_base_paddr;
733 	/* Virtual base address of the ring */
734 	void *ring_base_vaddr;
735 	/* Number of entries in ring */
736 	uint32_t num_entries;
737 	/* max transfer length */
738 	uint16_t max_buffer_length;
739 	/* MSI Address */
740 	qdf_dma_addr_t msi_addr;
741 	/* MSI data */
742 	uint32_t msi_data;
743 	/* Interrupt timer threshold - in microseconds */
744 	uint32_t intr_timer_thres_us;
745 	/* Interrupt batch counter threshold - in number of ring entries */
746 	uint32_t intr_batch_cntr_thres_entries;
747 	/* Low threshold – in number of ring entries
748 	 * (valid for src rings only)
749 	 */
750 	uint32_t low_threshold;
751 	/* Misc flags */
752 	uint32_t flags;
753 	/* Unique ring id */
754 	uint8_t ring_id;
755 	/* Source or Destination ring */
756 	enum hal_srng_dir ring_dir;
757 	/* Size of ring entry */
758 	uint32_t entry_size;
759 	/* hw register base address */
760 	void *hwreg_base[MAX_SRNG_REG_GROUPS];
761 	/* prefetch timer config - in microseconds */
762 	uint32_t prefetch_timer;
763 };
764 
765 /* hal_construct_shadow_config() - initialize the shadow registers for dp rings
766  * @hal_soc: hal handle
767  *
768  * Return: QDF_STATUS_OK on success
769  */
770 extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);
771 
772 /* hal_set_one_shadow_config() - add a config for the specified ring
773  * @hal_soc: hal handle
774  * @ring_type: ring type
775  * @ring_num: ring num
776  *
777  * The ring type and ring num uniquely specify the ring.  After this call,
778  * the hp/tp will be added as the next entry in the shadow register
779  * configuration table.  The hal code will use the shadow register address
780  * in place of the hp/tp address.
781  *
782  * This function is exposed, so that the CE module can skip configuring shadow
783  * registers for unused rings and rings assigned to the firmware.
784  *
785  * Return: QDF_STATUS_OK on success
786  */
787 extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
788 					    int ring_num);
789 /**
790  * hal_get_shadow_config() - retrieve the config table
791  * @hal_soc: hal handle
792  * @shadow_config: will point to the table after
793  * @num_shadow_registers_configured: will contain the number of valid entries
794  */
795 extern void hal_get_shadow_config(void *hal_soc,
796 				  struct pld_shadow_reg_v2_cfg **shadow_config,
797 				  int *num_shadow_registers_configured);
798 /**
799  * hal_srng_setup - Initialize HW SRNG ring.
800  *
801  * @hal_soc: Opaque HAL SOC handle
802  * @ring_type: one of the types from hal_ring_type
803  * @ring_num: Ring number if there are multiple rings of
804  *		same type (starting from 0)
805  * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
806  * @ring_params: SRNG ring params in hal_srng_params structure.
807  *
808  * Callers are expected to allocate contiguous ring memory of size
809  * 'num_entries * entry_size' bytes and pass the physical and virtual base
810  * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
811  * structure. Ring base address should be 8 byte aligned and size of each ring
812  * entry should be queried using the API hal_srng_get_entrysize
813  *
814  * Return: Opaque pointer to ring on success
815  *		 NULL on failure (if given ring is not available)
816  */
817 extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
818 	int mac_id, struct hal_srng_params *ring_params);
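/*
 * Illustrative sketch (added, not part of this API): allocating ring memory
 * and calling hal_srng_setup() as described above. Assumes
 * qdf_mem_alloc_consistent() is reachable via the qdf headers; error
 * handling is simplified and the HAL_API_USAGE_EXAMPLES guard is
 * hypothetical, keeping the sketch out of real builds.
 */
#ifdef HAL_API_USAGE_EXAMPLES
static inline void *hal_example_setup_reo_dst_ring(void *hal_soc,
						   qdf_device_t qdf_dev,
						   uint32_t num_entries)
{
	struct hal_srng_params ring_params = {0};
	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
	qdf_dma_addr_t paddr;
	void *vaddr;

	/* Contiguous, 8-byte aligned ring memory of num_entries * entry_size
	 * bytes, as required by hal_srng_setup()
	 */
	vaddr = qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
					 num_entries * entry_size, &paddr);
	if (!vaddr)
		return NULL;

	ring_params.ring_base_vaddr = vaddr;
	ring_params.ring_base_paddr = paddr;
	ring_params.num_entries = num_entries;

	/* ring_num 0; mac_id is only relevant for LMAC rings */
	return hal_srng_setup(hal_soc, REO_DST, 0, 0, &ring_params);
}
#endif /* HAL_API_USAGE_EXAMPLES */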
819 
820 /* Remapping ids of REO rings */
821 #define REO_REMAP_TCL 0
822 #define REO_REMAP_SW1 1
823 #define REO_REMAP_SW2 2
824 #define REO_REMAP_SW3 3
825 #define REO_REMAP_SW4 4
826 #define REO_REMAP_RELEASE 5
827 #define REO_REMAP_FW 6
828 #define REO_REMAP_UNUSED 7
829 
830 /*
831  * Macro to access HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0
832  * to map destination to rings
833  */
834 #define HAL_REO_ERR_REMAP_IX0(_VALUE, _OFFSET) \
835 	((_VALUE) << \
836 	 (HWIO_REO_R0_ERROR_DESTINATION_MAPPING_IX_0_ERROR_ ## \
837 	  DESTINATION_RING_ ## _OFFSET ## _SHFT))
838 
839 /*
840  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
841  * to map destination to rings
842  */
843 #define HAL_REO_REMAP_IX0(_VALUE, _OFFSET) \
844 	((_VALUE) << \
845 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
846 	  _OFFSET ## _SHFT))
847 
848 /*
849  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2
850  * to map destination to rings
851  */
852 #define HAL_REO_REMAP_IX2(_VALUE, _OFFSET) \
853 	((_VALUE) << \
854 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_DEST_RING_MAPPING_ ## \
855 	  _OFFSET ## _SHFT))
856 
857 /*
858  * Macro to access HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3
859  * to map destination to rings
860  */
861 #define HAL_REO_REMAP_IX3(_VALUE, _OFFSET) \
862 	((_VALUE) << \
863 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_DEST_RING_MAPPING_ ## \
864 	  _OFFSET ## _SHFT))
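/*
 * Illustrative sketch (added, not part of this API): composing a REO
 * destination remap value from the macros above. The field indices 16-19
 * for IX2 are assumptions modeled on how the dp layer uses these macros;
 * the generated HWIO headers remain the authoritative source for the
 * field positions. The HAL_API_USAGE_EXAMPLES guard is hypothetical.
 */
#ifdef HAL_API_USAGE_EXAMPLES
#define HAL_EXAMPLE_REO_REMAP_IX2_VAL \
	(HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) | \
	 HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) | \
	 HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) | \
	 HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19))
#endif /* HAL_API_USAGE_EXAMPLES */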
865 
866 /**
867  * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
868  * @hal_soc_hdl: HAL SOC handle
869  * @read: boolean value to indicate if read or write
870  * @ix0: pointer to store IX0 reg value
871  * @ix1: pointer to store IX1 reg value
872  * @ix2: pointer to store IX2 reg value
873  * @ix3: pointer to store IX3 reg value
874  */
875 void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
876 				uint32_t *ix0, uint32_t *ix1,
877 				uint32_t *ix2, uint32_t *ix3);
878 
879 /**
880  * hal_srng_dst_set_hp_paddr() - Set physical address to dest SRNG head pointer
881  * @sring: sring pointer
882  * @paddr: physical address
883  */
884 extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);
885 
886 /**
887  * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
888  * @srng: sring pointer
889  * @vaddr: virtual address
890  */
891 extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr);
892 
893 /**
894  * hal_srng_cleanup - Deinitialize HW SRNG ring.
895  * @hal_soc: Opaque HAL SOC handle
896  * @hal_srng: Opaque HAL SRNG pointer
897  */
898 void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl);
899 
900 static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl)
901 {
902 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
903 
904 	return !!srng->initialized;
905 }
906 
907 /**
908  * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
909  * @hal_soc: Opaque HAL SOC handle
910  * @hal_ring_hdl: Destination ring pointer
911  *
912  * Caller takes responsibility for any locking needs.
913  *
914  * Return: Opaque pointer for next ring entry; NULL on failure
915  */
916 static inline
917 void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl,
918 			hal_ring_handle_t hal_ring_hdl)
919 {
920 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
921 
922 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
923 		return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);
924 
925 	return NULL;
926 }
927 
928 /**
929  * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
930  * hal_srng_access_start if locked access is required
931  *
932  * @hal_soc: Opaque HAL SOC handle
933  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
934  *
935  * Return: 0 on success; error on failure
936  */
937 static inline int
938 hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl,
939 			       hal_ring_handle_t hal_ring_hdl)
940 {
941 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
942 	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
943 	uint32_t *desc;
944 
945 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
946 		srng->u.src_ring.cached_tp =
947 			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
948 	else {
949 		srng->u.dst_ring.cached_hp =
950 			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
951 
952 		if (srng->flags & HAL_SRNG_CACHED_DESC) {
953 			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
954 			if (qdf_likely(desc)) {
955 				qdf_mem_dma_cache_sync(soc->qdf_dev,
956 						       qdf_mem_virt_to_phys
957 						       (desc),
958 						       QDF_DMA_FROM_DEVICE,
959 						       (srng->entry_size *
960 							sizeof(uint32_t)));
961 				qdf_prefetch(desc);
962 			}
963 		}
964 	}
965 
966 	return 0;
967 }
968 
969 /**
970  * hal_srng_access_start - Start (locked) ring access
971  *
972  * @hal_soc: Opaque HAL SOC handle
973  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
974  *
975  * Return: 0 on success; error on failure
976  */
977 static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl,
978 					hal_ring_handle_t hal_ring_hdl)
979 {
980 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
981 
982 	if (qdf_unlikely(!hal_ring_hdl)) {
983 		qdf_print("Error: Invalid hal_ring\n");
984 		return -EINVAL;
985 	}
986 
987 	SRNG_LOCK(&(srng->lock));
988 
989 	return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
990 }
991 
992 /**
993  * hal_srng_dst_get_next - Get next entry from a destination ring
994  * @hal_soc: Opaque HAL SOC handle
995  * @hal_ring_hdl: Destination ring pointer
996  *
997  * Return: Opaque pointer for next ring entry; NULL on failure
998  */
999 static inline
1000 void *hal_srng_dst_get_next(void *hal_soc,
1001 			    hal_ring_handle_t hal_ring_hdl)
1002 {
1003 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1004 	uint32_t *desc;
1005 
1006 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1007 		return NULL;
1008 
1009 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1010 	/* TODO: Using % is expensive, but we have to do this since
1011 	 * size of some SRNG rings is not power of 2 (due to descriptor
1012 	 * sizes). Need to create separate API for rings used
1013 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1014 	 * SW2RXDMA and CE rings)
1015 	 */
1016 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1017 	if (srng->u.dst_ring.tp == srng->ring_size)
1018 		srng->u.dst_ring.tp = 0;
1019 
1020 	if (srng->flags & HAL_SRNG_CACHED_DESC) {
1021 		struct hal_soc *soc = (struct hal_soc *)hal_soc;
1022 		uint32_t *desc_next;
1023 		uint32_t tp;
1024 
1025 		tp = srng->u.dst_ring.tp;
1026 		desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1027 		qdf_mem_dma_cache_sync(soc->qdf_dev,
1028 				       qdf_mem_virt_to_phys(desc_next),
1029 				       QDF_DMA_FROM_DEVICE,
1030 				       (srng->entry_size *
1031 					sizeof(uint32_t)));
1032 		qdf_prefetch(desc_next);
1033 	}
1034 
1035 	return (void *)desc;
1036 }
1037 
1038 /**
1039  * hal_srng_dst_get_next_cached - Get cached next entry
1040  * @hal_soc: Opaque HAL SOC handle
1041  * @hal_ring_hdl: Destination ring pointer
1042  *
1043  * Get next entry from a destination ring and move cached tail pointer
1044  *
1045  * Return: Opaque pointer for next ring entry; NULL on failure
1046  */
1047 static inline
1048 void *hal_srng_dst_get_next_cached(void *hal_soc,
1049 				   hal_ring_handle_t hal_ring_hdl)
1050 {
1051 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1052 	uint32_t *desc;
1053 	uint32_t *desc_next;
1054 
1055 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
1056 		return NULL;
1057 
1058 	desc = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1059 	/* TODO: Using % is expensive, but we have to do this since
1060 	 * size of some SRNG rings is not power of 2 (due to descriptor
1061 	 * sizes). Need to create separate API for rings used
1062 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1063 	 * SW2RXDMA and CE rings)
1064 	 */
1065 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size);
1066 	if (srng->u.dst_ring.tp == srng->ring_size)
1067 		srng->u.dst_ring.tp = 0;
1068 
1069 	desc_next = &srng->ring_base_vaddr[srng->u.dst_ring.tp];
1070 	qdf_prefetch(desc_next);
1071 	return (void *)desc;
1072 }
1073 
1074 /**
1075  * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
1076  * cached head pointer
1077  *
1078  * @hal_soc: Opaque HAL SOC handle
1079  * @hal_ring_hdl: Destination ring pointer
1080  *
1081  * Return: Opaque pointer for next ring entry; NULL on failure
1082  */
1083 static inline void *
1084 hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl,
1085 			 hal_ring_handle_t hal_ring_hdl)
1086 {
1087 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1088 	uint32_t *desc;
1089 	/* TODO: Using % is expensive, but we have to do this since
1090 	 * size of some SRNG rings is not power of 2 (due to descriptor
1091 	 * sizes). Need to create separate API for rings used
1092 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1093 	 * SW2RXDMA and CE rings)
1094 	 */
1095 	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
1096 		srng->ring_size;
1097 
1098 	if (next_hp != srng->u.dst_ring.tp) {
1099 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
1100 		srng->u.dst_ring.cached_hp = next_hp;
1101 		return (void *)desc;
1102 	}
1103 
1104 	return NULL;
1105 }
1106 
1107 /**
1108  * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
1109  * @hal_soc: Opaque HAL SOC handle
1110  * @hal_ring_hdl: Destination ring pointer
1111  *
1112  * Sync cached head pointer with HW.
1113  * Caller takes responsibility for any locking needs.
1114  *
1115  * Return: Opaque pointer for next ring entry; NULL on failure
1116  */
1117 static inline
1118 void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl,
1119 			     hal_ring_handle_t hal_ring_hdl)
1120 {
1121 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1122 
1123 	srng->u.dst_ring.cached_hp =
1124 		*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1125 
1126 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
1127 		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
1128 
1129 	return NULL;
1130 }
1131 
1132 /**
1133  * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring
1134  * @hal_soc: Opaque HAL SOC handle
1135  * @hal_ring_hdl: Destination ring pointer
1136  *
1137  * Sync cached head pointer with HW.
1138  * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
1139  *
1140  * Return: Opaque pointer for next ring entry; NULL on failure
1141  */
1142 static inline
1143 void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl,
1144 				    hal_ring_handle_t hal_ring_hdl)
1145 {
1146 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1147 	void *ring_desc_ptr = NULL;
1148 
1149 	if (qdf_unlikely(!hal_ring_hdl)) {
1150 		qdf_print("Error: Invalid hal_ring\n");
1151 		return  NULL;
1152 	}
1153 
1154 	SRNG_LOCK(&srng->lock);
1155 
1156 	ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl);
1157 
1158 	SRNG_UNLOCK(&srng->lock);
1159 
1160 	return ring_desc_ptr;
1161 }
1162 
1163 /**
1164  * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
1165  * by SW) in destination ring
1166  *
1167  * @hal_soc: Opaque HAL SOC handle
1168  * @hal_ring_hdl: Destination ring pointer
1169  * @sync_hw_ptr: Sync cached head pointer with HW
1170  *
1171  */
1172 static inline
1173 uint32_t hal_srng_dst_num_valid(void *hal_soc,
1174 				hal_ring_handle_t hal_ring_hdl,
1175 				int sync_hw_ptr)
1176 {
1177 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1178 	uint32_t hp;
1179 	uint32_t tp = srng->u.dst_ring.tp;
1180 
1181 	if (sync_hw_ptr) {
1182 		hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1183 		srng->u.dst_ring.cached_hp = hp;
1184 	} else {
1185 		hp = srng->u.dst_ring.cached_hp;
1186 	}
1187 
1188 	if (hp >= tp)
1189 		return (hp - tp) / srng->entry_size;
1190 
1191 	return (srng->ring_size - tp + hp) / srng->entry_size;
1192 }
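/*
 * Worked example (illustrative, added): with entry_size = 2 words and
 * ring_size = 16 words, hp = 4 and tp = 12 means the head has wrapped,
 * so the valid-entry count is (16 - 12 + 4) / 2 = 4.
 */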
1193 
1194 /**
1195  * hal_srng_dst_inv_cached_descs - API to invalidate descriptors in batch mode
1196  * @hal_soc: Opaque HAL SOC handle
1197  * @hal_ring_hdl: Destination ring pointer
1198  * @entry_count: Number of descriptors to be invalidated
1199  *
1200  * Invalidates a set of cached descriptors starting from the tail, for
1201  * the given number of entries
1202  *
1203  * Return: None
1204  */
1205 static inline void hal_srng_dst_inv_cached_descs(void *hal_soc,
1206 						 hal_ring_handle_t hal_ring_hdl,
1207 						 uint32_t entry_count)
1208 {
1209 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1210 	uint32_t hp = srng->u.dst_ring.cached_hp;
1211 	uint32_t tp = srng->u.dst_ring.tp;
1212 	uint32_t sync_p = 0;
1213 
1214 	/*
1215 	 * If SRNG does not have cached descriptors this
1216 	 * API call should be a no op
1217 	 */
1218 	if (!(srng->flags & HAL_SRNG_CACHED_DESC))
1219 		return;
1220 
1221 	if (qdf_unlikely(entry_count == 0))
1222 		return;
1223 
1224 	sync_p = (entry_count - 1) * srng->entry_size;
1225 
1226 	if (hp > tp) {
1227 		qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[tp],
1228 				       &srng->ring_base_vaddr[tp + sync_p]
1229 				       + (srng->entry_size * sizeof(uint32_t)));
1230 	} else {
1231 		/*
1232 		 * We have wrapped around
1233 		 */
1234 		uint32_t wrap_cnt = ((srng->ring_size - tp) / srng->entry_size);
1235 
1236 		if (entry_count <= wrap_cnt) {
1237 			qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[tp],
1238 					       &srng->ring_base_vaddr[tp + sync_p] +
1239 					       (srng->entry_size * sizeof(uint32_t)));
1240 			return;
1241 		}
1242 
1243 		entry_count -= wrap_cnt;
1244 		sync_p = (entry_count - 1) * srng->entry_size;
1245 
1246 		qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[tp],
1247 				       &srng->ring_base_vaddr[srng->ring_size - srng->entry_size] +
1248 				       (srng->entry_size * sizeof(uint32_t)));
1249 
1250 		qdf_nbuf_dma_inv_range(&srng->ring_base_vaddr[0],
1251 				       &srng->ring_base_vaddr[sync_p]
1252 				       + (srng->entry_size * sizeof(uint32_t)));
1253 	}
1254 }
1255 
1256 /**
1257  * hal_srng_dst_num_valid_locked - Returns num valid entries to be processed
1258  *
1259  * @hal_soc: Opaque HAL SOC handle
1260  * @hal_ring_hdl: Destination ring pointer
1261  * @sync_hw_ptr: Sync cached head pointer with HW
1262  *
1263  * Returns number of valid entries to be processed by the host driver. The
1264  * function takes up SRNG lock.
1265  *
1266  * Return: Number of valid destination entries
1267  */
1268 static inline uint32_t
1269 hal_srng_dst_num_valid_locked(hal_soc_handle_t hal_soc,
1270 			      hal_ring_handle_t hal_ring_hdl,
1271 			      int sync_hw_ptr)
1272 {
1273 	uint32_t num_valid;
1274 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1275 
1276 	SRNG_LOCK(&srng->lock);
1277 	num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, sync_hw_ptr);
1278 	SRNG_UNLOCK(&srng->lock);
1279 
1280 	return num_valid;
1281 }
1282 
1283 /**
1284  * hal_srng_sync_cachedhp - sync cached head pointer with HW head pointer
1285  *
1286  * @hal_soc: Opaque HAL SOC handle
1287  * @hal_ring_hdl: Destination ring pointer
1288  *
1289  */
1290 static inline
1291 void hal_srng_sync_cachedhp(void *hal_soc,
1292 				hal_ring_handle_t hal_ring_hdl)
1293 {
1294 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1295 	uint32_t hp;
1296 
1297 	hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
1298 	srng->u.dst_ring.cached_hp = hp;
1299 }
1300 
1301 /**
1302  * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
1303  * pointer. This can be used to release any buffers associated with completed
1304  * ring entries. Note that this should not be used for posting new descriptor
1305  * entries. Posting of new entries should be done only using
1306  * hal_srng_src_get_next_reaped when this function is used for reaping.
1307  *
1308  * @hal_soc: Opaque HAL SOC handle
1309  * @hal_ring_hdl: Source ring pointer
1310  *
1311  * Return: Opaque pointer for next ring entry; NULL on failure
1312  */
1313 static inline void *
1314 hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1315 {
1316 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1317 	uint32_t *desc;
1318 
1319 	/* TODO: Using % is expensive, but we have to do this since
1320 	 * size of some SRNG rings is not power of 2 (due to descriptor
1321 	 * sizes). Need to create separate API for rings used
1322 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1323 	 * SW2RXDMA and CE rings)
1324 	 */
1325 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1326 		srng->ring_size;
1327 
1328 	if (next_reap_hp != srng->u.src_ring.cached_tp) {
1329 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1330 		srng->u.src_ring.reap_hp = next_reap_hp;
1331 		return (void *)desc;
1332 	}
1333 
1334 	return NULL;
1335 }
1336 
1337 /**
1338  * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
1339  * already reaped using hal_srng_src_reap_next, for posting new entries to
1340  * the ring
1341  *
1342  * @hal_soc: Opaque HAL SOC handle
1343  * @hal_ring_hdl: Source ring pointer
1344  *
1345  * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
1346  */
1347 static inline void *
1348 hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1349 {
1350 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1351 	uint32_t *desc;
1352 
1353 	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
1354 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1355 		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
1356 			srng->ring_size;
1357 
1358 		return (void *)desc;
1359 	}
1360 
1361 	return NULL;
1362 }
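/*
 * Illustrative sketch (added, not part of this API): the reap-then-repost
 * pattern described above, e.g. freeing completed buffers before posting
 * new ones. hal_example_free_buf()/hal_example_fill_desc() are hypothetical
 * helpers and the HAL_API_USAGE_EXAMPLES guard is hypothetical.
 */
#ifdef HAL_API_USAGE_EXAMPLES
static inline void hal_example_reap_and_repost(void *hal_soc,
					       hal_ring_handle_t hal_ring_hdl)
{
	void *desc;

	/* Pass 1: advance reap_hp over completed entries, releasing the
	 * buffers associated with each descriptor
	 */
	while ((desc = hal_srng_src_reap_next(hal_soc, hal_ring_hdl))) {
		/* hal_example_free_buf(desc); */
	}

	/* Pass 2: post new descriptors only into entries already reaped */
	while ((desc = hal_srng_src_get_next_reaped(hal_soc, hal_ring_hdl))) {
		/* hal_example_fill_desc(desc); */
	}
}
#endif /* HAL_API_USAGE_EXAMPLES */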
1363 
1364 /**
1365  * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
1366  * move reap pointer. This API is used in detach path to release any buffers
1367  * associated with ring entries which are pending reap.
1368  *
1369  * @hal_soc: Opaque HAL SOC handle
1370  * @hal_ring_hdl: Source ring pointer
1371  *
1372  * Return: Opaque pointer for next ring entry; NULL on failure
1373  */
1374 static inline void *
1375 hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1376 {
1377 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1378 	uint32_t *desc;
1379 
1380 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1381 		srng->ring_size;
1382 
1383 	if (next_reap_hp != srng->u.src_ring.hp) {
1384 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
1385 		srng->u.src_ring.reap_hp = next_reap_hp;
1386 		return (void *)desc;
1387 	}
1388 
1389 	return NULL;
1390 }
1391 
1392 /**
1393  * hal_srng_src_done_val - Get number of entries completed by HW, pending reap
1394  *
1395  * @hal_soc: Opaque HAL SOC handle
1396  * @hal_ring_hdl: Source ring pointer
1397  *
1398  * Return: Number of completed entries available to be reaped
1399  */
1400 static inline uint32_t
1401 hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1402 {
1403 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1404 	/* TODO: Using % is expensive, but we have to do this since
1405 	 * size of some SRNG rings is not power of 2 (due to descriptor
1406 	 * sizes). Need to create separate API for rings used
1407 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1408 	 * SW2RXDMA and CE rings)
1409 	 */
1410 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
1411 		srng->ring_size;
1412 
1413 	if (next_reap_hp == srng->u.src_ring.cached_tp)
1414 		return 0;
1415 
1416 	if (srng->u.src_ring.cached_tp > next_reap_hp)
1417 		return (srng->u.src_ring.cached_tp - next_reap_hp) /
1418 			srng->entry_size;
1419 	else
1420 		return ((srng->ring_size - next_reap_hp) +
1421 			srng->u.src_ring.cached_tp) / srng->entry_size;
1422 }
1423 
1424 /**
1425  * hal_get_entrysize_from_srng() - Retrieve ring entry size
1426  * @hal_ring_hdl: Source ring pointer
1427  *
1428  * Return: ring entry size, in 32-bit (4 byte) words
1429  */
1430 static inline
1431 uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl)
1432 {
1433 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1434 
1435 	return srng->entry_size;
1436 }
1437 
1438 /**
1439  * hal_get_sw_hptp - Get SW head and tail pointer location for any ring
1440  * @hal_soc: Opaque HAL SOC handle
1441  * @hal_ring_hdl: Source ring pointer
1442  * @tailp: Tail Pointer
1443  * @headp: Head Pointer
1444  *
1445  * Return: None; tail and head pointers are returned through the arguments.
1446  */
1447 static inline
1448 void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
1449 		     uint32_t *tailp, uint32_t *headp)
1450 {
1451 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1452 
1453 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1454 		*headp = srng->u.src_ring.hp;
1455 		*tailp = *srng->u.src_ring.tp_addr;
1456 	} else {
1457 		*tailp = srng->u.dst_ring.tp;
1458 		*headp = *srng->u.dst_ring.hp_addr;
1459 	}
1460 }
1461 
1462 /**
1463  * hal_srng_src_get_next - Get next entry from a source ring and move cached tail pointer
1464  *
1465  * @hal_soc: Opaque HAL SOC handle
1466  * @hal_ring_hdl: Source ring pointer
1467  *
1468  * Return: Opaque pointer for next ring entry; NULL on failure
1469  */
1470 static inline
1471 void *hal_srng_src_get_next(void *hal_soc,
1472 			    hal_ring_handle_t hal_ring_hdl)
1473 {
1474 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1475 	uint32_t *desc;
1476 	/* TODO: Using % is expensive, but we have to do this since
1477 	 * size of some SRNG rings is not power of 2 (due to descriptor
1478 	 * sizes). Need to create separate API for rings used
1479 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1480 	 * SW2RXDMA and CE rings)
1481 	 */
1482 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
1483 		srng->ring_size;
1484 
1485 	if (next_hp != srng->u.src_ring.cached_tp) {
1486 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
1487 		srng->u.src_ring.hp = next_hp;
1488 		/* TODO: Since reap function is not used by all rings, we can
1489 		 * remove the following update of reap_hp in this function
1490 		 * if we can ensure that only hal_srng_src_get_next_reaped
1491 		 * is used for the rings requiring reap functionality
1492 		 */
1493 		srng->u.src_ring.reap_hp = next_hp;
1494 		return (void *)desc;
1495 	}
1496 
1497 	return NULL;
1498 }
1499 
1500 /**
1501  * hal_srng_src_peek_n_get_next - Get next entry from a ring without
1502  * moving head pointer.
1503  * hal_srng_src_get_next should be called subsequently to move the head pointer
1504  *
1505  * @hal_soc: Opaque HAL SOC handle
1506  * @hal_ring_hdl: Source ring pointer
1507  *
1508  * Return: Opaque pointer for next ring entry; NULL on failure
1509  */
1510 static inline
1511 void *hal_srng_src_peek_n_get_next(hal_soc_handle_t hal_soc_hdl,
1512 				   hal_ring_handle_t hal_ring_hdl)
1513 {
1514 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1515 	uint32_t *desc;
1516 
1517 	/* TODO: Using % is expensive, but we have to do this since
1518 	 * size of some SRNG rings is not power of 2 (due to descriptor
1519 	 * sizes). Need to create separate API for rings used
1520 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
1521 	 * SW2RXDMA and CE rings)
1522 	 */
1523 	if (((srng->u.src_ring.hp + srng->entry_size) %
1524 		srng->ring_size) != srng->u.src_ring.cached_tp) {
1525 		desc = &(srng->ring_base_vaddr[(srng->u.src_ring.hp +
1526 						srng->entry_size) %
1527 						srng->ring_size]);
1528 		return (void *)desc;
1529 	}
1530 
1531 	return NULL;
1532 }
1533 
1534 /**
1535  * hal_srng_src_get_cur_hp_n_move_next() - API returns current hp
1536  * and move hp to next in src ring
1537  *
1538  * Usage: This API should only be used at init time replenish.
1539  *
1540  * @hal_soc_hdl: HAL soc handle
1541  * @hal_ring_hdl: Source ring pointer
1542  *
1543  */
1544 static inline void *
1545 hal_srng_src_get_cur_hp_n_move_next(hal_soc_handle_t hal_soc_hdl,
1546 				    hal_ring_handle_t hal_ring_hdl)
1547 {
1548 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1549 	uint32_t *cur_desc = NULL;
1550 	uint32_t next_hp;
1551 
1552 	cur_desc = &srng->ring_base_vaddr[(srng->u.src_ring.hp)];
1553 
1554 	next_hp = (srng->u.src_ring.hp + srng->entry_size) %
1555 		srng->ring_size;
1556 
1557 	if (next_hp != srng->u.src_ring.cached_tp)
1558 		srng->u.src_ring.hp = next_hp;
1559 
1560 	return (void *)cur_desc;
1561 }
1562 
1563 /**
1564  * hal_srng_src_num_avail - Returns number of available entries in src ring
1565  *
1566  * @hal_soc: Opaque HAL SOC handle
1567  * @hal_ring_hdl: Source ring pointer
1568  * @sync_hw_ptr: Sync cached tail pointer with HW
1569  *
1570  */
1571 static inline uint32_t
1572 hal_srng_src_num_avail(void *hal_soc,
1573 		       hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr)
1574 {
1575 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1576 	uint32_t tp;
1577 	uint32_t hp = srng->u.src_ring.hp;
1578 
1579 	if (sync_hw_ptr) {
1580 		tp = *(srng->u.src_ring.tp_addr);
1581 		srng->u.src_ring.cached_tp = tp;
1582 	} else {
1583 		tp = srng->u.src_ring.cached_tp;
1584 	}
1585 
1586 	if (tp > hp)
1587 		return ((tp - hp) / srng->entry_size) - 1;
1588 	else
1589 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
1590 }
1591 
1592 /**
1593  * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
1594  * ring head/tail pointers to HW.
1595  * This should be used only if hal_srng_access_start_unlocked was used to
1596  * start ring access
1597  *
1598  * @hal_soc: Opaque HAL SOC handle
1599  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1600  *
1601  * Return: None
1602  */
1603 static inline void
1604 hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1605 {
1606 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1607 
1608 	/* TODO: See if we need a write memory barrier here */
1609 	if (srng->flags & HAL_SRNG_LMAC_RING) {
1610 		/* For LMAC rings, ring pointer updates are done through FW and
1611 		 * hence written to a shared memory location that is read by FW
1612 		 */
1613 		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1614 			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
1615 		} else {
1616 			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
1617 		}
1618 	} else {
1619 		if (srng->ring_dir == HAL_SRNG_SRC_RING)
1620 			hal_srng_write_address_32_mb(hal_soc,
1621 						     srng,
1622 						     srng->u.src_ring.hp_addr,
1623 						     srng->u.src_ring.hp);
1624 		else
1625 			hal_srng_write_address_32_mb(hal_soc,
1626 						     srng,
1627 						     srng->u.dst_ring.tp_addr,
1628 						     srng->u.dst_ring.tp);
1629 	}
1630 }
1631 
1632 /**
1633  * hal_srng_access_end - Unlock ring access and update cached ring head/tail
1634  * pointers to HW
1635  * This should be used only if hal_srng_access_start was used to start ring access
1636  *
1637  * @hal_soc: Opaque HAL SOC handle
1638  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1639  *
1640  * Return: None
1641  */
1642 static inline void
1643 hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1644 {
1645 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1646 
1647 	if (qdf_unlikely(!hal_ring_hdl)) {
1648 		qdf_print("Error: Invalid hal_ring\n");
1649 		return;
1650 	}
1651 
1652 	hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl);
1653 	SRNG_UNLOCK(&(srng->lock));
1654 }
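/*
 * Illustrative sketch (added, not part of this API): the canonical locked
 * access pattern for draining a destination ring, tying together
 * hal_srng_access_start(), hal_srng_dst_get_next() and
 * hal_srng_access_end(). hal_example_process_desc() and the
 * HAL_API_USAGE_EXAMPLES guard are hypothetical.
 */
#ifdef HAL_API_USAGE_EXAMPLES
static inline uint32_t
hal_example_drain_dst_ring(hal_soc_handle_t hal_soc_hdl,
			   hal_ring_handle_t hal_ring_hdl)
{
	void *desc;
	uint32_t count = 0;

	if (hal_srng_access_start(hal_soc_hdl, hal_ring_hdl))
		return 0;

	/* access_start cached the HW head pointer; consume entries until
	 * the tail catches up with that cached head
	 */
	while ((desc = hal_srng_dst_get_next(hal_soc_hdl, hal_ring_hdl))) {
		/* hal_example_process_desc(desc); */
		count++;
	}

	/* Publish the updated tail pointer to HW and drop the SRNG lock */
	hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);

	return count;
}
#endif /* HAL_API_USAGE_EXAMPLES */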
1655 
1656 /**
1657  * hal_srng_access_end_reap - Unlock ring access
1658  * This should be used only if hal_srng_access_start to start ring access
1659  * and should be used only while reaping SRC ring completions
1660  *
1661  * @hal_soc: Opaque HAL SOC handle
1662  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1663  *
1664  * Return: None
1665  */
1666 static inline void
1667 hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1668 {
1669 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1670 
1671 	SRNG_UNLOCK(&(srng->lock));
1672 }
1673 
1674 /* TODO: Check if the following definitions are available in HW headers */
1675 #define WBM_IDLE_SCATTER_BUF_SIZE 32704
1676 #define NUM_MPDUS_PER_LINK_DESC 6
1677 #define NUM_MSDUS_PER_LINK_DESC 7
1678 #define REO_QUEUE_DESC_ALIGN 128
1679 
1680 #define LINK_DESC_ALIGN 128
1681 
1682 #define ADDRESS_MATCH_TAG_VAL 0x5
1683 /* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
1684  * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
1685  */
1686 #define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
1687 
1688 /* TODO: Check with HW team on the scatter buffer size supported. As per WBM
1689  * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
1690  * should be specified in 16 word units. But the number of bits defined for
1691  * this field in HW header files is 5.
1692  */
1693 #define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
1694 
1695 
1696 /**
1697  * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
1698  * in an idle list
1699  *
1700  * @hal_soc: Opaque HAL SOC handle
1701  *
1702  */
1703 static inline
1704 uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl)
1705 {
1706 	return WBM_IDLE_SCATTER_BUF_SIZE;
1707 }
1708 
1709 /**
1710  * hal_get_link_desc_size - Get the size of each link descriptor
1711  *
1712  * @hal_soc: Opaque HAL SOC handle
1713  *
1714  */
1715 static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl)
1716 {
1717 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
1718 
1719 	if (!hal_soc || !hal_soc->ops) {
1720 		qdf_print("Error: Invalid ops\n");
1721 		QDF_BUG(0);
1722 		return -EINVAL;
1723 	}
1724 	if (!hal_soc->ops->hal_get_link_desc_size) {
1725 		qdf_print("Error: Invalid function pointer\n");
1726 		QDF_BUG(0);
1727 		return -EINVAL;
1728 	}
1729 	return hal_soc->ops->hal_get_link_desc_size();
1730 }
1731 
1732 /**
1733  * hal_get_link_desc_align - Get the required start address alignment for
1734  * link descriptors
1735  *
1736  * @hal_soc: Opaque HAL SOC handle
1737  *
1738  */
1739 static inline
1740 uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl)
1741 {
1742 	return LINK_DESC_ALIGN;
1743 }
1744 
1745 /**
1746  * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
1747  *
1748  * @hal_soc: Opaque HAL SOC handle
1749  *
1750  */
1751 static inline
1752 uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
1753 {
1754 	return NUM_MPDUS_PER_LINK_DESC;
1755 }
1756 
1757 /**
1758  * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
1759  *
1760  * @hal_soc: Opaque HAL SOC handle
1761  *
1762  */
1763 static inline
1764 uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
1765 {
1766 	return NUM_MSDUS_PER_LINK_DESC;
1767 }
1768 
1769 /**
1770  * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
1771  * descriptor can hold
1772  *
1773  * @hal_soc: Opaque HAL SOC handle
1774  *
1775  */
1776 static inline
1777 uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl)
1778 {
1779 	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
1780 }
1781 
1782 /**
1783  * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
1784  * that fit in the given scatter buffer size
1785  *
1786  * @hal_soc: Opaque HAL SOC handle
1787  * @scatter_buf_size: Size of scatter buffer
1788  *
1789  */
1790 static inline
1791 uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl,
1792 					  uint32_t scatter_buf_size)
1793 {
1794 	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
1795 		hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK);
1796 }
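/*
 * Worked example (editor's note): with WBM_IDLE_SCATTER_BUF_SIZE = 32704
 * and WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE = 8 defined above, and assuming
 * an 8-byte WBM_IDLE_LINK entry size (the real value comes from
 * hal_srng_get_entrysize() at run time), each scatter buffer holds
 * (32704 - 8) / 8 = 4087 link descriptor entries.
 */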
1797 
1798 /**
1799  * hal_idle_list_num_scatter_bufs - Get the number of scatter buffers
1800  * needed to hold the given total memory with the given buffer size
1801  *
1802  * @hal_soc: Opaque HAL SOC handle
1803  * @total_mem: size of memory to be scattered
1804  * @scatter_buf_size: Size of scatter buffer
1805  *
1806  */
1807 static inline
1808 uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl,
1809 					uint32_t total_mem,
1810 					uint32_t scatter_buf_size)
1811 {
1812 	uint8_t rem = (total_mem % (scatter_buf_size -
1813 			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
1814 
1815 	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
1816 				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
1817 
1818 	return num_scatter_bufs;
1819 }
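/*
 * Worked example (editor's note): the function above is a ceiling
 * division over the usable bytes per buffer. For total_mem = 1000000
 * with the values above, usable bytes per buffer = 32704 - 8 = 32696;
 * 1000000 / 32696 = 30 full buffers with a non-zero remainder, so
 * rem = 1 and 31 scatter buffers are required.
 */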
1820 
1821 enum hal_pn_type {
1822 	HAL_PN_NONE,
1823 	HAL_PN_WPA,
1824 	HAL_PN_WAPI_EVEN,
1825 	HAL_PN_WAPI_UNEVEN,
1826 };
1827 
1828 #define HAL_RX_MAX_BA_WINDOW 256
1829 
1830 /**
1831  * hal_get_reo_qdesc_align - Get start address alignment for reo
1832  * queue descriptors
1833  *
1834  * @hal_soc: Opaque HAL SOC handle
1835  *
1836  */
1837 static inline
1838 uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl)
1839 {
1840 	return REO_QUEUE_DESC_ALIGN;
1841 }
1842 
1843 /**
1844  * hal_reo_qdesc_setup - Setup HW REO queue descriptor
1845  *
1846  * @hal_soc: Opaque HAL SOC handle
 * @tid: TID of the flow for which the queue descriptor is being set up
1847  * @ba_window_size: BlockAck window size
1848  * @start_seq: Starting sequence number
1849  * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
1850  * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
1851  * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
1852  *
1853  */
1854 void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl,
1855 			 int tid, uint32_t ba_window_size,
1856 			 uint32_t start_seq, void *hw_qdesc_vaddr,
1857 			 qdf_dma_addr_t hw_qdesc_paddr,
1858 			 int pn_type);
1859 
1860 /**
1861  * hal_srng_get_hp_addr - Get head pointer physical address
1862  *
1863  * @hal_soc: Opaque HAL SOC handle
1864  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1865  *
1866  */
1867 static inline qdf_dma_addr_t
1868 hal_srng_get_hp_addr(void *hal_soc,
1869 		     hal_ring_handle_t hal_ring_hdl)
1870 {
1871 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1872 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1873 
1874 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1875 		return hal->shadow_wrptr_mem_paddr +
1876 		  ((unsigned long)(srng->u.src_ring.hp_addr) -
1877 		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
1878 	} else {
1879 		return hal->shadow_rdptr_mem_paddr +
1880 		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
1881 		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
1882 	}
1883 }
1884 
1885 /**
1886  * hal_srng_get_tp_addr - Get tail pointer physical address
1887  *
1888  * @hal_soc: Opaque HAL SOC handle
1889  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1890  *
1891  */
1892 static inline qdf_dma_addr_t
1893 hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1894 {
1895 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1896 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1897 
1898 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1899 		return hal->shadow_rdptr_mem_paddr +
1900 			((unsigned long)(srng->u.src_ring.tp_addr) -
1901 			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
1902 	} else {
1903 		return hal->shadow_wrptr_mem_paddr +
1904 			((unsigned long)(srng->u.dst_ring.tp_addr) -
1905 			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
1906 	}
1907 }
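/*
 * Editor's note: both lookups above translate a shadow-pointer location
 * from virtual to physical space with the base-plus-offset identity
 *
 *	paddr = region_paddr + (ptr_vaddr - region_vaddr)
 *
 * A source ring's HP lives in write-pointer memory (host writes, HW
 * reads) while its TP lives in read-pointer memory; destination rings
 * are the mirror image, which is why the two functions select opposite
 * shadow regions for the same direction of ring.
 */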
1908 
1909 /**
1910  * hal_srng_get_num_entries - Get total entries in the HAL SRNG
1911  *
1912  * @hal_soc: Opaque HAL SOC handle
1913  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1914  *
1915  * Return: total number of entries in hal ring
1916  */
1917 static inline
1918 uint32_t hal_srng_get_num_entries(hal_soc_handle_t hal_soc_hdl,
1919 				  hal_ring_handle_t hal_ring_hdl)
1920 {
1921 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
1922 
1923 	return srng->num_entries;
1924 }
1925 
1926 /**
1927  * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
1928  *
1929  * @hal_soc: Opaque HAL SOC handle
1930  * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1931  * @ring_params: SRNG parameters will be returned through this structure
1932  */
1933 void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
1934 			 hal_ring_handle_t hal_ring_hdl,
1935 			 struct hal_srng_params *ring_params);
1936 
1937 /**
1938  * hal_get_meminfo - Retrieve hal memory base address
1939  *
1940  * @hal_soc: Opaque HAL SOC handle
1941  * @mem: pointer to structure to be updated with hal mem info
1942  */
1943 void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem);
1944 
1945 /**
1946  * hal_get_target_type - Return target type
1947  *
1948  * @hal_soc: Opaque HAL SOC handle
1949  */
1950 uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl);
1951 
1952 /**
1953  * hal_get_ba_aging_timeout - Retrieve BA aging timeout
1954  *
1955  * @hal_soc: Opaque HAL SOC handle
1956  * @ac: Access category
1957  * @value: timeout duration in milliseconds (returned through this pointer)
1958  */
1959 void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
1960 			      uint32_t *value);
1961 /**
1962  * hal_set_ba_aging_timeout - Set BA aging timeout
1963  *
1964  * @hal_soc: Opaque HAL SOC handle
1965  * @ac: Access category
1966  * @value: timeout duration in milliseconds
1967  */
1968 void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
1969 			      uint32_t value);
1970 /**
1971  * hal_srng_dst_hw_init - Private function to initialize SRNG
1972  * destination ring HW
1973  * @hal_soc: HAL SOC handle
1974  * @srng: SRNG ring pointer
1975  */
1976 static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
1977 	struct hal_srng *srng)
1978 {
1979 	hal->ops->hal_srng_dst_hw_init(hal, srng);
1980 }
1981 
1982 /**
1983  * hal_srng_src_hw_init - Private function to initialize SRNG
1984  * source ring HW
1985  * @hal_soc: HAL SOC handle
1986  * @srng: SRNG ring pointer
1987  */
1988 static inline void hal_srng_src_hw_init(struct hal_soc *hal,
1989 	struct hal_srng *srng)
1990 {
1991 	hal->ops->hal_srng_src_hw_init(hal, srng);
1992 }
1993 
1994 /**
1995  * hal_get_hw_hptp()  - Get HW head and tail pointer value for any ring
1996  * @hal_soc: Opaque HAL SOC handle
1997  * @hal_ring_hdl: Source ring pointer
1998  * @headp: Head Pointer
1999  * @tailp: Tail Pointer
2000  * @ring_type: Ring type
2001  *
2002  * Return: None; head and tail pointer values are returned through @headp and @tailp.
2003  */
2004 static inline
2005 void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl,
2006 		     hal_ring_handle_t hal_ring_hdl,
2007 		     uint32_t *headp, uint32_t *tailp,
2008 		     uint8_t ring_type)
2009 {
2010 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2011 
2012 	hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl,
2013 			headp, tailp, ring_type);
2014 }
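/*
 * Example (editor's sketch): reading the live HW pointers, e.g. for a
 * ring-stall check. REO_DST is assumed to be one of the hal_ring_type
 * values from hal_internal.h.
 */
#if 0 /* illustrative only */
static void example_ring_stall_check(hal_soc_handle_t hal_soc_hdl,
				     hal_ring_handle_t hal_ring_hdl)
{
	uint32_t hw_hp, hw_tp;

	hal_get_hw_hptp(hal_soc_hdl, hal_ring_hdl, &hw_hp, &hw_tp, REO_DST);
	if (hw_hp == hw_tp)
		; /* ring is empty from HW's point of view */
}
#endif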
2015 
2016 /**
2017  * hal_reo_setup - Initialize HW REO block
2018  *
2019  * @hal_soc: Opaque HAL SOC handle
2020  * @reoparams: parameters needed by HAL for REO config
2021  */
2022 static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl,
2023 				 void *reoparams)
2024 {
2025 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2026 
2027 	hal_soc->ops->hal_reo_setup(hal_soc, reoparams);
2028 }
2029 
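/**
 * hal_compute_reo_remap_ix2_ix3 - Compute REO remap register values IX2
 * and IX3 for the given set of destination rings
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ring: Array of REO destination ring IDs across which traffic is spread
 * @num_rings: Number of entries in the ring array
 * @remap1: Computed value for remap register IX2 (output)
 * @remap2: Computed value for remap register IX3 (output)
 */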
2030 static inline
2031 void hal_compute_reo_remap_ix2_ix3(hal_soc_handle_t hal_soc_hdl,
2032 				   uint32_t *ring, uint32_t num_rings,
2033 				   uint32_t *remap1, uint32_t *remap2)
2034 {
2035 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2036 
2037 	hal_soc->ops->hal_compute_reo_remap_ix2_ix3(ring,
2038 					num_rings, remap1, remap2);
2039 }
2040 
2041 /**
2042  * hal_setup_link_idle_list - Setup scattered idle list using the
2043  * buffer list provided
2044  *
2045  * @hal_soc: Opaque HAL SOC handle
2046  * @scatter_bufs_base_paddr: Array of physical base addresses
2047  * @scatter_bufs_base_vaddr: Array of virtual base addresses
2048  * @num_scatter_bufs: Number of scatter buffers in the above lists
2049  * @scatter_buf_size: Size of each scatter buffer
2050  * @last_buf_end_offset: Offset to the last entry
2051  * @num_entries: Total entries of all scatter bufs
2052  *
2053  */
2054 static inline
2055 void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
2056 			      qdf_dma_addr_t scatter_bufs_base_paddr[],
2057 			      void *scatter_bufs_base_vaddr[],
2058 			      uint32_t num_scatter_bufs,
2059 			      uint32_t scatter_buf_size,
2060 			      uint32_t last_buf_end_offset,
2061 			      uint32_t num_entries)
2062 {
2063 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2064 
2065 	hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr,
2066 			scatter_bufs_base_vaddr, num_scatter_bufs,
2067 			scatter_buf_size, last_buf_end_offset,
2068 			num_entries);
2069 
2070 }
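/*
 * Example (editor's sketch): how the idle-list helpers above combine.
 * Buffer allocation and the num_entries/last_buf_end_offset bookkeeping
 * are assumed to be done by the caller; only the sizing math and the
 * final HAL call are shown.
 */
#if 0 /* illustrative only */
static void example_link_idle_list(hal_soc_handle_t hal_soc_hdl,
				   qdf_dma_addr_t paddr[], void *vaddr[],
				   uint32_t total_mem,
				   uint32_t last_buf_end_offset,
				   uint32_t num_entries)
{
	uint32_t buf_size = hal_idle_list_scatter_buf_size(hal_soc_hdl);
	uint32_t num_bufs = hal_idle_list_num_scatter_bufs(hal_soc_hdl,
							   total_mem,
							   buf_size);

	/* paddr[]/vaddr[] must each hold num_bufs pre-allocated buffers */
	hal_setup_link_idle_list(hal_soc_hdl, paddr, vaddr, num_bufs,
				 buf_size, last_buf_end_offset,
				 num_entries);
}
#endif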
2071 
2072 /**
2073  * hal_srng_dump_ring_desc() - Dump ring descriptor info
2074  *
2075  * @hal_soc: Opaque HAL SOC handle
2076  * @hal_ring_hdl: Source ring pointer
2077  * @ring_desc: Opaque ring descriptor handle
2078  */
2079 static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl,
2080 					   hal_ring_handle_t hal_ring_hdl,
2081 					   hal_ring_desc_t ring_desc)
2082 {
2083 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2084 
2085 	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2086 			   ring_desc, (srng->entry_size << 2));
2087 }
2088 
2089 /**
2090  * hal_srng_dump_ring() - Dump last 128 descs of the ring
2091  *
2092  * @hal_soc: Opaque HAL SOC handle
2093  * @hal_ring_hdl: Source ring pointer
2094  */
2095 static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl,
2096 				      hal_ring_handle_t hal_ring_hdl)
2097 {
2098 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2099 	uint32_t *desc;
2100 	uint32_t tp, i;
2101 
2102 	tp = srng->u.dst_ring.tp;
2103 
2104 	for (i = 0; i < 128; i++) {
2105 		if (!tp)
2106 			tp = srng->ring_size;
2107 
2108 		desc = &srng->ring_base_vaddr[tp - srng->entry_size];
2109 		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
2110 				   QDF_TRACE_LEVEL_DEBUG,
2111 				   desc, (srng->entry_size << 2));
2112 
2113 		tp -= srng->entry_size;
2114 	}
2115 }
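/*
 * Editor's note: the dump above walks backwards from the cached
 * destination-ring TP, wrapping to the end of the ring when tp reaches
 * zero, and prints entry_size 32-bit words (entry_size << 2 bytes) per
 * descriptor; it is therefore meaningful only for destination rings.
 */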
2116 
2117 /**
2118  * hal_rxdma_desc_to_hal_ring_desc - API to convert rxdma ring desc
2119  * to opaque hal ring desc type
2120  * @ring_desc: rxdma ring desc
2121  *
2122  * Return: hal_ring_desc_t type
2123  */
2124 static inline
2125 hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc)
2126 {
2127 	return (hal_ring_desc_t)ring_desc;
2128 }
2129 
2130 /**
2131  * hal_srng_set_event() - Set hal_srng event
2132  * @hal_ring_hdl: Source ring pointer
2133  * @event: SRNG ring event
2134  *
2135  * Return: None
2136  */
2137 static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event)
2138 {
2139 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2140 
2141 	qdf_atomic_set_bit(event, &srng->srng_event);
2142 }
2143 
2144 /**
2145  * hal_srng_clear_event() - Clear hal_srng event
2146  * @hal_ring_hdl: Source ring pointer
2147  * @event: SRNG ring event
2148  *
2149  * Return: None
2150  */
2151 static inline
2152 void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
2153 {
2154 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2155 
2156 	qdf_atomic_clear_bit(event, &srng->srng_event);
2157 }
2158 
2159 /**
2160  * hal_srng_get_clear_event() - Clear srng event and return old value
2161  * @hal_ring_hdl: Source ring pointer
2162  * @event: SRNG ring event
2163  *
2164  * Return: Return old event value
2165  */
2166 static inline
2167 int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
2168 {
2169 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2170 
2171 	return qdf_atomic_test_and_clear_bit(event, &srng->srng_event);
2172 }
2173 
2174 /**
2175  * hal_srng_set_flush_last_ts() - Record last flush time stamp
2176  * @hal_ring_hdl: Source ring pointer
2177  *
2178  * Return: None
2179  */
2180 static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl)
2181 {
2182 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2183 
2184 	srng->last_flush_ts = qdf_get_log_timestamp();
2185 }
2186 
2187 /**
2188  * hal_srng_inc_flush_cnt() - Increment flush counter
2189  * @hal_ring_hdl: Source ring pointer
2190  *
2191  * Return: None
2192  */
2193 static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl)
2194 {
2195 	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
2196 
2197 	srng->flush_count++;
2198 }
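/*
 * Example (editor's sketch): a deferred-flush pattern combining the
 * event and flush helpers above. HAL_SRNG_FLUSH_EVENT is assumed to be
 * an event bit defined in hal_internal.h.
 */
#if 0 /* illustrative only */
static void example_deferred_flush(hal_ring_handle_t hal_ring_hdl)
{
	/* Producer path: HW update was skipped, remember a flush is due */
	hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);

	/* Deferred path (e.g. a timer): act exactly once if still pending */
	if (hal_srng_get_clear_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT)) {
		/* ... write the cached pointers to HW here ... */
		hal_srng_set_flush_last_ts(hal_ring_hdl);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}
}
#endif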
2199 
2200 /**
2201  * hal_rx_sw_mon_desc_info_get() - Get SW monitor desc info
2202  *
2203  * @hal: Core HAL soc handle
2204  * @ring_desc: Mon dest ring descriptor
2205  * @desc_info: Desc info to be populated
2206  *
2207  * Return: None
2208  */
2209 static inline void
2210 hal_rx_sw_mon_desc_info_get(struct hal_soc *hal,
2211 			    hal_ring_desc_t ring_desc,
2212 			    hal_rx_mon_desc_info_t desc_info)
2213 {
2214 	hal->ops->hal_rx_sw_mon_desc_info_get(ring_desc, desc_info);
2215 }
2216 
2217 /**
2218  * hal_reo_set_err_dst_remap() - Set REO error destination ring remap
2219  *				 register value.
2220  *
2221  * @hal_soc_hdl: Opaque HAL soc handle
2222  *
2223  * Return: None
2224  */
2225 static inline void hal_reo_set_err_dst_remap(hal_soc_handle_t hal_soc_hdl)
2226 {
2227 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
2228 
2229 	if (hal_soc->ops->hal_reo_set_err_dst_remap)
2230 		hal_soc->ops->hal_reo_set_err_dst_remap(hal_soc);
2231 }
2232 #endif /* _HAL_API_H_ */
2233