xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/hal_api.h (revision 4865edfd190c086bbe2c69aae12a8226f877b91e)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are
6  * met:
7  *     * Redistributions of source code must retain the above copyright
8  *       notice, this list of conditions and the following disclaimer.
9  *     * Redistributions in binary form must reproduce the above
10  *       copyright notice, this list of conditions and the following
11  *       disclaimer in the documentation and/or other materials provided
12  *       with the distribution.
13  *     * Neither the name of The Linux Foundation nor the names of its
14  *       contributors may be used to endorse or promote products derived
15  *       from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
18  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
24  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
26  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
27  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #ifndef _HAL_API_H_
31 #define _HAL_API_H_
32 
33 #include "qdf_types.h"
34 #include "qdf_util.h"
35 #include "hal_internal.h"
36 #include "rx_msdu_link.h"
37 #include "rx_reo_queue.h"
38 #include "rx_reo_queue_ext.h"
39 
40 #define MAX_UNWINDOWED_ADDRESS 0x80000
41 #define WINDOW_ENABLE_BIT 0x80000000
42 #define WINDOW_REG_ADDRESS 0x310C
43 #define WINDOW_SHIFT 19
44 #define WINDOW_VALUE_MASK 0x3F
45 #define WINDOW_START MAX_UNWINDOWED_ADDRESS
46 #define WINDOW_RANGE_MASK 0x7FFFF
47 
48 static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
49 {
50 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
51 	if (window != hal_soc->register_window) {
52 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
53 			      WINDOW_ENABLE_BIT | window);
54 		hal_soc->register_window = window;
55 	}
56 }
57 
58 /**
59  * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1
60  * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
61  * note3: WINDOW_VALUE_MASK = big enough that trying to write past that window
62  *				would be a bug
63  */
64 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
65 				  uint32_t value)
66 {
67 
68 	if (!hal_soc->use_register_windowing ||
69 	    offset < MAX_UNWINDOWED_ADDRESS) {
70 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
71 	} else {
72 		qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
73 		hal_select_window(hal_soc, offset);
74 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
75 			  (offset & WINDOW_RANGE_MASK), value);
76 		qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
77 	}
78 }
79 
80 /**
81  * hal_write_address_32_mb - write a value to a register
82  *
83  */
84 static inline void hal_write_address_32_mb(struct hal_soc *hal_soc,
85 					   void __iomem *addr, uint32_t value)
86 {
87 	uint32_t offset;
88 
89 	if (!hal_soc->use_register_windowing)
90 		return qdf_iowrite32(addr, value);
91 
92 	offset = addr - hal_soc->dev_base_addr;
93 	hal_write32_mb(hal_soc, offset, value);
94 }
95 
96 static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
97 {
98 	uint32_t ret;
99 
100 	if (!hal_soc->use_register_windowing ||
101 	    offset < MAX_UNWINDOWED_ADDRESS) {
102 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
103 	}
104 
105 	qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
106 	hal_select_window(hal_soc, offset);
107 	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
108 		       (offset & WINDOW_RANGE_MASK));
109 	qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
110 
111 	return ret;
112 }
113 
114 #include "hif_io32.h"
115 
116 /**
117  * hal_attach - Initialize HAL layer
118  * @hif_handle: Opaque HIF handle
119  * @qdf_dev: QDF device
120  *
121  * Return: Opaque HAL SOC handle
122  *		 NULL on failure (if given ring is not available)
123  *
124  * This function should be called as part of HIF initialization (for accessing
125  * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
126  */
127 extern void *hal_attach(void *hif_handle, qdf_device_t qdf_dev);
128 
129 /**
130  * hal_detach - Detach HAL layer
131  * @hal_soc: HAL SOC handle
132  *
133  * This function should be called as part of HIF detach
134  *
135  */
136 extern void hal_detach(void *hal_soc);
137 
/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	/* REO rings */
	REO_DST,
	REO_EXCEPTION,
	REO_REINJECT,
	REO_CMD,
	REO_STATUS,
	/* TCL rings */
	TCL_DATA,
	TCL_CMD,
	TCL_STATUS,
	/* Copy engine rings */
	CE_SRC,
	CE_DST,
	CE_DST_STATUS,
	/* WBM rings */
	WBM_IDLE_LINK,
	SW2WBM_RELEASE,
	WBM2SW_RELEASE,
	/* RXDMA rings */
	RXDMA_BUF,
	RXDMA_DST,
	RXDMA_MONITOR_BUF,
	RXDMA_MONITOR_STATUS,
	RXDMA_MONITOR_DST,
	RXDMA_MONITOR_DESC,
	DIR_BUF_RX_DMA_SRC,
#ifdef WLAN_FEATURE_CIF_CFR
	WIFI_POS_SRC,
#endif
	/* Keep last: number of ring types */
	MAX_RING_TYPES
};
166 
167 /* SRNG flags passed in hal_srng_params.flags */
168 #define HAL_SRNG_MSI_SWAP				0x00000008
169 #define HAL_SRNG_RING_PTR_SWAP			0x00000010
170 #define HAL_SRNG_DATA_TLV_SWAP			0x00000020
171 #define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
172 #define HAL_SRNG_MSI_INTR				0x00020000
173 
174 #define PN_SIZE_24 0
175 #define PN_SIZE_48 1
176 #define PN_SIZE_128 2
177 
178 /**
179  * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
180  * used by callers for calculating the size of memory to be allocated before
181  * calling hal_srng_setup to setup the ring
182  *
183  * @hal_soc: Opaque HAL SOC handle
184  * @ring_type: one of the types from hal_ring_type
185  *
186  */
187 extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
188 
189 /**
190  * hal_srng_max_entries - Returns maximum possible number of ring entries
191  * @hal_soc: Opaque HAL SOC handle
192  * @ring_type: one of the types from hal_ring_type
193  *
194  * Return: Maximum number of entries for the given ring_type
195  */
196 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
197 
198 /**
199  * hal_srng_dump - Dump ring status
200  * @srng: hal srng pointer
201  */
202 void hal_srng_dump(struct hal_srng *srng);
203 
204 /**
205  * hal_srng_get_dir - Returns the direction of the ring
206  * @hal_soc: Opaque HAL SOC handle
207  * @ring_type: one of the types from hal_ring_type
208  *
209  * Return: Ring direction
210  */
211 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
212 
/* HAL memory information */
struct hal_mem_info {
	/* dev base virtual addr */
	void *dev_base_addr;
	/* dev base physical addr */
	void *dev_base_paddr;
	/* Remote virtual pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_vaddr;
	/* Remote physical pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_paddr;
	/* Shared memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_vaddr;
	/* Shared physical memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_paddr;
};
228 
/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold - in micro seconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold - in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags - HAL_SRNG_* flags defined above */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
	/* Source or Destination ring */
	enum hal_srng_dir ring_dir;
	/* Size of ring entry */
	uint32_t entry_size;
	/* hw register base address */
	void *hwreg_base[MAX_SRNG_REG_GROUPS];
};
262 
263 /* hal_construct_shadow_config() - initialize the shadow registers for dp rings
264  * @hal_soc: hal handle
265  *
266  * Return: QDF_STATUS_OK on success
267  */
268 extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);
269 
270 /* hal_set_one_shadow_config() - add a config for the specified ring
271  * @hal_soc: hal handle
272  * @ring_type: ring type
273  * @ring_num: ring num
274  *
275  * The ring type and ring num uniquely specify the ring.  After this call,
276  * the hp/tp will be added as the next entry int the shadow register
277  * configuration table.  The hal code will use the shadow register address
278  * in place of the hp/tp address.
279  *
280  * This function is exposed, so that the CE module can skip configuring shadow
281  * registers for unused ring and rings assigned to the firmware.
282  *
283  * Return: QDF_STATUS_OK on success
284  */
285 extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
286 					    int ring_num);
287 /**
288  * hal_get_shadow_config() - retrieve the config table
289  * @hal_soc: hal handle
290  * @shadow_config: will point to the table after
291  * @num_shadow_registers_configured: will contain the number of valid entries
292  */
293 extern void hal_get_shadow_config(void *hal_soc,
294 				  struct pld_shadow_reg_v2_cfg **shadow_config,
295 				  int *num_shadow_registers_configured);
296 /**
297  * hal_srng_setup - Initialize HW SRNG ring.
298  *
299  * @hal_soc: Opaque HAL SOC handle
300  * @ring_type: one of the types from hal_ring_type
301  * @ring_num: Ring number if there are multiple rings of
302  *		same type (staring from 0)
303  * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
304  * @ring_params: SRNG ring params in hal_srng_params structure.
305 
306  * Callers are expected to allocate contiguous ring memory of size
307  * 'num_entries * entry_size' bytes and pass the physical and virtual base
308  * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
309  * structure. Ring base address should be 8 byte aligned and size of each ring
310  * entry should be queried using the API hal_srng_get_entrysize
311  *
312  * Return: Opaque pointer to ring on success
313  *		 NULL on failure (if given ring is not available)
314  */
315 extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
316 	int mac_id, struct hal_srng_params *ring_params);
317 
318 /* Remapping ids of REO rings */
319 #define REO_REMAP_TCL 0
320 #define REO_REMAP_SW1 1
321 #define REO_REMAP_SW2 2
322 #define REO_REMAP_SW3 3
323 #define REO_REMAP_SW4 4
324 #define REO_REMAP_RELEASE 5
325 #define REO_REMAP_FW 6
326 #define REO_REMAP_UNUSED 7
327 
328 /*
329  * currently this macro only works for IX0 since all the rings we are remapping
330  * can be remapped from HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
331  */
332 #define HAL_REO_REMAP_VAL(_ORIGINAL_DEST, _NEW_DEST) \
333 	HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST)
334 /* allow the destination macros to be expanded */
335 #define HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST) \
336 	(_NEW_DEST << \
337 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
338 	  _ORIGINAL_DEST ## _SHFT))
339 
340 /**
341  * hal_reo_remap_IX0 - Remap REO ring destination
342  * @hal: HAL SOC handle
343  * @remap_val: Remap value
344  */
345 extern void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val);
346 
/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest SRNG head pointer
 * @sring: srng pointer
 * @paddr: physical address
 */
352 extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);
353 
/**
 * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
 * @srng: srng pointer
 * @vaddr: virtual address
 */
359 extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr);
360 
361 /**
362  * hal_srng_cleanup - Deinitialize HW SRNG ring.
363  * @hal_soc: Opaque HAL SOC handle
364  * @hal_srng: Opaque HAL SRNG pointer
365  */
366 extern void hal_srng_cleanup(void *hal_soc, void *hal_srng);
367 
368 static inline bool hal_srng_initialized(void *hal_ring)
369 {
370 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
371 
372 	return !!srng->initialized;
373 }
374 
375 /**
376  * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
377  * hal_srng_access_start if locked access is required
378  *
379  * @hal_soc: Opaque HAL SOC handle
380  * @hal_ring: Ring pointer (Source or Destination ring)
381  *
382  * Return: 0 on success; error on failire
383  */
384 static inline int hal_srng_access_start_unlocked(void *hal_soc, void *hal_ring)
385 {
386 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
387 
388 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
389 		srng->u.src_ring.cached_tp =
390 			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
391 	else
392 		srng->u.dst_ring.cached_hp =
393 			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
394 
395 	return 0;
396 }
397 
398 /**
399  * hal_srng_access_start - Start (locked) ring access
400  *
401  * @hal_soc: Opaque HAL SOC handle
402  * @hal_ring: Ring pointer (Source or Destination ring)
403  *
404  * Return: 0 on success; error on failire
405  */
406 static inline int hal_srng_access_start(void *hal_soc, void *hal_ring)
407 {
408 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
409 
410 	SRNG_LOCK(&(srng->lock));
411 
412 	return hal_srng_access_start_unlocked(hal_soc, hal_ring);
413 }
414 
415 /**
416  * hal_srng_dst_get_next - Get next entry from a destination ring and move
417  * cached tail pointer
418  *
419  * @hal_soc: Opaque HAL SOC handle
420  * @hal_ring: Destination ring pointer
421  *
422  * Return: Opaque pointer for next ring entry; NULL on failire
423  */
424 static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring)
425 {
426 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
427 	uint32_t *desc;
428 
429 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
430 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
431 		/* TODO: Using % is expensive, but we have to do this since
432 		 * size of some SRNG rings is not power of 2 (due to descriptor
433 		 * sizes). Need to create separate API for rings used
434 		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
435 		 * SW2RXDMA and CE rings)
436 		 */
437 		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
438 			srng->ring_size;
439 
440 		return (void *)desc;
441 	}
442 
443 	return NULL;
444 }
445 
446 /**
447  * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
448  * cached head pointer
449  *
450  * @hal_soc: Opaque HAL SOC handle
451  * @hal_ring: Destination ring pointer
452  *
453  * Return: Opaque pointer for next ring entry; NULL on failire
454  */
455 static inline void *hal_srng_dst_get_next_hp(void *hal_soc, void *hal_ring)
456 {
457 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
458 	uint32_t *desc;
459 	/* TODO: Using % is expensive, but we have to do this since
460 	 * size of some SRNG rings is not power of 2 (due to descriptor
461 	 * sizes). Need to create separate API for rings used
462 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
463 	 * SW2RXDMA and CE rings)
464 	 */
465 	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
466 		srng->ring_size;
467 
468 	if (next_hp != srng->u.dst_ring.tp) {
469 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
470 		srng->u.dst_ring.cached_hp = next_hp;
471 		return (void *)desc;
472 	}
473 
474 	return NULL;
475 }
476 
477 /**
478  * hal_srng_dst_peek - Get next entry from a ring without moving tail pointer.
479  * hal_srng_dst_get_next should be called subsequently to move the tail pointer
480  * TODO: See if we need an optimized version of get_next that doesn't check for
481  * loop_cnt
482  *
483  * @hal_soc: Opaque HAL SOC handle
484  * @hal_ring: Destination ring pointer
485  *
486  * Return: Opaque pointer for next ring entry; NULL on failire
487  */
488 static inline void *hal_srng_dst_peek(void *hal_soc, void *hal_ring)
489 {
490 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
491 
492 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
493 		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
494 
495 	return NULL;
496 }
497 
498 /**
499  * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
500  * by SW) in destination ring
501  *
502  * @hal_soc: Opaque HAL SOC handle
503  * @hal_ring: Destination ring pointer
504  * @sync_hw_ptr: Sync cached head pointer with HW
505  *
506  */
507 static inline uint32_t hal_srng_dst_num_valid(void *hal_soc, void *hal_ring,
508 	int sync_hw_ptr)
509 {
510 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
511 	uint32 hp;
512 	uint32 tp = srng->u.dst_ring.tp;
513 
514 	if (sync_hw_ptr) {
515 		hp = *(srng->u.dst_ring.hp_addr);
516 		srng->u.dst_ring.cached_hp = hp;
517 	} else {
518 		hp = srng->u.dst_ring.cached_hp;
519 	}
520 
521 	if (hp >= tp)
522 		return (hp - tp) / srng->entry_size;
523 	else
524 		return (srng->ring_size - tp + hp) / srng->entry_size;
525 }
526 
527 /**
528  * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
529  * pointer. This can be used to release any buffers associated with completed
530  * ring entries. Note that this should not be used for posting new descriptor
531  * entries. Posting of new entries should be done only using
532  * hal_srng_src_get_next_reaped when this function is used for reaping.
533  *
534  * @hal_soc: Opaque HAL SOC handle
535  * @hal_ring: Source ring pointer
536  *
537  * Return: Opaque pointer for next ring entry; NULL on failire
538  */
539 static inline void *hal_srng_src_reap_next(void *hal_soc, void *hal_ring)
540 {
541 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
542 	uint32_t *desc;
543 
544 	/* TODO: Using % is expensive, but we have to do this since
545 	 * size of some SRNG rings is not power of 2 (due to descriptor
546 	 * sizes). Need to create separate API for rings used
547 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
548 	 * SW2RXDMA and CE rings)
549 	 */
550 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
551 		srng->ring_size;
552 
553 	if (next_reap_hp != srng->u.src_ring.cached_tp) {
554 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
555 		srng->u.src_ring.reap_hp = next_reap_hp;
556 		return (void *)desc;
557 	}
558 
559 	return NULL;
560 }
561 
562 /**
563  * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
564  * already reaped using hal_srng_src_reap_next, for posting new entries to
565  * the ring
566  *
567  * @hal_soc: Opaque HAL SOC handle
568  * @hal_ring: Source ring pointer
569  *
570  * Return: Opaque pointer for next (reaped) source ring entry; NULL on failire
571  */
572 static inline void *hal_srng_src_get_next_reaped(void *hal_soc, void *hal_ring)
573 {
574 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
575 	uint32_t *desc;
576 
577 	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
578 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
579 		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
580 			srng->ring_size;
581 
582 		return (void *)desc;
583 	}
584 
585 	return NULL;
586 }
587 
588 /**
589  * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
590  * move reap pointer. This API is used in detach path to release any buffers
591  * associated with ring entries which are pending reap.
592  *
593  * @hal_soc: Opaque HAL SOC handle
594  * @hal_ring: Source ring pointer
595  *
596  * Return: Opaque pointer for next ring entry; NULL on failire
597  */
598 static inline void *hal_srng_src_pending_reap_next(void *hal_soc, void *hal_ring)
599 {
600 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
601 	uint32_t *desc;
602 
603 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
604 		srng->ring_size;
605 
606 	if (next_reap_hp != srng->u.src_ring.hp) {
607 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
608 		srng->u.src_ring.reap_hp = next_reap_hp;
609 		return (void *)desc;
610 	}
611 
612 	return NULL;
613 }
614 
615 /**
616  * hal_srng_src_done_val -
617  *
618  * @hal_soc: Opaque HAL SOC handle
619  * @hal_ring: Source ring pointer
620  *
621  * Return: Opaque pointer for next ring entry; NULL on failire
622  */
623 static inline uint32_t hal_srng_src_done_val(void *hal_soc, void *hal_ring)
624 {
625 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
626 	/* TODO: Using % is expensive, but we have to do this since
627 	 * size of some SRNG rings is not power of 2 (due to descriptor
628 	 * sizes). Need to create separate API for rings used
629 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
630 	 * SW2RXDMA and CE rings)
631 	 */
632 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
633 		srng->ring_size;
634 
635 	if (next_reap_hp == srng->u.src_ring.cached_tp)
636 		return 0;
637 
638 	if (srng->u.src_ring.cached_tp > next_reap_hp)
639 		return (srng->u.src_ring.cached_tp - next_reap_hp) /
640 			srng->entry_size;
641 	else
642 		return ((srng->ring_size - next_reap_hp) +
643 			srng->u.src_ring.cached_tp) / srng->entry_size;
644 }
645 
646 /**
647  * hal_api_get_tphp - Get head and tail pointer location for any ring
648  * @hal_soc: Opaque HAL SOC handle
649  * @hal_ring: Source ring pointer
650  * @tailp: Tail Pointer
651  * @headp: Head Pointer
652  *
653  * Return: Update tail pointer and head pointer in arguments.
654  */
655 static inline void hal_api_get_tphp(void *hal_soc, void *hal_ring,
656 	uint32_t *tailp, uint32_t *headp)
657 {
658 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
659 
660 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
661 		*headp = srng->u.src_ring.hp / srng->entry_size;
662 		*tailp = *(srng->u.src_ring.tp_addr) / srng->entry_size;
663 	} else {
664 		*tailp = srng->u.dst_ring.tp / srng->entry_size;
665 		*headp = *(srng->u.dst_ring.hp_addr) / srng->entry_size;
666 	}
667 }
668 
669 /**
670  * hal_srng_src_get_next - Get next entry from a source ring and move cached tail pointer
671  *
672  * @hal_soc: Opaque HAL SOC handle
673  * @hal_ring: Source ring pointer
674  *
675  * Return: Opaque pointer for next ring entry; NULL on failire
676  */
677 static inline void *hal_srng_src_get_next(void *hal_soc, void *hal_ring)
678 {
679 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
680 	uint32_t *desc;
681 	/* TODO: Using % is expensive, but we have to do this since
682 	 * size of some SRNG rings is not power of 2 (due to descriptor
683 	 * sizes). Need to create separate API for rings used
684 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
685 	 * SW2RXDMA and CE rings)
686 	 */
687 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
688 		srng->ring_size;
689 
690 	if (next_hp != srng->u.src_ring.cached_tp) {
691 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
692 		srng->u.src_ring.hp = next_hp;
693 		/* TODO: Since reap function is not used by all rings, we can
694 		 * remove the following update of reap_hp in this function
695 		 * if we can ensure that only hal_srng_src_get_next_reaped
696 		 * is used for the rings requiring reap functionality
697 		 */
698 		srng->u.src_ring.reap_hp = next_hp;
699 		return (void *)desc;
700 	}
701 
702 	return NULL;
703 }
704 
705 /**
706  * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
707  * hal_srng_src_get_next should be called subsequently to move the head pointer
708  *
709  * @hal_soc: Opaque HAL SOC handle
710  * @hal_ring: Source ring pointer
711  *
712  * Return: Opaque pointer for next ring entry; NULL on failire
713  */
714 static inline void *hal_srng_src_peek(void *hal_soc, void *hal_ring)
715 {
716 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
717 	uint32_t *desc;
718 
719 	/* TODO: Using % is expensive, but we have to do this since
720 	 * size of some SRNG rings is not power of 2 (due to descriptor
721 	 * sizes). Need to create separate API for rings used
722 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
723 	 * SW2RXDMA and CE rings)
724 	 */
725 	if (((srng->u.src_ring.hp + srng->entry_size) %
726 		srng->ring_size) != srng->u.src_ring.cached_tp) {
727 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
728 		return (void *)desc;
729 	}
730 
731 	return NULL;
732 }
733 
734 /**
735  * hal_srng_src_num_avail - Returns number of available entries in src ring
736  *
737  * @hal_soc: Opaque HAL SOC handle
738  * @hal_ring: Source ring pointer
739  * @sync_hw_ptr: Sync cached tail pointer with HW
740  *
741  */
742 static inline uint32_t hal_srng_src_num_avail(void *hal_soc,
743 	void *hal_ring, int sync_hw_ptr)
744 {
745 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
746 	uint32 tp;
747 	uint32 hp = srng->u.src_ring.hp;
748 
749 	if (sync_hw_ptr) {
750 		tp = *(srng->u.src_ring.tp_addr);
751 		srng->u.src_ring.cached_tp = tp;
752 	} else {
753 		tp = srng->u.src_ring.cached_tp;
754 	}
755 
756 	if (tp > hp)
757 		return ((tp - hp) / srng->entry_size) - 1;
758 	else
759 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
760 }
761 
762 /**
763  * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
764  * ring head/tail pointers to HW.
765  * This should be used only if hal_srng_access_start_unlocked to start ring
766  * access
767  *
768  * @hal_soc: Opaque HAL SOC handle
769  * @hal_ring: Ring pointer (Source or Destination ring)
770  *
771  * Return: 0 on success; error on failire
772  */
773 static inline void hal_srng_access_end_unlocked(void *hal_soc, void *hal_ring)
774 {
775 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
776 
777 	/* TODO: See if we need a write memory barrier here */
778 	if (srng->flags & HAL_SRNG_LMAC_RING) {
779 		/* For LMAC rings, ring pointer updates are done through FW and
780 		 * hence written to a shared memory location that is read by FW
781 		 */
782 		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
783 			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
784 		} else {
785 			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
786 		}
787 	} else {
788 		if (srng->ring_dir == HAL_SRNG_SRC_RING)
789 			hal_write_address_32_mb(hal_soc,
790 				srng->u.src_ring.hp_addr,
791 				srng->u.src_ring.hp);
792 		else
793 			hal_write_address_32_mb(hal_soc,
794 				srng->u.dst_ring.tp_addr,
795 				srng->u.dst_ring.tp);
796 	}
797 }
798 
799 /**
800  * hal_srng_access_end - Unlock ring access and update cached ring head/tail
801  * pointers to HW
802  * This should be used only if hal_srng_access_start to start ring access
803  *
804  * @hal_soc: Opaque HAL SOC handle
805  * @hal_ring: Ring pointer (Source or Destination ring)
806  *
807  * Return: 0 on success; error on failire
808  */
809 static inline void hal_srng_access_end(void *hal_soc, void *hal_ring)
810 {
811 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
812 
813 	hal_srng_access_end_unlocked(hal_soc, hal_ring);
814 	SRNG_UNLOCK(&(srng->lock));
815 }
816 
817 /**
818  * hal_srng_access_end_reap - Unlock ring access
819  * This should be used only if hal_srng_access_start to start ring access
820  * and should be used only while reaping SRC ring completions
821  *
822  * @hal_soc: Opaque HAL SOC handle
823  * @hal_ring: Ring pointer (Source or Destination ring)
824  *
825  * Return: 0 on success; error on failire
826  */
827 static inline void hal_srng_access_end_reap(void *hal_soc, void *hal_ring)
828 {
829 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
830 
831 	SRNG_UNLOCK(&(srng->lock));
832 }
833 
834 /* TODO: Check if the following definitions is available in HW headers */
835 #define WBM_IDLE_DESC_LIST 1
836 #define WBM_IDLE_SCATTER_BUF_SIZE 32704
837 #define NUM_MPDUS_PER_LINK_DESC 6
838 #define NUM_MSDUS_PER_LINK_DESC 7
839 #define REO_QUEUE_DESC_ALIGN 128
840 
841 #define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2)
842 #define LINK_DESC_ALIGN 128
843 
844 #define ADDRESS_MATCH_TAG_VAL 0x5
845 /* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
846  * of TX_MPDU_QUEUE_EXT. We are defining a common average count here
847  */
848 #define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
849 
850 /* TODO: Check with HW team on the scatter buffer size supported. As per WBM
851  * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
852  * should be specified in 16 word units. But the number of bits defined for
853  * this field in HW header files is 5.
854  */
855 #define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
856 
/**
 * hal_set_link_desc_addr - Setup link descriptor in a buffer_addr_info
 * HW structure
 *
 * @desc: Descriptor entry (from WBM_IDLE_LINK ring)
 * @cookie: SW cookie for the buffer/descriptor
 * @link_desc_paddr: Physical address of link descriptor entry
 *
 */
static inline void hal_set_link_desc_addr(void *desc, uint32_t cookie,
	qdf_dma_addr_t link_desc_paddr)
{
	uint32_t *buf_addr = (uint32_t *)desc;
	/* Lower 32 bits of the link descriptor physical address */
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_0, BUFFER_ADDR_31_0,
		link_desc_paddr & 0xffffffff);
	/* Bits 39..32 of the physical address; cast avoids a shift wider
	 * than qdf_dma_addr_t on 32-bit builds
	 */
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, BUFFER_ADDR_39_32,
		(uint64_t)link_desc_paddr >> 32);
	/* Descriptor is returned to the WBM idle descriptor list */
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, RETURN_BUFFER_MANAGER,
		WBM_IDLE_DESC_LIST);
	/* SW cookie identifying this buffer/descriptor */
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, SW_BUFFER_COOKIE,
		cookie);
}
879 
880 /**
881  * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
882  * in an idle list
883  *
884  * @hal_soc: Opaque HAL SOC handle
885  *
886  */
887 static inline uint32_t hal_idle_list_scatter_buf_size(void *hal_soc)
888 {
889 	return WBM_IDLE_SCATTER_BUF_SIZE;
890 }
891 
892 /**
893  * hal_get_link_desc_size - Get the size of each link descriptor
894  *
895  * @hal_soc: Opaque HAL SOC handle
896  *
897  */
898 static inline uint32_t hal_get_link_desc_size(void *hal_soc)
899 {
900 	return LINK_DESC_SIZE;
901 }
902 
903 /**
904  * hal_get_link_desc_align - Get the required start address alignment for
905  * link descriptors
906  *
907  * @hal_soc: Opaque HAL SOC handle
908  *
909  */
910 static inline uint32_t hal_get_link_desc_align(void *hal_soc)
911 {
912 	return LINK_DESC_ALIGN;
913 }
914 
915 /**
916  * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
917  *
918  * @hal_soc: Opaque HAL SOC handle
919  *
920  */
921 static inline uint32_t hal_num_mpdus_per_link_desc(void *hal_soc)
922 {
923 	return NUM_MPDUS_PER_LINK_DESC;
924 }
925 
926 /**
927  * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
928  *
929  * @hal_soc: Opaque HAL SOC handle
930  *
931  */
932 static inline uint32_t hal_num_msdus_per_link_desc(void *hal_soc)
933 {
934 	return NUM_MSDUS_PER_LINK_DESC;
935 }
936 
/**
 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
 * descriptor can hold
 *
 * @hal_soc: Opaque HAL SOC handle (unused)
 *
 * Return: number of MPDU links per queue descriptor
 * (NUM_MPDU_LINKS_PER_QUEUE_DESC)
 */
static inline uint32_t hal_num_mpdu_links_per_queue_desc(void *hal_soc)
{
	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
}
948 
/**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that fit in a scatter buffer of the given size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_buf_size: Size of scatter buffer
 *
 * Return: number of WBM idle-link ring entries per scatter buffer, after
 * reserving WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE bytes for the next-pointer
 * area
 */
static inline uint32_t hal_idle_scatter_buf_num_entries(void *hal_soc,
	uint32_t scatter_buf_size)
{
	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
		hal_srng_get_entrysize(hal_soc, WBM_IDLE_LINK);
}
963 
964 /**
965  * hal_idle_list_num_scatter_bufs - Get the number of sctater buffer
966  * each given buffer size
967  *
968  * @hal_soc: Opaque HAL SOC handle
969  * @total_mem: size of memory to be scattered
970  * @scatter_buf_size: Size of scatter buffer
971  *
972  */
973 static inline uint32_t hal_idle_list_num_scatter_bufs(void *hal_soc,
974 	uint32_t total_mem, uint32_t scatter_buf_size)
975 {
976 	uint8_t rem = (total_mem % (scatter_buf_size -
977 			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
978 
979 	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
980 				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
981 
982 	return num_scatter_bufs;
983 }
984 
/**
 * hal_setup_link_idle_list - Setup scattered idle list using the buffer list
 * provided
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_bufs_base_paddr: Array of physical base addresses
 * @scatter_bufs_base_vaddr: Array of virtual base addresses
 * @num_scatter_bufs: Number of scatter buffers in the above lists
 * @scatter_buf_size: Size of each scatter buffer
 * @last_buf_end_offset: Offset to the last entry
 * @num_entries: Total entries of all scatter bufs
 *
 */
extern void hal_setup_link_idle_list(void *hal_soc,
	qdf_dma_addr_t scatter_bufs_base_paddr[],
	void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
	uint32_t scatter_buf_size, uint32_t last_buf_end_offset,
	uint32_t num_entries);
1003 
/* REO parameters to be passed to hal_reo_setup */
struct hal_reo_params {
	/** rx hash steering enabled or disabled */
	bool rx_hash_enabled;
	/** reo remap 1 register value */
	uint32_t remap1;
	/** reo remap 2 register value */
	uint32_t remap2;
	/** fragment destination ring */
	uint8_t frag_dst_ring;
	/** padding to keep the structure size a multiple of 4 bytes */
	uint8_t padding[3];
};
1017 
/**
 * hal_reo_setup - Initialize HW REO block
 *
 * @hal_soc: Opaque HAL SOC handle
 * @reo_params: parameters needed by HAL for REO config
 *
 * Return: none
 */
extern void hal_reo_setup(void *hal_soc,
	 struct hal_reo_params *reo_params);
1026 
/* PN (packet number) types passed as 'pn_type' to hal_reo_qdesc_setup */
enum hal_pn_type {
	HAL_PN_NONE,		/* no PN configuration */
	HAL_PN_WPA,		/* WPA PN format */
	HAL_PN_WAPI_EVEN,	/* WAPI, even PN */
	HAL_PN_WAPI_UNEVEN,	/* WAPI, uneven PN */
};
1033 
1034 #define HAL_RX_MAX_BA_WINDOW 256
1035 /**
1036  * hal_get_reo_qdesc_size - Get size of reo queue descriptor
1037  *
1038  * @hal_soc: Opaque HAL SOC handle
1039  * @ba_window_size: BlockAck window size
1040  *
1041  */
1042 static inline uint32_t hal_get_reo_qdesc_size(void *hal_soc,
1043 	uint32_t ba_window_size)
1044 {
1045 	if (ba_window_size <= 1)
1046 		return sizeof(struct rx_reo_queue);
1047 
1048 	if (ba_window_size <= 105)
1049 		return sizeof(struct rx_reo_queue) +
1050 			sizeof(struct rx_reo_queue_ext);
1051 
1052 	if (ba_window_size <= 210)
1053 		return sizeof(struct rx_reo_queue) +
1054 			(2 * sizeof(struct rx_reo_queue_ext));
1055 
1056 	return sizeof(struct rx_reo_queue) +
1057 		(3 * sizeof(struct rx_reo_queue_ext));
1058 }
1059 
/**
 * hal_get_reo_qdesc_align - Get start address alignment for reo
 * queue descriptors
 *
 * @hal_soc: Opaque HAL SOC handle (unused)
 *
 * Return: required alignment in bytes (REO_QUEUE_DESC_ALIGN)
 */
static inline uint32_t hal_get_reo_qdesc_align(void *hal_soc)
{
	return REO_QUEUE_DESC_ALIGN;
}
1071 
/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID of the queue being set up
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
 *
 */
extern void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
	int pn_type);
1086 
1087 /**
1088  * hal_srng_get_hp_addr - Get head pointer physical address
1089  *
1090  * @hal_soc: Opaque HAL SOC handle
1091  * @hal_ring: Ring pointer (Source or Destination ring)
1092  *
1093  */
1094 static inline qdf_dma_addr_t hal_srng_get_hp_addr(void *hal_soc, void *hal_ring)
1095 {
1096 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
1097 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1098 
1099 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1100 		return hal->shadow_wrptr_mem_paddr +
1101 		  ((unsigned long)(srng->u.src_ring.hp_addr) -
1102 		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
1103 	} else {
1104 		return hal->shadow_rdptr_mem_paddr +
1105 		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
1106 		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
1107 	}
1108 }
1109 
1110 /**
1111  * hal_srng_get_tp_addr - Get tail pointer physical address
1112  *
1113  * @hal_soc: Opaque HAL SOC handle
1114  * @hal_ring: Ring pointer (Source or Destination ring)
1115  *
1116  */
1117 static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
1118 {
1119 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
1120 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1121 
1122 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1123 		return hal->shadow_rdptr_mem_paddr +
1124 			((unsigned long)(srng->u.src_ring.tp_addr) -
1125 			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
1126 	} else {
1127 		return hal->shadow_wrptr_mem_paddr +
1128 			((unsigned long)(srng->u.dst_ring.tp_addr) -
1129 			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
1130 	}
1131 }
1132 
/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 *
 * Return: none
 */
extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
	struct hal_srng_params *ring_params);
1142 
/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 *
 * Return: none
 */
extern void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem);
#endif /* _HAL_API_H_ */
1151