xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/hal_api.h (revision 1b9674e21e24478fba4530f5ae7396b9555e9c6a)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are
6  * met:
7  *     * Redistributions of source code must retain the above copyright
8  *       notice, this list of conditions and the following disclaimer.
9  *     * Redistributions in binary form must reproduce the above
10  *       copyright notice, this list of conditions and the following
11  *       disclaimer in the documentation and/or other materials provided
12  *       with the distribution.
13  *     * Neither the name of The Linux Foundation nor the names of its
14  *       contributors may be used to endorse or promote products derived
15  *       from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
18  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
24  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
26  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
27  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
#ifndef _HAL_API_H_
#define _HAL_API_H_

#include "qdf_types.h"
#include "qdf_util.h"
#include "hal_internal.h"

/* Register offsets below this boundary are mapped directly; offsets at or
 * above it are reached through the sliding register window programmed via
 * hal_select_window()
 */
#define MAX_UNWINDOWED_ADDRESS 0x80000
#ifdef QCA_WIFI_QCA6390
#define WINDOW_ENABLE_BIT 0x40000000
#else
#define WINDOW_ENABLE_BIT 0x80000000
#endif
/* Offset of the window-select register (written in hal_select_window()) */
#define WINDOW_REG_ADDRESS 0x310C
/* Window index = offset >> WINDOW_SHIFT (note: 1 << WINDOW_SHIFT ==
 * MAX_UNWINDOWED_ADDRESS)
 */
#define WINDOW_SHIFT 19
#define WINDOW_VALUE_MASK 0x3F
#define WINDOW_START MAX_UNWINDOWED_ADDRESS
/* Mask of the offset bits that address within a single window */
#define WINDOW_RANGE_MASK 0x7FFFF
48 
49 static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
50 {
51 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
52 	if (window != hal_soc->register_window) {
53 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
54 			      WINDOW_ENABLE_BIT | window);
55 		hal_soc->register_window = window;
56 	}
57 }
58 
59 /**
60  * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1
61  * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
62  * note3: WINDOW_VALUE_MASK = big enough that trying to write past that window
63  *				would be a bug
64  */
65 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
66 				  uint32_t value)
67 {
68 
69 	if (!hal_soc->use_register_windowing ||
70 	    offset < MAX_UNWINDOWED_ADDRESS) {
71 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
72 	} else {
73 		qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
74 		hal_select_window(hal_soc, offset);
75 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
76 			  (offset & WINDOW_RANGE_MASK), value);
77 		qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
78 	}
79 }
80 
81 /**
82  * hal_write_address_32_mb - write a value to a register
83  *
84  */
85 static inline void hal_write_address_32_mb(struct hal_soc *hal_soc,
86 					   void __iomem *addr, uint32_t value)
87 {
88 	uint32_t offset;
89 
90 	if (!hal_soc->use_register_windowing)
91 		return qdf_iowrite32(addr, value);
92 
93 	offset = addr - hal_soc->dev_base_addr;
94 	hal_write32_mb(hal_soc, offset, value);
95 }
96 
97 static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
98 {
99 	uint32_t ret;
100 
101 	if (!hal_soc->use_register_windowing ||
102 	    offset < MAX_UNWINDOWED_ADDRESS) {
103 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
104 	}
105 
106 	qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
107 	hal_select_window(hal_soc, offset);
108 	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
109 		       (offset & WINDOW_RANGE_MASK));
110 	qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
111 
112 	return ret;
113 }
114 
#include "hif_io32.h"

/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle
 *		 NULL on failure
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
extern void *hal_attach(void *hif_handle, qdf_device_t qdf_dev);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF detach
 *
 */
extern void hal_detach(void *hal_soc);
138 
/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	REO_DST,
	REO_EXCEPTION,
	REO_REINJECT,
	REO_CMD,
	REO_STATUS,
	TCL_DATA,
	TCL_CMD,
	TCL_STATUS,
	CE_SRC,
	CE_DST,
	CE_DST_STATUS,
	WBM_IDLE_LINK,
	SW2WBM_RELEASE,
	WBM2SW_RELEASE,
	RXDMA_BUF,
	RXDMA_DST,
	RXDMA_MONITOR_BUF,
	RXDMA_MONITOR_STATUS,
	RXDMA_MONITOR_DST,
	RXDMA_MONITOR_DESC,
	DIR_BUF_RX_DMA_SRC,
#ifdef WLAN_FEATURE_CIF_CFR
	WIFI_POS_SRC,
#endif
	MAX_RING_TYPES
};

/* Ring whose head/tail pointer updates go through FW shared memory
 * (see hal_srng_access_end_unlocked)
 */
#define HAL_SRNG_LMAC_RING 0x80000000
/* SRNG flags passed in hal_srng_params.flags */
#define HAL_SRNG_MSI_SWAP				0x00000008
#define HAL_SRNG_RING_PTR_SWAP			0x00000010
#define HAL_SRNG_DATA_TLV_SWAP			0x00000020
#define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
#define HAL_SRNG_MSI_INTR				0x00020000

/* PN size selector values */
#define PN_SIZE_24 0
#define PN_SIZE_48 1
#define PN_SIZE_128 2
179 
/**
 * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
 * used by callers for calculating the size of memory to be allocated before
 * calling hal_srng_setup to setup the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: size of one ring entry in bytes
 */
extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);

/**
 * hal_srng_max_entries - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);

/**
 * hal_srng_dump - Dump ring status
 * @srng: hal srng pointer
 */
void hal_srng_dump(struct hal_srng *srng);

/**
 * hal_srng_get_dir - Returns the direction of the ring
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Ring direction
 */
enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
214 
/* HAL memory information */
struct hal_mem_info {
	/* dev base virtual addr */
	void *dev_base_addr;
	/* dev base physical addr */
	void *dev_base_paddr;
	/* Remote virtual pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_vaddr;
	/* Remote physical pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_paddr;
	/* Shared memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_vaddr;
	/* Shared physical memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_paddr;
};
230 
/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold - in micro seconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold - in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags (HAL_SRNG_* bits defined above) */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
	/* Source or Destination ring */
	enum hal_srng_dir ring_dir;
	/* Size of ring entry */
	uint32_t entry_size;
	/* hw register base address */
	void *hwreg_base[MAX_SRNG_REG_GROUPS];
};
264 
/* hal_construct_shadow_config() - initialize the shadow registers for dp rings
 * @hal_soc: hal handle
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);

/* hal_set_one_shadow_config() - add a config for the specified ring
 * @hal_soc: hal handle
 * @ring_type: ring type
 * @ring_num: ring num
 *
 * The ring type and ring num uniquely specify the ring.  After this call,
 * the hp/tp will be added as the next entry in the shadow register
 * configuration table.  The hal code will use the shadow register address
 * in place of the hp/tp address.
 *
 * This function is exposed, so that the CE module can skip configuring shadow
 * registers for unused ring and rings assigned to the firmware.
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
					    int ring_num);
/**
 * hal_get_shadow_config() - retrieve the config table
 * @hal_soc: hal handle
 * @shadow_config: will point to the table after the call
 * @num_shadow_registers_configured: will contain the number of valid entries
 */
extern void hal_get_shadow_config(void *hal_soc,
				  struct pld_shadow_reg_v2_cfg **shadow_config,
				  int *num_shadow_registers_configured);
/**
 * hal_srng_setup - Initialize HW SRNG ring.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of
 *		same type (starting from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
 * structure. Ring base address should be 8 byte aligned and size of each ring
 * entry should be queried using the API hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params);
319 
/* Remapping ids of REO rings */
#define REO_REMAP_TCL 0
#define REO_REMAP_SW1 1
#define REO_REMAP_SW2 2
#define REO_REMAP_SW3 3
#define REO_REMAP_SW4 4
#define REO_REMAP_RELEASE 5
#define REO_REMAP_FW 6
#define REO_REMAP_UNUSED 7

/*
 * currently this macro only works for IX0 since all the rings we are remapping
 * can be remapped from HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
 */
#define HAL_REO_REMAP_VAL(_ORIGINAL_DEST, _NEW_DEST) \
	HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST)
/* allow the destination macros to be expanded; shifts _NEW_DEST into the
 * bit field of _ORIGINAL_DEST within the IX0 register
 */
#define HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST) \
	(_NEW_DEST << \
	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
	  _ORIGINAL_DEST ## _SHFT))
341 
/**
 * hal_reo_remap_IX0 - Remap REO ring destination
 * @hal: HAL SOC handle
 * @remap_val: Remap value (can be built with HAL_REO_REMAP_VAL)
 */
extern void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val);

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest SRNG head pointer
 * @sring: sring pointer
 * @paddr: physical address
 */
extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);

/**
 * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
 * @srng: sring pointer
 * @vaddr: virtual address
 */
extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr);

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_srng: Opaque HAL SRNG pointer
 */
extern void hal_srng_cleanup(void *hal_soc, void *hal_srng);
369 
370 static inline bool hal_srng_initialized(void *hal_ring)
371 {
372 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
373 
374 	return !!srng->initialized;
375 }
376 
377 /**
378  * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
379  * hal_srng_access_start if locked access is required
380  *
381  * @hal_soc: Opaque HAL SOC handle
382  * @hal_ring: Ring pointer (Source or Destination ring)
383  *
384  * Return: 0 on success; error on failire
385  */
386 static inline int hal_srng_access_start_unlocked(void *hal_soc, void *hal_ring)
387 {
388 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
389 
390 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
391 		srng->u.src_ring.cached_tp =
392 			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
393 	else
394 		srng->u.dst_ring.cached_hp =
395 			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
396 
397 	return 0;
398 }
399 
400 /**
401  * hal_srng_access_start - Start (locked) ring access
402  *
403  * @hal_soc: Opaque HAL SOC handle
404  * @hal_ring: Ring pointer (Source or Destination ring)
405  *
406  * Return: 0 on success; error on failire
407  */
408 static inline int hal_srng_access_start(void *hal_soc, void *hal_ring)
409 {
410 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
411 
412 	SRNG_LOCK(&(srng->lock));
413 
414 	return hal_srng_access_start_unlocked(hal_soc, hal_ring);
415 }
416 
417 /**
418  * hal_srng_dst_get_next - Get next entry from a destination ring and move
419  * cached tail pointer
420  *
421  * @hal_soc: Opaque HAL SOC handle
422  * @hal_ring: Destination ring pointer
423  *
424  * Return: Opaque pointer for next ring entry; NULL on failire
425  */
426 static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring)
427 {
428 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
429 	uint32_t *desc;
430 
431 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
432 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
433 		/* TODO: Using % is expensive, but we have to do this since
434 		 * size of some SRNG rings is not power of 2 (due to descriptor
435 		 * sizes). Need to create separate API for rings used
436 		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
437 		 * SW2RXDMA and CE rings)
438 		 */
439 		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
440 			srng->ring_size;
441 
442 		return (void *)desc;
443 	}
444 
445 	return NULL;
446 }
447 
448 /**
449  * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
450  * cached head pointer
451  *
452  * @hal_soc: Opaque HAL SOC handle
453  * @hal_ring: Destination ring pointer
454  *
455  * Return: Opaque pointer for next ring entry; NULL on failire
456  */
457 static inline void *hal_srng_dst_get_next_hp(void *hal_soc, void *hal_ring)
458 {
459 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
460 	uint32_t *desc;
461 	/* TODO: Using % is expensive, but we have to do this since
462 	 * size of some SRNG rings is not power of 2 (due to descriptor
463 	 * sizes). Need to create separate API for rings used
464 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
465 	 * SW2RXDMA and CE rings)
466 	 */
467 	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
468 		srng->ring_size;
469 
470 	if (next_hp != srng->u.dst_ring.tp) {
471 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
472 		srng->u.dst_ring.cached_hp = next_hp;
473 		return (void *)desc;
474 	}
475 
476 	return NULL;
477 }
478 
479 /**
480  * hal_srng_dst_peek - Get next entry from a ring without moving tail pointer.
481  * hal_srng_dst_get_next should be called subsequently to move the tail pointer
482  * TODO: See if we need an optimized version of get_next that doesn't check for
483  * loop_cnt
484  *
485  * @hal_soc: Opaque HAL SOC handle
486  * @hal_ring: Destination ring pointer
487  *
488  * Return: Opaque pointer for next ring entry; NULL on failire
489  */
490 static inline void *hal_srng_dst_peek(void *hal_soc, void *hal_ring)
491 {
492 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
493 
494 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
495 		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
496 
497 	return NULL;
498 }
499 
500 /**
501  * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
502  * by SW) in destination ring
503  *
504  * @hal_soc: Opaque HAL SOC handle
505  * @hal_ring: Destination ring pointer
506  * @sync_hw_ptr: Sync cached head pointer with HW
507  *
508  */
509 static inline uint32_t hal_srng_dst_num_valid(void *hal_soc, void *hal_ring,
510 	int sync_hw_ptr)
511 {
512 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
513 	uint32_t hp;
514 	uint32_t tp = srng->u.dst_ring.tp;
515 
516 	if (sync_hw_ptr) {
517 		hp = *(srng->u.dst_ring.hp_addr);
518 		srng->u.dst_ring.cached_hp = hp;
519 	} else {
520 		hp = srng->u.dst_ring.cached_hp;
521 	}
522 
523 	if (hp >= tp)
524 		return (hp - tp) / srng->entry_size;
525 	else
526 		return (srng->ring_size - tp + hp) / srng->entry_size;
527 }
528 
529 /**
530  * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
531  * pointer. This can be used to release any buffers associated with completed
532  * ring entries. Note that this should not be used for posting new descriptor
533  * entries. Posting of new entries should be done only using
534  * hal_srng_src_get_next_reaped when this function is used for reaping.
535  *
536  * @hal_soc: Opaque HAL SOC handle
537  * @hal_ring: Source ring pointer
538  *
539  * Return: Opaque pointer for next ring entry; NULL on failire
540  */
541 static inline void *hal_srng_src_reap_next(void *hal_soc, void *hal_ring)
542 {
543 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
544 	uint32_t *desc;
545 
546 	/* TODO: Using % is expensive, but we have to do this since
547 	 * size of some SRNG rings is not power of 2 (due to descriptor
548 	 * sizes). Need to create separate API for rings used
549 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
550 	 * SW2RXDMA and CE rings)
551 	 */
552 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
553 		srng->ring_size;
554 
555 	if (next_reap_hp != srng->u.src_ring.cached_tp) {
556 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
557 		srng->u.src_ring.reap_hp = next_reap_hp;
558 		return (void *)desc;
559 	}
560 
561 	return NULL;
562 }
563 
564 /**
565  * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
566  * already reaped using hal_srng_src_reap_next, for posting new entries to
567  * the ring
568  *
569  * @hal_soc: Opaque HAL SOC handle
570  * @hal_ring: Source ring pointer
571  *
572  * Return: Opaque pointer for next (reaped) source ring entry; NULL on failire
573  */
574 static inline void *hal_srng_src_get_next_reaped(void *hal_soc, void *hal_ring)
575 {
576 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
577 	uint32_t *desc;
578 
579 	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
580 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
581 		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
582 			srng->ring_size;
583 
584 		return (void *)desc;
585 	}
586 
587 	return NULL;
588 }
589 
590 /**
591  * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
592  * move reap pointer. This API is used in detach path to release any buffers
593  * associated with ring entries which are pending reap.
594  *
595  * @hal_soc: Opaque HAL SOC handle
596  * @hal_ring: Source ring pointer
597  *
598  * Return: Opaque pointer for next ring entry; NULL on failire
599  */
600 static inline void *hal_srng_src_pending_reap_next(void *hal_soc, void *hal_ring)
601 {
602 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
603 	uint32_t *desc;
604 
605 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
606 		srng->ring_size;
607 
608 	if (next_reap_hp != srng->u.src_ring.hp) {
609 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
610 		srng->u.src_ring.reap_hp = next_reap_hp;
611 		return (void *)desc;
612 	}
613 
614 	return NULL;
615 }
616 
617 /**
618  * hal_srng_src_done_val -
619  *
620  * @hal_soc: Opaque HAL SOC handle
621  * @hal_ring: Source ring pointer
622  *
623  * Return: Opaque pointer for next ring entry; NULL on failire
624  */
625 static inline uint32_t hal_srng_src_done_val(void *hal_soc, void *hal_ring)
626 {
627 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
628 	/* TODO: Using % is expensive, but we have to do this since
629 	 * size of some SRNG rings is not power of 2 (due to descriptor
630 	 * sizes). Need to create separate API for rings used
631 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
632 	 * SW2RXDMA and CE rings)
633 	 */
634 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
635 		srng->ring_size;
636 
637 	if (next_reap_hp == srng->u.src_ring.cached_tp)
638 		return 0;
639 
640 	if (srng->u.src_ring.cached_tp > next_reap_hp)
641 		return (srng->u.src_ring.cached_tp - next_reap_hp) /
642 			srng->entry_size;
643 	else
644 		return ((srng->ring_size - next_reap_hp) +
645 			srng->u.src_ring.cached_tp) / srng->entry_size;
646 }
647 
648 /**
649  * hal_api_get_tphp - Get head and tail pointer location for any ring
650  * @hal_soc: Opaque HAL SOC handle
651  * @hal_ring: Source ring pointer
652  * @tailp: Tail Pointer
653  * @headp: Head Pointer
654  *
655  * Return: Update tail pointer and head pointer in arguments.
656  */
657 static inline void hal_api_get_tphp(void *hal_soc, void *hal_ring,
658 	uint32_t *tailp, uint32_t *headp)
659 {
660 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
661 
662 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
663 		*headp = srng->u.src_ring.hp / srng->entry_size;
664 		*tailp = *(srng->u.src_ring.tp_addr) / srng->entry_size;
665 	} else {
666 		*tailp = srng->u.dst_ring.tp / srng->entry_size;
667 		*headp = *(srng->u.dst_ring.hp_addr) / srng->entry_size;
668 	}
669 }
670 
671 /**
672  * hal_srng_src_get_next - Get next entry from a source ring and move cached tail pointer
673  *
674  * @hal_soc: Opaque HAL SOC handle
675  * @hal_ring: Source ring pointer
676  *
677  * Return: Opaque pointer for next ring entry; NULL on failire
678  */
679 static inline void *hal_srng_src_get_next(void *hal_soc, void *hal_ring)
680 {
681 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
682 	uint32_t *desc;
683 	/* TODO: Using % is expensive, but we have to do this since
684 	 * size of some SRNG rings is not power of 2 (due to descriptor
685 	 * sizes). Need to create separate API for rings used
686 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
687 	 * SW2RXDMA and CE rings)
688 	 */
689 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
690 		srng->ring_size;
691 
692 	if (next_hp != srng->u.src_ring.cached_tp) {
693 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
694 		srng->u.src_ring.hp = next_hp;
695 		/* TODO: Since reap function is not used by all rings, we can
696 		 * remove the following update of reap_hp in this function
697 		 * if we can ensure that only hal_srng_src_get_next_reaped
698 		 * is used for the rings requiring reap functionality
699 		 */
700 		srng->u.src_ring.reap_hp = next_hp;
701 		return (void *)desc;
702 	}
703 
704 	return NULL;
705 }
706 
707 /**
708  * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
709  * hal_srng_src_get_next should be called subsequently to move the head pointer
710  *
711  * @hal_soc: Opaque HAL SOC handle
712  * @hal_ring: Source ring pointer
713  *
714  * Return: Opaque pointer for next ring entry; NULL on failire
715  */
716 static inline void *hal_srng_src_peek(void *hal_soc, void *hal_ring)
717 {
718 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
719 	uint32_t *desc;
720 
721 	/* TODO: Using % is expensive, but we have to do this since
722 	 * size of some SRNG rings is not power of 2 (due to descriptor
723 	 * sizes). Need to create separate API for rings used
724 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
725 	 * SW2RXDMA and CE rings)
726 	 */
727 	if (((srng->u.src_ring.hp + srng->entry_size) %
728 		srng->ring_size) != srng->u.src_ring.cached_tp) {
729 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
730 		return (void *)desc;
731 	}
732 
733 	return NULL;
734 }
735 
736 /**
737  * hal_srng_src_num_avail - Returns number of available entries in src ring
738  *
739  * @hal_soc: Opaque HAL SOC handle
740  * @hal_ring: Source ring pointer
741  * @sync_hw_ptr: Sync cached tail pointer with HW
742  *
743  */
744 static inline uint32_t hal_srng_src_num_avail(void *hal_soc,
745 	void *hal_ring, int sync_hw_ptr)
746 {
747 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
748 	uint32_t tp;
749 	uint32_t hp = srng->u.src_ring.hp;
750 
751 	if (sync_hw_ptr) {
752 		tp = *(srng->u.src_ring.tp_addr);
753 		srng->u.src_ring.cached_tp = tp;
754 	} else {
755 		tp = srng->u.src_ring.cached_tp;
756 	}
757 
758 	if (tp > hp)
759 		return ((tp - hp) / srng->entry_size) - 1;
760 	else
761 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
762 }
763 
764 /**
765  * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
766  * ring head/tail pointers to HW.
767  * This should be used only if hal_srng_access_start_unlocked to start ring
768  * access
769  *
770  * @hal_soc: Opaque HAL SOC handle
771  * @hal_ring: Ring pointer (Source or Destination ring)
772  *
773  * Return: 0 on success; error on failire
774  */
775 static inline void hal_srng_access_end_unlocked(void *hal_soc, void *hal_ring)
776 {
777 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
778 
779 	/* TODO: See if we need a write memory barrier here */
780 	if (srng->flags & HAL_SRNG_LMAC_RING) {
781 		/* For LMAC rings, ring pointer updates are done through FW and
782 		 * hence written to a shared memory location that is read by FW
783 		 */
784 		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
785 			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
786 		} else {
787 			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
788 		}
789 	} else {
790 		if (srng->ring_dir == HAL_SRNG_SRC_RING)
791 			hal_write_address_32_mb(hal_soc,
792 				srng->u.src_ring.hp_addr,
793 				srng->u.src_ring.hp);
794 		else
795 			hal_write_address_32_mb(hal_soc,
796 				srng->u.dst_ring.tp_addr,
797 				srng->u.dst_ring.tp);
798 	}
799 }
800 
801 /**
802  * hal_srng_access_end - Unlock ring access and update cached ring head/tail
803  * pointers to HW
804  * This should be used only if hal_srng_access_start to start ring access
805  *
806  * @hal_soc: Opaque HAL SOC handle
807  * @hal_ring: Ring pointer (Source or Destination ring)
808  *
809  * Return: 0 on success; error on failire
810  */
811 static inline void hal_srng_access_end(void *hal_soc, void *hal_ring)
812 {
813 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
814 
815 	hal_srng_access_end_unlocked(hal_soc, hal_ring);
816 	SRNG_UNLOCK(&(srng->lock));
817 }
818 
819 /**
820  * hal_srng_access_end_reap - Unlock ring access
821  * This should be used only if hal_srng_access_start to start ring access
822  * and should be used only while reaping SRC ring completions
823  *
824  * @hal_soc: Opaque HAL SOC handle
825  * @hal_ring: Ring pointer (Source or Destination ring)
826  *
827  * Return: 0 on success; error on failire
828  */
829 static inline void hal_srng_access_end_reap(void *hal_soc, void *hal_ring)
830 {
831 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
832 
833 	SRNG_UNLOCK(&(srng->lock));
834 }
835 
/* TODO: Check if the following definitions is available in HW headers */
#define WBM_IDLE_SCATTER_BUF_SIZE 32704
#define NUM_MPDUS_PER_LINK_DESC 6
#define NUM_MSDUS_PER_LINK_DESC 7
/* Required alignment for REO queue descriptors, in bytes */
#define REO_QUEUE_DESC_ALIGN 128

/* Required alignment for link descriptors, in bytes */
#define LINK_DESC_ALIGN 128

#define ADDRESS_MATCH_TAG_VAL 0x5
/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
 * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
 */
#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12

/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
 * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
 * should be specified in 16 word units. But the number of bits defined for
 * this field in HW header files is 5.
 */
#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
856 
857 
/**
 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
 * in an idle list
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: scatter buffer size in bytes (WBM_IDLE_SCATTER_BUF_SIZE)
 */
static inline uint32_t hal_idle_list_scatter_buf_size(void *hal_soc)
{
	return WBM_IDLE_SCATTER_BUF_SIZE;
}
869 
/**
 * hal_get_link_desc_size - Get the size of each link descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: link descriptor size from the target-specific ops table.
 * NOTE(review): the error paths return -EINVAL through a uint32_t return
 * type, which a caller sees as a very large positive value — confirm that
 * callers only reach this after the QDF_BUG(0) assertion path.
 */
static inline uint32_t hal_get_link_desc_size(struct hal_soc *hal_soc)
{
	/* Guard against a NULL soc or missing ops table */
	if (!hal_soc || !hal_soc->ops) {
		qdf_print("Error: Invalid ops\n");
		QDF_BUG(0);
		return -EINVAL;
	}
	/* Guard against a target that did not populate this op */
	if (!hal_soc->ops->hal_get_link_desc_size) {
		qdf_print("Error: Invalid function pointer\n");
		QDF_BUG(0);
		return -EINVAL;
	}
	return hal_soc->ops->hal_get_link_desc_size();
}
890 
/**
 * hal_get_link_desc_align - Get the required start address alignment for
 * link descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: required alignment in bytes (LINK_DESC_ALIGN)
 */
static inline uint32_t hal_get_link_desc_align(void *hal_soc)
{
	return LINK_DESC_ALIGN;
}
902 
/**
 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: NUM_MPDUS_PER_LINK_DESC
 */
static inline uint32_t hal_num_mpdus_per_link_desc(void *hal_soc)
{
	return NUM_MPDUS_PER_LINK_DESC;
}
913 
/**
 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: NUM_MSDUS_PER_LINK_DESC
 */
static inline uint32_t hal_num_msdus_per_link_desc(void *hal_soc)
{
	return NUM_MSDUS_PER_LINK_DESC;
}
924 
/**
 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
 * descriptor can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: NUM_MPDU_LINKS_PER_QUEUE_DESC
 */
static inline uint32_t hal_num_mpdu_links_per_queue_desc(void *hal_soc)
{
	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
}
936 
/**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that fit in the given buffer size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_buf_size: Size of scatter buffer
 *
 * Return: number of WBM idle-link entries per scatter buffer, after
 * reserving WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE bytes for the next pointer
 */
static inline uint32_t hal_idle_scatter_buf_num_entries(void *hal_soc,
	uint32_t scatter_buf_size)
{
	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
		hal_srng_get_entrysize(hal_soc, WBM_IDLE_LINK);
}
951 
952 /**
953  * hal_idle_list_num_scatter_bufs - Get the number of sctater buffer
954  * each given buffer size
955  *
956  * @hal_soc: Opaque HAL SOC handle
957  * @total_mem: size of memory to be scattered
958  * @scatter_buf_size: Size of scatter buffer
959  *
960  */
961 static inline uint32_t hal_idle_list_num_scatter_bufs(void *hal_soc,
962 	uint32_t total_mem, uint32_t scatter_buf_size)
963 {
964 	uint8_t rem = (total_mem % (scatter_buf_size -
965 			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
966 
967 	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
968 				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
969 
970 	return num_scatter_bufs;
971 }
972 
973 /**
 * hal_setup_link_idle_list - Setup scattered idle list using the buffer list
 * provided
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_bufs_base_paddr: Array of physical base addresses
 * @scatter_bufs_base_vaddr: Array of virtual base addresses
 * @num_scatter_bufs: Number of scatter buffers in the above lists
 * @scatter_buf_size: Size of each scatter buffer
 * @last_buf_end_offset: Offset to the last entry
 * @num_entries: Total entries of all scatter bufs
 *
985  */
986 extern void hal_setup_link_idle_list(void *hal_soc,
987 	qdf_dma_addr_t scatter_bufs_base_paddr[],
988 	void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
989 	uint32_t scatter_buf_size, uint32_t last_buf_end_offset,
990 	uint32_t num_entries);
991 
/**
 * struct hal_reo_params - REO parameters to be passed to hal_reo_setup
 * @rx_hash_enabled: rx hash steering enabled or disabled
 * @remap1: value to program into the REO remap 1 register
 * @remap2: value to program into the REO remap 2 register
 * @frag_dst_ring: fragment destination ring
 * @padding: explicit padding (keeps the layout stable across compilers)
 */
struct hal_reo_params {
	bool rx_hash_enabled;
	uint32_t remap1;
	uint32_t remap2;
	uint8_t frag_dst_ring;
	uint8_t padding[3];
};
1005 
1006 /**
1007  * hal_reo_setup - Initialize HW REO block
1008  *
1009  * @hal_soc: Opaque HAL SOC handle
1010  * @reo_params: parameters needed by HAL for REO config
1011  */
1012 extern void hal_reo_setup(void *hal_soc,
1013 	 struct hal_reo_params *reo_params);
1014 
/**
 * enum hal_pn_type - Packet number (PN) check type for a REO queue
 * descriptor (passed as @pn_type to hal_reo_qdesc_setup)
 * @HAL_PN_NONE: PN check disabled
 * @HAL_PN_WPA: WPA-style PN
 * @HAL_PN_WAPI_EVEN: WAPI PN, even key index
 * @HAL_PN_WAPI_UNEVEN: WAPI PN, uneven (odd) key index
 */
enum hal_pn_type {
	HAL_PN_NONE,
	HAL_PN_WPA,
	HAL_PN_WAPI_EVEN,
	HAL_PN_WAPI_UNEVEN,
};
1021 
1022 #define HAL_RX_MAX_BA_WINDOW 256
1023 
1024 /**
1025  * hal_get_reo_qdesc_align - Get start address alignment for reo
1026  * queue descriptors
1027  *
1028  * @hal_soc: Opaque HAL SOC handle
1029  *
1030  */
1031 static inline uint32_t hal_get_reo_qdesc_align(void *hal_soc)
1032 {
1033 	return REO_QUEUE_DESC_ALIGN;
1034 }
1035 
1036 /**
1037  * hal_reo_qdesc_setup - Setup HW REO queue descriptor
1038  *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID (traffic identifier) of the queue being set up
1040  * @ba_window_size: BlockAck window size
1041  * @start_seq: Starting sequence number
1042  * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
1043  * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
1044  * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
1045  *
1046  */
1047 extern void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
1048 	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
1049 	int pn_type);
1050 
1051 /**
1052  * hal_srng_get_hp_addr - Get head pointer physical address
1053  *
1054  * @hal_soc: Opaque HAL SOC handle
1055  * @hal_ring: Ring pointer (Source or Destination ring)
1056  *
1057  */
1058 static inline qdf_dma_addr_t hal_srng_get_hp_addr(void *hal_soc, void *hal_ring)
1059 {
1060 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
1061 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1062 
1063 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1064 		return hal->shadow_wrptr_mem_paddr +
1065 		  ((unsigned long)(srng->u.src_ring.hp_addr) -
1066 		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
1067 	} else {
1068 		return hal->shadow_rdptr_mem_paddr +
1069 		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
1070 		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
1071 	}
1072 }
1073 
1074 /**
1075  * hal_srng_get_tp_addr - Get tail pointer physical address
1076  *
1077  * @hal_soc: Opaque HAL SOC handle
1078  * @hal_ring: Ring pointer (Source or Destination ring)
1079  *
1080  */
1081 static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
1082 {
1083 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
1084 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1085 
1086 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1087 		return hal->shadow_rdptr_mem_paddr +
1088 			((unsigned long)(srng->u.src_ring.tp_addr) -
1089 			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
1090 	} else {
1091 		return hal->shadow_wrptr_mem_paddr +
1092 			((unsigned long)(srng->u.dst_ring.tp_addr) -
1093 			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
1094 	}
1095 }
1096 
1097 /**
1098  * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
1099  *
1100  * @hal_soc: Opaque HAL SOC handle
1101  * @hal_ring: Ring pointer (Source or Destination ring)
1102  * @ring_params: SRNG parameters will be returned through this structure
1103  */
1104 extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
1105 	struct hal_srng_params *ring_params);
1106 
1107 /**
 * hal_get_meminfo - Retrieve hal memory base address
1109  *
1110  * @hal_soc: Opaque HAL SOC handle
1111  * @mem: pointer to structure to be updated with hal mem info
1112  */
1113 extern void hal_get_meminfo(void *hal_soc,struct hal_mem_info *mem );
1114 
1115 /**
1116  * hal_get_target_type - Return target type
1117  *
 * @hal: Opaque HAL SOC handle
1119  */
1120 uint32_t hal_get_target_type(struct hal_soc *hal);
#endif /* _HAL_API_H_ */
1122