xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/hal_api.h (revision a175314c51a4ce5cec2835cc8a8c7dc0c1810915)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are
6  * met:
7  *     * Redistributions of source code must retain the above copyright
8  *       notice, this list of conditions and the following disclaimer.
9  *     * Redistributions in binary form must reproduce the above
10  *       copyright notice, this list of conditions and the following
11  *       disclaimer in the documentation and/or other materials provided
12  *       with the distribution.
13  *     * Neither the name of The Linux Foundation nor the names of its
14  *       contributors may be used to endorse or promote products derived
15  *       from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
18  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
24  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
26  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
27  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #ifndef _HAL_API_H_
31 #define _HAL_API_H_
32 
33 #include "qdf_types.h"
34 #include "qdf_util.h"
35 #include "hal_internal.h"
36 #include "rx_msdu_link.h"
37 #include "rx_reo_queue.h"
38 #include "rx_reo_queue_ext.h"
39 
40 #define MAX_UNWINDOWED_ADDRESS 0x80000
41 #ifdef TARGET_TYPE_QCA6390
42 #define WINDOW_ENABLE_BIT 0x40000000
43 #else
44 #define WINDOW_ENABLE_BIT 0x80000000
45 #endif
46 #define WINDOW_REG_ADDRESS 0x310C
47 #define WINDOW_SHIFT 19
48 #define WINDOW_VALUE_MASK 0x3F
49 #define WINDOW_START MAX_UNWINDOWED_ADDRESS
50 #define WINDOW_RANGE_MASK 0x7FFFF
51 
52 static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
53 {
54 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
55 	if (window != hal_soc->register_window) {
56 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
57 			      WINDOW_ENABLE_BIT | window);
58 		hal_soc->register_window = window;
59 	}
60 }
61 
62 /**
63  * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1
64  * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
65  * note3: WINDOW_VALUE_MASK = big enough that trying to write past that window
66  *				would be a bug
67  */
68 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
69 				  uint32_t value)
70 {
71 
72 	if (!hal_soc->use_register_windowing ||
73 	    offset < MAX_UNWINDOWED_ADDRESS) {
74 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
75 	} else {
76 		qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
77 		hal_select_window(hal_soc, offset);
78 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
79 			  (offset & WINDOW_RANGE_MASK), value);
80 		qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
81 	}
82 }
83 
84 /**
85  * hal_write_address_32_mb - write a value to a register
86  *
87  */
88 static inline void hal_write_address_32_mb(struct hal_soc *hal_soc,
89 					   void __iomem *addr, uint32_t value)
90 {
91 	uint32_t offset;
92 
93 	if (!hal_soc->use_register_windowing)
94 		return qdf_iowrite32(addr, value);
95 
96 	offset = addr - hal_soc->dev_base_addr;
97 	hal_write32_mb(hal_soc, offset, value);
98 }
99 
100 static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
101 {
102 	uint32_t ret;
103 
104 	if (!hal_soc->use_register_windowing ||
105 	    offset < MAX_UNWINDOWED_ADDRESS) {
106 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
107 	}
108 
109 	qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
110 	hal_select_window(hal_soc, offset);
111 	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
112 		       (offset & WINDOW_RANGE_MASK));
113 	qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
114 
115 	return ret;
116 }
117 
118 #include "hif_io32.h"
119 
120 /**
121  * hal_attach - Initialize HAL layer
122  * @hif_handle: Opaque HIF handle
123  * @qdf_dev: QDF device
124  *
125  * Return: Opaque HAL SOC handle
126  *		 NULL on failure (if given ring is not available)
127  *
128  * This function should be called as part of HIF initialization (for accessing
129  * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
130  */
131 extern void *hal_attach(void *hif_handle, qdf_device_t qdf_dev);
132 
133 /**
134  * hal_detach - Detach HAL layer
135  * @hal_soc: HAL SOC handle
136  *
137  * This function should be called as part of HIF detach
138  *
139  */
140 extern void hal_detach(void *hal_soc);
141 
/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	REO_DST,
	REO_EXCEPTION,
	REO_REINJECT,
	REO_CMD,
	REO_STATUS,
	TCL_DATA,
	TCL_CMD,
	TCL_STATUS,
	CE_SRC,
	CE_DST,
	CE_DST_STATUS,
	WBM_IDLE_LINK,
	SW2WBM_RELEASE,
	WBM2SW_RELEASE,
	RXDMA_BUF,
	RXDMA_DST,
	RXDMA_MONITOR_BUF,
	RXDMA_MONITOR_STATUS,
	RXDMA_MONITOR_DST,
	RXDMA_MONITOR_DESC,
	DIR_BUF_RX_DMA_SRC,
#ifdef WLAN_FEATURE_CIF_CFR
	WIFI_POS_SRC,
#endif
	MAX_RING_TYPES
};
170 
171 /* SRNG flags passed in hal_srng_params.flags */
172 #define HAL_SRNG_MSI_SWAP				0x00000008
173 #define HAL_SRNG_RING_PTR_SWAP			0x00000010
174 #define HAL_SRNG_DATA_TLV_SWAP			0x00000020
175 #define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
176 #define HAL_SRNG_MSI_INTR				0x00020000
177 
178 #define PN_SIZE_24 0
179 #define PN_SIZE_48 1
180 #define PN_SIZE_128 2
181 
182 /**
183  * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
184  * used by callers for calculating the size of memory to be allocated before
185  * calling hal_srng_setup to setup the ring
186  *
187  * @hal_soc: Opaque HAL SOC handle
188  * @ring_type: one of the types from hal_ring_type
189  *
190  */
191 extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
192 
193 /**
194  * hal_srng_max_entries - Returns maximum possible number of ring entries
195  * @hal_soc: Opaque HAL SOC handle
196  * @ring_type: one of the types from hal_ring_type
197  *
198  * Return: Maximum number of entries for the given ring_type
199  */
200 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
201 
202 /**
203  * hal_srng_dump - Dump ring status
204  * @srng: hal srng pointer
205  */
206 void hal_srng_dump(struct hal_srng *srng);
207 
208 /**
209  * hal_srng_get_dir - Returns the direction of the ring
210  * @hal_soc: Opaque HAL SOC handle
211  * @ring_type: one of the types from hal_ring_type
212  *
213  * Return: Ring direction
214  */
215 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
216 
/* HAL memory information */
struct hal_mem_info {
	/* dev base virtual addr */
	void *dev_base_addr;
	/* dev base physical addr */
	void *dev_base_paddr;
	/* Remote virtual pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_vaddr;
	/* Remote physical pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_paddr;
	/* Shared memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_vaddr;
	/* Shared physical memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_paddr;
};
232 
233 /* SRNG parameters to be passed to hal_srng_setup */
234 struct hal_srng_params {
235 	/* Physical base address of the ring */
236 	qdf_dma_addr_t ring_base_paddr;
237 	/* Virtual base address of the ring */
238 	void *ring_base_vaddr;
239 	/* Number of entries in ring */
240 	uint32_t num_entries;
241 	/* max transfer length */
242 	uint16_t max_buffer_length;
243 	/* MSI Address */
244 	qdf_dma_addr_t msi_addr;
245 	/* MSI data */
246 	uint32_t msi_data;
247 	/* Interrupt timer threshold – in micro seconds */
248 	uint32_t intr_timer_thres_us;
249 	/* Interrupt batch counter threshold – in number of ring entries */
250 	uint32_t intr_batch_cntr_thres_entries;
251 	/* Low threshold – in number of ring entries
252 	 * (valid for src rings only)
253 	 */
254 	uint32_t low_threshold;
255 	/* Misc flags */
256 	uint32_t flags;
257 	/* Unique ring id */
258 	uint8_t ring_id;
259 	/* Source or Destination ring */
260 	enum hal_srng_dir ring_dir;
261 	/* Size of ring entry */
262 	uint32_t entry_size;
263 	/* hw register base address */
264 	void *hwreg_base[MAX_SRNG_REG_GROUPS];
265 };
266 
267 /* hal_construct_shadow_config() - initialize the shadow registers for dp rings
268  * @hal_soc: hal handle
269  *
270  * Return: QDF_STATUS_OK on success
271  */
272 extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);
273 
274 /* hal_set_one_shadow_config() - add a config for the specified ring
275  * @hal_soc: hal handle
276  * @ring_type: ring type
277  * @ring_num: ring num
278  *
279  * The ring type and ring num uniquely specify the ring.  After this call,
280  * the hp/tp will be added as the next entry int the shadow register
281  * configuration table.  The hal code will use the shadow register address
282  * in place of the hp/tp address.
283  *
284  * This function is exposed, so that the CE module can skip configuring shadow
285  * registers for unused ring and rings assigned to the firmware.
286  *
287  * Return: QDF_STATUS_OK on success
288  */
289 extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
290 					    int ring_num);
291 /**
292  * hal_get_shadow_config() - retrieve the config table
293  * @hal_soc: hal handle
294  * @shadow_config: will point to the table after
295  * @num_shadow_registers_configured: will contain the number of valid entries
296  */
297 extern void hal_get_shadow_config(void *hal_soc,
298 				  struct pld_shadow_reg_v2_cfg **shadow_config,
299 				  int *num_shadow_registers_configured);
300 /**
301  * hal_srng_setup - Initialize HW SRNG ring.
302  *
303  * @hal_soc: Opaque HAL SOC handle
304  * @ring_type: one of the types from hal_ring_type
305  * @ring_num: Ring number if there are multiple rings of
306  *		same type (staring from 0)
307  * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
308  * @ring_params: SRNG ring params in hal_srng_params structure.
309 
310  * Callers are expected to allocate contiguous ring memory of size
311  * 'num_entries * entry_size' bytes and pass the physical and virtual base
312  * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
313  * structure. Ring base address should be 8 byte aligned and size of each ring
314  * entry should be queried using the API hal_srng_get_entrysize
315  *
316  * Return: Opaque pointer to ring on success
317  *		 NULL on failure (if given ring is not available)
318  */
319 extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
320 	int mac_id, struct hal_srng_params *ring_params);
321 
322 /* Remapping ids of REO rings */
323 #define REO_REMAP_TCL 0
324 #define REO_REMAP_SW1 1
325 #define REO_REMAP_SW2 2
326 #define REO_REMAP_SW3 3
327 #define REO_REMAP_SW4 4
328 #define REO_REMAP_RELEASE 5
329 #define REO_REMAP_FW 6
330 #define REO_REMAP_UNUSED 7
331 
332 /*
333  * currently this macro only works for IX0 since all the rings we are remapping
334  * can be remapped from HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
335  */
336 #define HAL_REO_REMAP_VAL(_ORIGINAL_DEST, _NEW_DEST) \
337 	HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST)
338 /* allow the destination macros to be expanded */
339 #define HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST) \
340 	(_NEW_DEST << \
341 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
342 	  _ORIGINAL_DEST ## _SHFT))
343 
344 /**
345  * hal_reo_remap_IX0 - Remap REO ring destination
346  * @hal: HAL SOC handle
347  * @remap_val: Remap value
348  */
349 extern void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val);
350 
351 /**
352  * hal_srng_set_hp_paddr() - Set physical address to dest SRNG head pointer
353  * @sring: sring pointer
354  * @paddr: physical address
355  */
356 extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);
357 
358 /**
359  * hal_srng_dst_init_hp() - Initilaize head pointer with cached head pointer
360  * @srng: sring pointer
361  * @vaddr: virtual address
362  */
363 extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr);
364 
365 /**
366  * hal_srng_cleanup - Deinitialize HW SRNG ring.
367  * @hal_soc: Opaque HAL SOC handle
368  * @hal_srng: Opaque HAL SRNG pointer
369  */
370 extern void hal_srng_cleanup(void *hal_soc, void *hal_srng);
371 
372 static inline bool hal_srng_initialized(void *hal_ring)
373 {
374 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
375 
376 	return !!srng->initialized;
377 }
378 
379 /**
380  * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
381  * hal_srng_access_start if locked access is required
382  *
383  * @hal_soc: Opaque HAL SOC handle
384  * @hal_ring: Ring pointer (Source or Destination ring)
385  *
386  * Return: 0 on success; error on failire
387  */
388 static inline int hal_srng_access_start_unlocked(void *hal_soc, void *hal_ring)
389 {
390 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
391 
392 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
393 		srng->u.src_ring.cached_tp =
394 			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
395 	else
396 		srng->u.dst_ring.cached_hp =
397 			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
398 
399 	return 0;
400 }
401 
402 /**
403  * hal_srng_access_start - Start (locked) ring access
404  *
405  * @hal_soc: Opaque HAL SOC handle
406  * @hal_ring: Ring pointer (Source or Destination ring)
407  *
408  * Return: 0 on success; error on failire
409  */
410 static inline int hal_srng_access_start(void *hal_soc, void *hal_ring)
411 {
412 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
413 
414 	SRNG_LOCK(&(srng->lock));
415 
416 	return hal_srng_access_start_unlocked(hal_soc, hal_ring);
417 }
418 
419 /**
420  * hal_srng_dst_get_next - Get next entry from a destination ring and move
421  * cached tail pointer
422  *
423  * @hal_soc: Opaque HAL SOC handle
424  * @hal_ring: Destination ring pointer
425  *
426  * Return: Opaque pointer for next ring entry; NULL on failire
427  */
428 static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring)
429 {
430 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
431 	uint32_t *desc;
432 
433 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
434 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
435 		/* TODO: Using % is expensive, but we have to do this since
436 		 * size of some SRNG rings is not power of 2 (due to descriptor
437 		 * sizes). Need to create separate API for rings used
438 		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
439 		 * SW2RXDMA and CE rings)
440 		 */
441 		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
442 			srng->ring_size;
443 
444 		return (void *)desc;
445 	}
446 
447 	return NULL;
448 }
449 
450 /**
451  * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
452  * cached head pointer
453  *
454  * @hal_soc: Opaque HAL SOC handle
455  * @hal_ring: Destination ring pointer
456  *
457  * Return: Opaque pointer for next ring entry; NULL on failire
458  */
459 static inline void *hal_srng_dst_get_next_hp(void *hal_soc, void *hal_ring)
460 {
461 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
462 	uint32_t *desc;
463 	/* TODO: Using % is expensive, but we have to do this since
464 	 * size of some SRNG rings is not power of 2 (due to descriptor
465 	 * sizes). Need to create separate API for rings used
466 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
467 	 * SW2RXDMA and CE rings)
468 	 */
469 	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
470 		srng->ring_size;
471 
472 	if (next_hp != srng->u.dst_ring.tp) {
473 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
474 		srng->u.dst_ring.cached_hp = next_hp;
475 		return (void *)desc;
476 	}
477 
478 	return NULL;
479 }
480 
481 /**
482  * hal_srng_dst_peek - Get next entry from a ring without moving tail pointer.
483  * hal_srng_dst_get_next should be called subsequently to move the tail pointer
484  * TODO: See if we need an optimized version of get_next that doesn't check for
485  * loop_cnt
486  *
487  * @hal_soc: Opaque HAL SOC handle
488  * @hal_ring: Destination ring pointer
489  *
490  * Return: Opaque pointer for next ring entry; NULL on failire
491  */
492 static inline void *hal_srng_dst_peek(void *hal_soc, void *hal_ring)
493 {
494 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
495 
496 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
497 		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
498 
499 	return NULL;
500 }
501 
502 /**
503  * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
504  * by SW) in destination ring
505  *
506  * @hal_soc: Opaque HAL SOC handle
507  * @hal_ring: Destination ring pointer
508  * @sync_hw_ptr: Sync cached head pointer with HW
509  *
510  */
511 static inline uint32_t hal_srng_dst_num_valid(void *hal_soc, void *hal_ring,
512 	int sync_hw_ptr)
513 {
514 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
515 	uint32 hp;
516 	uint32 tp = srng->u.dst_ring.tp;
517 
518 	if (sync_hw_ptr) {
519 		hp = *(srng->u.dst_ring.hp_addr);
520 		srng->u.dst_ring.cached_hp = hp;
521 	} else {
522 		hp = srng->u.dst_ring.cached_hp;
523 	}
524 
525 	if (hp >= tp)
526 		return (hp - tp) / srng->entry_size;
527 	else
528 		return (srng->ring_size - tp + hp) / srng->entry_size;
529 }
530 
531 /**
532  * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
533  * pointer. This can be used to release any buffers associated with completed
534  * ring entries. Note that this should not be used for posting new descriptor
535  * entries. Posting of new entries should be done only using
536  * hal_srng_src_get_next_reaped when this function is used for reaping.
537  *
538  * @hal_soc: Opaque HAL SOC handle
539  * @hal_ring: Source ring pointer
540  *
541  * Return: Opaque pointer for next ring entry; NULL on failire
542  */
543 static inline void *hal_srng_src_reap_next(void *hal_soc, void *hal_ring)
544 {
545 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
546 	uint32_t *desc;
547 
548 	/* TODO: Using % is expensive, but we have to do this since
549 	 * size of some SRNG rings is not power of 2 (due to descriptor
550 	 * sizes). Need to create separate API for rings used
551 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
552 	 * SW2RXDMA and CE rings)
553 	 */
554 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
555 		srng->ring_size;
556 
557 	if (next_reap_hp != srng->u.src_ring.cached_tp) {
558 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
559 		srng->u.src_ring.reap_hp = next_reap_hp;
560 		return (void *)desc;
561 	}
562 
563 	return NULL;
564 }
565 
566 /**
567  * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
568  * already reaped using hal_srng_src_reap_next, for posting new entries to
569  * the ring
570  *
571  * @hal_soc: Opaque HAL SOC handle
572  * @hal_ring: Source ring pointer
573  *
574  * Return: Opaque pointer for next (reaped) source ring entry; NULL on failire
575  */
576 static inline void *hal_srng_src_get_next_reaped(void *hal_soc, void *hal_ring)
577 {
578 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
579 	uint32_t *desc;
580 
581 	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
582 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
583 		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
584 			srng->ring_size;
585 
586 		return (void *)desc;
587 	}
588 
589 	return NULL;
590 }
591 
592 /**
593  * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
594  * move reap pointer. This API is used in detach path to release any buffers
595  * associated with ring entries which are pending reap.
596  *
597  * @hal_soc: Opaque HAL SOC handle
598  * @hal_ring: Source ring pointer
599  *
600  * Return: Opaque pointer for next ring entry; NULL on failire
601  */
602 static inline void *hal_srng_src_pending_reap_next(void *hal_soc, void *hal_ring)
603 {
604 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
605 	uint32_t *desc;
606 
607 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
608 		srng->ring_size;
609 
610 	if (next_reap_hp != srng->u.src_ring.hp) {
611 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
612 		srng->u.src_ring.reap_hp = next_reap_hp;
613 		return (void *)desc;
614 	}
615 
616 	return NULL;
617 }
618 
619 /**
620  * hal_srng_src_done_val -
621  *
622  * @hal_soc: Opaque HAL SOC handle
623  * @hal_ring: Source ring pointer
624  *
625  * Return: Opaque pointer for next ring entry; NULL on failire
626  */
627 static inline uint32_t hal_srng_src_done_val(void *hal_soc, void *hal_ring)
628 {
629 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
630 	/* TODO: Using % is expensive, but we have to do this since
631 	 * size of some SRNG rings is not power of 2 (due to descriptor
632 	 * sizes). Need to create separate API for rings used
633 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
634 	 * SW2RXDMA and CE rings)
635 	 */
636 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
637 		srng->ring_size;
638 
639 	if (next_reap_hp == srng->u.src_ring.cached_tp)
640 		return 0;
641 
642 	if (srng->u.src_ring.cached_tp > next_reap_hp)
643 		return (srng->u.src_ring.cached_tp - next_reap_hp) /
644 			srng->entry_size;
645 	else
646 		return ((srng->ring_size - next_reap_hp) +
647 			srng->u.src_ring.cached_tp) / srng->entry_size;
648 }
649 
650 /**
651  * hal_api_get_tphp - Get head and tail pointer location for any ring
652  * @hal_soc: Opaque HAL SOC handle
653  * @hal_ring: Source ring pointer
654  * @tailp: Tail Pointer
655  * @headp: Head Pointer
656  *
657  * Return: Update tail pointer and head pointer in arguments.
658  */
659 static inline void hal_api_get_tphp(void *hal_soc, void *hal_ring,
660 	uint32_t *tailp, uint32_t *headp)
661 {
662 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
663 
664 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
665 		*headp = srng->u.src_ring.hp / srng->entry_size;
666 		*tailp = *(srng->u.src_ring.tp_addr) / srng->entry_size;
667 	} else {
668 		*tailp = srng->u.dst_ring.tp / srng->entry_size;
669 		*headp = *(srng->u.dst_ring.hp_addr) / srng->entry_size;
670 	}
671 }
672 
673 /**
674  * hal_srng_src_get_next - Get next entry from a source ring and move cached tail pointer
675  *
676  * @hal_soc: Opaque HAL SOC handle
677  * @hal_ring: Source ring pointer
678  *
679  * Return: Opaque pointer for next ring entry; NULL on failire
680  */
681 static inline void *hal_srng_src_get_next(void *hal_soc, void *hal_ring)
682 {
683 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
684 	uint32_t *desc;
685 	/* TODO: Using % is expensive, but we have to do this since
686 	 * size of some SRNG rings is not power of 2 (due to descriptor
687 	 * sizes). Need to create separate API for rings used
688 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
689 	 * SW2RXDMA and CE rings)
690 	 */
691 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
692 		srng->ring_size;
693 
694 	if (next_hp != srng->u.src_ring.cached_tp) {
695 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
696 		srng->u.src_ring.hp = next_hp;
697 		/* TODO: Since reap function is not used by all rings, we can
698 		 * remove the following update of reap_hp in this function
699 		 * if we can ensure that only hal_srng_src_get_next_reaped
700 		 * is used for the rings requiring reap functionality
701 		 */
702 		srng->u.src_ring.reap_hp = next_hp;
703 		return (void *)desc;
704 	}
705 
706 	return NULL;
707 }
708 
709 /**
710  * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
711  * hal_srng_src_get_next should be called subsequently to move the head pointer
712  *
713  * @hal_soc: Opaque HAL SOC handle
714  * @hal_ring: Source ring pointer
715  *
716  * Return: Opaque pointer for next ring entry; NULL on failire
717  */
718 static inline void *hal_srng_src_peek(void *hal_soc, void *hal_ring)
719 {
720 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
721 	uint32_t *desc;
722 
723 	/* TODO: Using % is expensive, but we have to do this since
724 	 * size of some SRNG rings is not power of 2 (due to descriptor
725 	 * sizes). Need to create separate API for rings used
726 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
727 	 * SW2RXDMA and CE rings)
728 	 */
729 	if (((srng->u.src_ring.hp + srng->entry_size) %
730 		srng->ring_size) != srng->u.src_ring.cached_tp) {
731 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
732 		return (void *)desc;
733 	}
734 
735 	return NULL;
736 }
737 
738 /**
739  * hal_srng_src_num_avail - Returns number of available entries in src ring
740  *
741  * @hal_soc: Opaque HAL SOC handle
742  * @hal_ring: Source ring pointer
743  * @sync_hw_ptr: Sync cached tail pointer with HW
744  *
745  */
746 static inline uint32_t hal_srng_src_num_avail(void *hal_soc,
747 	void *hal_ring, int sync_hw_ptr)
748 {
749 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
750 	uint32 tp;
751 	uint32 hp = srng->u.src_ring.hp;
752 
753 	if (sync_hw_ptr) {
754 		tp = *(srng->u.src_ring.tp_addr);
755 		srng->u.src_ring.cached_tp = tp;
756 	} else {
757 		tp = srng->u.src_ring.cached_tp;
758 	}
759 
760 	if (tp > hp)
761 		return ((tp - hp) / srng->entry_size) - 1;
762 	else
763 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
764 }
765 
766 /**
767  * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
768  * ring head/tail pointers to HW.
769  * This should be used only if hal_srng_access_start_unlocked to start ring
770  * access
771  *
772  * @hal_soc: Opaque HAL SOC handle
773  * @hal_ring: Ring pointer (Source or Destination ring)
774  *
775  * Return: 0 on success; error on failire
776  */
777 static inline void hal_srng_access_end_unlocked(void *hal_soc, void *hal_ring)
778 {
779 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
780 
781 	/* TODO: See if we need a write memory barrier here */
782 	if (srng->flags & HAL_SRNG_LMAC_RING) {
783 		/* For LMAC rings, ring pointer updates are done through FW and
784 		 * hence written to a shared memory location that is read by FW
785 		 */
786 		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
787 			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
788 		} else {
789 			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
790 		}
791 	} else {
792 		if (srng->ring_dir == HAL_SRNG_SRC_RING)
793 			hal_write_address_32_mb(hal_soc,
794 				srng->u.src_ring.hp_addr,
795 				srng->u.src_ring.hp);
796 		else
797 			hal_write_address_32_mb(hal_soc,
798 				srng->u.dst_ring.tp_addr,
799 				srng->u.dst_ring.tp);
800 	}
801 }
802 
803 /**
804  * hal_srng_access_end - Unlock ring access and update cached ring head/tail
805  * pointers to HW
806  * This should be used only if hal_srng_access_start to start ring access
807  *
808  * @hal_soc: Opaque HAL SOC handle
809  * @hal_ring: Ring pointer (Source or Destination ring)
810  *
811  * Return: 0 on success; error on failire
812  */
813 static inline void hal_srng_access_end(void *hal_soc, void *hal_ring)
814 {
815 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
816 
817 	hal_srng_access_end_unlocked(hal_soc, hal_ring);
818 	SRNG_UNLOCK(&(srng->lock));
819 }
820 
821 /**
822  * hal_srng_access_end_reap - Unlock ring access
823  * This should be used only if hal_srng_access_start to start ring access
824  * and should be used only while reaping SRC ring completions
825  *
826  * @hal_soc: Opaque HAL SOC handle
827  * @hal_ring: Ring pointer (Source or Destination ring)
828  *
829  * Return: 0 on success; error on failire
830  */
831 static inline void hal_srng_access_end_reap(void *hal_soc, void *hal_ring)
832 {
833 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
834 
835 	SRNG_UNLOCK(&(srng->lock));
836 }
837 
838 /* TODO: Check if the following definitions is available in HW headers */
839 #define WBM_IDLE_DESC_LIST 1
840 #define WBM_IDLE_SCATTER_BUF_SIZE 32704
841 #define NUM_MPDUS_PER_LINK_DESC 6
842 #define NUM_MSDUS_PER_LINK_DESC 7
843 #define REO_QUEUE_DESC_ALIGN 128
844 
845 #define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2)
846 #define LINK_DESC_ALIGN 128
847 
848 #define ADDRESS_MATCH_TAG_VAL 0x5
849 /* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
850  * of TX_MPDU_QUEUE_EXT. We are defining a common average count here
851  */
852 #define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
853 
854 /* TODO: Check with HW team on the scatter buffer size supported. As per WBM
855  * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
856  * should be specified in 16 word units. But the number of bits defined for
857  * this field in HW header files is 5.
858  */
859 #define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
860 
861 /**
862  * hal_set_link_desc_addr - Setup link descriptor in a buffer_addr_info
863  * HW structure
864  *
865  * @desc: Descriptor entry (from WBM_IDLE_LINK ring)
866  * @cookie: SW cookie for the buffer/descriptor
867  * @link_desc_paddr: Physical address of link descriptor entry
868  *
869  */
870 static inline void hal_set_link_desc_addr(void *desc, uint32_t cookie,
871 	qdf_dma_addr_t link_desc_paddr)
872 {
873 	uint32_t *buf_addr = (uint32_t *)desc;
874 	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_0, BUFFER_ADDR_31_0,
875 		link_desc_paddr & 0xffffffff);
876 	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, BUFFER_ADDR_39_32,
877 		(uint64_t)link_desc_paddr >> 32);
878 	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, RETURN_BUFFER_MANAGER,
879 		WBM_IDLE_DESC_LIST);
880 	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, SW_BUFFER_COOKIE,
881 		cookie);
882 }
883 
884 /**
885  * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
886  * in an idle list
887  *
888  * @hal_soc: Opaque HAL SOC handle
889  *
890  */
891 static inline uint32_t hal_idle_list_scatter_buf_size(void *hal_soc)
892 {
893 	return WBM_IDLE_SCATTER_BUF_SIZE;
894 }
895 
896 /**
897  * hal_get_link_desc_size - Get the size of each link descriptor
898  *
899  * @hal_soc: Opaque HAL SOC handle
900  *
901  */
902 static inline uint32_t hal_get_link_desc_size(void *hal_soc)
903 {
904 	return LINK_DESC_SIZE;
905 }
906 
907 /**
908  * hal_get_link_desc_align - Get the required start address alignment for
909  * link descriptors
910  *
911  * @hal_soc: Opaque HAL SOC handle
912  *
913  */
914 static inline uint32_t hal_get_link_desc_align(void *hal_soc)
915 {
916 	return LINK_DESC_ALIGN;
917 }
918 
919 /**
920  * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
921  *
922  * @hal_soc: Opaque HAL SOC handle
923  *
924  */
925 static inline uint32_t hal_num_mpdus_per_link_desc(void *hal_soc)
926 {
927 	return NUM_MPDUS_PER_LINK_DESC;
928 }
929 
930 /**
931  * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
932  *
933  * @hal_soc: Opaque HAL SOC handle
934  *
935  */
936 static inline uint32_t hal_num_msdus_per_link_desc(void *hal_soc)
937 {
938 	return NUM_MSDUS_PER_LINK_DESC;
939 }
940 
941 /**
942  * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
943  * descriptor can hold
944  *
945  * @hal_soc: Opaque HAL SOC handle
946  *
947  */
948 static inline uint32_t hal_num_mpdu_links_per_queue_desc(void *hal_soc)
949 {
950 	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
951 }
952 
/**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that fit within the given scatter buffer size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_buf_size: Size of scatter buffer
 *
 * Return: number of WBM idle-link ring entries per scatter buffer,
 * excluding the space reserved for the next-scatter-buffer pointer
 */
static inline uint32_t hal_idle_scatter_buf_num_entries(void *hal_soc,
	uint32_t scatter_buf_size)
{
	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
		hal_srng_get_entrysize(hal_soc, WBM_IDLE_LINK);
}
967 
968 /**
969  * hal_idle_list_num_scatter_bufs - Get the number of sctater buffer
970  * each given buffer size
971  *
972  * @hal_soc: Opaque HAL SOC handle
973  * @total_mem: size of memory to be scattered
974  * @scatter_buf_size: Size of scatter buffer
975  *
976  */
977 static inline uint32_t hal_idle_list_num_scatter_bufs(void *hal_soc,
978 	uint32_t total_mem, uint32_t scatter_buf_size)
979 {
980 	uint8_t rem = (total_mem % (scatter_buf_size -
981 			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
982 
983 	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
984 				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
985 
986 	return num_scatter_bufs;
987 }
988 
/**
 * hal_setup_link_idle_list - Setup scattered idle list using the buffer list
 * provided
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_bufs_base_paddr: Array of physical base addresses
 * @scatter_bufs_base_vaddr: Array of virtual base addresses
 * @num_scatter_bufs: Number of scatter buffers in the above lists
 * @scatter_buf_size: Size of each scatter buffer
 * @last_buf_end_offset: Offset to the last entry
 * @num_entries: Total entries of all scatter bufs
 *
 */
extern void hal_setup_link_idle_list(void *hal_soc,
	qdf_dma_addr_t scatter_bufs_base_paddr[],
	void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
	uint32_t scatter_buf_size, uint32_t last_buf_end_offset,
	uint32_t num_entries);
1007 
/* REO parameters to be passed to hal_reo_setup */
struct hal_reo_params {
	/** rx hash steering enabled or disabled */
	bool rx_hash_enabled;
	/** value programmed into reo remap 1 register */
	uint32_t remap1;
	/** value programmed into reo remap 2 register */
	uint32_t remap2;
	/** fragment destination ring */
	uint8_t frag_dst_ring;
	/** padding to keep the structure size 4-byte aligned */
	uint8_t padding[3];
};
1021 
/**
 * hal_reo_setup - Initialize HW REO block
 *
 * @hal_soc: Opaque HAL SOC handle
 * @reo_params: parameters needed by HAL for REO config
 *
 * Return: none
 */
extern void hal_reo_setup(void *hal_soc,
	 struct hal_reo_params *reo_params);
1030 
/**
 * enum hal_pn_type - PN (packet number) check type for a REO queue
 * @HAL_PN_NONE: PN check disabled
 * @HAL_PN_WPA: WPA-style PN
 * @HAL_PN_WAPI_EVEN: WAPI PN with even initial value
 * @HAL_PN_WAPI_UNEVEN: WAPI PN with uneven (odd) initial value
 */
enum hal_pn_type {
	HAL_PN_NONE,
	HAL_PN_WPA,
	HAL_PN_WAPI_EVEN,
	HAL_PN_WAPI_UNEVEN,
};
1037 
1038 #define HAL_RX_MAX_BA_WINDOW 256
1039 /**
1040  * hal_get_reo_qdesc_size - Get size of reo queue descriptor
1041  *
1042  * @hal_soc: Opaque HAL SOC handle
1043  * @ba_window_size: BlockAck window size
1044  *
1045  */
1046 static inline uint32_t hal_get_reo_qdesc_size(void *hal_soc,
1047 	uint32_t ba_window_size)
1048 {
1049 	if (ba_window_size <= 1)
1050 		return sizeof(struct rx_reo_queue);
1051 
1052 	if (ba_window_size <= 105)
1053 		return sizeof(struct rx_reo_queue) +
1054 			sizeof(struct rx_reo_queue_ext);
1055 
1056 	if (ba_window_size <= 210)
1057 		return sizeof(struct rx_reo_queue) +
1058 			(2 * sizeof(struct rx_reo_queue_ext));
1059 
1060 	return sizeof(struct rx_reo_queue) +
1061 		(3 * sizeof(struct rx_reo_queue_ext));
1062 }
1063 
1064 /**
1065  * hal_get_reo_qdesc_align - Get start address alignment for reo
1066  * queue descriptors
1067  *
1068  * @hal_soc: Opaque HAL SOC handle
1069  *
1070  */
1071 static inline uint32_t hal_get_reo_qdesc_align(void *hal_soc)
1072 {
1073 	return REO_QUEUE_DESC_ALIGN;
1074 }
1075 
/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID of the REO queue being configured
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
 *
 */
extern void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
	int pn_type);
1090 
1091 /**
1092  * hal_srng_get_hp_addr - Get head pointer physical address
1093  *
1094  * @hal_soc: Opaque HAL SOC handle
1095  * @hal_ring: Ring pointer (Source or Destination ring)
1096  *
1097  */
1098 static inline qdf_dma_addr_t hal_srng_get_hp_addr(void *hal_soc, void *hal_ring)
1099 {
1100 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
1101 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1102 
1103 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1104 		return hal->shadow_wrptr_mem_paddr +
1105 		  ((unsigned long)(srng->u.src_ring.hp_addr) -
1106 		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
1107 	} else {
1108 		return hal->shadow_rdptr_mem_paddr +
1109 		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
1110 		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
1111 	}
1112 }
1113 
1114 /**
1115  * hal_srng_get_tp_addr - Get tail pointer physical address
1116  *
1117  * @hal_soc: Opaque HAL SOC handle
1118  * @hal_ring: Ring pointer (Source or Destination ring)
1119  *
1120  */
1121 static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
1122 {
1123 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
1124 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1125 
1126 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1127 		return hal->shadow_rdptr_mem_paddr +
1128 			((unsigned long)(srng->u.src_ring.tp_addr) -
1129 			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
1130 	} else {
1131 		return hal->shadow_wrptr_mem_paddr +
1132 			((unsigned long)(srng->u.dst_ring.tp_addr) -
1133 			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
1134 	}
1135 }
1136 
/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 *
 * Return: none
 */
extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
	struct hal_srng_params *ring_params);
1146 
1147 /**
1148  * hal_mem_info - Retrieve hal memory base address
1149  *
1150  * @hal_soc: Opaque HAL SOC handle
1151  * @mem: pointer to structure to be updated with hal mem info
1152  */
1153 extern void hal_get_meminfo(void *hal_soc,struct hal_mem_info *mem );
1154 #endif /* _HAL_APIH_ */
1155