xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/hal_api.h (revision dae10a5fbc53d54c53c4ba24fa018ad8b1e7c008)
1 /*
2  * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #ifndef _HAL_API_H_
20 #define _HAL_API_H_
21 
22 #include "qdf_types.h"
23 #include "qdf_util.h"
24 #include "hal_internal.h"
25 
26 #define MAX_UNWINDOWED_ADDRESS 0x80000
27 #ifdef QCA_WIFI_QCA6390
28 #define WINDOW_ENABLE_BIT 0x40000000
29 #else
30 #define WINDOW_ENABLE_BIT 0x80000000
31 #endif
32 #define WINDOW_REG_ADDRESS 0x310C
33 #define WINDOW_SHIFT 19
34 #define WINDOW_VALUE_MASK 0x3F
35 #define WINDOW_START MAX_UNWINDOWED_ADDRESS
36 #define WINDOW_RANGE_MASK 0x7FFFF
37 
38 static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
39 {
40 	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
41 	if (window != hal_soc->register_window) {
42 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
43 			      WINDOW_ENABLE_BIT | window);
44 		hal_soc->register_window = window;
45 	}
46 }
47 
48 /**
49  * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1
50  * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
51  * note3: WINDOW_VALUE_MASK = big enough that trying to write past that window
52  *				would be a bug
53  */
54 static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
55 				  uint32_t value)
56 {
57 
58 	if (!hal_soc->use_register_windowing ||
59 	    offset < MAX_UNWINDOWED_ADDRESS) {
60 		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
61 	} else {
62 		qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
63 		hal_select_window(hal_soc, offset);
64 		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
65 			  (offset & WINDOW_RANGE_MASK), value);
66 		qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
67 	}
68 }
69 
70 /**
71  * hal_write_address_32_mb - write a value to a register
72  *
73  */
74 static inline void hal_write_address_32_mb(struct hal_soc *hal_soc,
75 					   void __iomem *addr, uint32_t value)
76 {
77 	uint32_t offset;
78 
79 	if (!hal_soc->use_register_windowing)
80 		return qdf_iowrite32(addr, value);
81 
82 	offset = addr - hal_soc->dev_base_addr;
83 	hal_write32_mb(hal_soc, offset, value);
84 }
85 
86 static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
87 {
88 	uint32_t ret;
89 
90 	if (!hal_soc->use_register_windowing ||
91 	    offset < MAX_UNWINDOWED_ADDRESS) {
92 		return qdf_ioread32(hal_soc->dev_base_addr + offset);
93 	}
94 
95 	qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
96 	hal_select_window(hal_soc, offset);
97 	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
98 		       (offset & WINDOW_RANGE_MASK));
99 	qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
100 
101 	return ret;
102 }
103 
104 #include "hif_io32.h"
105 
106 /**
107  * hal_attach - Initialize HAL layer
108  * @hif_handle: Opaque HIF handle
109  * @qdf_dev: QDF device
110  *
111  * Return: Opaque HAL SOC handle
 *		 NULL on failure
113  *
114  * This function should be called as part of HIF initialization (for accessing
115  * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
116  */
117 extern void *hal_attach(void *hif_handle, qdf_device_t qdf_dev);
118 
119 /**
120  * hal_detach - Detach HAL layer
121  * @hal_soc: HAL SOC handle
122  *
123  * This function should be called as part of HIF detach
124  *
125  */
126 extern void hal_detach(void *hal_soc);
127 
/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	/* REO rings */
	REO_DST,
	REO_EXCEPTION,
	REO_REINJECT,
	REO_CMD,
	REO_STATUS,
	/* TCL rings */
	TCL_DATA,
	TCL_CMD,
	TCL_STATUS,
	/* Copy engine rings */
	CE_SRC,
	CE_DST,
	CE_DST_STATUS,
	/* WBM rings */
	WBM_IDLE_LINK,
	SW2WBM_RELEASE,
	WBM2SW_RELEASE,
	/* RXDMA rings */
	RXDMA_BUF,
	RXDMA_DST,
	RXDMA_MONITOR_BUF,
	RXDMA_MONITOR_STATUS,
	RXDMA_MONITOR_DST,
	RXDMA_MONITOR_DESC,
	DIR_BUF_RX_DMA_SRC,
#ifdef WLAN_FEATURE_CIF_CFR
	WIFI_POS_SRC,
#endif
	/* Number of ring types - must be last */
	MAX_RING_TYPES
};
156 
157 #define HAL_SRNG_LMAC_RING 0x80000000
158 /* SRNG flags passed in hal_srng_params.flags */
159 #define HAL_SRNG_MSI_SWAP				0x00000008
160 #define HAL_SRNG_RING_PTR_SWAP			0x00000010
161 #define HAL_SRNG_DATA_TLV_SWAP			0x00000020
162 #define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
163 #define HAL_SRNG_MSI_INTR				0x00020000
164 
165 #define PN_SIZE_24 0
166 #define PN_SIZE_48 1
167 #define PN_SIZE_128 2
168 
169 /**
170  * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
171  * used by callers for calculating the size of memory to be allocated before
172  * calling hal_srng_setup to setup the ring
173  *
174  * @hal_soc: Opaque HAL SOC handle
175  * @ring_type: one of the types from hal_ring_type
176  *
177  */
178 extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
179 
180 /**
181  * hal_srng_max_entries - Returns maximum possible number of ring entries
182  * @hal_soc: Opaque HAL SOC handle
183  * @ring_type: one of the types from hal_ring_type
184  *
185  * Return: Maximum number of entries for the given ring_type
186  */
187 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
188 
189 /**
190  * hal_srng_dump - Dump ring status
191  * @srng: hal srng pointer
192  */
193 void hal_srng_dump(struct hal_srng *srng);
194 
195 /**
196  * hal_srng_get_dir - Returns the direction of the ring
197  * @hal_soc: Opaque HAL SOC handle
198  * @ring_type: one of the types from hal_ring_type
199  *
200  * Return: Ring direction
201  */
202 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
203 
/* HAL memory information */
struct hal_mem_info {
	/* dev base virtual addr */
	void *dev_base_addr;
	/* dev base physical addr */
	void *dev_base_paddr;
	/* Remote virtual pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_vaddr;
	/* Remote physical pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_paddr;
	/* Shared memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_vaddr;
	/* Shared physical memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_paddr;
};
219 
/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold - in micro seconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold - in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
	/* Source or Destination ring */
	enum hal_srng_dir ring_dir;
	/* Size of ring entry */
	uint32_t entry_size;
	/* hw register base address */
	void *hwreg_base[MAX_SRNG_REG_GROUPS];
};
253 
254 /* hal_construct_shadow_config() - initialize the shadow registers for dp rings
255  * @hal_soc: hal handle
256  *
257  * Return: QDF_STATUS_OK on success
258  */
259 extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);
260 
261 /* hal_set_one_shadow_config() - add a config for the specified ring
262  * @hal_soc: hal handle
263  * @ring_type: ring type
264  * @ring_num: ring num
265  *
266  * The ring type and ring num uniquely specify the ring.  After this call,
267  * the hp/tp will be added as the next entry int the shadow register
268  * configuration table.  The hal code will use the shadow register address
269  * in place of the hp/tp address.
270  *
271  * This function is exposed, so that the CE module can skip configuring shadow
272  * registers for unused ring and rings assigned to the firmware.
273  *
274  * Return: QDF_STATUS_OK on success
275  */
276 extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
277 					    int ring_num);
278 /**
279  * hal_get_shadow_config() - retrieve the config table
280  * @hal_soc: hal handle
281  * @shadow_config: will point to the table after
282  * @num_shadow_registers_configured: will contain the number of valid entries
283  */
284 extern void hal_get_shadow_config(void *hal_soc,
285 				  struct pld_shadow_reg_v2_cfg **shadow_config,
286 				  int *num_shadow_registers_configured);
287 /**
288  * hal_srng_setup - Initialize HW SRNG ring.
289  *
290  * @hal_soc: Opaque HAL SOC handle
291  * @ring_type: one of the types from hal_ring_type
292  * @ring_num: Ring number if there are multiple rings of
293  *		same type (staring from 0)
294  * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
295  * @ring_params: SRNG ring params in hal_srng_params structure.
 *
297  * Callers are expected to allocate contiguous ring memory of size
298  * 'num_entries * entry_size' bytes and pass the physical and virtual base
299  * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
300  * structure. Ring base address should be 8 byte aligned and size of each ring
301  * entry should be queried using the API hal_srng_get_entrysize
302  *
303  * Return: Opaque pointer to ring on success
304  *		 NULL on failure (if given ring is not available)
305  */
306 extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
307 	int mac_id, struct hal_srng_params *ring_params);
308 
309 /* Remapping ids of REO rings */
310 #define REO_REMAP_TCL 0
311 #define REO_REMAP_SW1 1
312 #define REO_REMAP_SW2 2
313 #define REO_REMAP_SW3 3
314 #define REO_REMAP_SW4 4
315 #define REO_REMAP_RELEASE 5
316 #define REO_REMAP_FW 6
317 #define REO_REMAP_UNUSED 7
318 
319 /*
320  * currently this macro only works for IX0 since all the rings we are remapping
321  * can be remapped from HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
322  */
323 #define HAL_REO_REMAP_VAL(_ORIGINAL_DEST, _NEW_DEST) \
324 	HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST)
325 /* allow the destination macros to be expanded */
326 #define HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST) \
327 	(_NEW_DEST << \
328 	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
329 	  _ORIGINAL_DEST ## _SHFT))
330 
331 /**
332  * hal_reo_remap_IX0 - Remap REO ring destination
333  * @hal: HAL SOC handle
334  * @remap_val: Remap value
335  */
336 extern void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val);
337 
338 /**
339  * hal_srng_set_hp_paddr() - Set physical address to dest SRNG head pointer
340  * @sring: sring pointer
341  * @paddr: physical address
342  */
343 extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);
344 
345 /**
 * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
347  * @srng: sring pointer
348  * @vaddr: virtual address
349  */
350 extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr);
351 
352 /**
353  * hal_srng_cleanup - Deinitialize HW SRNG ring.
354  * @hal_soc: Opaque HAL SOC handle
355  * @hal_srng: Opaque HAL SRNG pointer
356  */
357 extern void hal_srng_cleanup(void *hal_soc, void *hal_srng);
358 
359 static inline bool hal_srng_initialized(void *hal_ring)
360 {
361 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
362 
363 	return !!srng->initialized;
364 }
365 
366 /**
367  * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
368  * hal_srng_access_start if locked access is required
369  *
370  * @hal_soc: Opaque HAL SOC handle
371  * @hal_ring: Ring pointer (Source or Destination ring)
372  *
373  * Return: 0 on success; error on failire
374  */
375 static inline int hal_srng_access_start_unlocked(void *hal_soc, void *hal_ring)
376 {
377 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
378 
379 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
380 		srng->u.src_ring.cached_tp =
381 			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
382 	else
383 		srng->u.dst_ring.cached_hp =
384 			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
385 
386 	return 0;
387 }
388 
389 /**
390  * hal_srng_access_start - Start (locked) ring access
391  *
392  * @hal_soc: Opaque HAL SOC handle
393  * @hal_ring: Ring pointer (Source or Destination ring)
394  *
395  * Return: 0 on success; error on failire
396  */
397 static inline int hal_srng_access_start(void *hal_soc, void *hal_ring)
398 {
399 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
400 
401 	if (qdf_unlikely(!hal_ring)) {
402 		qdf_print("Error: Invalid hal_ring\n");
403 		return -EINVAL;
404 	}
405 
406 	SRNG_LOCK(&(srng->lock));
407 
408 	return hal_srng_access_start_unlocked(hal_soc, hal_ring);
409 }
410 
411 /**
412  * hal_srng_dst_get_next - Get next entry from a destination ring and move
413  * cached tail pointer
414  *
415  * @hal_soc: Opaque HAL SOC handle
416  * @hal_ring: Destination ring pointer
417  *
418  * Return: Opaque pointer for next ring entry; NULL on failire
419  */
420 static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring)
421 {
422 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
423 	uint32_t *desc;
424 
425 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
426 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
427 		/* TODO: Using % is expensive, but we have to do this since
428 		 * size of some SRNG rings is not power of 2 (due to descriptor
429 		 * sizes). Need to create separate API for rings used
430 		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
431 		 * SW2RXDMA and CE rings)
432 		 */
433 		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
434 			srng->ring_size;
435 
436 		return (void *)desc;
437 	}
438 
439 	return NULL;
440 }
441 
442 /**
443  * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
444  * cached head pointer
445  *
446  * @hal_soc: Opaque HAL SOC handle
447  * @hal_ring: Destination ring pointer
448  *
449  * Return: Opaque pointer for next ring entry; NULL on failire
450  */
451 static inline void *hal_srng_dst_get_next_hp(void *hal_soc, void *hal_ring)
452 {
453 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
454 	uint32_t *desc;
455 	/* TODO: Using % is expensive, but we have to do this since
456 	 * size of some SRNG rings is not power of 2 (due to descriptor
457 	 * sizes). Need to create separate API for rings used
458 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
459 	 * SW2RXDMA and CE rings)
460 	 */
461 	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
462 		srng->ring_size;
463 
464 	if (next_hp != srng->u.dst_ring.tp) {
465 		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
466 		srng->u.dst_ring.cached_hp = next_hp;
467 		return (void *)desc;
468 	}
469 
470 	return NULL;
471 }
472 
473 /**
474  * hal_srng_dst_peek - Get next entry from a ring without moving tail pointer.
475  * hal_srng_dst_get_next should be called subsequently to move the tail pointer
476  * TODO: See if we need an optimized version of get_next that doesn't check for
477  * loop_cnt
478  *
479  * @hal_soc: Opaque HAL SOC handle
480  * @hal_ring: Destination ring pointer
481  *
482  * Return: Opaque pointer for next ring entry; NULL on failire
483  */
484 static inline void *hal_srng_dst_peek(void *hal_soc, void *hal_ring)
485 {
486 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
487 
488 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
489 		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
490 
491 	return NULL;
492 }
493 
494 /**
495  * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
496  * by SW) in destination ring
497  *
498  * @hal_soc: Opaque HAL SOC handle
499  * @hal_ring: Destination ring pointer
500  * @sync_hw_ptr: Sync cached head pointer with HW
501  *
502  */
503 static inline uint32_t hal_srng_dst_num_valid(void *hal_soc, void *hal_ring,
504 	int sync_hw_ptr)
505 {
506 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
507 	uint32_t hp;
508 	uint32_t tp = srng->u.dst_ring.tp;
509 
510 	if (sync_hw_ptr) {
511 		hp = *(srng->u.dst_ring.hp_addr);
512 		srng->u.dst_ring.cached_hp = hp;
513 	} else {
514 		hp = srng->u.dst_ring.cached_hp;
515 	}
516 
517 	if (hp >= tp)
518 		return (hp - tp) / srng->entry_size;
519 	else
520 		return (srng->ring_size - tp + hp) / srng->entry_size;
521 }
522 
523 /**
524  * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
525  * pointer. This can be used to release any buffers associated with completed
526  * ring entries. Note that this should not be used for posting new descriptor
527  * entries. Posting of new entries should be done only using
528  * hal_srng_src_get_next_reaped when this function is used for reaping.
529  *
530  * @hal_soc: Opaque HAL SOC handle
531  * @hal_ring: Source ring pointer
532  *
533  * Return: Opaque pointer for next ring entry; NULL on failire
534  */
535 static inline void *hal_srng_src_reap_next(void *hal_soc, void *hal_ring)
536 {
537 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
538 	uint32_t *desc;
539 
540 	/* TODO: Using % is expensive, but we have to do this since
541 	 * size of some SRNG rings is not power of 2 (due to descriptor
542 	 * sizes). Need to create separate API for rings used
543 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
544 	 * SW2RXDMA and CE rings)
545 	 */
546 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
547 		srng->ring_size;
548 
549 	if (next_reap_hp != srng->u.src_ring.cached_tp) {
550 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
551 		srng->u.src_ring.reap_hp = next_reap_hp;
552 		return (void *)desc;
553 	}
554 
555 	return NULL;
556 }
557 
558 /**
559  * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
560  * already reaped using hal_srng_src_reap_next, for posting new entries to
561  * the ring
562  *
563  * @hal_soc: Opaque HAL SOC handle
564  * @hal_ring: Source ring pointer
565  *
566  * Return: Opaque pointer for next (reaped) source ring entry; NULL on failire
567  */
568 static inline void *hal_srng_src_get_next_reaped(void *hal_soc, void *hal_ring)
569 {
570 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
571 	uint32_t *desc;
572 
573 	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
574 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
575 		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
576 			srng->ring_size;
577 
578 		return (void *)desc;
579 	}
580 
581 	return NULL;
582 }
583 
584 /**
585  * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
586  * move reap pointer. This API is used in detach path to release any buffers
587  * associated with ring entries which are pending reap.
588  *
589  * @hal_soc: Opaque HAL SOC handle
590  * @hal_ring: Source ring pointer
591  *
592  * Return: Opaque pointer for next ring entry; NULL on failire
593  */
594 static inline void *hal_srng_src_pending_reap_next(void *hal_soc, void *hal_ring)
595 {
596 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
597 	uint32_t *desc;
598 
599 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
600 		srng->ring_size;
601 
602 	if (next_reap_hp != srng->u.src_ring.hp) {
603 		desc = &(srng->ring_base_vaddr[next_reap_hp]);
604 		srng->u.src_ring.reap_hp = next_reap_hp;
605 		return (void *)desc;
606 	}
607 
608 	return NULL;
609 }
610 
611 /**
612  * hal_srng_src_done_val -
613  *
614  * @hal_soc: Opaque HAL SOC handle
615  * @hal_ring: Source ring pointer
616  *
617  * Return: Opaque pointer for next ring entry; NULL on failire
618  */
619 static inline uint32_t hal_srng_src_done_val(void *hal_soc, void *hal_ring)
620 {
621 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
622 	/* TODO: Using % is expensive, but we have to do this since
623 	 * size of some SRNG rings is not power of 2 (due to descriptor
624 	 * sizes). Need to create separate API for rings used
625 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
626 	 * SW2RXDMA and CE rings)
627 	 */
628 	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
629 		srng->ring_size;
630 
631 	if (next_reap_hp == srng->u.src_ring.cached_tp)
632 		return 0;
633 
634 	if (srng->u.src_ring.cached_tp > next_reap_hp)
635 		return (srng->u.src_ring.cached_tp - next_reap_hp) /
636 			srng->entry_size;
637 	else
638 		return ((srng->ring_size - next_reap_hp) +
639 			srng->u.src_ring.cached_tp) / srng->entry_size;
640 }
641 
642 /**
643  * hal_api_get_tphp - Get head and tail pointer location for any ring
644  * @hal_soc: Opaque HAL SOC handle
645  * @hal_ring: Source ring pointer
646  * @tailp: Tail Pointer
647  * @headp: Head Pointer
648  *
649  * Return: Update tail pointer and head pointer in arguments.
650  */
651 static inline void hal_api_get_tphp(void *hal_soc, void *hal_ring,
652 	uint32_t *tailp, uint32_t *headp)
653 {
654 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
655 
656 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
657 		*headp = srng->u.src_ring.hp / srng->entry_size;
658 		*tailp = *(srng->u.src_ring.tp_addr) / srng->entry_size;
659 	} else {
660 		*tailp = srng->u.dst_ring.tp / srng->entry_size;
661 		*headp = *(srng->u.dst_ring.hp_addr) / srng->entry_size;
662 	}
663 }
664 
665 /**
666  * hal_srng_src_get_next - Get next entry from a source ring and move cached tail pointer
667  *
668  * @hal_soc: Opaque HAL SOC handle
669  * @hal_ring: Source ring pointer
670  *
671  * Return: Opaque pointer for next ring entry; NULL on failire
672  */
673 static inline void *hal_srng_src_get_next(void *hal_soc, void *hal_ring)
674 {
675 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
676 	uint32_t *desc;
677 	/* TODO: Using % is expensive, but we have to do this since
678 	 * size of some SRNG rings is not power of 2 (due to descriptor
679 	 * sizes). Need to create separate API for rings used
680 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
681 	 * SW2RXDMA and CE rings)
682 	 */
683 	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
684 		srng->ring_size;
685 
686 	if (next_hp != srng->u.src_ring.cached_tp) {
687 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
688 		srng->u.src_ring.hp = next_hp;
689 		/* TODO: Since reap function is not used by all rings, we can
690 		 * remove the following update of reap_hp in this function
691 		 * if we can ensure that only hal_srng_src_get_next_reaped
692 		 * is used for the rings requiring reap functionality
693 		 */
694 		srng->u.src_ring.reap_hp = next_hp;
695 		return (void *)desc;
696 	}
697 
698 	return NULL;
699 }
700 
701 /**
702  * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
703  * hal_srng_src_get_next should be called subsequently to move the head pointer
704  *
705  * @hal_soc: Opaque HAL SOC handle
706  * @hal_ring: Source ring pointer
707  *
708  * Return: Opaque pointer for next ring entry; NULL on failire
709  */
710 static inline void *hal_srng_src_peek(void *hal_soc, void *hal_ring)
711 {
712 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
713 	uint32_t *desc;
714 
715 	/* TODO: Using % is expensive, but we have to do this since
716 	 * size of some SRNG rings is not power of 2 (due to descriptor
717 	 * sizes). Need to create separate API for rings used
718 	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
719 	 * SW2RXDMA and CE rings)
720 	 */
721 	if (((srng->u.src_ring.hp + srng->entry_size) %
722 		srng->ring_size) != srng->u.src_ring.cached_tp) {
723 		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
724 		return (void *)desc;
725 	}
726 
727 	return NULL;
728 }
729 
730 /**
731  * hal_srng_src_num_avail - Returns number of available entries in src ring
732  *
733  * @hal_soc: Opaque HAL SOC handle
734  * @hal_ring: Source ring pointer
735  * @sync_hw_ptr: Sync cached tail pointer with HW
736  *
737  */
738 static inline uint32_t hal_srng_src_num_avail(void *hal_soc,
739 	void *hal_ring, int sync_hw_ptr)
740 {
741 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
742 	uint32_t tp;
743 	uint32_t hp = srng->u.src_ring.hp;
744 
745 	if (sync_hw_ptr) {
746 		tp = *(srng->u.src_ring.tp_addr);
747 		srng->u.src_ring.cached_tp = tp;
748 	} else {
749 		tp = srng->u.src_ring.cached_tp;
750 	}
751 
752 	if (tp > hp)
753 		return ((tp - hp) / srng->entry_size) - 1;
754 	else
755 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
756 }
757 
758 /**
759  * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
760  * ring head/tail pointers to HW.
761  * This should be used only if hal_srng_access_start_unlocked to start ring
762  * access
763  *
764  * @hal_soc: Opaque HAL SOC handle
765  * @hal_ring: Ring pointer (Source or Destination ring)
766  *
767  * Return: 0 on success; error on failire
768  */
769 static inline void hal_srng_access_end_unlocked(void *hal_soc, void *hal_ring)
770 {
771 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
772 
773 	/* TODO: See if we need a write memory barrier here */
774 	if (srng->flags & HAL_SRNG_LMAC_RING) {
775 		/* For LMAC rings, ring pointer updates are done through FW and
776 		 * hence written to a shared memory location that is read by FW
777 		 */
778 		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
779 			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
780 		} else {
781 			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
782 		}
783 	} else {
784 		if (srng->ring_dir == HAL_SRNG_SRC_RING)
785 			hal_write_address_32_mb(hal_soc,
786 				srng->u.src_ring.hp_addr,
787 				srng->u.src_ring.hp);
788 		else
789 			hal_write_address_32_mb(hal_soc,
790 				srng->u.dst_ring.tp_addr,
791 				srng->u.dst_ring.tp);
792 	}
793 }
794 
795 /**
796  * hal_srng_access_end - Unlock ring access and update cached ring head/tail
797  * pointers to HW
798  * This should be used only if hal_srng_access_start to start ring access
799  *
800  * @hal_soc: Opaque HAL SOC handle
801  * @hal_ring: Ring pointer (Source or Destination ring)
802  *
803  * Return: 0 on success; error on failire
804  */
805 static inline void hal_srng_access_end(void *hal_soc, void *hal_ring)
806 {
807 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
808 
809 	if (qdf_unlikely(!hal_ring)) {
810 		qdf_print("Error: Invalid hal_ring\n");
811 		return;
812 	}
813 
814 	hal_srng_access_end_unlocked(hal_soc, hal_ring);
815 	SRNG_UNLOCK(&(srng->lock));
816 }
817 
/**
 * hal_srng_access_end_reap - Unlock ring access
 * This should be used only if hal_srng_access_start to start ring access
 * and should be used only while reaping SRC ring completions
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: none
 */
static inline void hal_srng_access_end_reap(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	/* Deliberately does NOT publish the cached head/tail pointers to HW
	 * (unlike hal_srng_access_end) - only the lock is released.
	 */
	SRNG_UNLOCK(&(srng->lock));
}
834 
835 /* TODO: Check if the following definitions is available in HW headers */
836 #define WBM_IDLE_SCATTER_BUF_SIZE 32704
837 #define NUM_MPDUS_PER_LINK_DESC 6
838 #define NUM_MSDUS_PER_LINK_DESC 7
839 #define REO_QUEUE_DESC_ALIGN 128
840 
841 #define LINK_DESC_ALIGN 128
842 
843 #define ADDRESS_MATCH_TAG_VAL 0x5
844 /* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
845  * of TX_MPDU_QUEUE_EXT. We are defining a common average count here
846  */
847 #define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
848 
849 /* TODO: Check with HW team on the scatter buffer size supported. As per WBM
850  * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
851  * should be specified in 16 word units. But the number of bits defined for
852  * this field in HW header files is 5.
853  */
854 #define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
855 
856 
/**
 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
 * in an idle list
 *
 * @hal_soc: Opaque HAL SOC handle (unused)
 *
 * Return: scatter buffer size in bytes (WBM_IDLE_SCATTER_BUF_SIZE)
 */
static inline uint32_t hal_idle_list_scatter_buf_size(void *hal_soc)
{
	return WBM_IDLE_SCATTER_BUF_SIZE;
}
868 
/**
 * hal_get_link_desc_size - Get the size of each link descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 * Return: link descriptor size from the SOC-specific ops table.
 * NOTE(review): on missing ops or function pointer this returns -EINVAL,
 * which the uint32_t return type implicitly converts to a very large
 * unsigned value - callers cannot test for it as a negative error code.
 */
static inline uint32_t hal_get_link_desc_size(struct hal_soc *hal_soc)
{
	if (!hal_soc || !hal_soc->ops) {
		qdf_print("Error: Invalid ops\n");
		QDF_BUG(0);
		return -EINVAL;
	}
	if (!hal_soc->ops->hal_get_link_desc_size) {
		qdf_print("Error: Invalid function pointer\n");
		QDF_BUG(0);
		return -EINVAL;
	}
	return hal_soc->ops->hal_get_link_desc_size();
}
889 
/**
 * hal_get_link_desc_align - Get the required start address alignment for
 * link descriptors
 *
 * @hal_soc: Opaque HAL SOC handle (unused)
 *
 * Return: alignment in bytes (LINK_DESC_ALIGN)
 */
static inline uint32_t hal_get_link_desc_align(void *hal_soc)
{
	return LINK_DESC_ALIGN;
}
901 
/**
 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle (unused)
 *
 * Return: NUM_MPDUS_PER_LINK_DESC
 */
static inline uint32_t hal_num_mpdus_per_link_desc(void *hal_soc)
{
	return NUM_MPDUS_PER_LINK_DESC;
}
912 
/**
 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle (unused)
 *
 * Return: NUM_MSDUS_PER_LINK_DESC
 */
static inline uint32_t hal_num_msdus_per_link_desc(void *hal_soc)
{
	return NUM_MSDUS_PER_LINK_DESC;
}
923 
/**
 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
 * descriptor can hold
 *
 * @hal_soc: Opaque HAL SOC handle (unused)
 *
 * Return: NUM_MPDU_LINKS_PER_QUEUE_DESC
 */
static inline uint32_t hal_num_mpdu_links_per_queue_desc(void *hal_soc)
{
	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
}
935 
/**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that fit in the given scatter buffer size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_buf_size: Size of scatter buffer
 *
 * Return: number of WBM_IDLE_LINK-sized entries that fit after reserving
 * WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE bytes for the next-buffer pointer
 */
static inline uint32_t hal_idle_scatter_buf_num_entries(void *hal_soc,
	uint32_t scatter_buf_size)
{
	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
		hal_srng_get_entrysize(hal_soc, WBM_IDLE_LINK);
}
950 
951 /**
952  * hal_idle_list_num_scatter_bufs - Get the number of sctater buffer
953  * each given buffer size
954  *
955  * @hal_soc: Opaque HAL SOC handle
956  * @total_mem: size of memory to be scattered
957  * @scatter_buf_size: Size of scatter buffer
958  *
959  */
960 static inline uint32_t hal_idle_list_num_scatter_bufs(void *hal_soc,
961 	uint32_t total_mem, uint32_t scatter_buf_size)
962 {
963 	uint8_t rem = (total_mem % (scatter_buf_size -
964 			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
965 
966 	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
967 				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
968 
969 	return num_scatter_bufs;
970 }
971 
/* REO parameters to be passed to hal_reo_setup */
struct hal_reo_params {
	/** rx hash steering enabled or disabled */
	bool rx_hash_enabled;
	/** reo remap 1 register value */
	uint32_t remap1;
	/** reo remap 2 register value */
	uint32_t remap2;
	/** fragment destination ring */
	uint8_t frag_dst_ring;
	/** padding to keep the structure size word-aligned */
	uint8_t padding[3];
};
985 
986 
/**
 * enum hal_pn_type - PN (packet number) check type for REO queues
 * @HAL_PN_NONE: PN check disabled
 * @HAL_PN_WPA: WPA-style PN check
 * @HAL_PN_WAPI_EVEN: WAPI with even PN
 * @HAL_PN_WAPI_UNEVEN: WAPI with uneven (odd) PN
 */
enum hal_pn_type {
	HAL_PN_NONE,
	HAL_PN_WPA,
	HAL_PN_WAPI_EVEN,
	HAL_PN_WAPI_UNEVEN,
};
993 
994 #define HAL_RX_MAX_BA_WINDOW 256
995 
996 /**
997  * hal_get_reo_qdesc_align - Get start address alignment for reo
998  * queue descriptors
999  *
1000  * @hal_soc: Opaque HAL SOC handle
1001  *
1002  */
1003 static inline uint32_t hal_get_reo_qdesc_align(void *hal_soc)
1004 {
1005 	return REO_QUEUE_DESC_ALIGN;
1006 }
1007 
/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID (traffic identifier) the queue descriptor is set up for
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
 *
 */
extern void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
	int pn_type);
1022 
1023 /**
1024  * hal_srng_get_hp_addr - Get head pointer physical address
1025  *
1026  * @hal_soc: Opaque HAL SOC handle
1027  * @hal_ring: Ring pointer (Source or Destination ring)
1028  *
1029  */
1030 static inline qdf_dma_addr_t hal_srng_get_hp_addr(void *hal_soc, void *hal_ring)
1031 {
1032 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
1033 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1034 
1035 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1036 		return hal->shadow_wrptr_mem_paddr +
1037 		  ((unsigned long)(srng->u.src_ring.hp_addr) -
1038 		  (unsigned long)(hal->shadow_wrptr_mem_vaddr));
1039 	} else {
1040 		return hal->shadow_rdptr_mem_paddr +
1041 		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
1042 		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
1043 	}
1044 }
1045 
1046 /**
1047  * hal_srng_get_tp_addr - Get tail pointer physical address
1048  *
1049  * @hal_soc: Opaque HAL SOC handle
1050  * @hal_ring: Ring pointer (Source or Destination ring)
1051  *
1052  */
1053 static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
1054 {
1055 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
1056 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1057 
1058 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1059 		return hal->shadow_rdptr_mem_paddr +
1060 			((unsigned long)(srng->u.src_ring.tp_addr) -
1061 			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
1062 	} else {
1063 		return hal->shadow_wrptr_mem_paddr +
1064 			((unsigned long)(srng->u.dst_ring.tp_addr) -
1065 			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
1066 	}
1067 }
1068 
/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 *
 * Return: none
 */
extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
	struct hal_srng_params *ring_params);
1078 
/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
extern void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem);
1086 
/**
 * hal_get_target_type - Return target type
 *
 * @hal: HAL SOC handle
 *
 * Return: target type identifier
 */
uint32_t hal_get_target_type(struct hal_soc *hal);
1093 
/**
 * hal_get_ba_aging_timeout - Retrieve BA aging timeout
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ac: Access category
 * @value: pointer updated with timeout duration in millisec
 *
 * Return: none
 */
void hal_get_ba_aging_timeout(void *hal_soc, uint8_t ac,
			      uint32_t *value);
/**
 * hal_set_ba_aging_timeout - Set BA aging timeout
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ac: Access category
 * @value: timeout duration in millisec
 *
 * Return: none
 */
void hal_set_ba_aging_timeout(void *hal_soc, uint8_t ac,
			      uint32_t value);
/**
 * hal_srng_dst_hw_init - Private function to initialize SRNG
 * destination ring HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	/* Dispatch to the target-specific implementation */
	hal->ops->hal_srng_dst_hw_init(hal, srng);
}
1123 
/**
 * hal_srng_src_hw_init - Private function to initialize SRNG
 * source ring HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_src_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	/* Dispatch to the target-specific implementation */
	hal->ops->hal_srng_src_hw_init(hal, srng);
}
1135 
1136 /**
1137  * hal_reo_setup - Initialize HW REO block
1138  *
1139  * @hal_soc: Opaque HAL SOC handle
1140  * @reo_params: parameters needed by HAL for REO config
1141  */
1142 static inline void hal_reo_setup(void *halsoc,
1143 	 void *reoparams)
1144 {
1145 	struct hal_soc *hal_soc = (struct hal_soc *)halsoc;
1146 
1147 	hal_soc->ops->hal_reo_setup(halsoc, reoparams);
1148 }
1149 
1150 /**
1151  * hal_setup_link_idle_list - Setup scattered idle list using the
1152  * buffer list provided
1153  *
1154  * @hal_soc: Opaque HAL SOC handle
1155  * @scatter_bufs_base_paddr: Array of physical base addresses
1156  * @scatter_bufs_base_vaddr: Array of virtual base addresses
1157  * @num_scatter_bufs: Number of scatter buffers in the above lists
1158  * @scatter_buf_size: Size of each scatter buffer
1159  * @last_buf_end_offset: Offset to the last entry
1160  * @num_entries: Total entries of all scatter bufs
1161  *
1162  */
1163 static inline void hal_setup_link_idle_list(void *halsoc,
1164 	qdf_dma_addr_t scatter_bufs_base_paddr[],
1165 	void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
1166 	uint32_t scatter_buf_size, uint32_t last_buf_end_offset,
1167 	uint32_t num_entries)
1168 {
1169 	struct hal_soc *hal_soc = (struct hal_soc *)halsoc;
1170 
1171 	hal_soc->ops->hal_setup_link_idle_list(halsoc, scatter_bufs_base_paddr,
1172 			scatter_bufs_base_vaddr, num_scatter_bufs,
1173 			scatter_buf_size, last_buf_end_offset,
1174 			num_entries);
1175 
1176 }
#endif /* _HAL_API_H_ */
1178