/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *     * Neither the name of The Linux Foundation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _HAL_API_H_
#define _HAL_API_H_

#include "qdf_types.h"
#include "qdf_util.h"
#include "hal_internal.h"
#include "rx_msdu_link.h"
#include "rx_reo_queue.h"
#include "rx_reo_queue_ext.h"

#define MAX_UNWINDOWED_ADDRESS 0x80000
#define WINDOW_ENABLE_BIT 0x80000000
#define WINDOW_REG_ADDRESS 0x310C
#define WINDOW_SHIFT 19
#define WINDOW_VALUE_MASK 0x3F
#define WINDOW_START MAX_UNWINDOWED_ADDRESS
#define WINDOW_RANGE_MASK 0x7FFFF

static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	if (window != hal_soc->register_window) {
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
			      WINDOW_ENABLE_BIT | window);
		hal_soc->register_window = window;
	}
}
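
/*
 * Worked example of the windowing math (illustrative only): an access to
 * offset 0x90310 selects window (0x90310 >> WINDOW_SHIFT) & WINDOW_VALUE_MASK
 * = 1, and the access is then issued at
 * dev_base_addr + WINDOW_START + (0x90310 & WINDOW_RANGE_MASK),
 * i.e. dev_base_addr + 0x80000 + 0x10310.
 */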

/**
 * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) - 1
 * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
 * note3: WINDOW_VALUE_MASK = big enough that trying to write past that window
 *	  would be a bug
 */
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value)
{
	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
	} else {
		qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
		hal_select_window(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			      (offset & WINDOW_RANGE_MASK), value);
		qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
	}
}

/**
 * hal_write_address_32_mb - write a value to a register address
 * @hal_soc: HAL SOC handle
 * @addr: Register address within the device register space
 * @value: Value to be written
 */
static inline void hal_write_address_32_mb(struct hal_soc *hal_soc,
					   void __iomem *addr, uint32_t value)
{
	uint32_t offset;

	if (!hal_soc->use_register_windowing)
		return qdf_iowrite32(addr, value);

	offset = addr - hal_soc->dev_base_addr;
	hal_write32_mb(hal_soc, offset, value);
}

static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t ret;

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		return qdf_ioread32(hal_soc->dev_base_addr + offset);
	}

	qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
	hal_select_window(hal_soc, offset);
	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
			   (offset & WINDOW_RANGE_MASK));
	qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);

	return ret;
}
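
/*
 * Example (illustrative sketch, not part of this header): a windowing-safe
 * read-modify-write sequence built from the accessors above; 'reg_offset'
 * and 'HAL_EXAMPLE_BIT' are hypothetical placeholders.
 *
 *	uint32_t val;
 *
 *	val = hal_read32_mb(hal_soc, reg_offset);
 *	val |= HAL_EXAMPLE_BIT;
 *	hal_write32_mb(hal_soc, reg_offset, val);
 */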

#include "hif_io32.h"

/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle
 *	   NULL on failure
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
extern void *hal_attach(void *hif_handle, qdf_device_t qdf_dev);

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF detach
 *
 */
extern void hal_detach(void *hal_soc);
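
/*
 * Example (illustrative sketch, not part of this header): typical HAL
 * attach/detach lifecycle, driven from HIF initialization; 'hif_handle' and
 * 'qdf_dev' are assumed to come from the HIF/QDF layers.
 *
 *	void *hal_soc = hal_attach(hif_handle, qdf_dev);
 *
 *	if (!hal_soc)
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	hal_detach(hal_soc);
 */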

/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	REO_DST,
	REO_EXCEPTION,
	REO_REINJECT,
	REO_CMD,
	REO_STATUS,
	TCL_DATA,
	TCL_CMD,
	TCL_STATUS,
	CE_SRC,
	CE_DST,
	CE_DST_STATUS,
	WBM_IDLE_LINK,
	SW2WBM_RELEASE,
	WBM2SW_RELEASE,
	RXDMA_BUF,
	RXDMA_DST,
	RXDMA_MONITOR_BUF,
	RXDMA_MONITOR_STATUS,
	RXDMA_MONITOR_DST,
	RXDMA_MONITOR_DESC,
	DIR_BUF_RX_DMA_SRC,
#ifdef WLAN_FEATURE_CIF_CFR
	WIFI_POS_SRC,
#endif
	MAX_RING_TYPES
};

/* SRNG flags passed in hal_srng_params.flags */
#define HAL_SRNG_MSI_SWAP			0x00000008
#define HAL_SRNG_RING_PTR_SWAP			0x00000010
#define HAL_SRNG_DATA_TLV_SWAP			0x00000020
#define HAL_SRNG_LOW_THRES_INTR_ENABLE		0x00010000
#define HAL_SRNG_MSI_INTR			0x00020000

#define PN_SIZE_24 0
#define PN_SIZE_48 1
#define PN_SIZE_128 2

/**
 * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
 * used by callers for calculating the size of memory to be allocated before
 * calling hal_srng_setup to setup the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 */
extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);

/**
 * hal_srng_max_entries - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);

/**
 * hal_srng_get_dir - Returns the direction of the ring
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Ring direction
 */
enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);

/* HAL memory information */
struct hal_mem_info {
	/* dev base virtual addr */
	void *dev_base_addr;
	/* dev base physical addr */
	void *dev_base_paddr;
	/* Remote virtual pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_vaddr;
	/* Remote physical pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_paddr;
	/* Shared memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_vaddr;
	/* Shared physical memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_paddr;
};

/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold - in micro seconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold - in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
	/* Source or Destination ring */
	enum hal_srng_dir ring_dir;
	/* Size of ring entry */
	uint32_t entry_size;
	/* hw register base address */
	void *hwreg_base[MAX_SRNG_REG_GROUPS];
};

/**
 * hal_construct_shadow_config() - initialize the shadow registers for dp rings
 * @hal_soc: hal handle
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);

/**
 * hal_set_one_shadow_config() - add a config for the specified ring
 * @hal_soc: hal handle
 * @ring_type: ring type
 * @ring_num: ring num
 *
 * The ring type and ring num uniquely specify the ring.  After this call,
 * the hp/tp will be added as the next entry in the shadow register
 * configuration table.  The hal code will use the shadow register address
 * in place of the hp/tp address.
 *
 * This function is exposed, so that the CE module can skip configuring shadow
 * registers for unused rings and rings assigned to the firmware.
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
					    int ring_num);

/**
 * hal_get_shadow_config() - retrieve the config table
 * @hal_soc: hal handle
 * @shadow_config: will point to the config table after the call returns
 * @num_shadow_registers_configured: will contain the number of valid entries
 */
extern void hal_get_shadow_config(void *hal_soc,
				  struct pld_shadow_reg_v2_cfg **shadow_config,
				  int *num_shadow_registers_configured);
/**
 * hal_srng_setup - Initialize HW SRNG ring.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of
 *		same type (starting from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
 * structure. Ring base address should be 8 byte aligned and size of each ring
 * entry should be queried using the API hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *	   NULL on failure (if given ring is not available)
 */
extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params);
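
/*
 * Example (illustrative sketch, not part of this header): allocating ring
 * memory and setting up an SRNG. The qdf_mem_alloc_consistent() usage and
 * the REO_DST ring choice are assumptions made for illustration.
 *
 *	struct hal_srng_params params = {0};
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
 *	uint32_t num_entries = hal_srng_max_entries(hal_soc, REO_DST);
 *	void *hal_ring;
 *
 *	params.ring_base_vaddr = qdf_mem_alloc_consistent(qdf_dev, NULL,
 *			num_entries * entry_size, &params.ring_base_paddr);
 *	params.num_entries = num_entries;
 *	hal_ring = hal_srng_setup(hal_soc, REO_DST, 0, 0, &params);
 */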

/* Remapping ids of REO rings */
#define REO_REMAP_TCL 0
#define REO_REMAP_SW1 1
#define REO_REMAP_SW2 2
#define REO_REMAP_SW3 3
#define REO_REMAP_SW4 4
#define REO_REMAP_RELEASE 5
#define REO_REMAP_FW 6
#define REO_REMAP_UNUSED 7

/*
 * currently this macro only works for IX0 since all the rings we are remapping
 * can be remapped from HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
 */
#define HAL_REO_REMAP_VAL(_ORIGINAL_DEST, _NEW_DEST) \
	HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST)
/* allow the destination macros to be expanded */
#define HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST) \
	(_NEW_DEST << \
	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
	  _ORIGINAL_DEST ## _SHFT))

/**
 * hal_reo_remap_IX0 - Remap REO ring destination
 * @hal: HAL SOC handle
 * @remap_val: Remap value
 */
extern void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val);
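
/*
 * Example (illustrative sketch, not part of this header): building a remap
 * value that steers traffic originally destined for SW1 and SW2 to SW3, then
 * programming it via hal_reo_remap_IX0(). The two-level macro lets the
 * REO_REMAP_* ids expand to their numeric values before token pasting.
 *
 *	uint32_t remap_val =
 *		HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW3) |
 *		HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW3);
 *
 *	hal_reo_remap_IX0(hal, remap_val);
 */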

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest SRNG head pointer
 * @sring: srng pointer
 * @paddr: physical address
 */
extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);

/**
 * hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
 * @srng: srng pointer
 * @vaddr: virtual address
 */
extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr);

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_srng: Opaque HAL SRNG pointer
 */
extern void hal_srng_cleanup(void *hal_soc, void *hal_srng);

static inline bool hal_srng_initialized(void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	return !!srng->initialized;
}

/**
 * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
 * hal_srng_access_start if locked access is required
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start_unlocked(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		srng->u.src_ring.cached_tp =
			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
	else
		srng->u.dst_ring.cached_hp =
			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

	return 0;
}

/**
 * hal_srng_access_start - Start (locked) ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	SRNG_LOCK(&(srng->lock));

	return hal_srng_access_start_unlocked(hal_soc, hal_ring);
}

/**
 * hal_srng_dst_get_next - Get next entry from a destination ring and move
 * cached tail pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
		/* TODO: Using % is expensive, but we have to do this since
		 * size of some SRNG rings is not power of 2 (due to descriptor
		 * sizes). Need to create separate API for rings used
		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
		 * SW2RXDMA and CE rings)
		 */
		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
			srng->ring_size;

		return (void *)desc;
	}

	return NULL;
}
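
/*
 * Example (illustrative sketch, not part of this header): draining a
 * destination ring under the SRNG lock; process_entry() is a hypothetical
 * caller-side handler.
 *
 *	void *desc;
 *
 *	if (hal_srng_access_start(hal_soc, hal_ring))
 *		return;
 *	while ((desc = hal_srng_dst_get_next(hal_soc, hal_ring)) != NULL)
 *		process_entry(desc);
 *	hal_srng_access_end(hal_soc, hal_ring);
 */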

/**
 * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
 * cached head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_get_next_hp(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.dst_ring.tp) {
		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
		srng->u.dst_ring.cached_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_dst_peek - Get next entry from a ring without moving tail pointer.
 * hal_srng_dst_get_next should be called subsequently to move the tail pointer
 * TODO: See if we need an optimized version of get_next that doesn't check for
 * loop_cnt
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_peek(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
		return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));

	return NULL;
}

/**
 * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
 * by SW) in destination ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 * @sync_hw_ptr: Sync cached head pointer with HW
 *
 * Return: Number of valid destination ring entries
 */
static inline uint32_t hal_srng_dst_num_valid(void *hal_soc, void *hal_ring,
	int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t hp;
	uint32_t tp = srng->u.dst_ring.tp;

	if (sync_hw_ptr) {
		hp = *(srng->u.dst_ring.hp_addr);
		srng->u.dst_ring.cached_hp = hp;
	} else {
		hp = srng->u.dst_ring.cached_hp;
	}

	if (hp >= tp)
		return (hp - tp) / srng->entry_size;
	else
		return (srng->ring_size - tp + hp) / srng->entry_size;
}
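
/*
 * Example (illustrative sketch, not part of this header): bounding per-pass
 * work with hal_srng_dst_num_valid() before walking the ring; 'quota' and
 * process_entry() are hypothetical.
 *
 *	uint32_t num_valid = hal_srng_dst_num_valid(hal_soc, hal_ring, 1);
 *	uint32_t to_process = (num_valid < quota) ? num_valid : quota;
 *	void *desc;
 *
 *	while (to_process-- &&
 *	       (desc = hal_srng_dst_get_next(hal_soc, hal_ring)))
 *		process_entry(desc);
 */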

/**
 * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
 * pointer. This can be used to release any buffers associated with completed
 * ring entries. Note that this should not be used for posting new descriptor
 * entries. Posting of new entries should be done only using
 * hal_srng_src_get_next_reaped when this function is used for reaping.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_reap_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
 * already reaped using hal_srng_src_reap_next, for posting new entries to
 * the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
 */
static inline void *hal_srng_src_get_next_reaped(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
			srng->ring_size;

		return (void *)desc;
	}

	return NULL;
}
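
/*
 * Example (illustrative sketch, not part of this header): two-phase reap/post
 * usage. Completed entries are first reaped (and their buffers released by a
 * hypothetical free_buffer()), then the same entries are re-posted through
 * hal_srng_src_get_next_reaped() without moving the reap pointer again;
 * post_new_buffer() is likewise hypothetical.
 *
 *	void *desc;
 *
 *	while ((desc = hal_srng_src_reap_next(hal_soc, hal_ring)))
 *		free_buffer(desc);
 *
 *	while ((desc = hal_srng_src_get_next_reaped(hal_soc, hal_ring)))
 *		post_new_buffer(desc);
 */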

/**
 * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
 * move reap pointer. This API is used in detach path to release any buffers
 * associated with ring entries which are pending reap.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_pending_reap_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp != srng->u.src_ring.hp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_done_val - Returns the number of source ring entries that HW
 * has completed (consumed) beyond the current reap pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Number of completed entries pending reap; 0 if none
 */
static inline uint32_t hal_srng_src_done_val(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp == srng->u.src_ring.cached_tp)
		return 0;

	if (srng->u.src_ring.cached_tp > next_reap_hp)
		return (srng->u.src_ring.cached_tp - next_reap_hp) /
			srng->entry_size;
	else
		return ((srng->ring_size - next_reap_hp) +
			srng->u.src_ring.cached_tp) / srng->entry_size;
}

/**
 * hal_api_get_tphp - Get head and tail pointer location for any ring
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @tailp: Tail Pointer
 * @headp: Head Pointer
 *
 * Return: None; tail and head pointer values are returned through the
 * arguments
 */
static inline void hal_api_get_tphp(void *hal_soc, void *hal_ring,
	uint32_t *tailp, uint32_t *headp)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		*headp = srng->u.src_ring.hp / srng->entry_size;
		*tailp = *(srng->u.src_ring.tp_addr) / srng->entry_size;
	} else {
		*tailp = srng->u.dst_ring.tp / srng->entry_size;
		*headp = *(srng->u.dst_ring.hp_addr) / srng->entry_size;
	}
}

/**
 * hal_srng_src_get_next - Get next entry from a source ring and move cached
 * tail pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;
	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = next_hp;
		/* TODO: Since reap function is not used by all rings, we can
		 * remove the following update of reap_hp in this function
		 * if we can ensure that only hal_srng_src_get_next_reaped
		 * is used for the rings requiring reap functionality
		 */
		srng->u.src_ring.reap_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}
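
/*
 * Example (illustrative sketch, not part of this header): posting a
 * descriptor to a source ring under the SRNG lock; fill_desc() is a
 * hypothetical routine that populates the HW descriptor.
 *
 *	void *desc;
 *
 *	if (hal_srng_access_start(hal_soc, hal_ring))
 *		return;
 *	desc = hal_srng_src_get_next(hal_soc, hal_ring);
 *	if (desc)
 *		fill_desc(desc);
 *	hal_srng_access_end(hal_soc, hal_ring);
 */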

/**
 * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
 * hal_srng_src_get_next should be called subsequently to move the head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_peek(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	if (((srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size) != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		return (void *)desc;
	}

	return NULL;
}

/**
 * hal_srng_src_num_avail - Returns number of available entries in src ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 * @sync_hw_ptr: Sync cached tail pointer with HW
 *
 * Return: Number of entries available for posting in the source ring
 */
static inline uint32_t hal_srng_src_num_avail(void *hal_soc,
	void *hal_ring, int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t tp;
	uint32_t hp = srng->u.src_ring.hp;

	if (sync_hw_ptr) {
		tp = *(srng->u.src_ring.tp_addr);
		srng->u.src_ring.cached_tp = tp;
	} else {
		tp = srng->u.src_ring.cached_tp;
	}

	if (tp > hp)
		return ((tp - hp) / srng->entry_size) - 1;
	else
		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}

/**
 * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
 * ring head/tail pointers to HW.
 * This should be used only if hal_srng_access_start_unlocked was used to
 * start ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end_unlocked(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
		} else {
			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_SRC_RING)
			hal_write_address_32_mb(hal_soc,
				srng->u.src_ring.hp_addr,
				srng->u.src_ring.hp);
		else
			hal_write_address_32_mb(hal_soc,
				srng->u.dst_ring.tp_addr,
				srng->u.dst_ring.tp);
	}
}

/**
 * hal_srng_access_end - Unlock ring access and update cached ring head/tail
 * pointers to HW.
 * This should be used only if hal_srng_access_start was used to start ring
 * access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	hal_srng_access_end_unlocked(hal_soc, hal_ring);
	SRNG_UNLOCK(&(srng->lock));
}

/**
 * hal_srng_access_end_reap - Unlock ring access
 * This should be used only if hal_srng_access_start was used to start ring
 * access and should be used only while reaping SRC ring completions
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end_reap(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	SRNG_UNLOCK(&(srng->lock));
}

/* TODO: Check if the following definitions are available in HW headers */
#define WBM_IDLE_DESC_LIST 1
#define WBM_IDLE_SCATTER_BUF_SIZE 32704
#define NUM_MPDUS_PER_LINK_DESC 6
#define NUM_MSDUS_PER_LINK_DESC 7
#define REO_QUEUE_DESC_ALIGN 128

#define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2)
#define LINK_DESC_ALIGN 128

#define ADDRESS_MATCH_TAG_VAL 0x5
/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
 * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
 */
#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12

/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
 * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
 * should be specified in 16 word units. But the number of bits defined for
 * this field in HW header files is 5.
 */
#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8

/**
 * hal_set_link_desc_addr - Setup link descriptor in a buffer_addr_info
 * HW structure
 *
 * @desc: Descriptor entry (from WBM_IDLE_LINK ring)
 * @cookie: SW cookie for the buffer/descriptor
 * @link_desc_paddr: Physical address of link descriptor entry
 *
 */
static inline void hal_set_link_desc_addr(void *desc, uint32_t cookie,
	qdf_dma_addr_t link_desc_paddr)
{
	uint32_t *buf_addr = (uint32_t *)desc;
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_0, BUFFER_ADDR_31_0,
		link_desc_paddr & 0xffffffff);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, BUFFER_ADDR_39_32,
		(uint64_t)link_desc_paddr >> 32);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, RETURN_BUFFER_MANAGER,
		WBM_IDLE_DESC_LIST);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, SW_BUFFER_COOKIE,
		cookie);
}
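
/*
 * Example (illustrative sketch, not part of this header): seeding a
 * WBM_IDLE_LINK source ring with link descriptors; 'wbm_idle_ring',
 * 'link_desc_paddr' and 'cookie' are hypothetical, maintained by the caller.
 *
 *	void *desc;
 *
 *	while ((desc = hal_srng_src_get_next(hal_soc, wbm_idle_ring))) {
 *		hal_set_link_desc_addr(desc, cookie++, link_desc_paddr);
 *		link_desc_paddr += hal_get_link_desc_size(hal_soc);
 *	}
 */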

/**
 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
 * in an idle list
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_idle_list_scatter_buf_size(void *hal_soc)
{
	return WBM_IDLE_SCATTER_BUF_SIZE;
}

/**
 * hal_get_link_desc_size - Get the size of each link descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_link_desc_size(void *hal_soc)
{
	return LINK_DESC_SIZE;
}

/**
 * hal_get_link_desc_align - Get the required start address alignment for
 * link descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_link_desc_align(void *hal_soc)
{
	return LINK_DESC_ALIGN;
}

/**
 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_mpdus_per_link_desc(void *hal_soc)
{
	return NUM_MPDUS_PER_LINK_DESC;
}

/**
 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_msdus_per_link_desc(void *hal_soc)
{
	return NUM_MSDUS_PER_LINK_DESC;
}

/**
 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
 * descriptor can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_mpdu_links_per_queue_desc(void *hal_soc)
{
	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
}

/**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that can be held in a scatter buffer of the given size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_buf_size: Size of scatter buffer
 *
 */
static inline uint32_t hal_idle_scatter_buf_num_entries(void *hal_soc,
	uint32_t scatter_buf_size)
{
	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
		hal_srng_get_entrysize(hal_soc, WBM_IDLE_LINK);
}

/**
 * hal_idle_list_num_scatter_bufs - Get the number of scatter buffers needed
 * to hold the given total memory
 *
 * @hal_soc: Opaque HAL SOC handle
 * @total_mem: size of memory to be scattered
 * @scatter_buf_size: Size of scatter buffer
 *
 */
static inline uint32_t hal_idle_list_num_scatter_bufs(void *hal_soc,
	uint32_t total_mem, uint32_t scatter_buf_size)
{
	uint8_t rem = (total_mem % (scatter_buf_size -
			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;

	uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
				WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;

	return num_scatter_bufs;
}
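
/*
 * Worked example (illustrative only): with scatter_buf_size =
 * WBM_IDLE_SCATTER_BUF_SIZE (32704), each buffer holds 32704 - 8 = 32696
 * usable bytes, so total_mem = 1048576 (1 MB) needs 1048576 / 32696 = 32
 * buffers plus one more for the 2304-byte remainder, i.e.
 * num_scatter_bufs = 33.
 */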

/**
 * hal_setup_link_idle_list - Setup scattered idle list using the buffer list
 * provided
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_bufs_base_paddr: Array of physical base addresses
 * @scatter_bufs_base_vaddr: Array of virtual base addresses
 * @num_scatter_bufs: Number of scatter buffers in the above lists
 * @scatter_buf_size: Size of each scatter buffer
 * @last_buf_end_offset: Offset to the last entry
 * @num_entries: Total entries of all scatter bufs
 *
 */
extern void hal_setup_link_idle_list(void *hal_soc,
	qdf_dma_addr_t scatter_bufs_base_paddr[],
	void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
	uint32_t scatter_buf_size, uint32_t last_buf_end_offset,
	uint32_t num_entries);

/* REO parameters to be passed to hal_reo_setup */
struct hal_reo_params {
	/* rx hash steering enabled or disabled */
	bool rx_hash_enabled;
	/* reo remap 1 register */
	uint32_t remap1;
	/* reo remap 2 register */
	uint32_t remap2;
};

/**
 * hal_reo_setup - Initialize HW REO block
 *
 * @hal_soc: Opaque HAL SOC handle
 * @reo_params: parameters needed by HAL for REO config
 */
extern void hal_reo_setup(void *hal_soc,
	struct hal_reo_params *reo_params);

enum hal_pn_type {
	HAL_PN_NONE,
	HAL_PN_WPA,
	HAL_PN_WAPI_EVEN,
	HAL_PN_WAPI_UNEVEN,
};

#define HAL_RX_MAX_BA_WINDOW 256
/**
 * hal_get_reo_qdesc_size - Get size of reo queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ba_window_size: BlockAck window size
 *
 */
static inline uint32_t hal_get_reo_qdesc_size(void *hal_soc,
	uint32_t ba_window_size)
{
	if (ba_window_size <= 1)
		return sizeof(struct rx_reo_queue);

	if (ba_window_size <= 105)
		return sizeof(struct rx_reo_queue) +
			sizeof(struct rx_reo_queue_ext);

	if (ba_window_size <= 210)
		return sizeof(struct rx_reo_queue) +
			(2 * sizeof(struct rx_reo_queue_ext));

	return sizeof(struct rx_reo_queue) +
		(3 * sizeof(struct rx_reo_queue_ext));
}
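
/*
 * Worked example (illustrative only): for a typical BlockAck window of 64,
 * hal_get_reo_qdesc_size() returns sizeof(struct rx_reo_queue) +
 * sizeof(struct rx_reo_queue_ext); for the maximum window of
 * HAL_RX_MAX_BA_WINDOW (256) it returns the base descriptor plus three
 * extension descriptors.
 */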

/**
 * hal_get_reo_qdesc_align - Get start address alignment for reo
 * queue descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_reo_qdesc_align(void *hal_soc)
{
	return REO_QUEUE_DESC_ALIGN;
}

/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID of the flow for which the queue is being set up
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
 *
 */
extern void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
	int pn_type);

/**
 * hal_srng_get_hp_addr - Get head pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 */
static inline qdf_dma_addr_t hal_srng_get_hp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_wrptr_mem_paddr +
		  ((unsigned long)(srng->u.src_ring.hp_addr) -
		   (unsigned long)(hal->shadow_wrptr_mem_vaddr));
	} else {
		return hal->shadow_rdptr_mem_paddr +
		  ((unsigned long)(srng->u.dst_ring.hp_addr) -
		   (unsigned long)(hal->shadow_rdptr_mem_vaddr));
	}
}

/**
 * hal_srng_get_tp_addr - Get tail pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 */
static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.tp_addr) -
			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
	} else {
		return hal->shadow_wrptr_mem_paddr +
			((unsigned long)(srng->u.dst_ring.tp_addr) -
			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
	}
}

/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
	struct hal_srng_params *ring_params);

/**
 * hal_get_meminfo - Retrieve hal memory base address information
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
extern void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem);
#endif /* _HAL_API_H_ */