/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "hal_api.h"
#include "target_type.h"
#include "wcss_version.h"
#include "qdf_module.h"
#ifdef QCA_WIFI_QCA6290
void hal_qca6290_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA8074
void hal_qca8074_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA8074V2
void hal_qca8074v2_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6390
void hal_qca6390_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6018
void hal_qca6018_attach(struct hal_soc *hal);
#endif

/**
 * hal_get_srng_ring_id() - get the ring id of a described ring
 * @hal: hal_soc data structure
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings)
 *
 * Return: the ring id or -EINVAL if the ring does not exist.
 */
static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
				int ring_num, int mac_id)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	int ring_id;

	if (ring_num >= ring_config->max_rings) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: ring_num exceeded maximum no. of supported rings",
			  __func__);
		/* TODO: This is a programming error. Assert if this happens */
		return -EINVAL;
	}

	if (ring_config->lmac_ring) {
		ring_id = ring_config->start_ring_id + ring_num +
			(mac_id * HAL_MAX_RINGS_PER_LMAC);
	} else {
		ring_id = ring_config->start_ring_id + ring_num;
	}

	return ring_id;
}
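
/*
 * Worked example (hypothetical numbers, for illustration only): with a ring
 * type whose config has start_ring_id = 10 and HAL_MAX_RINGS_PER_LMAC = 4,
 * a non-lmac request for ring_num = 1 resolves to ring_id = 10 + 1 = 11,
 * while an lmac ring with ring_num = 1 and mac_id = 2 resolves to
 * ring_id = 10 + 1 + (2 * 4) = 19. Callers must check for a negative return
 * before indexing hal->srng_list[].
 */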

static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
{
	/* TODO: Should we allocate srng structures dynamically? */
	return &(hal->srng_list[ring_id]);
}

#define HP_OFFSET_IN_REG_START 1
#define OFFSET_FROM_HP_TO_TP 4
static void hal_update_srng_hp_tp_address(void *hal_soc,
					  int shadow_config_index,
					  int ring_type,
					  int ring_num)
{
	struct hal_srng *srng;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	int ring_id;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = hal_get_srng(hal_soc, ring_id);

	if (ring_config->ring_dir == HAL_SRNG_DST_RING) {
		srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal->dev_base_addr;
		hal_debug("tp_addr=%pK dev base addr %pK index %u",
			  srng->u.dst_ring.tp_addr, hal->dev_base_addr,
			  shadow_config_index);
	} else {
		srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal->dev_base_addr;
		hal_debug("hp_addr=%pK dev base addr %pK index %u",
			  srng->u.src_ring.hp_addr,
			  hal->dev_base_addr, shadow_config_index);
	}
}

QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
				     int ring_type,
				     int ring_num)
{
	uint32_t target_register;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type];
	int shadow_config_index = hal->num_shadow_registers_configured;

	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
		QDF_ASSERT(0);
		return QDF_STATUS_E_RESOURCES;
	}

	hal->num_shadow_registers_configured++;

	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START] *
			    ring_num);

	/* if the ring is a dst ring, we need to shadow the tail pointer */
	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
		target_register += OFFSET_FROM_HP_TO_TP;

	hal->shadow_config[shadow_config_index].addr = target_register;

	/* update hp/tp addr in the hal_soc structure */
	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
				      ring_num);

	hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x, ring_type %d, ring num %d",
		  target_register,
		  SHADOW_REGISTER(shadow_config_index),
		  shadow_config_index,
		  ring_type, ring_num);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_one_shadow_config);
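
/*
 * Worked example (all register values hypothetical, for illustration only):
 * if a ring type's config had reg_start[HP_OFFSET_IN_REG_START] = 0x3000 and
 * reg_size[HP_OFFSET_IN_REG_START] = 0x80, then ring_num 2 would map to a
 * head pointer register at 0x3000 + 2 * 0x80 = 0x3100; for a destination
 * ring the tail pointer at 0x3100 + OFFSET_FROM_HP_TO_TP = 0x3104 is
 * shadowed instead. That offset is recorded in shadow_config[index].addr and
 * the host subsequently reads/writes SHADOW_REGISTER(index) rather than the
 * real register.
 */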

QDF_STATUS hal_construct_shadow_config(void *hal_soc)
{
	int ring_type, ring_num;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
		struct hal_hw_srng_config *srng_config =
			&hal->hw_srng_table[ring_type];

		if (ring_type == CE_SRC ||
		    ring_type == CE_DST ||
		    ring_type == CE_DST_STATUS)
			continue;

		if (srng_config->lmac_ring)
			continue;

		for (ring_num = 0; ring_num < srng_config->max_rings;
		     ring_num++)
			hal_set_one_shadow_config(hal_soc, ring_type, ring_num);
	}

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_shadow_config);

void hal_get_shadow_config(void *hal_soc,
	struct pld_shadow_reg_v2_cfg **shadow_config,
	int *num_shadow_registers_configured)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	*shadow_config = hal->shadow_config;
	*num_shadow_registers_configured =
		hal->num_shadow_registers_configured;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s", __func__);
}

qdf_export_symbol(hal_get_shadow_config);
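
/*
 * Usage sketch (illustrative only; the calling context is hypothetical, the
 * calls are the APIs defined above):
 *
 *	struct pld_shadow_reg_v2_cfg *cfg;
 *	int num_cfg = 0;
 *
 *	hal_construct_shadow_config(hal_soc);
 *	hal_get_shadow_config(hal_soc, &cfg, &num_cfg);
 *
 * After this, cfg[0..num_cfg - 1].addr holds the register offsets that are
 * expected to be mirrored into SHADOW_REGISTER(0)..SHADOW_REGISTER(num_cfg - 1).
 */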

static void hal_validate_shadow_register(struct hal_soc *hal,
					 uint32_t *destination,
					 uint32_t *shadow_address)
{
	unsigned int index;
	uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr;
	int destination_ba_offset =
		((char *)destination) - (char *)hal->dev_base_addr;

	index = shadow_address - shadow_0_offset;

	if (index >= MAX_SHADOW_REGISTERS) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: index %x out of bounds", __func__, index);
		goto error;
	} else if (hal->shadow_config[index].addr != destination_ba_offset) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: sanity check failure, expected %x, found %x",
			__func__, destination_ba_offset,
			hal->shadow_config[index].addr);
		goto error;
	}
	return;
error:
	qdf_print("%s: baddr %pK, destination %pK, shadow_address %pK s0offset %pK index %x",
		  __func__, hal->dev_base_addr, destination, shadow_address,
		  shadow_0_offset, index);
	QDF_BUG(0);
}

static void hal_target_based_configure(struct hal_soc *hal)
{
	switch (hal->target_type) {
#ifdef QCA_WIFI_QCA6290
	case TARGET_TYPE_QCA6290:
		hal->use_register_windowing = true;
		hal_qca6290_attach(hal);
		break;
#endif
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		hal->use_register_windowing = true;
		hal_qca6390_attach(hal);
		break;
#endif
#if defined(QCA_WIFI_QCA8074) && defined(CONFIG_WIN)
	case TARGET_TYPE_QCA8074:
		hal_qca8074_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA8074V2) && defined(CONFIG_WIN)
	case TARGET_TYPE_QCA8074V2:
		hal_qca8074v2_attach(hal);
		break;
#endif

#if defined(QCA_WIFI_QCA6018) && defined(CONFIG_WIN)
	case TARGET_TYPE_QCA6018:
		hal_qca6018_attach(hal);
		break;
#endif
	default:
		break;
	}
}

uint32_t hal_get_target_type(struct hal_soc *hal)
{
	struct hif_target_info *tgt_info =
		hif_get_target_info_handle(hal->hif_handle);

	return tgt_info->target_type;
}

qdf_export_symbol(hal_get_target_type);

/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 *
 * Return: Opaque HAL SOC handle, or NULL on allocation failure
 */
void *hal_attach(void *hif_handle, qdf_device_t qdf_dev)
{
	struct hal_soc *hal;
	int i;

	hal = qdf_mem_malloc(sizeof(*hal));

	if (!hal) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: hal_soc allocation failed", __func__);
		goto fail0;
	}
	hal->hif_handle = hif_handle;
	hal->dev_base_addr = hif_get_dev_ba(hif_handle);
	hal->qdf_dev = qdf_dev;
	hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent(
		qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
		HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr));
	if (!hal->shadow_rdptr_mem_paddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: hal->shadow_rdptr_mem_paddr allocation failed",
			__func__);
		goto fail1;
	}
	qdf_mem_zero(hal->shadow_rdptr_mem_vaddr,
		     sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX);

	hal->shadow_wrptr_mem_vaddr =
		(uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		&(hal->shadow_wrptr_mem_paddr));
	if (!hal->shadow_wrptr_mem_vaddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: hal->shadow_wrptr_mem_vaddr allocation failed",
			__func__);
		goto fail2;
	}
	qdf_mem_zero(hal->shadow_wrptr_mem_vaddr,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS);

	for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
		hal->srng_list[i].initialized = 0;
		hal->srng_list[i].ring_id = i;
	}

	qdf_spinlock_create(&hal->register_access_lock);
	hal->register_window = 0;
	hal->target_type = hal_get_target_type(hal);

	hal_target_based_configure(hal);

	return (void *)hal;

fail2:
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
fail1:
	qdf_mem_free(hal);
fail0:
	return NULL;
}
qdf_export_symbol(hal_attach);
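
/*
 * Usage sketch (illustrative only; the HIF init/deinit call sites and error
 * handling are hypothetical):
 *
 *	void *hal_soc = hal_attach(hif_handle, qdf_dev);
 *
 *	if (!hal_soc)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	... set up SRNG rings; the DP layer fetches this handle through
 *	hif_get_hal_handle() ...
 *
 *	hal_detach(hal_soc);
 */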

/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	mem->dev_base_addr = (void *)hal->dev_base_addr;
	mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
	mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
	mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
	mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
	hif_read_phy_mem_base(hal->hif_handle,
			      (qdf_dma_addr_t *)&mem->dev_base_paddr);
}
qdf_export_symbol(hal_get_meminfo);
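
/*
 * Usage sketch (illustrative only):
 *
 *	struct hal_mem_info mem_info = {0};
 *
 *	hal_get_meminfo(hal_soc, &mem_info);
 *
 * mem_info then carries the device register base (virtual and physical) and
 * the shadow read/write pointer memory allocated in hal_attach().
 */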

/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF de-initialization to free
 * the resources allocated by hal_attach()
 *
 * Return: None
 */
void hal_detach(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0);
	qdf_mem_free(hal);
}
qdf_export_symbol(hal_detach);

/**
 * hal_ce_dst_setup - Initialize CE destination ring registers
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @ring_num: ring number
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
				    int ring_num)
{
	uint32_t reg_val = 0;
	uint32_t reg_addr;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, CE_DST);

	/* set DEST_MAX_LENGTH according to ce assignment */
	reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_ADDR(
			ring_config->reg_start[R0_INDEX] +
			(ring_num * ring_config->reg_size[R0_INDEX]));

	reg_val = HAL_REG_READ(hal, reg_addr);
	reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	reg_val |= srng->u.dst_ring.max_buffer_length &
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	HAL_REG_WRITE(hal, reg_addr, reg_val);
}

/**
 * hal_reo_remap_IX0 - Remap REO ring destination
 * @hal: HAL SOC handle
 * @remap_val: Remap value
 */
void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val)
{
	uint32_t reg_offset = HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
				SEQ_WCSS_UMAC_REO_REG_OFFSET);

	HAL_REG_WRITE(hal, reg_offset, remap_val);
}

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest ring head pointer
 * @srng: srng pointer
 * @paddr: physical address
 */
void hal_srng_dst_set_hp_paddr(struct hal_srng *srng,
			       uint64_t paddr)
{
	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB,
			   paddr & 0xffffffff);
	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB,
			   paddr >> 32);
}

/**
 * hal_srng_dst_init_hp() - Initialize destination ring head pointer
 * @srng: srng pointer
 * @vaddr: virtual address
 */
void hal_srng_dst_init_hp(struct hal_srng *srng,
			  uint32_t *vaddr)
{
	if (!srng)
		return;

	srng->u.dst_ring.hp_addr = vaddr;
	SRNG_DST_REG_WRITE(srng, HP, srng->u.dst_ring.cached_hp);

	if (vaddr) {
		*srng->u.dst_ring.hp_addr = srng->u.dst_ring.cached_hp;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "hp_addr=%pK, cached_hp=%d, hp=%d",
			  (void *)srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp,
			  *srng->u.dst_ring.hp_addr);
	}
}

/**
 * hal_srng_hw_init - Private function to initialize SRNG HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		hal_srng_src_hw_init(hal, srng);
	else
		hal_srng_dst_hw_init(hal, srng);
}

#ifdef CONFIG_SHADOW_V2
#define ignore_shadow false
#define CHECK_SHADOW_REGISTERS true
#else
#define ignore_shadow true
#define CHECK_SHADOW_REGISTERS false
#endif

/**
 * hal_srng_setup - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 * from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure. Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *		 NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "%s: mac_id %d ring_id %d",
		  __func__, mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->initialized) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Ring (ring_type, ring_num) already initialized",
			  __func__);
		return NULL;
	}

	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	srng->ring_size = srng->num_entries * srng->entry_size;
	srng->ring_size_mask = srng->ring_size - 1;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	srng->hal_soc = hal_soc;

	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
			+ (ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
		srng->num_entries) << 2);

	srng->flags = ring_params->flags;
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should get these flags from the caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;
		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
			srng->u.src_ring.hp_addr = SRNG_SRC_ADDR(srng, HP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
				    QDF_TRACE_LEVEL_ERROR,
				    "%s: Ring (%d, %d) missing shadow config",
				    __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_SRC_ADDR(srng, HP),
						     srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
				HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
			srng->u.dst_ring.tp_addr = SRNG_DST_ADDR(srng, TP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
				    QDF_TRACE_LEVEL_ERROR,
				    "%s: Ring (%d, %d) missing shadow config",
				    __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_DST_ADDR(srng, TP),
						     srng->u.dst_ring.tp_addr);
		}
	}

	if (!(ring_config->lmac_ring)) {
		hal_srng_hw_init(hal, srng);

		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length =
				ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	srng->initialized = true;

	return (void *)srng;
}
qdf_export_symbol(hal_srng_setup);
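
/*
 * Usage sketch (illustrative only; the ring type, interrupt thresholds and
 * error handling are hypothetical, the APIs are the ones defined in this
 * file):
 *
 *	struct hal_srng_params params = {0};
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
 *	uint32_t num_entries = hal_srng_max_entries(hal_soc, ring_type);
 *	void *hal_ring;
 *
 *	params.ring_base_vaddr = qdf_mem_alloc_consistent(qdf_dev,
 *					qdf_dev->dev,
 *					num_entries * entry_size,
 *					&params.ring_base_paddr);
 *	params.num_entries = num_entries;
 *	params.intr_timer_thres_us = 8;
 *	params.intr_batch_cntr_thres_entries = 1;
 *
 *	hal_ring = hal_srng_setup(hal_soc, ring_type, 0, 0, &params);
 *	if (!hal_ring)
 *		... free the ring memory and bail out ...
 */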

/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_srng: Opaque HAL SRNG pointer
 */
void hal_srng_cleanup(void *hal_soc, void *hal_srng)
{
	struct hal_srng *srng = (struct hal_srng *)hal_srng;

	SRNG_LOCK_DESTROY(&srng->lock);
	srng->initialized = 0;
}
qdf_export_symbol(hal_srng_cleanup);

/**
 * hal_srng_get_entrysize - Returns size of ring entry in bytes
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: size of ring entry in bytes
 */
uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->entry_size << 2;
}
qdf_export_symbol(hal_srng_get_entrysize);
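
/*
 * Note (illustrative): entry_size in the HW SRNG config is stored in 4-byte
 * words, hence the '<< 2' above. The total ring memory a caller needs to
 * allocate for hal_srng_setup() is therefore:
 *
 *	ring_bytes = hal_srng_get_entrysize(hal_soc, ring_type) * num_entries;
 */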

/**
 * hal_srng_max_entries - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->max_size / ring_config->entry_size;
}
qdf_export_symbol(hal_srng_max_entries);

/**
 * hal_srng_get_dir - Returns the direction of the ring
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Ring direction (source or destination)
 */
enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->ring_dir;
}

/**
 * hal_srng_dump - Dump ring status
 * @srng: hal srng pointer
 */
void hal_srng_dump(struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		qdf_print("=== SRC RING %d ===", srng->ring_id);
		qdf_print("hp %u, reap_hp %u, tp %u, cached tp %u",
			  srng->u.src_ring.hp,
			  srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr,
			  srng->u.src_ring.cached_tp);
	} else {
		qdf_print("=== DST RING %d ===", srng->ring_id);
		qdf_print("tp %u, hp %u, cached tp %u, loop_cnt %u",
			  srng->u.dst_ring.tp,
			  *srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp,
			  srng->u.dst_ring.loop_cnt);
	}
}

/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
void hal_get_srng_params(void *hal_soc, void *hal_ring,
	struct hal_srng_params *ring_params)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	int i = 0;

	ring_params->ring_id = srng->ring_id;
	ring_params->ring_dir = srng->ring_dir;
	ring_params->entry_size = srng->entry_size;

	ring_params->ring_base_paddr = srng->ring_base_paddr;
	ring_params->ring_base_vaddr = srng->ring_base_vaddr;
	ring_params->num_entries = srng->num_entries;
	ring_params->msi_addr = srng->msi_addr;
	ring_params->msi_data = srng->msi_data;
	ring_params->intr_timer_thres_us = srng->intr_timer_thres_us;
	ring_params->intr_batch_cntr_thres_entries =
		srng->intr_batch_cntr_thres_entries;
	ring_params->low_threshold = srng->u.src_ring.low_threshold;
	ring_params->flags = srng->flags;

	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++)
		ring_params->hwreg_base[i] = srng->hwreg_base[i];
}
qdf_export_symbol(hal_get_srng_params);
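
/*
 * Usage sketch (illustrative only): a caller holding the opaque ring pointer
 * returned by hal_srng_setup() can read back the configured parameters:
 *
 *	struct hal_srng_params params;
 *
 *	hal_get_srng_params(hal_soc, hal_ring, &params);
 *
 * and then inspect e.g. params.ring_id, params.num_entries or
 * params.entry_size.
 */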