/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "hal_api.h"
#include "target_type.h"
#include "wcss_version.h"
#include "qdf_module.h"
#ifdef QCA_WIFI_QCA6290
void hal_qca6290_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA8074
void hal_qca8074_attach(struct hal_soc *hal);
#endif
#if defined(QCA_WIFI_QCA8074V2) || defined(QCA_WIFI_QCA6018)
void hal_qca8074v2_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6390
void hal_qca6390_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCA6490
void hal_qca6490_attach(struct hal_soc *hal);
#endif
#ifdef QCA_WIFI_QCN9000
void hal_qcn9000_attach(struct hal_soc *hal);
#endif

#ifdef ENABLE_VERBOSE_DEBUG
bool is_hal_verbose_debug_enabled;
#endif

/**
 * hal_get_srng_ring_id() - get the ring id of a described ring
 * @hal: hal_soc data structure
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings)
 *
 * Return: the ring id or -EINVAL if the ring does not exist.
 */
static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
				int ring_num, int mac_id)
{
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	int ring_id;

	if (ring_num >= ring_config->max_rings) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: ring_num exceeded maximum no. of supported rings",
			  __func__);
		/* TODO: This is a programming error. Assert if this happens */
		return -EINVAL;
	}

	if (ring_config->lmac_ring) {
		ring_id = ring_config->start_ring_id + ring_num +
			(mac_id * HAL_MAX_RINGS_PER_LMAC);
	} else {
		ring_id = ring_config->start_ring_id + ring_num;
	}

	return ring_id;
}

static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
{
	/* TODO: Should we allocate srng structures dynamically? */
	return &(hal->srng_list[ring_id]);
}

#define HP_OFFSET_IN_REG_START 1
#define OFFSET_FROM_HP_TO_TP 4
static void hal_update_srng_hp_tp_address(struct hal_soc *hal_soc,
					  int shadow_config_index,
					  int ring_type,
					  int ring_num)
{
	struct hal_srng *srng;
	int ring_id;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal_soc, ring_type);

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
	if (ring_id < 0)
		return;

	srng = hal_get_srng(hal_soc, ring_id);

	if (ring_config->ring_dir == HAL_SRNG_DST_RING) {
		srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal_soc->dev_base_addr;
		hal_debug("tp_addr=%pK dev base addr %pK index %u",
			  srng->u.dst_ring.tp_addr, hal_soc->dev_base_addr,
			  shadow_config_index);
	} else {
		srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
			+ hal_soc->dev_base_addr;
		hal_debug("hp_addr=%pK dev base addr %pK index %u",
			  srng->u.src_ring.hp_addr,
			  hal_soc->dev_base_addr, shadow_config_index);
	}
}

QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
				     int ring_type,
				     int ring_num)
{
	uint32_t target_register;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type];
	int shadow_config_index = hal->num_shadow_registers_configured;

	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
		QDF_ASSERT(0);
		return QDF_STATUS_E_RESOURCES;
	}

	hal->num_shadow_registers_configured++;

	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
			    * ring_num);

	/* if the ring is a dst ring, we need to shadow the tail pointer */
	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
		target_register += OFFSET_FROM_HP_TO_TP;

	hal->shadow_config[shadow_config_index].addr = target_register;

	/* update hp/tp addr in the hal_soc structure */
	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
				      ring_num);

	hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x, ring_type %d, ring num %d",
		  target_register,
		  SHADOW_REGISTER(shadow_config_index),
		  shadow_config_index,
		  ring_type, ring_num);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_set_one_shadow_config);

QDF_STATUS hal_construct_shadow_config(void *hal_soc)
{
	int ring_type, ring_num;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
		struct hal_hw_srng_config *srng_config =
			&hal->hw_srng_table[ring_type];

		if (ring_type == CE_SRC ||
		    ring_type == CE_DST ||
		    ring_type == CE_DST_STATUS)
			continue;

		if (srng_config->lmac_ring)
			continue;

		for (ring_num = 0; ring_num < srng_config->max_rings;
		     ring_num++)
			hal_set_one_shadow_config(hal_soc, ring_type, ring_num);
	}

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(hal_construct_shadow_config);
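
/*
 * Illustrative sketch (not compiled): one way a caller could combine
 * hal_construct_shadow_config() with hal_set_one_shadow_config() and then
 * fetch the resulting table for the platform driver. The routine above skips
 * the CE ring types, so those are assumed to be registered individually by
 * whichever layer owns them; example_setup_shadow() and
 * example_num_ce_dst_rings are placeholders, not part of this driver.
 */
#if 0
static void example_setup_shadow(void *hal_soc, int example_num_ce_dst_rings)
{
	struct pld_shadow_reg_v2_cfg *cfg;
	int num_cfg, i;

	/* Shadow all non-CE, non-LMAC rings in one pass */
	hal_construct_shadow_config(hal_soc);

	/* CE destination rings are added one at a time */
	for (i = 0; i < example_num_ce_dst_rings; i++)
		hal_set_one_shadow_config(hal_soc, CE_DST, i);

	/* Retrieve the table consumed by the platform/firmware interface */
	hal_get_shadow_config(hal_soc, &cfg, &num_cfg);
}
#endif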

void hal_get_shadow_config(void *hal_soc,
	struct pld_shadow_reg_v2_cfg **shadow_config,
	int *num_shadow_registers_configured)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	*shadow_config = hal->shadow_config;
	*num_shadow_registers_configured =
		hal->num_shadow_registers_configured;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s", __func__);
}

qdf_export_symbol(hal_get_shadow_config);

static void hal_validate_shadow_register(struct hal_soc *hal,
				  uint32_t *destination,
				  uint32_t *shadow_address)
{
	unsigned int index;
	uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr;
	int destination_ba_offset =
		((char *)destination) - (char *)hal->dev_base_addr;

	index = shadow_address - shadow_0_offset;

	if (index >= MAX_SHADOW_REGISTERS) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: index %x out of bounds", __func__, index);
		goto error;
	} else if (hal->shadow_config[index].addr != destination_ba_offset) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: sanity check failure, expected %x, found %x",
			__func__, destination_ba_offset,
			hal->shadow_config[index].addr);
		goto error;
	}
	return;
error:
	qdf_print("%s: baddr %pK, destination %pK, shadow_address %pK s0offset %pK index %x",
		  __func__, hal->dev_base_addr, destination, shadow_address,
		  shadow_0_offset, index);
	QDF_BUG(0);
}

static void hal_target_based_configure(struct hal_soc *hal)
{
	switch (hal->target_type) {
#ifdef QCA_WIFI_QCA6290
	case TARGET_TYPE_QCA6290:
		hal->use_register_windowing = true;
		hal_qca6290_attach(hal);
	break;
#endif
#ifdef QCA_WIFI_QCA6390
	case TARGET_TYPE_QCA6390:
		hal->use_register_windowing = true;
		hal_qca6390_attach(hal);
	break;
#endif
#ifdef QCA_WIFI_QCA6490
	case TARGET_TYPE_QCA6490:
		hal->use_register_windowing = true;
		hal_qca6490_attach(hal);
	break;
#endif
#if defined(QCA_WIFI_QCA8074) && defined(WIFI_TARGET_TYPE_3_0)
	case TARGET_TYPE_QCA8074:
		hal_qca8074_attach(hal);
	break;
#endif

#if defined(QCA_WIFI_QCA8074V2)
	case TARGET_TYPE_QCA8074V2:
		hal_qca8074v2_attach(hal);
	break;
#endif

#if defined(QCA_WIFI_QCA6018)
	case TARGET_TYPE_QCA6018:
		hal_qca8074v2_attach(hal);
	break;
#endif

#ifdef QCA_WIFI_QCN9000
	case TARGET_TYPE_QCN9000:
		hal->use_register_windowing = true;
		hal_qcn9000_attach(hal);
	break;
#endif
	default:
	break;
	}
}

uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hif_target_info *tgt_info =
		hif_get_target_info_handle(hal_soc->hif_handle);

	return tgt_info->target_type;
}

qdf_export_symbol(hal_get_target_type);

/**
 * hal_attach() - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 *
 * Return: Opaque HAL SOC handle
 *	   NULL on failure
 */
void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev)
{
	struct hal_soc *hal;
	int i;

	hal = qdf_mem_malloc(sizeof(*hal));

	if (!hal) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: hal_soc allocation failed", __func__);
		goto fail0;
	}
	qdf_minidump_log(hal, sizeof(*hal), "hal_soc");
	hal->hif_handle = hif_handle;
	hal->dev_base_addr = hif_get_dev_ba(hif_handle);
	hal->qdf_dev = qdf_dev;
	hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent(
		qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
		HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr));
	if (!hal->shadow_rdptr_mem_paddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: hal->shadow_rdptr_mem_paddr allocation failed",
			__func__);
		goto fail1;
	}
	qdf_mem_zero(hal->shadow_rdptr_mem_vaddr,
		     sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX);

	hal->shadow_wrptr_mem_vaddr =
		(uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		&(hal->shadow_wrptr_mem_paddr));
	if (!hal->shadow_wrptr_mem_vaddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: hal->shadow_wrptr_mem_vaddr allocation failed",
			__func__);
		goto fail2;
	}
	qdf_mem_zero(hal->shadow_wrptr_mem_vaddr,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS);

	for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
		hal->srng_list[i].initialized = 0;
		hal->srng_list[i].ring_id = i;
	}

	qdf_spinlock_create(&hal->register_access_lock);
	hal->register_window = 0;
	hal->target_type = hal_get_target_type(hal_soc_to_hal_soc_handle(hal));

	hal_target_based_configure(hal);

	return (void *)hal;

fail2:
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
fail1:
	qdf_mem_free(hal);
fail0:
	return NULL;
}
qdf_export_symbol(hal_attach);
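
/*
 * Illustrative sketch (not compiled): typical attach/detach pairing from the
 * HIF layer. example_hal_lifecycle() is a placeholder; the real call sites
 * live outside this file.
 */
#if 0
static void example_hal_lifecycle(struct hif_opaque_softc *hif_hdl,
				  qdf_device_t qdf_dev)
{
	void *hal_soc = hal_attach(hif_hdl, qdf_dev);

	if (!hal_soc)
		return;		/* hal_soc or shadow pointer memory alloc failed */

	/* ... rings are created via hal_srng_setup() by the DP/CE layers ... */

	hal_detach(hal_soc);	/* also frees the shadow rd/wr pointer memory */
}
#endif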

/**
 * hal_get_meminfo() - Retrieve hal memory base address
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	mem->dev_base_addr = (void *)hal->dev_base_addr;
	mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
	mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
	mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
	mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
	hif_read_phy_mem_base((void *)hal->hif_handle,
			      (qdf_dma_addr_t *)&mem->dev_base_paddr);
}
qdf_export_symbol(hal_get_meminfo);

/**
 * hal_detach() - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF detach to release the
 * resources allocated in hal_attach().
 *
 * Return: None
 */
void hal_detach(void *hal_soc)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
		hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0);
	qdf_minidump_remove(hal);
	qdf_mem_free(hal);
}
qdf_export_symbol(hal_detach);

/**
 * hal_ce_dst_setup() - Initialize CE destination ring registers
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 * @ring_num: ring number of the CE destination ring
 */
static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
				    int ring_num)
{
	uint32_t reg_val = 0;
	uint32_t reg_addr;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, CE_DST);

	/* set DEST_MAX_LENGTH according to ce assignment */
	reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_ADDR(
			ring_config->reg_start[R0_INDEX] +
			(ring_num * ring_config->reg_size[R0_INDEX]));

	reg_val = HAL_REG_READ(hal, reg_addr);
	reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	reg_val |= srng->u.dst_ring.max_buffer_length &
		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
	HAL_REG_WRITE(hal, reg_addr, reg_val);
}

/**
 * hal_reo_read_write_ctrl_ix() - Read or write REO_DESTINATION_RING_CTRL_IX
 * @hal_soc_hdl: HAL SOC handle
 * @read: boolean value to indicate if read or write
 * @ix0: pointer to store IX0 reg value
 * @ix1: pointer to store IX1 reg value
 * @ix2: pointer to store IX2 reg value
 * @ix3: pointer to store IX3 reg value
 */
void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
				uint32_t *ix0, uint32_t *ix1,
				uint32_t *ix2, uint32_t *ix3)
{
	uint32_t reg_offset;
	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

	if (read) {
		if (ix0) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix0 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix1) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix1 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix2) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix2 = HAL_REG_READ(hal, reg_offset);
		}

		if (ix3) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			*ix3 = HAL_REG_READ(hal, reg_offset);
		}
	} else {
		if (ix0) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			HAL_REG_WRITE(hal, reg_offset, *ix0);
		}

		if (ix1) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			HAL_REG_WRITE(hal, reg_offset, *ix1);
		}

		if (ix2) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			HAL_REG_WRITE(hal, reg_offset, *ix2);
		}

		if (ix3) {
			reg_offset =
				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
						SEQ_WCSS_UMAC_REO_REG_OFFSET);
			HAL_REG_WRITE(hal, reg_offset, *ix3);
		}
	}
}
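
/*
 * Illustrative sketch (not compiled): read-modify-write of one REO
 * destination control register through the helper above. NULL pointers skip
 * the corresponding register; example_update_reo_ix2() and the remap value
 * are placeholders.
 */
#if 0
static void example_update_reo_ix2(hal_soc_handle_t hal_soc_hdl,
				   uint32_t remap_val)
{
	uint32_t ix2;

	/* Read IX2 only; IX0/IX1/IX3 are left untouched */
	hal_reo_read_write_ctrl_ix(hal_soc_hdl, true, NULL, NULL, &ix2, NULL);

	ix2 = remap_val;

	/* Write the new IX2 value back */
	hal_reo_read_write_ctrl_ix(hal_soc_hdl, false, NULL, NULL, &ix2, NULL);
}
#endif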

/**
 * hal_srng_dst_set_hp_paddr() - Set physical address to dest ring head pointer
 * @srng: srng pointer
 * @paddr: physical address
 */
void hal_srng_dst_set_hp_paddr(struct hal_srng *srng,
			       uint64_t paddr)
{
	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB,
			   paddr & 0xffffffff);
	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB,
			   paddr >> 32);
}

/**
 * hal_srng_dst_init_hp() - Initialize destination ring head pointer
 * @srng: srng pointer
 * @vaddr: virtual address
 */
void hal_srng_dst_init_hp(struct hal_srng *srng,
			  uint32_t *vaddr)
{
	if (!srng)
		return;

	srng->u.dst_ring.hp_addr = vaddr;
	SRNG_DST_REG_WRITE(srng, HP, srng->u.dst_ring.cached_hp);

	if (vaddr) {
		*srng->u.dst_ring.hp_addr = srng->u.dst_ring.cached_hp;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "hp_addr=%pK, cached_hp=%d, hp=%d",
			  (void *)srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp,
			  *srng->u.dst_ring.hp_addr);
	}
}

/**
 * hal_srng_hw_init() - Private function to initialize SRNG HW
 * @hal: HAL SOC handle
 * @srng: SRNG ring pointer
 */
static inline void hal_srng_hw_init(struct hal_soc *hal,
	struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		hal_srng_src_hw_init(hal, srng);
	else
		hal_srng_dst_hw_init(hal, srng);
}

#ifdef CONFIG_SHADOW_V2
#define ignore_shadow false
#define CHECK_SHADOW_REGISTERS true
#else
#define ignore_shadow true
#define CHECK_SHADOW_REGISTERS false
#endif

/**
 * hal_srng_setup() - Initialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of same type (starting
 * from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
 * hal_srng_params structure. Ring base address should be 8 byte aligned
 * and size of each ring entry should be queried using the API
 * hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *	   NULL on failure (if given ring is not available)
 */
void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params)
{
	int ring_id;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_srng *srng;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);
	void *dev_base_addr;
	int i;

	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
	if (ring_id < 0)
		return NULL;

	hal_verbose_debug("mac_id %d ring_id %d", mac_id, ring_id);

	srng = hal_get_srng(hal_soc, ring_id);

	if (srng->initialized) {
		hal_verbose_debug("Ring (ring_type %d, ring_num %d) already initialized",
				  ring_type, ring_num);
		return NULL;
	}

	dev_base_addr = hal->dev_base_addr;
	srng->ring_id = ring_id;
	srng->ring_dir = ring_config->ring_dir;
	srng->ring_base_paddr = ring_params->ring_base_paddr;
	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
	srng->entry_size = ring_config->entry_size;
	srng->num_entries = ring_params->num_entries;
	srng->ring_size = srng->num_entries * srng->entry_size;
	srng->ring_size_mask = srng->ring_size - 1;
	srng->msi_addr = ring_params->msi_addr;
	srng->msi_data = ring_params->msi_data;
	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
	srng->intr_batch_cntr_thres_entries =
		ring_params->intr_batch_cntr_thres_entries;
	srng->hal_soc = hal_soc;

	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++) {
		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
			+ (ring_num * ring_config->reg_size[i]);
	}

	/* Zero out the entire ring memory */
	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
		srng->num_entries) << 2);

	srng->flags = ring_params->flags;
#ifdef BIG_ENDIAN_HOST
	/* TODO: See if we should get these flags from caller */
	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
	srng->flags |= HAL_SRNG_MSI_SWAP;
	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
#endif

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size -
			srng->entry_size;
		srng->u.src_ring.tp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		srng->u.src_ring.low_threshold =
			ring_params->low_threshold * srng->entry_size;
		if (ring_config->lmac_ring) {
			/* For LMAC rings, head pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.src_ring.hp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
					HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
			srng->u.src_ring.hp_addr = SRNG_SRC_ADDR(srng, HP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
				    QDF_TRACE_LEVEL_ERROR,
				    "%s: Ring (%d, %d) missing shadow config",
				    __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_SRC_ADDR(srng, HP),
						     srng->u.src_ring.hp_addr);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.hp_addr =
			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
		if (ring_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			srng->u.dst_ring.tp_addr =
				&(hal->shadow_wrptr_mem_vaddr[ring_id -
				HAL_SRNG_LMAC1_ID_START]);
			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || srng->u.dst_ring.tp_addr == 0) {
			srng->u.dst_ring.tp_addr = SRNG_DST_ADDR(srng, TP);

			if (CHECK_SHADOW_REGISTERS) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
				    QDF_TRACE_LEVEL_ERROR,
				    "%s: Ring (%d, %d) missing shadow config",
				    __func__, ring_type, ring_num);
			}
		} else {
			hal_validate_shadow_register(hal,
						     SRNG_DST_ADDR(srng, TP),
						     srng->u.dst_ring.tp_addr);
		}
	}

	if (!(ring_config->lmac_ring)) {
		hal_srng_hw_init(hal, srng);

		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length =
				ring_params->max_buffer_length;
			hal_ce_dst_setup(hal, srng, ring_num);
		}
	}

	SRNG_LOCK_INIT(&srng->lock);

	srng->srng_event = 0;

	srng->initialized = true;

	return (void *)srng;
}
qdf_export_symbol(hal_srng_setup);
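
/*
 * Illustrative sketch (not compiled): allocating ring memory and creating an
 * SRNG as described in the hal_srng_setup() comment above.
 * example_srng_create() and the interrupt threshold values are placeholders
 * chosen by the caller.
 */
#if 0
static void *example_srng_create(void *hal_soc, qdf_device_t qdf_dev,
				 int ring_type, int ring_num,
				 uint32_t num_entries)
{
	struct hal_srng_params params = {0};
	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);

	if (num_entries > max_entries)
		num_entries = max_entries;

	/* Physically contiguous, 8 byte aligned ring memory */
	params.ring_base_vaddr = qdf_mem_alloc_consistent(qdf_dev,
				qdf_dev->dev, num_entries * entry_size,
				&params.ring_base_paddr);
	if (!params.ring_base_vaddr)
		return NULL;

	params.num_entries = num_entries;
	params.intr_timer_thres_us = 8;
	params.intr_batch_cntr_thres_entries = 1;

	return hal_srng_setup(hal_soc, ring_type, ring_num, 0, &params);
}
#endif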

/**
 * hal_srng_cleanup() - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Opaque HAL SRNG ring handle
 */
void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	SRNG_LOCK_DESTROY(&srng->lock);
	srng->initialized = 0;
}
qdf_export_symbol(hal_srng_cleanup);

/**
 * hal_srng_get_entrysize() - Returns size of ring entry in bytes
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: size of a ring entry in bytes
 */
uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->entry_size << 2;
}
qdf_export_symbol(hal_srng_get_entrysize);

/**
 * hal_srng_max_entries() - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->max_size / ring_config->entry_size;
}
qdf_export_symbol(hal_srng_max_entries);

enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type)
{
	struct hal_soc *hal = (struct hal_soc *)hal_soc;
	struct hal_hw_srng_config *ring_config =
		HAL_SRNG_CONFIG(hal, ring_type);

	return ring_config->ring_dir;
}

/**
 * hal_srng_dump() - Dump ring status
 * @srng: hal srng pointer
 */
void hal_srng_dump(struct hal_srng *srng)
{
	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		qdf_print("=== SRC RING %d ===", srng->ring_id);
		qdf_print("hp %u, reap_hp %u, tp %u, cached tp %u",
			  srng->u.src_ring.hp,
			  srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr,
			  srng->u.src_ring.cached_tp);
	} else {
		qdf_print("=== DST RING %d ===", srng->ring_id);
		qdf_print("tp %u, hp %u, cached hp %u, loop_cnt %u",
			  srng->u.dst_ring.tp,
			  *srng->u.dst_ring.hp_addr,
			  srng->u.dst_ring.cached_hp,
			  srng->u.dst_ring.loop_cnt);
	}
}

/**
 * hal_get_srng_params() - Retrieve SRNG parameters for a given ring from HAL
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl,
			 struct hal_srng_params *ring_params)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	int i = 0;

	ring_params->ring_id = srng->ring_id;
	ring_params->ring_dir = srng->ring_dir;
	ring_params->entry_size = srng->entry_size;

	ring_params->ring_base_paddr = srng->ring_base_paddr;
	ring_params->ring_base_vaddr = srng->ring_base_vaddr;
	ring_params->num_entries = srng->num_entries;
	ring_params->msi_addr = srng->msi_addr;
	ring_params->msi_data = srng->msi_data;
	ring_params->intr_timer_thres_us = srng->intr_timer_thres_us;
	ring_params->intr_batch_cntr_thres_entries =
		srng->intr_batch_cntr_thres_entries;
	ring_params->low_threshold = srng->u.src_ring.low_threshold;
	ring_params->flags = srng->flags;

	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++)
		ring_params->hwreg_base[i] = srng->hwreg_base[i];
}
qdf_export_symbol(hal_get_srng_params);
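
/*
 * Illustrative sketch (not compiled): querying an existing ring's parameters,
 * e.g. to hand its MSI address/data to interrupt configuration.
 * example_get_msi_info() is a placeholder.
 */
#if 0
static void example_get_msi_info(hal_soc_handle_t hal_soc_hdl,
				 hal_ring_handle_t hal_ring_hdl,
				 qdf_dma_addr_t *msi_addr, uint32_t *msi_data)
{
	struct hal_srng_params params;

	hal_get_srng_params(hal_soc_hdl, hal_ring_hdl, &params);
	*msi_addr = params.msi_addr;
	*msi_data = params.msi_data;
}
#endif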
855