xref: /wlan-dirver/qca-wifi-host-cmn/hal/wifi3.0/hal_srng.c (revision 2ea97ac98512848a8d721c76dddf82576e7c417e)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "hal_hw_headers.h"
20 #include "hal_api.h"
21 #include "target_type.h"
22 #include "wcss_version.h"
23 #include "qdf_module.h"
24 
25 #ifdef QCA_WIFI_QCA6290
26 void hal_qca6290_attach(struct hal_soc *hal);
27 #endif
28 #ifdef QCA_WIFI_QCA8074
29 void hal_qca8074_attach(struct hal_soc *hal);
30 #endif
31 #if defined(QCA_WIFI_QCA8074V2) || defined(QCA_WIFI_QCA6018)
32 void hal_qca8074v2_attach(struct hal_soc *hal);
33 #endif
34 #ifdef QCA_WIFI_QCA6390
35 void hal_qca6390_attach(struct hal_soc *hal);
36 #endif
37 #ifdef QCA_WIFI_QCA6490
38 void hal_qca6490_attach(struct hal_soc *hal);
39 #endif
40 #ifdef QCA_WIFI_QCN9000
41 void hal_qcn9000_attach(struct hal_soc *hal);
42 #endif
43 #ifdef QCA_WIFI_QCN6122
44 void hal_qcn6122_attach(struct hal_soc *hal);
45 #endif
46 #ifdef QCA_WIFI_QCA6750
47 void hal_qca6750_attach(struct hal_soc *hal);
48 #endif
49 #ifdef QCA_WIFI_QCA5018
50 void hal_qca5018_attach(struct hal_soc *hal);
51 #endif
52 
53 #ifdef ENABLE_VERBOSE_DEBUG
54 bool is_hal_verbose_debug_enabled;
55 #endif
56 
57 #ifdef ENABLE_HAL_REG_WR_HISTORY
58 struct hal_reg_write_fail_history hal_reg_wr_hist;
59 
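/**
 * hal_reg_wr_fail_history_add() - record a failed register write
 * @hal_soc: HAL SOC handle
 * @offset: offset of the register that failed to update
 * @wr_val: value that was written
 * @rd_val: value that was read back
 *
 * Stores the failure with a timestamp in the circular reg_wr_fail_hist
 * buffer.
 */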
60 void hal_reg_wr_fail_history_add(struct hal_soc *hal_soc,
61 				 uint32_t offset,
62 				 uint32_t wr_val, uint32_t rd_val)
63 {
64 	struct hal_reg_write_fail_entry *record;
65 	int idx;
66 
67 	idx = hal_history_get_next_index(&hal_soc->reg_wr_fail_hist->index,
68 					 HAL_REG_WRITE_HIST_SIZE);
69 
70 	record = &hal_soc->reg_wr_fail_hist->record[idx];
71 
72 	record->timestamp = qdf_get_log_timestamp();
73 	record->reg_offset = offset;
74 	record->write_val = wr_val;
75 	record->read_val = rd_val;
76 }
77 
78 static void hal_reg_write_fail_history_init(struct hal_soc *hal)
79 {
80 	hal->reg_wr_fail_hist = &hal_reg_wr_hist;
81 
82 	qdf_atomic_set(&hal->reg_wr_fail_hist->index, -1);
83 }
84 #else
85 static void hal_reg_write_fail_history_init(struct hal_soc *hal)
86 {
87 }
88 #endif
89 
90 /**
91  * hal_get_srng_ring_id() - get the ring id of a described ring
92  * @hal: hal_soc data structure
93  * @ring_type: type enum describing the ring
94  * @ring_num: which ring of the ring type
95  * @mac_id: which mac does the ring belong to (or 0 for non-lmac rings)
96  *
97  * Return: the ring id or -EINVAL if the ring does not exist.
98  */
99 static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
100 				int ring_num, int mac_id)
101 {
102 	struct hal_hw_srng_config *ring_config =
103 		HAL_SRNG_CONFIG(hal, ring_type);
104 	int ring_id;
105 
106 	if (ring_num >= ring_config->max_rings) {
107 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
108 			  "%s: ring_num exceeded maximum no. of supported rings",
109 			  __func__);
110 		/* TODO: This is a programming error. Assert if this happens */
111 		return -EINVAL;
112 	}
113 
114 	if (ring_config->lmac_ring) {
115 		ring_id = ring_config->start_ring_id + ring_num +
116 			(mac_id * HAL_MAX_RINGS_PER_LMAC);
117 	} else {
118 		ring_id = ring_config->start_ring_id + ring_num;
119 	}
120 
121 	return ring_id;
122 }
123 
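/**
 * hal_get_srng() - get the SRNG structure for a given ring id
 * @hal: hal_soc data structure
 * @ring_id: ring id returned by hal_get_srng_ring_id()
 *
 * Return: pointer to the entry in the statically allocated srng_list
 */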
124 static struct hal_srng *hal_get_srng(struct hal_soc *hal, int ring_id)
125 {
126 	/* TODO: Should we allocate srng structures dynamically? */
127 	return &(hal->srng_list[ring_id]);
128 }
129 
130 #define HP_OFFSET_IN_REG_START 1
131 #define OFFSET_FROM_HP_TO_TP 4
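/**
 * hal_update_srng_hp_tp_address() - point a ring's HP/TP to a shadow register
 * @hal_soc: HAL SOC handle
 * @shadow_config_index: index of the shadow register being configured
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 *
 * Destination rings get their tail pointer address updated, source rings
 * their head pointer address.
 */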
132 static void hal_update_srng_hp_tp_address(struct hal_soc *hal_soc,
133 					  int shadow_config_index,
134 					  int ring_type,
135 					  int ring_num)
136 {
137 	struct hal_srng *srng;
138 	int ring_id;
139 	struct hal_hw_srng_config *ring_config =
140 		HAL_SRNG_CONFIG(hal_soc, ring_type);
141 
142 	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, 0);
143 	if (ring_id < 0)
144 		return;
145 
146 	srng = hal_get_srng(hal_soc, ring_id);
147 
148 	if (ring_config->ring_dir == HAL_SRNG_DST_RING) {
149 		srng->u.dst_ring.tp_addr = SHADOW_REGISTER(shadow_config_index)
150 			+ hal_soc->dev_base_addr;
151 		hal_debug("tp_addr=%pK dev base addr %pK index %u",
152 			  srng->u.dst_ring.tp_addr, hal_soc->dev_base_addr,
153 			  shadow_config_index);
154 	} else {
155 		srng->u.src_ring.hp_addr = SHADOW_REGISTER(shadow_config_index)
156 			+ hal_soc->dev_base_addr;
157 		hal_debug("hp_addr=%pK dev base addr %pK index %u",
158 			  srng->u.src_ring.hp_addr,
159 			  hal_soc->dev_base_addr, shadow_config_index);
160 	}
161 
162 }
163 
164 #ifdef GENERIC_SHADOW_REGISTER_ACCESS_ENABLE
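/**
 * hal_set_one_target_reg_config() - record one target register for shadowing
 * @hal: HAL SOC handle
 * @target_reg_offset: offset of the target register
 * @list_index: index into list_shadow_reg_config to fill
 */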
165 void hal_set_one_target_reg_config(struct hal_soc *hal,
166 				   uint32_t target_reg_offset,
167 				   int list_index)
168 {
169 	int i = list_index;
170 
171 	qdf_assert_always(i < MAX_GENERIC_SHADOW_REG);
172 	hal->list_shadow_reg_config[i].target_register =
173 		target_reg_offset;
174 	hal->num_generic_shadow_regs_configured++;
175 }
176 
177 qdf_export_symbol(hal_set_one_target_reg_config);
178 
179 #define REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET 0x4
180 #define MAX_REO_REMAP_SHADOW_REGS 4
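/**
 * hal_set_shadow_regs() - set up the generic shadow register targets
 * @hal_soc: HAL SOC handle
 *
 * Records the four REO destination ring control registers and the HP
 * register of the IPA TX completion (WBM2SW) ring as shadow targets.
 *
 * Return: QDF_STATUS_SUCCESS
 */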
181 QDF_STATUS hal_set_shadow_regs(void *hal_soc)
182 {
183 	uint32_t target_reg_offset;
184 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
185 	int i;
186 	struct hal_hw_srng_config *srng_config =
187 		&hal->hw_srng_table[WBM2SW_RELEASE];
188 
189 	target_reg_offset =
190 		HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
191 			SEQ_WCSS_UMAC_REO_REG_OFFSET);
192 
193 	for (i = 0; i < MAX_REO_REMAP_SHADOW_REGS; i++) {
194 		hal_set_one_target_reg_config(hal, target_reg_offset, i);
195 		target_reg_offset += REO_R0_DESTINATION_RING_CTRL_ADDR_OFFSET;
196 	}
197 
198 	target_reg_offset = srng_config->reg_start[HP_OFFSET_IN_REG_START];
199 	target_reg_offset += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
200 			      * HAL_IPA_TX_COMP_RING_IDX);
201 
202 	hal_set_one_target_reg_config(hal, target_reg_offset, i);
203 	return QDF_STATUS_SUCCESS;
204 }
205 
206 qdf_export_symbol(hal_set_shadow_regs);
207 
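/**
 * hal_construct_shadow_regs() - program shadow_config from the generic
 *  shadow register list
 * @hal_soc: HAL SOC handle
 *
 * Return: QDF_STATUS_SUCCESS
 */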
208 QDF_STATUS hal_construct_shadow_regs(void *hal_soc)
209 {
210 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
211 	int shadow_config_index = hal->num_shadow_registers_configured;
212 	int i;
213 	int num_regs = hal->num_generic_shadow_regs_configured;
214 
215 	for (i = 0; i < num_regs; i++) {
216 		qdf_assert_always(shadow_config_index < MAX_SHADOW_REGISTERS);
217 		hal->shadow_config[shadow_config_index].addr =
218 			hal->list_shadow_reg_config[i].target_register;
219 		hal->list_shadow_reg_config[i].shadow_config_index =
220 			shadow_config_index;
221 		hal->list_shadow_reg_config[i].va =
222 			SHADOW_REGISTER(shadow_config_index) +
223 			(uintptr_t)hal->dev_base_addr;
224 		hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x",
225 			  hal->shadow_config[shadow_config_index].addr,
226 			  SHADOW_REGISTER(shadow_config_index),
227 			  shadow_config_index);
228 		shadow_config_index++;
229 		hal->num_shadow_registers_configured++;
230 	}
231 	return QDF_STATUS_SUCCESS;
232 }
233 
234 qdf_export_symbol(hal_construct_shadow_regs);
235 #endif
236 
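/**
 * hal_set_one_shadow_config() - map one ring's HP/TP register to a shadow
 *  register
 * @hal_soc: HAL SOC handle
 * @ring_type: type enum describing the ring
 * @ring_num: which ring of the ring type
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_RESOURCES if all
 * shadow registers are already in use
 */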
237 QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
238 				     int ring_type,
239 				     int ring_num)
240 {
241 	uint32_t target_register;
242 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
243 	struct hal_hw_srng_config *srng_config = &hal->hw_srng_table[ring_type];
244 	int shadow_config_index = hal->num_shadow_registers_configured;
245 
246 	if (shadow_config_index >= MAX_SHADOW_REGISTERS) {
247 		QDF_ASSERT(0);
248 		return QDF_STATUS_E_RESOURCES;
249 	}
250 
251 	hal->num_shadow_registers_configured++;
252 
253 	target_register = srng_config->reg_start[HP_OFFSET_IN_REG_START];
254 	target_register += (srng_config->reg_size[HP_OFFSET_IN_REG_START]
255 			    *ring_num);
256 
257 	/* if the ring is a dst ring, we need to shadow the tail pointer */
258 	if (srng_config->ring_dir == HAL_SRNG_DST_RING)
259 		target_register += OFFSET_FROM_HP_TO_TP;
260 
261 	hal->shadow_config[shadow_config_index].addr = target_register;
262 
263 	/* update hp/tp addr in the hal_soc structure */
264 	hal_update_srng_hp_tp_address(hal_soc, shadow_config_index, ring_type,
265 				      ring_num);
266 
267 	hal_debug("target_reg %x, shadow register 0x%x shadow_index 0x%x, ring_type %d, ring num %d",
268 		  target_register,
269 		  SHADOW_REGISTER(shadow_config_index),
270 		  shadow_config_index,
271 		  ring_type, ring_num);
272 
273 	return QDF_STATUS_SUCCESS;
274 }
275 
276 qdf_export_symbol(hal_set_one_shadow_config);
277 
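/**
 * hal_construct_srng_shadow_regs() - create shadow configurations for all
 *  non-CE, non-LMAC rings
 * @hal_soc: HAL SOC handle
 *
 * Return: QDF_STATUS_SUCCESS
 */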
278 QDF_STATUS hal_construct_srng_shadow_regs(void *hal_soc)
279 {
280 	int ring_type, ring_num;
281 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
282 
283 	for (ring_type = 0; ring_type < MAX_RING_TYPES; ring_type++) {
284 		struct hal_hw_srng_config *srng_config =
285 			&hal->hw_srng_table[ring_type];
286 
287 		if (ring_type == CE_SRC ||
288 		    ring_type == CE_DST ||
289 		    ring_type == CE_DST_STATUS)
290 			continue;
291 
292 		if (srng_config->lmac_ring)
293 			continue;
294 
295 		for (ring_num = 0; ring_num < srng_config->max_rings;
296 		     ring_num++)
297 			hal_set_one_shadow_config(hal_soc, ring_type, ring_num);
298 	}
299 
300 	return QDF_STATUS_SUCCESS;
301 }
302 
303 qdf_export_symbol(hal_construct_srng_shadow_regs);
304 
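/**
 * hal_get_shadow_config() - retrieve the shadow register configuration
 * @hal_soc: HAL SOC handle
 * @shadow_config: output pointer set to the shadow config table
 * @num_shadow_registers_configured: output count of configured entries
 */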
305 void hal_get_shadow_config(void *hal_soc,
306 	struct pld_shadow_reg_v2_cfg **shadow_config,
307 	int *num_shadow_registers_configured)
308 {
309 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
310 
311 	*shadow_config = hal->shadow_config;
312 	*num_shadow_registers_configured =
313 		hal->num_shadow_registers_configured;
314 }
315 
316 qdf_export_symbol(hal_get_shadow_config);
317 
318 
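/**
 * hal_validate_shadow_register() - sanity check a shadow register mapping
 * @hal: HAL SOC handle
 * @destination: address of the register being shadowed
 * @shadow_address: shadow register address to validate
 *
 * Asserts via QDF_BUG() if the shadow index is out of bounds or does not
 * map back to the expected destination register.
 */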
319 static void hal_validate_shadow_register(struct hal_soc *hal,
320 				  uint32_t *destination,
321 				  uint32_t *shadow_address)
322 {
323 	unsigned int index;
324 	uint32_t *shadow_0_offset = SHADOW_REGISTER(0) + hal->dev_base_addr;
325 	int destination_ba_offset =
326 		((char *)destination) - (char *)hal->dev_base_addr;
327 
328 	index = shadow_address - shadow_0_offset;
329 
330 	if (index >= MAX_SHADOW_REGISTERS) {
331 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
332 			"%s: index %x out of bounds", __func__, index);
333 		goto error;
334 	} else if (hal->shadow_config[index].addr != destination_ba_offset) {
335 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
336 			"%s: sanity check failure, expected %x, found %x",
337 			__func__, destination_ba_offset,
338 			hal->shadow_config[index].addr);
339 		goto error;
340 	}
341 	return;
342 error:
343 	qdf_print("baddr %pK, destination %pK, shadow_address %pK s0offset %pK index %x",
344 		  hal->dev_base_addr, destination, shadow_address,
345 		  shadow_0_offset, index);
346 	QDF_BUG(0);
347 	return;
348 }
349 
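/**
 * hal_target_based_configure() - apply target-specific HAL configuration
 * @hal: HAL SOC handle
 *
 * Sets the register windowing / static window map options and calls the
 * matching hal_<target>_attach() routine based on hal->target_type.
 */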
350 static void hal_target_based_configure(struct hal_soc *hal)
351 {
352 	/*
353 	 * Indicate initialization of SRNGs to avoid force wake,
354 	 * as UMAC power collapse is not enabled yet.
355 	 */
356 	hal->init_phase = true;
357 
358 	switch (hal->target_type) {
359 #ifdef QCA_WIFI_QCA6290
360 	case TARGET_TYPE_QCA6290:
361 		hal->use_register_windowing = true;
362 		hal_qca6290_attach(hal);
363 	break;
364 #endif
365 #ifdef QCA_WIFI_QCA6390
366 	case TARGET_TYPE_QCA6390:
367 		hal->use_register_windowing = true;
368 		hal_qca6390_attach(hal);
369 	break;
370 #endif
371 #ifdef QCA_WIFI_QCA6490
372 	case TARGET_TYPE_QCA6490:
373 		hal->use_register_windowing = true;
374 		hal_qca6490_attach(hal);
375 	break;
376 #endif
377 #ifdef QCA_WIFI_QCA6750
378 	case TARGET_TYPE_QCA6750:
379 		hal->use_register_windowing = true;
380 		hal->static_window_map = true;
381 		hal_qca6750_attach(hal);
382 	break;
383 #endif
384 #if defined(QCA_WIFI_QCA8074) && defined(WIFI_TARGET_TYPE_3_0)
385 	case TARGET_TYPE_QCA8074:
386 		hal_qca8074_attach(hal);
387 	break;
388 #endif
389 
390 #if defined(QCA_WIFI_QCA8074V2)
391 	case TARGET_TYPE_QCA8074V2:
392 		hal_qca8074v2_attach(hal);
393 	break;
394 #endif
395 
396 #if defined(QCA_WIFI_QCA6018)
397 	case TARGET_TYPE_QCA6018:
398 		hal_qca8074v2_attach(hal);
399 	break;
400 #endif
401 
402 #if defined(QCA_WIFI_QCN6122)
403 	case TARGET_TYPE_QCN6122:
404 		hal->use_register_windowing = true;
405 		/*
406 		 * Static window map is enabled for qcn6122 to use 2MB BAR
407 		 * size and use multiple windows to write into registers.
408 		 */
409 		hal->static_window_map = true;
410 		hal_qcn6122_attach(hal);
411 		break;
412 #endif
413 
414 #ifdef QCA_WIFI_QCN9000
415 	case TARGET_TYPE_QCN9000:
416 		hal->use_register_windowing = true;
417 		/*
418 		 * Static window map is enabled for qcn9000 to use 2MB BAR
419 		 * size and use multiple windows to write into registers.
420 		 */
421 		hal->static_window_map = true;
422 		hal_qcn9000_attach(hal);
423 	break;
424 #endif
425 #ifdef QCA_WIFI_QCA5018
426 	case TARGET_TYPE_QCA5018:
427 		hal->use_register_windowing = true;
428 		hal->static_window_map = true;
429 		hal_qca5018_attach(hal);
430 	break;
431 #endif
432 	default:
433 	break;
434 	}
435 }
436 
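/**
 * hal_get_target_type() - get the target type from HIF
 * @hal_soc_hdl: Opaque HAL SOC handle
 *
 * Return: target type of the SOC as reported by HIF
 */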
437 uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl)
438 {
439 	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
440 	struct hif_target_info *tgt_info =
441 		hif_get_target_info_handle(hal_soc->hif_handle);
442 
443 	return tgt_info->target_type;
444 }
445 
446 qdf_export_symbol(hal_get_target_type);
447 
448 #if defined(FEATURE_HAL_DELAYED_REG_WRITE) || \
449 	defined(FEATURE_HAL_DELAYED_REG_WRITE_V2)
450 /**
451  * hal_is_reg_write_tput_level_high() - check if throughput level is high
452  * @hal: hal_soc pointer
453  *
454  * Return: true if throughput is high, else false.
455  */
456 static inline bool hal_is_reg_write_tput_level_high(struct hal_soc *hal)
457 {
458 	int bw_level = hif_get_bandwidth_level(hal->hif_handle);
459 
460 	return bw_level >= PLD_BUS_WIDTH_MEDIUM;
461 }
462 
463 static inline
464 char *hal_fill_reg_write_srng_stats(struct hal_srng *srng,
465 				    char *buf, qdf_size_t size)
466 {
467 	qdf_scnprintf(buf, size, "enq %u deq %u coal %u direct %u",
468 		      srng->wstats.enqueues, srng->wstats.dequeues,
469 		      srng->wstats.coalesces, srng->wstats.direct);
470 	return buf;
471 }
472 
473 /* bytes for local buffer */
474 #define HAL_REG_WRITE_SRNG_STATS_LEN 100
475 
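/**
 * hal_dump_reg_write_srng_stats() - dump per-SRNG delayed reg write stats
 * @hal_soc_hdl: Opaque HAL SOC handle
 *
 * Dumps the enqueue/dequeue/coalesce/direct counters for the SW2TCL1,
 * WBM2SW0 and REO2SW1-3 rings.
 */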
476 void hal_dump_reg_write_srng_stats(hal_soc_handle_t hal_soc_hdl)
477 {
478 	struct hal_srng *srng;
479 	char buf[HAL_REG_WRITE_SRNG_STATS_LEN];
480 	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
481 
482 	srng = hal_get_srng(hal, HAL_SRNG_SW2TCL1);
483 	hal_debug("SW2TCL1: %s",
484 		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
485 
486 	srng = hal_get_srng(hal, HAL_SRNG_WBM2SW0_RELEASE);
487 	hal_debug("WBM2SW0: %s",
488 		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
489 
490 	srng = hal_get_srng(hal, HAL_SRNG_REO2SW1);
491 	hal_debug("REO2SW1: %s",
492 		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
493 
494 	srng = hal_get_srng(hal, HAL_SRNG_REO2SW2);
495 	hal_debug("REO2SW2: %s",
496 		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
497 
498 	srng = hal_get_srng(hal, HAL_SRNG_REO2SW3);
499 	hal_debug("REO2SW3: %s",
500 		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)));
501 }
502 
503 #ifdef FEATURE_HAL_DELAYED_REG_WRITE_V2
504 /**
505  * hal_dump_tcl_stats() - dump the TCL reg write stats
506  * @hal: hal_soc pointer
507  *
508  * Return: None
509  */
510 static inline void hal_dump_tcl_stats(struct hal_soc *hal)
511 {
512 	struct hal_srng *srng = hal_get_srng(hal, HAL_SRNG_SW2TCL1);
513 	uint32_t *hist = hal->tcl_stats.sched_delay;
514 	char buf[HAL_REG_WRITE_SRNG_STATS_LEN];
515 
516 	hal_debug("TCL: %s sched-delay hist %u %u %u %u",
517 		  hal_fill_reg_write_srng_stats(srng, buf, sizeof(buf)),
518 		  hist[REG_WRITE_SCHED_DELAY_SUB_100us],
519 		  hist[REG_WRITE_SCHED_DELAY_SUB_1000us],
520 		  hist[REG_WRITE_SCHED_DELAY_SUB_5000us],
521 		  hist[REG_WRITE_SCHED_DELAY_GT_5000us]);
522 	hal_debug("wq_dly %u wq_dir %u tim_enq %u tim_dir %u enq_tim_cnt %u dir_tim_cnt %u rst_tim_cnt %u",
523 		  hal->tcl_stats.wq_delayed,
524 		  hal->tcl_stats.wq_direct,
525 		  hal->tcl_stats.timer_enq,
526 		  hal->tcl_stats.timer_direct,
527 		  hal->tcl_stats.enq_timer_set,
528 		  hal->tcl_stats.direct_timer_set,
529 		  hal->tcl_stats.timer_reset);
530 }
531 
532 #else
533 static inline void hal_dump_tcl_stats(struct hal_soc *hal)
534 {
535 }
536 #endif
537 
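/**
 * hal_dump_reg_write_stats() - dump SOC-wide delayed reg write stats
 * @hal_soc_hdl: Opaque HAL SOC handle
 */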
538 void hal_dump_reg_write_stats(hal_soc_handle_t hal_soc_hdl)
539 {
540 	uint32_t *hist;
541 	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
542 
543 	hist = hal->stats.wstats.sched_delay;
544 	hal_debug("wstats: enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
545 		  qdf_atomic_read(&hal->stats.wstats.enqueues),
546 		  hal->stats.wstats.dequeues,
547 		  qdf_atomic_read(&hal->stats.wstats.coalesces),
548 		  qdf_atomic_read(&hal->stats.wstats.direct),
549 		  qdf_atomic_read(&hal->stats.wstats.q_depth),
550 		  hal->stats.wstats.max_q_depth,
551 		  hist[REG_WRITE_SCHED_DELAY_SUB_100us],
552 		  hist[REG_WRITE_SCHED_DELAY_SUB_1000us],
553 		  hist[REG_WRITE_SCHED_DELAY_SUB_5000us],
554 		  hist[REG_WRITE_SCHED_DELAY_GT_5000us]);
555 
556 	hal_dump_tcl_stats(hal);
557 }
558 
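/**
 * hal_get_reg_write_pending_work() - get count of pending reg write works
 * @hal_soc: HAL SOC handle
 *
 * Return: number of delayed register write works still outstanding
 */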
559 int hal_get_reg_write_pending_work(void *hal_soc)
560 {
561 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
562 
563 	return qdf_atomic_read(&hal->active_work_cnt);
564 }
565 
566 #endif
567 
568 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
569 #ifdef MEMORY_DEBUG
570 /*
571  * Length of the queue (array) used to hold delayed register writes.
572  * Must be a power of 2, since indices are masked with (length - 1).
573  */
574 #define HAL_REG_WRITE_QUEUE_LEN 128
575 #else
576 #define HAL_REG_WRITE_QUEUE_LEN 32
577 #endif
578 
579 #ifdef FEATURE_HAL_DELAYED_REG_WRITE_V2
580 /**
581  * hal_process_reg_write_q_elem() - process a register write queue element
582  * @hal: hal_soc pointer
583  * @q_elem: pointer to hal register write queue element
584  *
585  * Return: The value which was written to the address
586  */
587 static uint32_t
588 hal_process_reg_write_q_elem(struct hal_soc *hal,
589 			     struct hal_reg_write_q_elem *q_elem)
590 {
591 	struct hal_srng *srng = q_elem->srng;
592 	uint32_t write_val;
593 
594 	SRNG_LOCK(&srng->lock);
595 	srng->reg_write_in_progress = false;
596 	srng->wstats.dequeues++;
597 
598 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
599 		write_val = srng->u.src_ring.hp;
600 		q_elem->dequeue_val = write_val;
601 		q_elem->valid = 0;
602 		SRNG_UNLOCK(&srng->lock);
603 		hal_write_address_32_mb(hal,
604 					srng->u.src_ring.hp_addr,
605 					write_val, false);
606 	} else {
607 		write_val = srng->u.dst_ring.tp;
608 		q_elem->dequeue_val = write_val;
609 		q_elem->valid = 0;
610 		SRNG_UNLOCK(&srng->lock);
611 		hal_write_address_32_mb(hal,
612 					srng->u.dst_ring.tp_addr,
613 					write_val, false);
614 	}
615 
616 	return write_val;
617 }
618 #else
619 /**
620  * hal_process_reg_write_q_elem() - process a register write queue element
621  * @hal: hal_soc pointer
622  * @q_elem: pointer to hal register write queue element
623  *
624  * Return: The value which was written to the address
625  */
626 static uint32_t
627 hal_process_reg_write_q_elem(struct hal_soc *hal,
628 			     struct hal_reg_write_q_elem *q_elem)
629 {
630 	struct hal_srng *srng = q_elem->srng;
631 	uint32_t write_val;
632 
633 	SRNG_LOCK(&srng->lock);
634 
635 	srng->reg_write_in_progress = false;
636 	srng->wstats.dequeues++;
637 
638 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
639 		q_elem->dequeue_val = srng->u.src_ring.hp;
640 		hal_write_address_32_mb(hal,
641 					srng->u.src_ring.hp_addr,
642 					srng->u.src_ring.hp, false);
643 		write_val = srng->u.src_ring.hp;
644 	} else {
645 		q_elem->dequeue_val = srng->u.dst_ring.tp;
646 		hal_write_address_32_mb(hal,
647 					srng->u.dst_ring.tp_addr,
648 					srng->u.dst_ring.tp, false);
649 		write_val = srng->u.dst_ring.tp;
650 	}
651 
652 	q_elem->valid = 0;
653 	SRNG_UNLOCK(&srng->lock);
654 
655 	return write_val;
656 }
657 #endif
658 
659 /**
660  * hal_reg_write_fill_sched_delay_hist() - fill reg write delay histogram in hal
661  * @hal: hal_soc pointer
662  * @delay_us: delay in microseconds
663  *
664  * Return: None
665  */
666 static inline void hal_reg_write_fill_sched_delay_hist(struct hal_soc *hal,
667 						       uint64_t delay_us)
668 {
669 	uint32_t *hist;
670 
671 	hist = hal->stats.wstats.sched_delay;
672 
673 	if (delay_us < 100)
674 		hist[REG_WRITE_SCHED_DELAY_SUB_100us]++;
675 	else if (delay_us < 1000)
676 		hist[REG_WRITE_SCHED_DELAY_SUB_1000us]++;
677 	else if (delay_us < 5000)
678 		hist[REG_WRITE_SCHED_DELAY_SUB_5000us]++;
679 	else
680 		hist[REG_WRITE_SCHED_DELAY_GT_5000us]++;
681 }
682 
683 /**
684  * hal_reg_write_work() - Worker to process delayed writes
685  * @arg: hal_soc pointer
686  *
687  * Return: None
688  */
689 static void hal_reg_write_work(void *arg)
690 {
691 	int32_t q_depth, write_val;
692 	struct hal_soc *hal = arg;
693 	struct hal_reg_write_q_elem *q_elem;
694 	uint64_t delta_us;
695 	uint8_t ring_id;
696 	uint32_t *addr;
697 	uint32_t num_processed = 0;
698 
699 	q_elem = &hal->reg_write_queue[(hal->read_idx)];
700 	q_elem->work_scheduled_time = qdf_get_log_timestamp();
701 
702 	/* Make sure q_elem is consistent in memory across cores */
703 	qdf_rmb();
704 	if (!q_elem->valid)
705 		return;
706 
707 	q_depth = qdf_atomic_read(&hal->stats.wstats.q_depth);
708 	if (q_depth > hal->stats.wstats.max_q_depth)
709 		hal->stats.wstats.max_q_depth =  q_depth;
710 
711 	if (hif_prevent_link_low_power_states(hal->hif_handle)) {
712 		hal->stats.wstats.prevent_l1_fails++;
713 		return;
714 	}
715 
716 	while (true) {
717 		qdf_rmb();
718 		if (!q_elem->valid)
719 			break;
720 
721 		q_elem->dequeue_time = qdf_get_log_timestamp();
722 		ring_id = q_elem->srng->ring_id;
723 		addr = q_elem->addr;
724 		delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
725 						      q_elem->enqueue_time);
726 		hal_reg_write_fill_sched_delay_hist(hal, delta_us);
727 
728 		hal->stats.wstats.dequeues++;
729 		qdf_atomic_dec(&hal->stats.wstats.q_depth);
730 
731 		write_val = hal_process_reg_write_q_elem(hal, q_elem);
732 		hal_verbose_debug("read_idx %u srng 0x%x, addr 0x%pK dequeue_val %u sched delay %llu us",
733 				  hal->read_idx, ring_id, addr, write_val, delta_us);
734 
735 		num_processed++;
736 		hal->read_idx = (hal->read_idx + 1) &
737 					(HAL_REG_WRITE_QUEUE_LEN - 1);
738 		q_elem = &hal->reg_write_queue[(hal->read_idx)];
739 	}
740 
741 	hif_allow_link_low_power_states(hal->hif_handle);
742 	/*
743 	 * Decrement active_work_cnt by the number of elements dequeued after
744 	 * hif_allow_link_low_power_states.
745 	 * This makes sure that hif_try_complete_tasks will wait till we make
746 	 * the bus access in hif_allow_link_low_power_states. This will avoid
747 	 * race condition between delayed register worker and bus suspend
748 	 * (system suspend or runtime suspend).
749 	 *
750 	 * The following decrement should be done at the end!
751 	 */
752 	qdf_atomic_sub(num_processed, &hal->active_work_cnt);
753 }
754 
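/**
 * __hal_flush_reg_write_work() - cancel any pending delayed reg write work
 * @hal: HAL SOC handle
 */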
755 static void __hal_flush_reg_write_work(struct hal_soc *hal)
756 {
757 	qdf_cancel_work(&hal->reg_write_work);
759 }
760 
761 void hal_flush_reg_write_work(hal_soc_handle_t hal_handle)
762 {
	__hal_flush_reg_write_work((struct hal_soc *)hal_handle);
763 }
764 
765 /**
766  * hal_reg_write_enqueue() - enqueue register writes into kworker
767  * @hal_soc: hal_soc pointer
768  * @srng: srng pointer
769  * @addr: iomem address of register
770  * @value: value to be written to iomem address
771  *
772  * This function executes from within the SRNG LOCK
773  *
774  * Return: None
775  */
776 static void hal_reg_write_enqueue(struct hal_soc *hal_soc,
777 				  struct hal_srng *srng,
778 				  void __iomem *addr,
779 				  uint32_t value)
780 {
781 	struct hal_reg_write_q_elem *q_elem;
782 	uint32_t write_idx;
783 
784 	if (srng->reg_write_in_progress) {
785 		hal_verbose_debug("Already in progress srng ring id 0x%x addr 0x%pK val %u",
786 				  srng->ring_id, addr, value);
787 		qdf_atomic_inc(&hal_soc->stats.wstats.coalesces);
788 		srng->wstats.coalesces++;
789 		return;
790 	}
791 
792 	write_idx = qdf_atomic_inc_return(&hal_soc->write_idx);
793 
794 	write_idx = write_idx & (HAL_REG_WRITE_QUEUE_LEN - 1);
795 
796 	q_elem = &hal_soc->reg_write_queue[write_idx];
797 
798 	if (q_elem->valid) {
799 		hal_err("queue full");
800 		QDF_BUG(0);
801 		return;
802 	}
803 
804 	qdf_atomic_inc(&hal_soc->stats.wstats.enqueues);
805 	srng->wstats.enqueues++;
806 
807 	qdf_atomic_inc(&hal_soc->stats.wstats.q_depth);
808 
809 	q_elem->srng = srng;
810 	q_elem->addr = addr;
811 	q_elem->enqueue_val = value;
812 	q_elem->enqueue_time = qdf_get_log_timestamp();
813 
814 	/*
815 	 * Before the valid flag is set to true, all the other
816 	 * fields in the q_elem need to be updated in memory.
817 	 * Else there is a chance that the dequeuing worker thread
818 	 * might read stale entries and process an incorrect srng.
819 	 */
820 	qdf_wmb();
821 	q_elem->valid = true;
822 
823 	/*
824 	 * After all other fields of the q_elem have been updated in
825 	 * memory successfully, the valid flag needs to reach memory
826 	 * in time too.
827 	 * Else there is a chance that the dequeuing worker thread
828 	 * might read a stale valid flag and the work will be bypassed
829 	 * for this round. And if no other work is scheduled later,
830 	 * this hal register write would never be performed.
831 	 */
832 	qdf_wmb();
833 
834 	srng->reg_write_in_progress = true;
835 	qdf_atomic_inc(&hal_soc->active_work_cnt);
836 
837 	hal_verbose_debug("write_idx %u srng ring id 0x%x addr 0x%pK val %u",
838 			  write_idx, srng->ring_id, addr, value);
839 
840 	qdf_queue_work(hal_soc->qdf_dev, hal_soc->reg_write_wq,
841 		       &hal_soc->reg_write_work);
842 }
843 
844 /**
845  * hal_delayed_reg_write_init() - Initialization function for delayed reg writes
846  * @hal: hal_soc pointer
847  *
848  * Initialize main data structures to process register writes in a delayed
849  * workqueue.
850  *
851  * Return: QDF_STATUS_SUCCESS on success else a QDF error.
852  */
853 static QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
854 {
855 	hal->reg_write_wq =
856 		qdf_alloc_high_prior_ordered_workqueue("hal_register_write_wq");
857 	qdf_create_work(0, &hal->reg_write_work, hal_reg_write_work, hal);
858 	hal->reg_write_queue = qdf_mem_malloc(HAL_REG_WRITE_QUEUE_LEN *
859 					      sizeof(*hal->reg_write_queue));
860 	if (!hal->reg_write_queue) {
861 		hal_err("unable to allocate memory");
862 		QDF_BUG(0);
863 		return QDF_STATUS_E_NOMEM;
864 	}
865 
866 	/* Initial value of indices */
867 	hal->read_idx = 0;
868 	qdf_atomic_set(&hal->write_idx, -1);
869 	return QDF_STATUS_SUCCESS;
870 }
871 
872 /**
873  * hal_delayed_reg_write_deinit() - De-initialize delayed reg write processing
874  * @hal: hal_soc pointer
875  *
876  * De-initialize main data structures to process register writes in a delayed
877  * workqueue.
878  *
879  * Return: None
880  */
881 static void hal_delayed_reg_write_deinit(struct hal_soc *hal)
882 {
883 	__hal_flush_reg_write_work(hal);
884 
885 	qdf_flush_workqueue(0, hal->reg_write_wq);
886 	qdf_destroy_workqueue(0, hal->reg_write_wq);
887 	qdf_mem_free(hal->reg_write_queue);
888 }
889 
890 #else
891 static inline QDF_STATUS hal_delayed_reg_write_init(struct hal_soc *hal)
892 {
893 	return QDF_STATUS_SUCCESS;
894 }
895 
896 static inline void hal_delayed_reg_write_deinit(struct hal_soc *hal)
897 {
898 }
899 #endif
900 
901 #ifdef FEATURE_HAL_DELAYED_REG_WRITE_V2
902 #ifdef MEMORY_DEBUG
903 /**
904  * hal_del_reg_write_get_ts() - Get the current timestamp
905  *
906  * Return: current timestamp
907  */
908 static inline qdf_time_t hal_del_reg_write_get_ts(void)
909 {
910 	return qdf_get_log_timestamp();
911 }
912 
913 /**
914  * hal_del_reg_write_ts_usecs() - Convert the timestamp to microseconds
915  * @ts: timestamp value to be converted
916  *
917  * Return: the timestamp in microseconds
918  */
919 static inline qdf_time_t hal_del_reg_write_ts_usecs(qdf_time_t ts)
920 {
921 	return qdf_log_timestamp_to_usecs(ts);
922 }
923 
924 /**
925  * hal_tcl_write_fill_sched_delay_hist() - fill TCL reg write delay histogram
926  * @hal: hal_soc pointer
928  *
929  * Return: None
930  */
931 static inline void hal_tcl_write_fill_sched_delay_hist(struct hal_soc *hal)
932 {
933 	uint32_t *hist;
934 	uint32_t delay_us;
935 
936 	hal->tcl_stats.deq_time = hal_del_reg_write_get_ts();
937 	delay_us = hal_del_reg_write_ts_usecs(hal->tcl_stats.deq_time -
938 					      hal->tcl_stats.enq_time);
939 
940 	hist = hal->tcl_stats.sched_delay;
941 	if (delay_us < 100)
942 		hist[REG_WRITE_SCHED_DELAY_SUB_100us]++;
943 	else if (delay_us < 1000)
944 		hist[REG_WRITE_SCHED_DELAY_SUB_1000us]++;
945 	else if (delay_us < 5000)
946 		hist[REG_WRITE_SCHED_DELAY_SUB_5000us]++;
947 	else
948 		hist[REG_WRITE_SCHED_DELAY_GT_5000us]++;
949 }
950 
951 #else
952 static inline qdf_time_t hal_del_reg_write_get_ts(void)
953 {
954 	return 0;
955 }
956 
957 static inline qdf_time_t hal_del_reg_write_ts_usecs(qdf_time_t ts)
958 {
959 	return 0;
960 }
961 
962 static inline void hal_tcl_write_fill_sched_delay_hist(struct hal_soc *hal)
963 {
964 }
965 #endif
966 
967 /**
968  * hal_tcl_reg_write_work() - Worker to process delayed SW2TCL1 writes
969  * @arg: hal_soc pointer
970  *
971  * Return: None
972  */
973 static void hal_tcl_reg_write_work(void *arg)
974 {
975 	struct hal_soc *hal = arg;
976 	struct hal_srng *srng = hal_get_srng(hal, HAL_SRNG_SW2TCL1);
977 
978 	SRNG_LOCK(&srng->lock);
979 	srng->wstats.dequeues++;
980 	hal_tcl_write_fill_sched_delay_hist(hal);
981 
982 	/*
983 	 * During the transition from low to high tput, reg writes move
984 	 * from delayed to direct write context; there is a small chance that
985 	 * the worker thread gets scheduled later than the direct write which
986 	 * already wrote the latest HP value. This check can catch that case
987 	 * and avoid the repetitive writing of the same HP value.
988 	 */
989 	if (srng->last_reg_wr_val != srng->u.src_ring.hp) {
990 		srng->last_reg_wr_val = srng->u.src_ring.hp;
991 		if (hal->tcl_direct) {
992 			/*
993 			 * TCL reg writes have been moved to direct context and
994 			 * the assumption is that PCIe bus stays in Active state
995 			 * during high tput, hence it's fine to write the HP
996 			 * while the SRNG_LOCK is being held.
997 			 */
998 			hal->tcl_stats.wq_direct++;
999 			hal_write_address_32_mb(hal, srng->u.src_ring.hp_addr,
1000 						srng->last_reg_wr_val, false);
1001 			srng->reg_write_in_progress = false;
1002 			SRNG_UNLOCK(&srng->lock);
1003 		} else {
1004 			/*
1005 			 * TCL reg write happens in delayed context; the
1006 			 * write operation might take time since the PCIe
1007 			 * bus may stay in a low power state during low tput.
1008 			 * Hence release the SRNG_LOCK before writing.
1009 			 */
1010 			hal->tcl_stats.wq_delayed++;
1011 			srng->reg_write_in_progress = false;
1012 			SRNG_UNLOCK(&srng->lock);
1013 			hal_write_address_32_mb(hal, srng->u.src_ring.hp_addr,
1014 						srng->last_reg_wr_val, false);
1015 		}
1016 	} else {
1017 		srng->reg_write_in_progress = false;
1018 		SRNG_UNLOCK(&srng->lock);
1019 	}
1020 
1021 	/*
1022 	 * Decrement active_work_cnt to make sure that hif_try_complete_tasks
1023 	 * will wait. This will avoid race condition between delayed register
1024 	 * worker and bus suspend (system suspend or runtime suspend).
1025 	 *
1026 	 * The following decrement should be done at the end!
1027 	 */
1028 	qdf_atomic_dec(&hal->active_work_cnt);
1029 	qdf_atomic_set(&hal->tcl_work_active, false);
1030 }
1031 
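/**
 * __hal_flush_tcl_reg_write_work() - cancel any pending TCL reg write work
 * @hal: HAL SOC handle
 */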
1032 static void __hal_flush_tcl_reg_write_work(struct hal_soc *hal)
1033 {
1034 	qdf_cancel_work(&hal->tcl_reg_write_work);
1035 }
1036 
1037 /**
1038  * hal_tcl_reg_write_enqueue() - enqueue TCL register writes into kworker
1039  * @hal_soc: hal_soc pointer
1040  * @srng: srng pointer
1041  * @addr: iomem address of register
1042  * @value: value to be written to iomem address
1043  *
1044  * This function executes from within the SRNG LOCK
1045  *
1046  * Return: None
1047  */
1048 static void hal_tcl_reg_write_enqueue(struct hal_soc *hal_soc,
1049 				      struct hal_srng *srng,
1050 				      void __iomem *addr,
1051 				      uint32_t value)
1052 {
1053 	hal_soc->tcl_stats.enq_time = hal_del_reg_write_get_ts();
1054 
1055 	if (qdf_queue_work(hal_soc->qdf_dev, hal_soc->tcl_reg_write_wq,
1056 			   &hal_soc->tcl_reg_write_work)) {
1057 		srng->reg_write_in_progress = true;
1058 		qdf_atomic_inc(&hal_soc->active_work_cnt);
1059 		qdf_atomic_set(&hal_soc->tcl_work_active, true);
1060 		srng->wstats.enqueues++;
1061 	} else {
1062 		hal_soc->tcl_stats.enq_timer_set++;
1063 		qdf_timer_mod(&hal_soc->tcl_reg_write_timer, 1);
1064 	}
1065 }
1066 
1067 /**
1068  * hal_tcl_reg_write_timer() - timer handler to take care of pending TCL writes
1069  * @arg: srng handle
1070  *
1071  * This function handles the pending TCL reg writes missed due to the previous
1072  * scheduled worker running.
1073  *
1074  * Return: None
1075  */
1076 static void hal_tcl_reg_write_timer(void *arg)
1077 {
1078 	hal_ring_handle_t srng_hdl = arg;
1079 	struct hal_srng *srng;
1080 	struct hal_soc *hal;
1081 
1082 	srng = (struct hal_srng *)srng_hdl;
1083 	hal = srng->hal_soc;
1084 
1085 	if (hif_pm_runtime_get(hal->hif_handle, RTPM_ID_DW_TX_HW_ENQUEUE,
1086 			       true)) {
1087 		hal_srng_set_event(srng_hdl, HAL_SRNG_FLUSH_EVENT);
1088 		hal_srng_inc_flush_cnt(srng_hdl);
1089 		goto fail;
1090 	}
1091 
1092 	SRNG_LOCK(&srng->lock);
1093 	if (hal->tcl_direct) {
1094 		/*
1095 		 * Due to the previously scheduled worker still running,
1096 		 * the direct reg write could not be performed, so the
1097 		 * pending writes were posted to this timer context.
1098 		 */
1099 		if (srng->last_reg_wr_val != srng->u.src_ring.hp) {
1100 			srng->last_reg_wr_val = srng->u.src_ring.hp;
1101 			srng->wstats.direct++;
1102 			hal->tcl_stats.timer_direct++;
1103 			hal_write_address_32_mb(hal, srng->u.src_ring.hp_addr,
1104 						srng->last_reg_wr_val, false);
1105 		}
1106 	} else {
1107 		/*
1108 		 * Due to the previously scheduled worker still running,
1109 		 * queue_work from delayed context would fail,
1110 		 * so retry from timer context.
1111 		 */
1112 		if (qdf_queue_work(hal->qdf_dev, hal->tcl_reg_write_wq,
1113 				   &hal->tcl_reg_write_work)) {
1114 			srng->reg_write_in_progress = true;
1115 			qdf_atomic_inc(&hal->active_work_cnt);
1116 			qdf_atomic_set(&hal->tcl_work_active, true);
1117 			srng->wstats.enqueues++;
1118 			hal->tcl_stats.timer_enq++;
1119 		} else {
1120 			if (srng->last_reg_wr_val != srng->u.src_ring.hp) {
1121 				hal->tcl_stats.timer_reset++;
1122 				qdf_timer_mod(&hal->tcl_reg_write_timer, 1);
1123 			}
1124 		}
1125 	}
1126 	SRNG_UNLOCK(&srng->lock);
1127 	hif_pm_runtime_put(hal->hif_handle, RTPM_ID_DW_TX_HW_ENQUEUE);
1128 
1129 fail:
1130 	return;
1131 }
1132 
1133 /**
1134  * hal_delayed_tcl_reg_write_init() - Initialization for delayed TCL reg writes
1135  * @hal: hal_soc pointer
1136  *
1137  * Initialize main data structures to process TCL register writes in a delayed
1138  * workqueue.
1139  *
1140  * Return: QDF_STATUS_SUCCESS on success else a QDF error.
1141  */
1142 static QDF_STATUS hal_delayed_tcl_reg_write_init(struct hal_soc *hal)
1143 {
1144 	struct hal_srng *srng = hal_get_srng(hal, HAL_SRNG_SW2TCL1);
1145 	QDF_STATUS status;
1146 
1147 	hal->tcl_reg_write_wq =
1148 		qdf_alloc_high_prior_ordered_workqueue("hal_tcl_reg_write_wq");
1149 	if (!hal->tcl_reg_write_wq) {
1150 		hal_err("hal_tcl_reg_write_wq alloc failed");
1151 		return QDF_STATUS_E_NOMEM;
1152 	}
1153 
1154 	status = qdf_create_work(0, &hal->tcl_reg_write_work,
1155 				 hal_tcl_reg_write_work, hal);
1156 	if (status != QDF_STATUS_SUCCESS) {
1157 		hal_err("tcl_reg_write_work create failed");
1158 		goto fail;
1159 	}
1160 
1161 	status = qdf_timer_init(hal->qdf_dev, &hal->tcl_reg_write_timer,
1162 				hal_tcl_reg_write_timer, (void *)srng,
1163 				QDF_TIMER_TYPE_WAKE_APPS);
1164 	if (status != QDF_STATUS_SUCCESS) {
1165 		hal_err("tcl_reg_write_timer init failed");
1166 		goto fail;
1167 	}
1168 
1169 	qdf_atomic_init(&hal->tcl_work_active);
1170 
1171 	return QDF_STATUS_SUCCESS;
1172 
1173 fail:
1174 	qdf_destroy_workqueue(0, hal->tcl_reg_write_wq);
1175 	return status;
1176 }
1177 
1178 /**
1179  * hal_delayed_tcl_reg_write_deinit() - De-Initialize delayed TCL reg writes
1180  * hal_delayed_tcl_reg_write_deinit() - De-initialize delayed TCL reg writes
1181  * @hal: hal_soc pointer
1182  * De-initialize main data structures to process TCL register writes in a
1183  * delayed workqueue.
1184  *
1185  * Return: None
1186  */
1187 static void hal_delayed_tcl_reg_write_deinit(struct hal_soc *hal)
1188 {
1189 	qdf_timer_stop(&hal->tcl_reg_write_timer);
1190 	qdf_timer_free(&hal->tcl_reg_write_timer);
1191 
1192 	__hal_flush_tcl_reg_write_work(hal);
1193 	qdf_flush_workqueue(0, hal->tcl_reg_write_wq);
1194 	qdf_destroy_workqueue(0, hal->tcl_reg_write_wq);
1195 }
1196 
1197 #else
1198 static inline QDF_STATUS hal_delayed_tcl_reg_write_init(struct hal_soc *hal)
1199 {
1200 	return QDF_STATUS_SUCCESS;
1201 }
1202 
1203 static inline void hal_delayed_tcl_reg_write_deinit(struct hal_soc *hal)
1204 {
1205 }
1206 #endif
1207 
1208 #ifdef FEATURE_HAL_DELAYED_REG_WRITE_V2
1209 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
1210 static inline void hal_reg_write_enqueue_v2(struct hal_soc *hal_soc,
1211 					    struct hal_srng *srng,
1212 					    void __iomem *addr,
1213 					    uint32_t value)
1214 {
1215 	hal_reg_write_enqueue(hal_soc, srng, addr, value);
1216 }
1217 #else
1218 static inline void hal_reg_write_enqueue_v2(struct hal_soc *hal_soc,
1219 					    struct hal_srng *srng,
1220 					    void __iomem *addr,
1221 					    uint32_t value)
1222 {
1223 	qdf_atomic_inc(&hal_soc->stats.wstats.direct);
1224 	srng->wstats.direct++;
1225 	hal_write_address_32_mb(hal_soc, addr, value, false);
1226 }
1227 #endif
1228 
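/**
 * hal_delayed_reg_write() - dispatch a register write as direct or delayed
 * @hal_soc: HAL SOC handle
 * @srng: SRNG ring pointer
 * @addr: iomem address of the register
 * @value: value to be written
 *
 * TCL_DATA ring writes go directly to HW when throughput is high and
 * through the TCL workqueue/timer otherwise; CE ring writes go through
 * hal_reg_write_enqueue_v2() and all other rings are written directly.
 */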
1229 void hal_delayed_reg_write(struct hal_soc *hal_soc,
1230 			   struct hal_srng *srng,
1231 			   void __iomem *addr,
1232 			   uint32_t value)
1233 {
1234 	switch (srng->ring_type) {
1235 	case TCL_DATA:
1236 		if (hal_is_reg_write_tput_level_high(hal_soc)) {
1237 			hal_soc->tcl_direct = true;
1238 			if (srng->reg_write_in_progress ||
1239 			    !qdf_atomic_read(&hal_soc->tcl_work_active)) {
1240 				/*
1241 				 * Now the delayed work has either completed
1242 				 * the write or was never scheduled and would
1243 				 * be blocked by the SRNG_LOCK, hence it is
1244 				 * fine to do a direct write here.
1245 				 */
1246 				srng->last_reg_wr_val = srng->u.src_ring.hp;
1247 				srng->wstats.direct++;
1248 				hal_write_address_32_mb(hal_soc, addr,
1249 							srng->last_reg_wr_val,
1250 							false);
1251 			} else {
1252 				hal_soc->tcl_stats.direct_timer_set++;
1253 				qdf_timer_mod(&hal_soc->tcl_reg_write_timer, 1);
1254 			}
1255 		} else {
1256 			hal_soc->tcl_direct = false;
1257 			if (srng->reg_write_in_progress) {
1258 				srng->wstats.coalesces++;
1259 			} else {
1260 				hal_tcl_reg_write_enqueue(hal_soc, srng,
1261 							  addr, value);
1262 			}
1263 		}
1264 		break;
1265 	case CE_SRC:
1266 	case CE_DST:
1267 	case CE_DST_STATUS:
1268 		hal_reg_write_enqueue_v2(hal_soc, srng, addr, value);
1269 		break;
1270 	default:
1271 		qdf_atomic_inc(&hal_soc->stats.wstats.direct);
1272 		srng->wstats.direct++;
1273 		hal_write_address_32_mb(hal_soc, addr, value, false);
1274 		break;
1275 	}
1276 }
1277 
1278 #else
1279 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
1280 #ifdef QCA_WIFI_QCA6750
1281 void hal_delayed_reg_write(struct hal_soc *hal_soc,
1282 			   struct hal_srng *srng,
1283 			   void __iomem *addr,
1284 			   uint32_t value)
1285 {
1286 	switch (srng->ring_type) {
1287 	case CE_SRC:
1288 	case CE_DST:
1289 	case CE_DST_STATUS:
1290 		if (hif_get_ep_vote_access(hal_soc->hif_handle,
1291 					   HIF_EP_VOTE_NONDP_ACCESS) ==
1292 					   HIF_EP_VOTE_ACCESS_DISABLE) {
1293 			hal_write_address_32_mb(hal_soc, addr, value, false);
1294 			qdf_atomic_inc(&hal_soc->stats.wstats.direct);
1295 			srng->wstats.direct++;
1296 		} else {
1297 			hal_reg_write_enqueue(hal_soc, srng, addr, value);
1298 		}
1299 		break;
1300 	default:
1301 		if (hif_get_ep_vote_access(hal_soc->hif_handle,
1302 		    HIF_EP_VOTE_DP_ACCESS) ==
1303 		    HIF_EP_VOTE_ACCESS_DISABLE ||
1304 		    hal_is_reg_write_tput_level_high(hal_soc) ||
1305 		    PLD_MHI_STATE_L0 ==
1306 		    pld_get_mhi_state(hal_soc->qdf_dev->dev)) {
1307 			hal_write_address_32_mb(hal_soc, addr, value, false);
1308 			qdf_atomic_inc(&hal_soc->stats.wstats.direct);
1309 			srng->wstats.direct++;
1310 		} else {
1311 			hal_reg_write_enqueue(hal_soc, srng, addr, value);
1312 		}
1313 
1314 		break;
1315 	}
1316 }
1317 #else
1318 void hal_delayed_reg_write(struct hal_soc *hal_soc,
1319 			   struct hal_srng *srng,
1320 			   void __iomem *addr,
1321 			   uint32_t value)
1322 {
1323 	if (pld_is_device_awake(hal_soc->qdf_dev->dev) ||
1324 	    hal_is_reg_write_tput_level_high(hal_soc)) {
1325 		qdf_atomic_inc(&hal_soc->stats.wstats.direct);
1326 		srng->wstats.direct++;
1327 		hal_write_address_32_mb(hal_soc, addr, value, false);
1328 	} else {
1329 		hal_reg_write_enqueue(hal_soc, srng, addr, value);
1330 	}
1331 }
1332 #endif
1333 #endif
1334 #endif
1335 
1336 /**
1337  * hal_attach - Initialize HAL layer
1338  * @hif_handle: Opaque HIF handle
1339  * @qdf_dev: QDF device
1340  *
1341  * Return: Opaque HAL SOC handle
1342  *		 NULL on failure
1343  *
1344  * This function should be called as part of HIF initialization (for accessing
1345  * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
1346  *
1347  */
1348 void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev)
1349 {
1350 	struct hal_soc *hal;
1351 	int i;
1352 
1353 	hal = qdf_mem_malloc(sizeof(*hal));
1354 
1355 	if (!hal) {
1356 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1357 			"%s: hal_soc allocation failed", __func__);
1358 		goto fail0;
1359 	}
1360 	hal->hif_handle = hif_handle;
1361 	hal->dev_base_addr = hif_get_dev_ba(hif_handle); /* UMAC */
1362 	hal->dev_base_addr_ce = hif_get_dev_ba_ce(hif_handle); /* CE */
1363 	hal->qdf_dev = qdf_dev;
1364 	hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent(
1365 		qdf_dev, qdf_dev->dev, sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
1366 		HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr));
1367 	if (!hal->shadow_rdptr_mem_paddr) {
1368 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1369 			"%s: hal->shadow_rdptr_mem_paddr allocation failed",
1370 			__func__);
1371 		goto fail1;
1372 	}
1373 	qdf_mem_zero(hal->shadow_rdptr_mem_vaddr,
1374 		     sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX);
1375 
1376 	hal->shadow_wrptr_mem_vaddr =
1377 		(uint32_t *)qdf_mem_alloc_consistent(qdf_dev, qdf_dev->dev,
1378 		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
1379 		&(hal->shadow_wrptr_mem_paddr));
1380 	if (!hal->shadow_wrptr_mem_vaddr) {
1381 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1382 			"%s: hal->shadow_wrptr_mem_vaddr allocation failed",
1383 			__func__);
1384 		goto fail2;
1385 	}
1386 	qdf_mem_zero(hal->shadow_wrptr_mem_vaddr,
1387 		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS);
1388 
1389 	for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
1390 		hal->srng_list[i].initialized = 0;
1391 		hal->srng_list[i].ring_id = i;
1392 	}
1393 
1394 	qdf_spinlock_create(&hal->register_access_lock);
1395 	hal->register_window = 0;
1396 	hal->target_type = hal_get_target_type(hal_soc_to_hal_soc_handle(hal));
1397 
1398 	hal_target_based_configure(hal);
1399 
1400 	hal_reg_write_fail_history_init(hal);
1401 
1402 	qdf_minidump_log(hal, sizeof(*hal), "hal_soc");
1403 
1404 	qdf_atomic_init(&hal->active_work_cnt);
1405 	hal_delayed_reg_write_init(hal);
1406 	hal_delayed_tcl_reg_write_init(hal);
1407 
1408 	return (void *)hal;
1409 
1410 fail2:
1411 	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
1412 		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
1413 		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
1414 fail1:
1415 	qdf_mem_free(hal);
1416 fail0:
1417 	return NULL;
1418 }
1419 qdf_export_symbol(hal_attach);
1420 
1421 /**
1422  * hal_get_meminfo - Retrieve hal memory base address
1423  *
1424  * @hal_soc_hdl: Opaque HAL SOC handle
1425  * @mem: pointer to structure to be updated with hal mem info
1426  */
1427 void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem)
1428 {
1429 	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;

1430 	mem->dev_base_addr = (void *)hal->dev_base_addr;
1431 	mem->shadow_rdptr_mem_vaddr = (void *)hal->shadow_rdptr_mem_vaddr;
1432 	mem->shadow_wrptr_mem_vaddr = (void *)hal->shadow_wrptr_mem_vaddr;
1433 	mem->shadow_rdptr_mem_paddr = (void *)hal->shadow_rdptr_mem_paddr;
1434 	mem->shadow_wrptr_mem_paddr = (void *)hal->shadow_wrptr_mem_paddr;
1435 	hif_read_phy_mem_base((void *)hal->hif_handle,
1436 			      (qdf_dma_addr_t *)&mem->dev_base_paddr);
1437 	return;
1438 }
1439 qdf_export_symbol(hal_get_meminfo);
1440 
1441 /**
1442  * hal_detach - Detach HAL layer
1443  * @hal_soc: HAL SOC handle
1444  *
1445  * Return: None
1446  *
1447  * This function should be called as part of HIF de-initialization. It frees
1448  * the shadow pointer memory and the hal_soc structure allocated in hal_attach().
1450  *
1451  */
1452 void hal_detach(void *hal_soc)
1453 {
1454 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1455 
1456 	hal_delayed_reg_write_deinit(hal);
1457 	hal_delayed_tcl_reg_write_deinit(hal);
1458 
1459 	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
1460 		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
1461 		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
1462 	qdf_mem_free_consistent(hal->qdf_dev, hal->qdf_dev->dev,
1463 		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
1464 		hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0);
1465 	qdf_minidump_remove(hal, sizeof(*hal), "hal_soc");
1466 	qdf_mem_free(hal);
1467 
1468 	return;
1469 }
1470 qdf_export_symbol(hal_detach);
1471 
1472 /**
1473  * hal_ce_dst_setup - Initialize CE destination ring registers
1474  * @hal: HAL SOC handle
1475  * @srng: SRNG ring pointer
 * @ring_num: ring number
1476  */
1477 static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
1478 				    int ring_num)
1479 {
1480 	uint32_t reg_val = 0;
1481 	uint32_t reg_addr;
1482 	struct hal_hw_srng_config *ring_config =
1483 		HAL_SRNG_CONFIG(hal, CE_DST);
1484 
1485 	/* set DEST_MAX_LENGTH according to ce assignment */
1486 	reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_ADDR(
1487 			ring_config->reg_start[R0_INDEX] +
1488 			(ring_num * ring_config->reg_size[R0_INDEX]));
1489 
1490 	reg_val = HAL_REG_READ(hal, reg_addr);
1491 	reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
1492 	reg_val |= srng->u.dst_ring.max_buffer_length &
1493 		HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_CTRL_DEST_MAX_LENGTH_BMSK;
1494 	HAL_REG_WRITE(hal, reg_addr, reg_val);
1495 
1496 	if (srng->prefetch_timer) {
1497 		reg_addr = HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_CONSUMER_PREFETCH_TIMER_ADDR(
1498 				ring_config->reg_start[R0_INDEX] +
1499 				(ring_num * ring_config->reg_size[R0_INDEX]));
1500 
1501 		reg_val = HAL_REG_READ(hal, reg_addr);
1502 		reg_val &= ~HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_CONSUMER_PREFETCH_TIMER_RMSK;
1503 		reg_val |= srng->prefetch_timer;
1504 		HAL_REG_WRITE(hal, reg_addr, reg_val);
1505 		reg_val = HAL_REG_READ(hal, reg_addr);
1506 	}
1507 
1508 }
1509 
1510 /**
1511  * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
1512  * @hal_soc_hdl: HAL SOC handle
1513  * @read: boolean value to indicate if read or write
1514  * @ix0: pointer to store IX0 reg value
1515  * @ix1: pointer to store IX1 reg value
1516  * @ix2: pointer to store IX2 reg value
1517  * @ix3: pointer to store IX3 reg value
1518  */
1519 void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
1520 				uint32_t *ix0, uint32_t *ix1,
1521 				uint32_t *ix2, uint32_t *ix3)
1522 {
1523 	uint32_t reg_offset;
1524 	struct hal_soc *hal = (struct hal_soc *)hal_soc_hdl;
1525 
1526 	if (read) {
1527 		if (ix0) {
1528 			reg_offset =
1529 				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
1530 						SEQ_WCSS_UMAC_REO_REG_OFFSET);
1531 			*ix0 = HAL_REG_READ(hal, reg_offset);
1532 		}
1533 
1534 		if (ix1) {
1535 			reg_offset =
1536 				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1_ADDR(
1537 						SEQ_WCSS_UMAC_REO_REG_OFFSET);
1538 			*ix1 = HAL_REG_READ(hal, reg_offset);
1539 		}
1540 
1541 		if (ix2) {
1542 			reg_offset =
1543 				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
1544 						SEQ_WCSS_UMAC_REO_REG_OFFSET);
1545 			*ix2 = HAL_REG_READ(hal, reg_offset);
1546 		}
1547 
1548 		if (ix3) {
1549 			reg_offset =
1550 				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
1551 						SEQ_WCSS_UMAC_REO_REG_OFFSET);
1552 			*ix3 = HAL_REG_READ(hal, reg_offset);
1553 		}
1554 	} else {
1555 		if (ix0) {
1556 			reg_offset =
1557 				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
1558 						SEQ_WCSS_UMAC_REO_REG_OFFSET);
1559 			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
1560 						    *ix0, true);
1561 		}
1562 
1563 		if (ix1) {
1564 			reg_offset =
1565 				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_1_ADDR(
1566 						SEQ_WCSS_UMAC_REO_REG_OFFSET);
1567 			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
1568 						    *ix1, true);
1569 		}
1570 
1571 		if (ix2) {
1572 			reg_offset =
1573 				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
1574 						SEQ_WCSS_UMAC_REO_REG_OFFSET);
1575 			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
1576 						    *ix2, true);
1577 		}
1578 
1579 		if (ix3) {
1580 			reg_offset =
1581 				HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
1582 						SEQ_WCSS_UMAC_REO_REG_OFFSET);
1583 			HAL_REG_WRITE_CONFIRM_RETRY(hal, reg_offset,
1584 						    *ix3, true);
1585 		}
1586 	}
1587 }
1588 
1589 /**
1590  * hal_srng_dst_set_hp_paddr_confirm() - Set physical address to dest ring head
1591  *  pointer and confirm that write went through by reading back the value
1592  * @srng: srng pointer
1593  * @paddr: physical address
1594  *
1595  * Return: None
1596  */
1597 void hal_srng_dst_set_hp_paddr_confirm(struct hal_srng *srng, uint64_t paddr)
1598 {
1599 	SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_LSB, paddr & 0xffffffff);
1600 	SRNG_DST_REG_WRITE_CONFIRM(srng, HP_ADDR_MSB, paddr >> 32);
1601 }
1602 
1603 /**
1604  * hal_srng_dst_init_hp() - Initialize destination ring head
1605  * pointer
1606  * @hal_soc: hal_soc handle
1607  * @srng: srng pointer
1608  * @vaddr: virtual address
1609  */
1610 void hal_srng_dst_init_hp(struct hal_soc_handle *hal_soc,
1611 			  struct hal_srng *srng,
1612 			  uint32_t *vaddr)
1613 {
1614 	uint32_t reg_offset;
1615 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1616 
1617 	if (!srng)
1618 		return;
1619 
1620 	srng->u.dst_ring.hp_addr = vaddr;
1621 	reg_offset = SRNG_DST_ADDR(srng, HP) - hal->dev_base_addr;
1622 	HAL_REG_WRITE_CONFIRM_RETRY(
1623 		hal, reg_offset, srng->u.dst_ring.cached_hp, true);
1624 
1625 	if (vaddr) {
1626 		*srng->u.dst_ring.hp_addr = srng->u.dst_ring.cached_hp;
1627 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1628 			  "hp_addr=%pK, cached_hp=%d, hp=%d",
1629 			  (void *)srng->u.dst_ring.hp_addr,
1630 			  srng->u.dst_ring.cached_hp,
1631 			  *srng->u.dst_ring.hp_addr);
1632 	}
1633 }
1634 
1635 /**
1636  * hal_srng_hw_init - Private function to initialize SRNG HW
1637  * @hal_soc: HAL SOC handle
1638  * @srng: SRNG ring pointer
1639  */
1640 static inline void hal_srng_hw_init(struct hal_soc *hal,
1641 	struct hal_srng *srng)
1642 {
1643 	if (srng->ring_dir == HAL_SRNG_SRC_RING)
1644 		hal_srng_src_hw_init(hal, srng);
1645 	else
1646 		hal_srng_dst_hw_init(hal, srng);
1647 }
1648 
1649 #ifdef CONFIG_SHADOW_V2
1650 #define ignore_shadow false
1651 #define CHECK_SHADOW_REGISTERS true
1652 #else
1653 #define ignore_shadow true
1654 #define CHECK_SHADOW_REGISTERS false
1655 #endif
1656 
1657 /**
1658  * hal_srng_setup - Initialize HW SRNG ring.
1659  * @hal_soc: Opaque HAL SOC handle
1660  * @ring_type: one of the types from hal_ring_type
1661  * @ring_num: Ring number if there are multiple rings of same type (starting
1662  * from 0)
1663  * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
1664  * @ring_params: SRNG ring params in hal_srng_params structure.
1665  *
1666  * Callers are expected to allocate contiguous ring memory of size
1667  * 'num_entries * entry_size' bytes and pass the physical and virtual base
1668  * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
1669  * hal_srng_params structure. Ring base address should be 8 byte aligned
1670  * and size of each ring entry should be queried using the API
1671  * hal_srng_get_entrysize
1672  *
1673  * Return: Opaque pointer to ring on success
1674  *		 NULL on failure (if given ring is not available)
1675  */
1676 void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
1677 	int mac_id, struct hal_srng_params *ring_params)
1678 {
1679 	int ring_id;
1680 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1681 	struct hal_srng *srng;
1682 	struct hal_hw_srng_config *ring_config =
1683 		HAL_SRNG_CONFIG(hal, ring_type);
1684 	void *dev_base_addr;
1685 	int i;
1686 
1687 	ring_id = hal_get_srng_ring_id(hal_soc, ring_type, ring_num, mac_id);
1688 	if (ring_id < 0)
1689 		return NULL;
1690 
1691 	hal_verbose_debug("mac_id %d ring_id %d", mac_id, ring_id);
1692 
1693 	srng = hal_get_srng(hal_soc, ring_id);
1694 
1695 	if (srng->initialized) {
1696 		hal_verbose_debug("Ring (%d, %d) already initialized", ring_type, ring_num);
1697 		return NULL;
1698 	}
1699 
1700 	dev_base_addr = hal->dev_base_addr;
1701 	srng->ring_id = ring_id;
1702 	srng->ring_type = ring_type;
1703 	srng->ring_dir = ring_config->ring_dir;
1704 	srng->ring_base_paddr = ring_params->ring_base_paddr;
1705 	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
1706 	srng->entry_size = ring_config->entry_size;
1707 	srng->num_entries = ring_params->num_entries;
1708 	srng->ring_size = srng->num_entries * srng->entry_size;
1709 	srng->ring_size_mask = srng->ring_size - 1;
1710 	srng->msi_addr = ring_params->msi_addr;
1711 	srng->msi_data = ring_params->msi_data;
1712 	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
1713 	srng->intr_batch_cntr_thres_entries =
1714 		ring_params->intr_batch_cntr_thres_entries;
1715 	srng->prefetch_timer = ring_params->prefetch_timer;
1716 	srng->hal_soc = hal_soc;
1717 
1718 	for (i = 0 ; i < MAX_SRNG_REG_GROUPS; i++) {
1719 		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
1720 			+ (ring_num * ring_config->reg_size[i]);
1721 	}
1722 
1723 	/* Zero out the entire ring memory */
1724 	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
1725 		srng->num_entries) << 2);
1726 
1727 	srng->flags = ring_params->flags;
1728 #ifdef BIG_ENDIAN_HOST
1729 	/* TODO: See if we should get these flags from caller */
1730 	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
1731 	srng->flags |= HAL_SRNG_MSI_SWAP;
1732 	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
1733 #endif
1734 
1735 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1736 		srng->u.src_ring.hp = 0;
1737 		srng->u.src_ring.reap_hp = srng->ring_size -
1738 			srng->entry_size;
1739 		srng->u.src_ring.tp_addr =
1740 			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
1741 		srng->u.src_ring.low_threshold =
1742 			ring_params->low_threshold * srng->entry_size;
1743 		if (ring_config->lmac_ring) {
1744 			/* For LMAC rings, head pointer updates will be done
1745 			 * through FW by writing to a shared memory location
1746 			 */
1747 			srng->u.src_ring.hp_addr =
1748 				&(hal->shadow_wrptr_mem_vaddr[ring_id -
1749 					HAL_SRNG_LMAC1_ID_START]);
1750 			srng->flags |= HAL_SRNG_LMAC_RING;
1751 		} else if (ignore_shadow || (srng->u.src_ring.hp_addr == 0)) {
1752 			srng->u.src_ring.hp_addr =
1753 				hal_get_window_address(hal,
1754 						SRNG_SRC_ADDR(srng, HP));
1755 
1756 			if (CHECK_SHADOW_REGISTERS) {
1757 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1758 				    QDF_TRACE_LEVEL_ERROR,
1759 				    "%s: Ring (%d, %d) missing shadow config",
1760 				    __func__, ring_type, ring_num);
1761 			}
1762 		} else {
1763 			hal_validate_shadow_register(hal,
1764 						     SRNG_SRC_ADDR(srng, HP),
1765 						     srng->u.src_ring.hp_addr);
1766 		}
1767 	} else {
1768 		/* During initialization loop count in all the descriptors
1769 		 * will be set to zero, and HW will set it to 1 on completing
1770 		 * descriptor update in first loop, and increments it by 1 on
1771 		 * subsequent loops (loop count wraps around after reaching
1772 		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
1773 		 * loop count in descriptors updated by HW (to be processed
1774 		 * by SW).
1775 		 */
1776 		srng->u.dst_ring.loop_cnt = 1;
1777 		srng->u.dst_ring.tp = 0;
1778 		srng->u.dst_ring.hp_addr =
1779 			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
1780 		if (ring_config->lmac_ring) {
1781 			/* For LMAC rings, tail pointer updates will be done
1782 			 * through FW by writing to a shared memory location
1783 			 */
1784 			srng->u.dst_ring.tp_addr =
1785 				&(hal->shadow_wrptr_mem_vaddr[ring_id -
1786 				HAL_SRNG_LMAC1_ID_START]);
1787 			srng->flags |= HAL_SRNG_LMAC_RING;
		} else if (ignore_shadow || (srng->u.dst_ring.tp_addr == 0)) {
1789 			srng->u.dst_ring.tp_addr =
1790 				hal_get_window_address(hal,
1791 						SRNG_DST_ADDR(srng, TP));
1792 
1793 			if (CHECK_SHADOW_REGISTERS) {
1794 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1795 				    QDF_TRACE_LEVEL_ERROR,
1796 				    "%s: Ring (%d, %d) missing shadow config",
1797 				    __func__, ring_type, ring_num);
1798 			}
1799 		} else {
1800 			hal_validate_shadow_register(hal,
1801 						     SRNG_DST_ADDR(srng, TP),
1802 						     srng->u.dst_ring.tp_addr);
1803 		}
1804 	}
1805 
1806 	if (!(ring_config->lmac_ring)) {
1807 		hal_srng_hw_init(hal, srng);
1808 
1809 		if (ring_type == CE_DST) {
			srng->u.dst_ring.max_buffer_length =
				ring_params->max_buffer_length;
1811 			hal_ce_dst_setup(hal, srng, ring_num);
1812 		}
1813 	}
1814 
1815 	SRNG_LOCK_INIT(&srng->lock);
1816 
1817 	srng->srng_event = 0;
1818 
1819 	srng->initialized = true;
1820 
1821 	return (void *)srng;
1822 }
1823 qdf_export_symbol(hal_srng_setup);
1824 
1825 /**
 * hal_srng_cleanup() - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring_hdl: Opaque HAL SRNG pointer
1829  */
1830 void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
1831 {
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	SRNG_LOCK_DESTROY(&srng->lock);
	srng->initialized = false;
1835 }
1836 qdf_export_symbol(hal_srng_cleanup);
1837 
/**
 * hal_srng_get_entrysize() - Returns size of ring entry in bytes
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: size of ring entry in bytes
 */
1844 uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type)
1845 {
1846 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1847 	struct hal_hw_srng_config *ring_config =
1848 		HAL_SRNG_CONFIG(hal, ring_type);
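
	/* entry_size is stored in 4-byte (dword) units; convert to bytes */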
1849 	return ring_config->entry_size << 2;
1850 }
1851 qdf_export_symbol(hal_srng_get_entrysize);
1852 
1853 /**
 * hal_srng_max_entries() - Returns maximum possible number of ring entries
1855  * @hal_soc: Opaque HAL SOC handle
1856  * @ring_type: one of the types from hal_ring_type
1857  *
1858  * Return: Maximum number of entries for the given ring_type
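 *
 * A caller would typically clamp a requested ring size against this
 * value, e.g. (illustrative):
 *	num_entries = QDF_MIN(num_entries,
 *			      hal_srng_max_entries(hal_soc, ring_type));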
1859  */
1860 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type)
1861 {
1862 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1863 	struct hal_hw_srng_config *ring_config =
1864 		HAL_SRNG_CONFIG(hal, ring_type);
1865 
1866 	return ring_config->max_size / ring_config->entry_size;
1867 }
1868 qdf_export_symbol(hal_srng_max_entries);
1869 
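/**
 * hal_srng_get_dir() - Returns direction of the ring
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Ring direction (HAL_SRNG_SRC_RING or HAL_SRNG_DST_RING)
 */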
1870 enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type)
1871 {
1872 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
1873 	struct hal_hw_srng_config *ring_config =
1874 		HAL_SRNG_CONFIG(hal, ring_type);
1875 
1876 	return ring_config->ring_dir;
1877 }
1878 
1879 /**
 * hal_srng_dump() - Dump ring status
1881  * @srng: hal srng pointer
1882  */
1883 void hal_srng_dump(struct hal_srng *srng)
1884 {
1885 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
1886 		hal_debug("=== SRC RING %d ===", srng->ring_id);
1887 		hal_debug("hp %u, reap_hp %u, tp %u, cached tp %u",
1888 			  srng->u.src_ring.hp,
1889 			  srng->u.src_ring.reap_hp,
1890 			  *srng->u.src_ring.tp_addr,
1891 			  srng->u.src_ring.cached_tp);
1892 	} else {
1893 		hal_debug("=== DST RING %d ===", srng->ring_id);
1894 		hal_debug("tp %u, hp %u, cached tp %u, loop_cnt %u",
1895 			  srng->u.dst_ring.tp,
1896 			  *srng->u.dst_ring.hp_addr,
1897 			  srng->u.dst_ring.cached_hp,
1898 			  srng->u.dst_ring.loop_cnt);
1899 	}
1900 }
1901 
1902 /**
 * hal_get_srng_params() - Retrieve SRNG parameters for a given ring from HAL
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
1907  * @ring_params: SRNG parameters will be returned through this structure
1908  */
void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl,
			 struct hal_srng_params *ring_params)
1912 {
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	int i = 0;

	ring_params->ring_id = srng->ring_id;
1916 	ring_params->ring_dir = srng->ring_dir;
1917 	ring_params->entry_size = srng->entry_size;
1918 
1919 	ring_params->ring_base_paddr = srng->ring_base_paddr;
1920 	ring_params->ring_base_vaddr = srng->ring_base_vaddr;
1921 	ring_params->num_entries = srng->num_entries;
1922 	ring_params->msi_addr = srng->msi_addr;
1923 	ring_params->msi_data = srng->msi_data;
1924 	ring_params->intr_timer_thres_us = srng->intr_timer_thres_us;
1925 	ring_params->intr_batch_cntr_thres_entries =
1926 		srng->intr_batch_cntr_thres_entries;
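	/* low_threshold is meaningful only for source rings */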
1927 	ring_params->low_threshold = srng->u.src_ring.low_threshold;
1928 	ring_params->flags = srng->flags;
	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++)
		ring_params->hwreg_base[i] = srng->hwreg_base[i];
1932 }
1933 qdf_export_symbol(hal_get_srng_params);
1934 
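/**
 * hal_set_low_threshold() - Set the low threshold for a source ring
 * @hal_ring_hdl: Opaque HAL SRNG pointer (expected to be a source ring)
 * @low_threshold: Low threshold in number of ring entries; it is stored
 *	internally scaled by the ring entry size
 */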
void hal_set_low_threshold(hal_ring_handle_t hal_ring_hdl,
			   uint32_t low_threshold)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;

	srng->u.src_ring.low_threshold = low_threshold * srng->entry_size;
}
1941 qdf_export_symbol(hal_set_low_threshold);
1942 
1944 #ifdef FORCE_WAKE
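/**
 * hal_set_init_phase() - Mark whether SRNG initialization is in progress
 * @soc: Opaque HAL SOC handle
 * @init_phase: true while rings are being set up, false otherwise
 */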
1945 void hal_set_init_phase(hal_soc_handle_t soc, bool init_phase)
1946 {
1947 	struct hal_soc *hal_soc = (struct hal_soc *)soc;
1948 
1949 	hal_soc->init_phase = init_phase;
1950 }
1951 #endif /* FORCE_WAKE */
1952