xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "targcfg.h"
21 #include "qdf_lock.h"
22 #include "qdf_status.h"
23 #include "qdf_status.h"
24 #include <qdf_atomic.h>         /* qdf_atomic_read */
25 #include <targaddrs.h>
26 #include "hif_io32.h"
27 #include <hif.h>
28 #include <target_type.h>
29 #include "regtable.h"
30 #define ATH_MODULE_NAME hif
31 #include <a_debug.h>
32 #include "hif_main.h"
33 #include "ce_api.h"
34 #include "qdf_trace.h"
35 #include "pld_common.h"
36 #include "hif_debug.h"
37 #include "ce_internal.h"
38 #include "ce_reg.h"
39 #include "ce_assignment.h"
40 #include "ce_tasklet.h"
41 #include "qdf_module.h"
42 
43 #define CE_POLL_TIMEOUT 10      /* ms */
44 
45 #define AGC_DUMP         1
46 #define CHANINFO_DUMP    2
47 #define BB_WATCHDOG_DUMP 3
48 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
49 #define PCIE_ACCESS_DUMP 4
50 #endif
51 #include "mp_dev.h"
52 #ifdef HIF_CE_LOG_INFO
53 #include "qdf_hang_event_notifier.h"
54 #endif
55 
56 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
57 	defined(QCA_WIFI_QCA6018) || defined(QCA_WIFI_QCA5018) || \
58 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCA5332) || \
59 	defined(QCA_WIFI_QCA9574)) && !defined(QCA_WIFI_SUPPORT_SRNG)
60 #define QCA_WIFI_SUPPORT_SRNG
61 #endif
62 
63 #ifdef QCA_WIFI_SUPPORT_SRNG
64 #include <hal_api.h>
65 #endif
66 
67 /* Forward references */
68 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
69 
70 /*
71  * Fix for EV118783: poll to check whether a BMI response has arrived,
72  * rather than only waiting for the interrupt, which may be lost.
73  */
74 /* #define BMI_RSP_POLLING */
75 #define BMI_RSP_TO_MILLISEC  1000
76 
77 #ifdef CONFIG_BYPASS_QMI
78 #define BYPASS_QMI 1
79 #else
80 #define BYPASS_QMI 0
81 #endif
82 
83 #ifdef ENABLE_10_4_FW_HDR
84 #if (ENABLE_10_4_FW_HDR == 1)
85 #define WDI_IPA_SERVICE_GROUP 5
86 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
87 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
88 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
89 #endif /* ENABLE_10_4_FW_HDR == 1 */
90 #endif /* ENABLE_10_4_FW_HDR */
91 
92 static void hif_config_rri_on_ddr(struct hif_softc *scn);
93 
94 /**
95  * hif_target_access_log_dump() - dump access log
96  *
97  * Dump the recorded target register access log.
98  *
99  * Return: n/a
100  */
101 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
102 static void hif_target_access_log_dump(void)
103 {
104 	hif_target_dump_access_log();
105 }
106 #endif
107 
108 /*
109  * This table contains the interrupt index for each Copy Engine
110  * for the various numbers of MSIs available in the system.
111  */
112 static struct ce_int_assignment ce_int_context[NUM_CE_CONTEXT] = {
113 	/* Default configuration */
114 	{{ CE_INTERRUPT_IDX(0),
115 	  CE_INTERRUPT_IDX(1),
116 	  CE_INTERRUPT_IDX(2),
117 	  CE_INTERRUPT_IDX(3),
118 	  CE_INTERRUPT_IDX(4),
119 	  CE_INTERRUPT_IDX(5),
120 	  CE_INTERRUPT_IDX(6),
121 	  CE_INTERRUPT_IDX(7),
122 	  CE_INTERRUPT_IDX(8),
123 	  CE_INTERRUPT_IDX(9),
124 	  CE_INTERRUPT_IDX(10),
125 	  CE_INTERRUPT_IDX(11),
126 #ifdef QCA_WIFI_QCN9224
127 	  CE_INTERRUPT_IDX(12),
128 	  CE_INTERRUPT_IDX(13),
129 	  CE_INTERRUPT_IDX(14),
130 	  CE_INTERRUPT_IDX(15),
131 #endif
132 	} },
133 	/* Interrupt assignment for 1 MSI combination */
134 	{{ CE_INTERRUPT_IDX(0),
135 	  CE_INTERRUPT_IDX(0),
136 	  CE_INTERRUPT_IDX(0),
137 	  CE_INTERRUPT_IDX(0),
138 	  CE_INTERRUPT_IDX(0),
139 	  CE_INTERRUPT_IDX(0),
140 	  CE_INTERRUPT_IDX(0),
141 	  CE_INTERRUPT_IDX(0),
142 	  CE_INTERRUPT_IDX(0),
143 	  CE_INTERRUPT_IDX(0),
144 	  CE_INTERRUPT_IDX(0),
145 	  CE_INTERRUPT_IDX(0),
146 #ifdef QCA_WIFI_QCN9224
147 	  CE_INTERRUPT_IDX(0),
148 	  CE_INTERRUPT_IDX(0),
149 	  CE_INTERRUPT_IDX(0),
150 	  CE_INTERRUPT_IDX(0),
151 #endif
152 	} },
153 	/* Interrupt assignment for 2 MSI combination */
154 	{{ CE_INTERRUPT_IDX(0),
155 	  CE_INTERRUPT_IDX(1),
156 	  CE_INTERRUPT_IDX(0),
157 	  CE_INTERRUPT_IDX(1),
158 	  CE_INTERRUPT_IDX(0),
159 	  CE_INTERRUPT_IDX(1),
160 	  CE_INTERRUPT_IDX(0),
161 	  CE_INTERRUPT_IDX(0),
162 	  CE_INTERRUPT_IDX(0),
163 	  CE_INTERRUPT_IDX(0),
164 	  CE_INTERRUPT_IDX(0),
165 	  CE_INTERRUPT_IDX(0),
166 #ifdef QCA_WIFI_QCN9224
167 	  CE_INTERRUPT_IDX(0),
168 	  CE_INTERRUPT_IDX(0),
169 	  CE_INTERRUPT_IDX(0),
170 	  CE_INTERRUPT_IDX(0),
171 #endif
172 	} },
173 	/* Interrupt assignment for 3 MSI combination */
174 	{{ CE_INTERRUPT_IDX(0),
175 	  CE_INTERRUPT_IDX(1),
176 	  CE_INTERRUPT_IDX(2),
177 	  CE_INTERRUPT_IDX(1),
178 	  CE_INTERRUPT_IDX(0),
179 	  CE_INTERRUPT_IDX(1),
180 	  CE_INTERRUPT_IDX(0),
181 	  CE_INTERRUPT_IDX(0),
182 	  CE_INTERRUPT_IDX(0),
183 	  CE_INTERRUPT_IDX(0),
184 	  CE_INTERRUPT_IDX(0),
185 	  CE_INTERRUPT_IDX(0),
186 #ifdef QCA_WIFI_QCN9224
187 	  CE_INTERRUPT_IDX(0),
188 	  CE_INTERRUPT_IDX(0),
189 	  CE_INTERRUPT_IDX(0),
190 	  CE_INTERRUPT_IDX(0),
191 #endif
192 	} },
193 	/* Interrupt assignment for 4 MSI combination */
194 	{{ CE_INTERRUPT_IDX(0),
195 	  CE_INTERRUPT_IDX(1),
196 	  CE_INTERRUPT_IDX(2),
197 	  CE_INTERRUPT_IDX(3),
198 	  CE_INTERRUPT_IDX(0),
199 	  CE_INTERRUPT_IDX(1),
200 	  CE_INTERRUPT_IDX(0),
201 	  CE_INTERRUPT_IDX(0),
202 	  CE_INTERRUPT_IDX(0),
203 	  CE_INTERRUPT_IDX(0),
204 	  CE_INTERRUPT_IDX(0),
205 	  CE_INTERRUPT_IDX(0),
206 #ifdef QCA_WIFI_QCN9224
207 	  CE_INTERRUPT_IDX(0),
208 	  CE_INTERRUPT_IDX(0),
209 	  CE_INTERRUPT_IDX(0),
210 	  CE_INTERRUPT_IDX(0),
211 #endif
212 	} },
213 	/* Interrupt assignment for 5 MSI combination */
214 	{{ CE_INTERRUPT_IDX(0),
215 	  CE_INTERRUPT_IDX(1),
216 	  CE_INTERRUPT_IDX(2),
217 	  CE_INTERRUPT_IDX(3),
218 	  CE_INTERRUPT_IDX(0),
219 	  CE_INTERRUPT_IDX(4),
220 	  CE_INTERRUPT_IDX(0),
221 	  CE_INTERRUPT_IDX(0),
222 	  CE_INTERRUPT_IDX(0),
223 	  CE_INTERRUPT_IDX(0),
224 	  CE_INTERRUPT_IDX(0),
225 	  CE_INTERRUPT_IDX(0),
226 #ifdef QCA_WIFI_QCN9224
227 	  CE_INTERRUPT_IDX(0),
228 	  CE_INTERRUPT_IDX(0),
229 	  CE_INTERRUPT_IDX(0),
230 	  CE_INTERRUPT_IDX(0),
231 #endif
232 	} },
233 	/* Interrupt assignment for 6 MSI combination */
234 	{{ CE_INTERRUPT_IDX(0),
235 	  CE_INTERRUPT_IDX(1),
236 	  CE_INTERRUPT_IDX(2),
237 	  CE_INTERRUPT_IDX(3),
238 	  CE_INTERRUPT_IDX(4),
239 	  CE_INTERRUPT_IDX(5),
240 	  CE_INTERRUPT_IDX(0),
241 	  CE_INTERRUPT_IDX(0),
242 	  CE_INTERRUPT_IDX(0),
243 	  CE_INTERRUPT_IDX(0),
244 	  CE_INTERRUPT_IDX(0),
245 	  CE_INTERRUPT_IDX(0),
246 #ifdef QCA_WIFI_QCN9224
247 	  CE_INTERRUPT_IDX(0),
248 	  CE_INTERRUPT_IDX(0),
249 	  CE_INTERRUPT_IDX(0),
250 	  CE_INTERRUPT_IDX(0),
251 #endif
252 	} },
253 	/* Interrupt assignment for 7 MSI combination */
254 	{{ CE_INTERRUPT_IDX(0),
255 	  CE_INTERRUPT_IDX(1),
256 	  CE_INTERRUPT_IDX(2),
257 	  CE_INTERRUPT_IDX(3),
258 	  CE_INTERRUPT_IDX(4),
259 	  CE_INTERRUPT_IDX(5),
260 	  CE_INTERRUPT_IDX(6),
261 	  CE_INTERRUPT_IDX(0),
262 	  CE_INTERRUPT_IDX(0),
263 	  CE_INTERRUPT_IDX(0),
264 	  CE_INTERRUPT_IDX(0),
265 	  CE_INTERRUPT_IDX(0),
266 #ifdef QCA_WIFI_QCN9224
267 	  CE_INTERRUPT_IDX(0),
268 	  CE_INTERRUPT_IDX(0),
269 	  CE_INTERRUPT_IDX(0),
270 	  CE_INTERRUPT_IDX(0),
271 #endif
272 	} },
273 	/* Interrupt assignment for 8 MSI combination */
274 	{{ CE_INTERRUPT_IDX(0),
275 	  CE_INTERRUPT_IDX(1),
276 	  CE_INTERRUPT_IDX(2),
277 	  CE_INTERRUPT_IDX(3),
278 	  CE_INTERRUPT_IDX(4),
279 	  CE_INTERRUPT_IDX(5),
280 	  CE_INTERRUPT_IDX(6),
281 	  CE_INTERRUPT_IDX(7),
282 	  CE_INTERRUPT_IDX(0),
283 	  CE_INTERRUPT_IDX(0),
284 	  CE_INTERRUPT_IDX(0),
285 	  CE_INTERRUPT_IDX(0),
286 #ifdef QCA_WIFI_QCN9224
287 	  CE_INTERRUPT_IDX(0),
288 	  CE_INTERRUPT_IDX(0),
289 	  CE_INTERRUPT_IDX(0),
290 	  CE_INTERRUPT_IDX(0),
291 #endif
292 	} },
293 	/* Interrupt assignment for 9 MSI combination */
294 	{{ CE_INTERRUPT_IDX(0),
295 	  CE_INTERRUPT_IDX(1),
296 	  CE_INTERRUPT_IDX(2),
297 	  CE_INTERRUPT_IDX(3),
298 	  CE_INTERRUPT_IDX(4),
299 	  CE_INTERRUPT_IDX(5),
300 	  CE_INTERRUPT_IDX(6),
301 	  CE_INTERRUPT_IDX(7),
302 	  CE_INTERRUPT_IDX(8),
303 	  CE_INTERRUPT_IDX(0),
304 	  CE_INTERRUPT_IDX(0),
305 	  CE_INTERRUPT_IDX(0),
306 #ifdef QCA_WIFI_QCN9224
307 	  CE_INTERRUPT_IDX(0),
308 	  CE_INTERRUPT_IDX(0),
309 	  CE_INTERRUPT_IDX(0),
310 	  CE_INTERRUPT_IDX(0),
311 #endif
312 	} },
313 	/* Interrupt assignment for 10 MSI combination */
314 	{{ CE_INTERRUPT_IDX(0),
315 	  CE_INTERRUPT_IDX(1),
316 	  CE_INTERRUPT_IDX(2),
317 	  CE_INTERRUPT_IDX(3),
318 	  CE_INTERRUPT_IDX(4),
319 	  CE_INTERRUPT_IDX(5),
320 	  CE_INTERRUPT_IDX(6),
321 	  CE_INTERRUPT_IDX(7),
322 	  CE_INTERRUPT_IDX(8),
323 	  CE_INTERRUPT_IDX(9),
324 	  CE_INTERRUPT_IDX(0),
325 	  CE_INTERRUPT_IDX(0),
326 #ifdef QCA_WIFI_QCN9224
327 	  CE_INTERRUPT_IDX(0),
328 	  CE_INTERRUPT_IDX(0),
329 	  CE_INTERRUPT_IDX(0),
330 	  CE_INTERRUPT_IDX(0),
331 #endif
332 	} },
333 	/* Interrupt assignment for 11 MSI combination */
334 	{{ CE_INTERRUPT_IDX(0),
335 	  CE_INTERRUPT_IDX(1),
336 	  CE_INTERRUPT_IDX(2),
337 	  CE_INTERRUPT_IDX(3),
338 	  CE_INTERRUPT_IDX(4),
339 	  CE_INTERRUPT_IDX(5),
340 	  CE_INTERRUPT_IDX(6),
341 	  CE_INTERRUPT_IDX(7),
342 	  CE_INTERRUPT_IDX(8),
343 	  CE_INTERRUPT_IDX(9),
344 	  CE_INTERRUPT_IDX(10),
345 	  CE_INTERRUPT_IDX(0),
346 #ifdef QCA_WIFI_QCN9224
347 	  CE_INTERRUPT_IDX(0),
348 	  CE_INTERRUPT_IDX(0),
349 	  CE_INTERRUPT_IDX(0),
350 	  CE_INTERRUPT_IDX(0),
351 #endif
352 	} },
353 	/* Interrupt assignment for 12 MSI combination */
354 	{{ CE_INTERRUPT_IDX(0),
355 	  CE_INTERRUPT_IDX(1),
356 	  CE_INTERRUPT_IDX(2),
357 	  CE_INTERRUPT_IDX(3),
358 	  CE_INTERRUPT_IDX(4),
359 	  CE_INTERRUPT_IDX(5),
360 	  CE_INTERRUPT_IDX(6),
361 	  CE_INTERRUPT_IDX(7),
362 	  CE_INTERRUPT_IDX(8),
363 	  CE_INTERRUPT_IDX(9),
364 	  CE_INTERRUPT_IDX(10),
365 	  CE_INTERRUPT_IDX(11),
366 #ifdef QCA_WIFI_QCN9224
367 	  CE_INTERRUPT_IDX(0),
368 	  CE_INTERRUPT_IDX(0),
369 	  CE_INTERRUPT_IDX(0),
370 	  CE_INTERRUPT_IDX(0),
371 #endif
372 	} },
373 #ifdef QCA_WIFI_QCN9224
374 	/* Interrupt assignment for 13 MSI combination */
375 	{{ CE_INTERRUPT_IDX(0),
376 	  CE_INTERRUPT_IDX(1),
377 	  CE_INTERRUPT_IDX(2),
378 	  CE_INTERRUPT_IDX(3),
379 	  CE_INTERRUPT_IDX(4),
380 	  CE_INTERRUPT_IDX(5),
381 	  CE_INTERRUPT_IDX(6),
382 	  CE_INTERRUPT_IDX(7),
383 	  CE_INTERRUPT_IDX(8),
384 	  CE_INTERRUPT_IDX(9),
385 	  CE_INTERRUPT_IDX(10),
386 	  CE_INTERRUPT_IDX(11),
387 	  CE_INTERRUPT_IDX(12),
388 	  CE_INTERRUPT_IDX(0),
389 	  CE_INTERRUPT_IDX(0),
390 	  CE_INTERRUPT_IDX(0),
391 	} },
392 	/* Interrupt assignment for 14 MSI combination */
393 	{{ CE_INTERRUPT_IDX(0),
394 	  CE_INTERRUPT_IDX(1),
395 	  CE_INTERRUPT_IDX(2),
396 	  CE_INTERRUPT_IDX(3),
397 	  CE_INTERRUPT_IDX(4),
398 	  CE_INTERRUPT_IDX(5),
399 	  CE_INTERRUPT_IDX(6),
400 	  CE_INTERRUPT_IDX(7),
401 	  CE_INTERRUPT_IDX(8),
402 	  CE_INTERRUPT_IDX(9),
403 	  CE_INTERRUPT_IDX(10),
404 	  CE_INTERRUPT_IDX(11),
405 	  CE_INTERRUPT_IDX(12),
406 	  CE_INTERRUPT_IDX(13),
407 	  CE_INTERRUPT_IDX(0),
408 	  CE_INTERRUPT_IDX(0),
409 	} },
410 	/* Interrupt assignment for 15 MSI combination */
411 	{{ CE_INTERRUPT_IDX(0),
412 	  CE_INTERRUPT_IDX(1),
413 	  CE_INTERRUPT_IDX(2),
414 	  CE_INTERRUPT_IDX(3),
415 	  CE_INTERRUPT_IDX(4),
416 	  CE_INTERRUPT_IDX(5),
417 	  CE_INTERRUPT_IDX(6),
418 	  CE_INTERRUPT_IDX(7),
419 	  CE_INTERRUPT_IDX(8),
420 	  CE_INTERRUPT_IDX(9),
421 	  CE_INTERRUPT_IDX(10),
422 	  CE_INTERRUPT_IDX(11),
423 	  CE_INTERRUPT_IDX(12),
424 	  CE_INTERRUPT_IDX(13),
425 	  CE_INTERRUPT_IDX(14),
426 	  CE_INTERRUPT_IDX(0),
427 	} },
428 	/* Interrupt assignment for 16 MSI combination */
429 	{{ CE_INTERRUPT_IDX(0),
430 	  CE_INTERRUPT_IDX(1),
431 	  CE_INTERRUPT_IDX(2),
432 	  CE_INTERRUPT_IDX(3),
433 	  CE_INTERRUPT_IDX(4),
434 	  CE_INTERRUPT_IDX(5),
435 	  CE_INTERRUPT_IDX(6),
436 	  CE_INTERRUPT_IDX(7),
437 	  CE_INTERRUPT_IDX(8),
438 	  CE_INTERRUPT_IDX(9),
439 	  CE_INTERRUPT_IDX(10),
440 	  CE_INTERRUPT_IDX(11),
441 	  CE_INTERRUPT_IDX(12),
442 	  CE_INTERRUPT_IDX(13),
443 	  CE_INTERRUPT_IDX(14),
444 	  CE_INTERRUPT_IDX(15),
445 	} },
446 #endif
447 };
448 
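/*
 * Illustrative sketch (not the driver's actual accessor) of how the
 * table above is consumed: row 0 is the default map and row N covers
 * the N-MSI combination, so resolving the MSI index for a CE is a
 * two-level lookup.  The member name msi_idx is an assumption here:
 *
 *	uint8_t msi = ce_int_context[msi_count].msi_idx[CE_id];
 */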
449 
450 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
451 		      uint8_t cmd_id, bool start)
452 {
453 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
454 
455 	switch (cmd_id) {
456 	case AGC_DUMP:
457 		if (start)
458 			priv_start_agc(scn);
459 		else
460 			priv_dump_agc(scn);
461 		break;
462 	case CHANINFO_DUMP:
463 		if (start)
464 			priv_start_cap_chaninfo(scn);
465 		else
466 			priv_dump_chaninfo(scn);
467 		break;
468 	case BB_WATCHDOG_DUMP:
469 		priv_dump_bbwatchdog(scn);
470 		break;
471 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
472 	case PCIE_ACCESS_DUMP:
473 		hif_target_access_log_dump();
474 		break;
475 #endif
476 	default:
477 		hif_err("Invalid htc dump command: %d", cmd_id);
478 		break;
479 	}
480 }
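
/*
 * Illustrative usage of hif_trigger_dump(); the calling context is
 * hypothetical, but the start/dump pairing follows the switch above:
 *
 *	hif_trigger_dump(hif_ctx, AGC_DUMP, true);   // start AGC capture
 *	...
 *	hif_trigger_dump(hif_ctx, AGC_DUMP, false);  // dump captured data
 */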
481 
482 static void ce_poll_timeout(void *arg)
483 {
484 	struct CE_state *CE_state = (struct CE_state *)arg;
485 
486 	if (CE_state->timer_inited) {
487 		ce_per_engine_service(CE_state->scn, CE_state->id);
488 		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
489 	}
490 }
491 
492 static unsigned int roundup_pwr2(unsigned int n)
493 {
494 	int i;
495 	unsigned int test_pwr2;
496 
497 	if (n && !(n & (n - 1)))
498 		return n; /* already a nonzero power of 2 */
499 
500 	test_pwr2 = 4;
501 	for (i = 0; i < 29; i++) {
502 		if (test_pwr2 > n)
503 			return test_pwr2;
504 		test_pwr2 = test_pwr2 << 1;
505 	}
506 
507 	QDF_ASSERT(0); /* n too large */
508 	return 0;
509 }
510 
511 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
512 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
513 
514 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
515 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
516 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
517 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
518 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
519 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
520 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
521 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
522 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
523 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
524 #ifdef QCA_WIFI_3_0_ADRASTEA
525 	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
526 	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
527 	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
528 #endif
529 };
530 
531 #ifdef QCN7605_SUPPORT
532 static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
533 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
534 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
535 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
536 	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
537 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
538 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
539 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
540 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
541 };
542 #endif
543 
544 #ifdef WLAN_FEATURE_EPPING
545 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
546 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
547 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
548 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
549 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
550 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
551 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
552 	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
553 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
554 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
555 };
556 #endif
557 
558 /* CE_PCI TABLE */
559 /*
560  * NOTE: the table below is out of date, though still a useful reference.
561  * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
562  * mapping of HTC services to HIF pipes.
563  */
564 /*
565  * This table defines the Copy Engine configuration and the mapping of
566  * services/endpoints to CEs.  A subset of this information is passed to
567  * the Target during startup as a prerequisite to entering BMI phase.
568  * See:
569  *    target_service_to_ce_map - Target-side mapping
570  *    hif_map_service_to_pipe  - Host-side mapping
571  *    target_ce_config         - Target-side configuration
572  *    host_ce_config           - Host-side configuration
573    ============================================================================
574    Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
575               |                      |      | ctio | Size     | Frequency
576               |                      |      | n    |          |
577    ============================================================================
578    tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
579    descriptor |                      |      |      | O(100B)  | and regular
580    download   |                      |      |      |          |
581    ----------------------------------------------------------------------------
582    rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
583    indication |                      |      |      | O(10B)   | regular
584    upload     |                      |      |      |          |
585    ----------------------------------------------------------------------------
586    MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
587    upload     |                      |      |      | O(1000B) | (frequent
588    e.g. noise |                      |      |      |          | during IP1.0
589    packets    |                      |      |      |          | testing)
590    ----------------------------------------------------------------------------
591    MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
592    download   |                      |      |      | O(1000B) | (frequent
593    e.g.       |                      |      |      |          | during IP1.0
594    misdirecte |                      |      |      |          | testing)
595    d EAPOL    |                      |      |      |          |
596    packets    |                      |      |      |          |
597    ----------------------------------------------------------------------------
598    n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
599               | DATA_VO (uplink)     |      |      |          |
600    ----------------------------------------------------------------------------
601    n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
602               | DATA_VO (downlink)   |      |      |          |
603    ----------------------------------------------------------------------------
604    WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
605               |                      |      |      | O(100B)  |
606    ----------------------------------------------------------------------------
607    WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
608    messages   | (downlink)           |      |      | O(100B)  |
609               |                      |      |      |          |
610    ----------------------------------------------------------------------------
611    n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
612               | HTC_RAW_STREAMS      |      |      |          |
613               | (uplink)             |      |      |          |
614    ----------------------------------------------------------------------------
615    n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
616               | HTC_RAW_STREAMS      |      |      |          |
617               | (downlink)           |      |      |          |
618    ----------------------------------------------------------------------------
619    diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
620               |                      |      |      |          | infrequent
621    ============================================================================
622  */
623 
624 /*
625  * Map from service/endpoint to Copy Engine.
626  * This table is derived from the CE_PCI TABLE, above.
627  * It is passed to the Target at startup for use by firmware.
628  */
629 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
630 	{
631 		WMI_DATA_VO_SVC,
632 		PIPEDIR_OUT,    /* out = UL = host -> target */
633 		3,
634 	},
635 	{
636 		WMI_DATA_VO_SVC,
637 		PIPEDIR_IN,     /* in = DL = target -> host */
638 		2,
639 	},
640 	{
641 		WMI_DATA_BK_SVC,
642 		PIPEDIR_OUT,    /* out = UL = host -> target */
643 		3,
644 	},
645 	{
646 		WMI_DATA_BK_SVC,
647 		PIPEDIR_IN,     /* in = DL = target -> host */
648 		2,
649 	},
650 	{
651 		WMI_DATA_BE_SVC,
652 		PIPEDIR_OUT,    /* out = UL = host -> target */
653 		3,
654 	},
655 	{
656 		WMI_DATA_BE_SVC,
657 		PIPEDIR_IN,     /* in = DL = target -> host */
658 		2,
659 	},
660 	{
661 		WMI_DATA_VI_SVC,
662 		PIPEDIR_OUT,    /* out = UL = host -> target */
663 		3,
664 	},
665 	{
666 		WMI_DATA_VI_SVC,
667 		PIPEDIR_IN,     /* in = DL = target -> host */
668 		2,
669 	},
670 	{
671 		WMI_CONTROL_SVC,
672 		PIPEDIR_OUT,    /* out = UL = host -> target */
673 		3,
674 	},
675 	{
676 		WMI_CONTROL_SVC,
677 		PIPEDIR_IN,     /* in = DL = target -> host */
678 		2,
679 	},
680 	{
681 		HTC_CTRL_RSVD_SVC,
682 		PIPEDIR_OUT,    /* out = UL = host -> target */
683 		0,              /* could be moved to 3 (share with WMI) */
684 	},
685 	{
686 		HTC_CTRL_RSVD_SVC,
687 		PIPEDIR_IN,     /* in = DL = target -> host */
688 		2,
689 	},
690 	{
691 		HTC_RAW_STREAMS_SVC, /* not currently used */
692 		PIPEDIR_OUT,    /* out = UL = host -> target */
693 		0,
694 	},
695 	{
696 		HTC_RAW_STREAMS_SVC, /* not currently used */
697 		PIPEDIR_IN,     /* in = DL = target -> host */
698 		2,
699 	},
700 	{
701 		HTT_DATA_MSG_SVC,
702 		PIPEDIR_OUT,    /* out = UL = host -> target */
703 		4,
704 	},
705 	{
706 		HTT_DATA_MSG_SVC,
707 		PIPEDIR_IN,     /* in = DL = target -> host */
708 		1,
709 	},
710 	{
711 		WDI_IPA_TX_SVC,
712 		PIPEDIR_OUT,    /* out = UL = host -> target */
713 		5,
714 	},
715 #if defined(QCA_WIFI_3_0_ADRASTEA)
716 	{
717 		HTT_DATA2_MSG_SVC,
718 		PIPEDIR_IN,    /* in = DL = target -> host */
719 		9,
720 	},
721 	{
722 		HTT_DATA3_MSG_SVC,
723 		PIPEDIR_IN,    /* in = DL = target -> host */
724 		10,
725 	},
726 	{
727 		PACKET_LOG_SVC,
728 		PIPEDIR_IN,    /* in = DL = target -> host */
729 		11,
730 	},
731 #endif
732 	/* (Additions here) */
733 
734 	{                       /* Must be last */
735 		0,
736 		0,
737 		0,
738 	},
739 };
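
/*
 * Illustrative (non-authoritative) sketch of how a map like the one
 * above is scanned; the real lookup is hif_map_service_to_pipe().  For
 * a given service id it yields the UL and DL pipe numbers:
 *
 *	for (i = 0; i < map_len; i++) {
 *		if (map[i].service_id != svc_id)
 *			continue;
 *		if (map[i].pipedir == PIPEDIR_OUT)      // UL: host -> target
 *			*ul_pipe = map[i].pipenum;
 *		else if (map[i].pipedir == PIPEDIR_IN)  // DL: target -> host
 *			*dl_pipe = map[i].pipenum;
 *	}
 *
 * E.g. WMI_CONTROL_SVC in target_service_to_ce_map_wlan resolves to
 * ul_pipe = 3 and dl_pipe = 2.
 */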
740 
741 /* PIPEDIR_OUT = HOST to Target */
742 /* PIPEDIR_IN  = TARGET to HOST */
743 #if (defined(QCA_WIFI_QCA8074))
744 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
745 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
746 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
747 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
748 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
749 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
750 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
751 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
752 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
753 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
754 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
755 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
756 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
757 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
758 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
759 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
760 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
761 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
762 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
763 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
764 	/* (Additions here) */
765 	{ 0, 0, 0, },
766 };
767 #else
768 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
769 };
770 #endif
771 
772 #if (defined(QCA_WIFI_QCA9574))
773 static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
774 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
775 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
776 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
777 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
778 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
779 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
780 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
781 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
782 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
783 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
784 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
785 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
786 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
787 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
788 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
789 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
790 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
791 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
792 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
793 	/* (Additions here) */
794 	{ 0, 0, 0, },
795 };
796 #else
797 static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
798 };
799 #endif
800 
801 #if (defined(QCA_WIFI_QCA8074V2))
802 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
803 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
804 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
805 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
806 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
807 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
808 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
809 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
810 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
811 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
812 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
813 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
814 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
815 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
816 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
817 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
818 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
819 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
820 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
821 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
822 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
823 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
824 	/* (Additions here) */
825 	{ 0, 0, 0, },
826 };
827 #else
828 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
829 };
830 #endif
831 
832 #if (defined(QCA_WIFI_QCA6018))
833 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
834 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
835 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
836 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
837 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
838 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
839 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
840 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
841 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
842 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
843 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
844 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
845 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
846 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
847 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
848 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
849 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
850 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
851 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
852 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
853 	/* (Additions here) */
854 	{ 0, 0, 0, },
855 };
856 #else
857 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
858 };
859 #endif
860 
861 #if (defined(QCA_WIFI_QCN9000))
862 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
863 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
864 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
865 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
866 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
867 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
868 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
869 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
870 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
871 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
872 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
873 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
874 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
875 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
876 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
877 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
878 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
879 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
880 	/* (Additions here) */
881 	{ 0, 0, 0, },
882 };
883 #else
884 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
885 };
886 #endif
887 
888 #if (defined(QCA_WIFI_QCA5332))
889 static struct service_to_pipe target_service_to_ce_map_qca5332[] = {
890 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
891 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
892 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
893 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
894 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
895 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
896 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
897 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
898 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
899 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
900 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
901 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
902 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
903 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
904 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
905 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
906 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
907 	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 9, },
908 	/* (Additions here) */
909 	{ 0, 0, 0, },
910 };
911 #else
912 static struct service_to_pipe target_service_to_ce_map_qca5332[] = {
913 };
914 #endif
915 
916 #if (defined(QCA_WIFI_QCN9224))
917 static struct service_to_pipe target_service_to_ce_map_qcn9224[] = {
918 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
919 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
920 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
921 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
922 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
923 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
924 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
925 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
926 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
927 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
928 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
929 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
930 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
931 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
932 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
933 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
934 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
935 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
936 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
937 	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 14, },
938 	{ WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 14, },
939 	/* (Additions here) */
940 	{ 0, 0, 0, },
941 };
942 #endif
943 
944 #if defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCN9160)
945 static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
946 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
947 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
948 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
949 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
950 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
951 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
952 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
953 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
954 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
955 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
956 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
957 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
958 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
959 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
960 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
961 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
962 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
963 	/* (Additions here) */
964 	{ 0, 0, 0, },
965 };
966 #else
967 static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
968 };
969 #endif
970 
971 /* PIPEDIR_OUT = HOST to Target */
972 /* PIPEDIR_IN  = TARGET to HOST */
973 #ifdef QCN7605_SUPPORT
974 static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
975 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
976 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
977 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
978 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
979 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
980 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
981 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
982 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
983 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
984 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
985 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
986 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
987 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
988 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
989 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
990 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
991 	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
992 #ifdef IPA_OFFLOAD
993 	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
994 #else
995 	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
996 #endif
997 	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
998 	/* (Additions here) */
999 	{ 0, 0, 0, },
1000 };
1001 #endif
1002 
1003 #if (defined(QCA_WIFI_QCA6290))
1004 #ifdef QCA_6290_AP_MODE
1005 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
1006 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1007 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1008 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1009 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1010 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1011 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1012 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1013 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1014 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1015 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1016 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1017 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1018 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1019 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1020 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
1021 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
1022 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
1023 	/* (Additions here) */
1024 	{ 0, 0, 0, },
1025 };
1026 #else
1027 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
1028 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1029 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1030 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1031 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1032 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1033 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1034 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1035 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1036 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1037 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1038 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1039 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1040 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1041 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1042 	/* (Additions here) */
1043 	{ 0, 0, 0, },
1044 };
1045 #endif
1046 #else
1047 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
1048 };
1049 #endif
1050 
1051 #if (defined(QCA_WIFI_QCA6390))
1052 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
1053 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1054 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1055 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1056 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1057 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1058 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1059 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1060 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1061 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1062 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1063 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1064 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1065 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1066 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1067 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
1068 	/* (Additions here) */
1069 	{ 0, 0, 0, },
1070 };
1071 #else
1072 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
1073 };
1074 #endif
1075 
1076 static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
1077 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1078 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1079 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1080 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1081 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1082 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1083 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1084 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1085 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1086 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1087 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1088 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1089 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1090 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1091 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
1092 	/* (Additions here) */
1093 	{ 0, 0, 0, },
1094 };
1095 
1096 #if (defined(QCA_WIFI_QCA6750))
1097 static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
1098 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1099 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1100 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1101 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1102 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1103 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1104 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1105 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1106 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1107 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1108 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1109 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1110 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1111 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1112 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
1113 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
1114 	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
1115 #endif
1116 	/* (Additions here) */
1117 	{ 0, 0, 0, },
1118 };
1119 #else
1120 static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
1121 };
1122 #endif
1123 
1124 #if (defined(QCA_WIFI_KIWI))
1125 static struct service_to_pipe target_service_to_ce_map_kiwi[] = {
1126 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1127 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1128 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1129 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1130 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1131 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1132 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1133 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1134 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1135 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1136 #ifdef FEATURE_XPAN
1137 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 4, },
1138 #else
1139 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1140 #endif
1141 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1142 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1143 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1144 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
1145 	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
1146 #endif
1147 #ifdef FEATURE_XPAN
1148 	{ LPASS_DATA_MSG_SVC, PIPEDIR_OUT, 0, },
1149 	{ LPASS_DATA_MSG_SVC, PIPEDIR_IN, 5, },
1150 #endif
1151 	/* (Additions here) */
1152 	{ 0, 0, 0, },
1153 };
1154 #else
1155 static struct service_to_pipe target_service_to_ce_map_kiwi[] = {
1156 };
1157 #endif
1158 
1159 static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
1160 	{
1161 		WMI_DATA_VO_SVC,
1162 		PIPEDIR_OUT,    /* out = UL = host -> target */
1163 		3,
1164 	},
1165 	{
1166 		WMI_DATA_VO_SVC,
1167 		PIPEDIR_IN,     /* in = DL = target -> host */
1168 		2,
1169 	},
1170 	{
1171 		WMI_DATA_BK_SVC,
1172 		PIPEDIR_OUT,    /* out = UL = host -> target */
1173 		3,
1174 	},
1175 	{
1176 		WMI_DATA_BK_SVC,
1177 		PIPEDIR_IN,     /* in = DL = target -> host */
1178 		2,
1179 	},
1180 	{
1181 		WMI_DATA_BE_SVC,
1182 		PIPEDIR_OUT,    /* out = UL = host -> target */
1183 		3,
1184 	},
1185 	{
1186 		WMI_DATA_BE_SVC,
1187 		PIPEDIR_IN,     /* in = DL = target -> host */
1188 		2,
1189 	},
1190 	{
1191 		WMI_DATA_VI_SVC,
1192 		PIPEDIR_OUT,    /* out = UL = host -> target */
1193 		3,
1194 	},
1195 	{
1196 		WMI_DATA_VI_SVC,
1197 		PIPEDIR_IN,     /* in = DL = target -> host */
1198 		2,
1199 	},
1200 	{
1201 		WMI_CONTROL_SVC,
1202 		PIPEDIR_OUT,    /* out = UL = host -> target */
1203 		3,
1204 	},
1205 	{
1206 		WMI_CONTROL_SVC,
1207 		PIPEDIR_IN,     /* in = DL = target -> host */
1208 		2,
1209 	},
1210 	{
1211 		HTC_CTRL_RSVD_SVC,
1212 		PIPEDIR_OUT,    /* out = UL = host -> target */
1213 		0,              /* could be moved to 3 (share with WMI) */
1214 	},
1215 	{
1216 		HTC_CTRL_RSVD_SVC,
1217 		PIPEDIR_IN,     /* in = DL = target -> host */
1218 		1,
1219 	},
1220 	{
1221 		HTC_RAW_STREAMS_SVC, /* not currently used */
1222 		PIPEDIR_OUT,    /* out = UL = host -> target */
1223 		0,
1224 	},
1225 	{
1226 		HTC_RAW_STREAMS_SVC, /* not currently used */
1227 		PIPEDIR_IN,     /* in = DL = target -> host */
1228 		1,
1229 	},
1230 	{
1231 		HTT_DATA_MSG_SVC,
1232 		PIPEDIR_OUT,    /* out = UL = host -> target */
1233 		4,
1234 	},
1235 #ifdef WLAN_FEATURE_FASTPATH
1236 	{
1237 		HTT_DATA_MSG_SVC,
1238 		PIPEDIR_IN,     /* in = DL = target -> host */
1239 		5,
1240 	},
1241 #else /* WLAN_FEATURE_FASTPATH */
1242 	{
1243 		HTT_DATA_MSG_SVC,
1244 		PIPEDIR_IN,  /* in = DL = target -> host */
1245 		1,
1246 	},
1247 #endif /* WLAN_FEATURE_FASTPATH */
1248 
1249 	/* (Additions here) */
1250 
1251 	{                       /* Must be last */
1252 		0,
1253 		0,
1254 		0,
1255 	},
1256 };
1257 
1258 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
1259 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
1260 
1261 #ifdef WLAN_FEATURE_EPPING
1262 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
1263 	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
1264 	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
1265 	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
1266 	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
1267 	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
1268 	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
1269 	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
1270 	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
1271 	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
1272 	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
1273 	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
1274 	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
1275 	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
1276 	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
1277 	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
1278 	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
1279 	{0, 0, 0,},             /* Must be last */
1280 };
1281 
1282 void hif_select_epping_service_to_pipe_map(struct service_to_pipe
1283 					   **tgt_svc_map_to_use,
1284 					   uint32_t *sz_tgt_svc_map_to_use)
1285 {
1286 	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
1287 	*sz_tgt_svc_map_to_use =
1288 			sizeof(target_service_to_ce_map_wlan_epping);
1289 }
1290 #endif
1291 
1292 #ifdef QCN7605_SUPPORT
1293 static inline
1294 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
1295 			       uint32_t *sz_tgt_svc_map_to_use)
1296 {
1297 	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
1298 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
1299 }
1300 #else
1301 static inline
1302 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
1303 			       uint32_t *sz_tgt_svc_map_to_use)
1304 {
1305 	hif_err("QCN7605 not supported");
1306 }
1307 #endif
1308 
1309 #ifdef QCA_WIFI_QCN9224
1310 static
1311 void hif_set_ce_config_qcn9224(struct hif_softc *scn,
1312 			       struct HIF_CE_state *hif_state)
1313 {
1314 	hif_state->host_ce_config = host_ce_config_wlan_qcn9224;
1315 	hif_state->target_ce_config = target_ce_config_wlan_qcn9224;
1316 	hif_state->target_ce_config_sz =
1317 				 sizeof(target_ce_config_wlan_qcn9224);
1318 	scn->ce_count = QCN_9224_CE_COUNT;
1319 	scn->disable_wake_irq = 1;
1320 }
1321 
1322 static
1323 void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
1324 			       uint32_t *sz_tgt_svc_map_to_use)
1325 {
1326 	*tgt_svc_map_to_use = target_service_to_ce_map_qcn9224;
1327 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn9224);
1328 }
1329 #else
1330 static inline
1331 void hif_set_ce_config_qcn9224(struct hif_softc *scn,
1332 			       struct HIF_CE_state *hif_state)
1333 {
1334 	hif_err("QCN9224 not supported");
1335 }
1336 
1337 static inline
1338 void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
1339 			       uint32_t *sz_tgt_svc_map_to_use)
1340 {
1341 	hif_err("QCN9224 not supported");
1342 }
1343 #endif
1344 
1345 static void hif_select_service_to_pipe_map(struct hif_softc *scn,
1346 				    struct service_to_pipe **tgt_svc_map_to_use,
1347 				    uint32_t *sz_tgt_svc_map_to_use)
1348 {
1349 	uint32_t mode = hif_get_conparam(scn);
1350 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1351 	struct hif_target_info *tgt_info = &scn->target_info;
1352 
1353 	if (QDF_IS_EPPING_ENABLED(mode)) {
1354 		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
1355 						      sz_tgt_svc_map_to_use);
1356 	} else {
1357 		switch (tgt_info->target_type) {
1358 		default:
1359 			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
1360 			*sz_tgt_svc_map_to_use =
1361 				sizeof(target_service_to_ce_map_wlan);
1362 			break;
1363 		case TARGET_TYPE_QCN7605:
1364 			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
1365 						  sz_tgt_svc_map_to_use);
1366 			break;
1367 		case TARGET_TYPE_AR900B:
1368 		case TARGET_TYPE_QCA9984:
1369 		case TARGET_TYPE_QCA9888:
1370 		case TARGET_TYPE_AR9888:
1371 		case TARGET_TYPE_AR9888V2:
1372 			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
1373 			*sz_tgt_svc_map_to_use =
1374 				sizeof(target_service_to_ce_map_ar900b);
1375 			break;
1376 		case TARGET_TYPE_QCA6290:
1377 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
1378 			*sz_tgt_svc_map_to_use =
1379 				sizeof(target_service_to_ce_map_qca6290);
1380 			break;
1381 		case TARGET_TYPE_QCA6390:
1382 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
1383 			*sz_tgt_svc_map_to_use =
1384 				sizeof(target_service_to_ce_map_qca6390);
1385 			break;
1386 		case TARGET_TYPE_QCA6490:
1387 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
1388 			*sz_tgt_svc_map_to_use =
1389 				sizeof(target_service_to_ce_map_qca6490);
1390 			break;
1391 		case TARGET_TYPE_QCA6750:
1392 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6750;
1393 			*sz_tgt_svc_map_to_use =
1394 				sizeof(target_service_to_ce_map_qca6750);
1395 			break;
1396 		case TARGET_TYPE_KIWI:
1397 		case TARGET_TYPE_MANGO:
1398 			*tgt_svc_map_to_use = target_service_to_ce_map_kiwi;
1399 			*sz_tgt_svc_map_to_use =
1400 				sizeof(target_service_to_ce_map_kiwi);
1401 			break;
1402 		case TARGET_TYPE_QCA8074:
1403 			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
1404 			*sz_tgt_svc_map_to_use =
1405 				sizeof(target_service_to_ce_map_qca8074);
1406 			break;
1407 		case TARGET_TYPE_QCA8074V2:
1408 			*tgt_svc_map_to_use =
1409 				target_service_to_ce_map_qca8074_v2;
1410 			*sz_tgt_svc_map_to_use =
1411 				sizeof(target_service_to_ce_map_qca8074_v2);
1412 			break;
1413 		case TARGET_TYPE_QCA9574:
1414 			*tgt_svc_map_to_use =
1415 				target_service_to_ce_map_qca9574;
1416 			*sz_tgt_svc_map_to_use =
1417 				sizeof(target_service_to_ce_map_qca9574);
1418 			break;
1419 		case TARGET_TYPE_QCA6018:
1420 			*tgt_svc_map_to_use =
1421 				target_service_to_ce_map_qca6018;
1422 			*sz_tgt_svc_map_to_use =
1423 				sizeof(target_service_to_ce_map_qca6018);
1424 			break;
1425 		case TARGET_TYPE_QCN9000:
1426 			*tgt_svc_map_to_use =
1427 				target_service_to_ce_map_qcn9000;
1428 			*sz_tgt_svc_map_to_use =
1429 				sizeof(target_service_to_ce_map_qcn9000);
1430 			break;
1431 		case TARGET_TYPE_QCN9224:
1432 			hif_select_ce_map_qcn9224(tgt_svc_map_to_use,
1433 						  sz_tgt_svc_map_to_use);
1434 			break;
1435 		case TARGET_TYPE_QCA5332:
1436 			*tgt_svc_map_to_use = target_service_to_ce_map_qca5332;
1437 			*sz_tgt_svc_map_to_use =
1438 				sizeof(target_service_to_ce_map_qca5332);
1439 			break;
1440 		case TARGET_TYPE_QCA5018:
1441 		case TARGET_TYPE_QCN6122:
1442 		case TARGET_TYPE_QCN9160:
1443 			*tgt_svc_map_to_use =
1444 				target_service_to_ce_map_qca5018;
1445 			*sz_tgt_svc_map_to_use =
1446 				sizeof(target_service_to_ce_map_qca5018);
1447 			break;
1448 		}
1449 	}
1450 	hif_state->tgt_svc_map = *tgt_svc_map_to_use;
1451 	hif_state->sz_tgt_svc_map = *sz_tgt_svc_map_to_use /
1452 					sizeof(struct service_to_pipe);
1453 }
1454 
1455 /**
1456  * ce_mark_datapath() - mark a CE that serves the HTT datapath
1457  * @ce_state : pointer to the state context of the CE
1458  *
1459  * Description:
1460  *   Sets the htt_rx_data (PIPEDIR_IN) or htt_tx_data (PIPEDIR_OUT)
1461  *   attribute of the state structure if the CE serves one of the
1462  *   HTT DATA services.
1463  *
1464  * Return:
1465  *   true if the CE serves an HTT DATA service, false otherwise
1466  */
1467 static bool ce_mark_datapath(struct CE_state *ce_state)
1468 {
1469 	struct service_to_pipe *svc_map;
1470 	uint32_t map_sz, map_len;
1471 	int    i;
1472 	bool   rc = false;
1473 
1474 	if (ce_state) {
1475 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
1476 					       &map_sz);
1477 
1478 		map_len = map_sz / sizeof(struct service_to_pipe);
1479 		for (i = 0; i < map_len; i++) {
1480 			if ((svc_map[i].pipenum == ce_state->id) &&
1481 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
1482 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
1483 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
1484 				/* HTT CEs are unidirectional */
1485 				if (svc_map[i].pipedir == PIPEDIR_IN)
1486 					ce_state->htt_rx_data = true;
1487 				else
1488 					ce_state->htt_tx_data = true;
1489 				rc = true;
1490 			}
1491 		}
1492 	}
1493 	return rc;
1494 }
1495 
1496 /**
1497  * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
1498  * @hif_ctx: hif opaque handle
1499  *
1500  * Description:
1501  *   Gets the number of WMI EPs configured in the target svc map. Since
1502  *   the EP map includes both IN and OUT direction pipes, only OUT pipes
1503  *   are counted to get the number of EPs configured for the WMI service.
1504  *
1505  * Return:
1506  *  uint8_t: count for WMI eps in target svc map
1507  */
1508 uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *hif_ctx)
1509 {
1510 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1511 	struct service_to_pipe *svc_map;
1512 	uint32_t map_sz, map_len;
1513 	int    i;
1514 	uint8_t   wmi_ep_count = 0;
1515 
1516 	hif_select_service_to_pipe_map(scn, &svc_map,
1517 				       &map_sz);
1518 	map_len = map_sz / sizeof(struct service_to_pipe);
1519 
1520 	for (i = 0; i < map_len; i++) {
1521 		/* Count number of WMI EPs based on out direction */
1522 		if ((svc_map[i].pipedir == PIPEDIR_OUT) &&
1523 		    ((svc_map[i].service_id == WMI_CONTROL_SVC)  ||
1524 		    (svc_map[i].service_id == WMI_CONTROL_SVC_WMAC1) ||
1525 		    (svc_map[i].service_id == WMI_CONTROL_SVC_WMAC2))) {
1526 			wmi_ep_count++;
1527 		}
1528 	}
1529 
1530 	return wmi_ep_count;
1531 }
1532 
1533 /**
1534  * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
1535  * @ce_id: ce in question
1536  * @ring: ring state being examined
1537  * @type: "src_ring" or "dest_ring" string for identifying the ring
1538  *
1539  * Warns on non-zero index values.
1540  * Causes a kernel panic if the ring is not empty during initialization.
1541  */
1542 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
1543 					 char *type)
1544 {
1545 	if (ring->write_index != 0 || ring->sw_index != 0)
1546 		hif_err("ce %d, %s, initial sw_index = %d, initial write_index = %d",
1547 			  ce_id, type, ring->sw_index, ring->write_index);
1548 	if (ring->write_index != ring->sw_index)
1549 		QDF_BUG(0);
1550 }
1551 
1552 #ifdef IPA_OFFLOAD
1553 /**
1554  * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
1555  * @scn: softc instance
1556  * @CE_id: ce in question
1557  * @base_addr: pointer to copyengine ring base address
1558  * @ce_ring: copyengine instance
1559  * @nentries: number of entries to be allocated
1560  * @desc_size: ce desc size
1561  *
1562  * Return: QDF_STATUS_SUCCESS - for success
1563  */
1564 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1565 				     qdf_dma_addr_t *base_addr,
1566 				     struct CE_ring_state *ce_ring,
1567 				     unsigned int nentries, uint32_t desc_size)
1568 {
1569 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
1570 	    !ce_srng_based(scn)) {
1571 		if (!scn->ipa_ce_ring) {
1572 			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
1573 				scn->qdf_dev,
1574 				nentries * desc_size + CE_DESC_RING_ALIGN);
1575 			if (!scn->ipa_ce_ring) {
1576 				hif_err(
1577 				"Failed to allocate memory for IPA ce ring");
1578 				return QDF_STATUS_E_NOMEM;
1579 			}
1580 		}
1581 		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
1582 						&scn->ipa_ce_ring->mem_info);
1583 		ce_ring->base_addr_owner_space_unaligned =
1584 						scn->ipa_ce_ring->vaddr;
1585 	} else {
1586 		ce_ring->base_addr_owner_space_unaligned =
1587 			hif_mem_alloc_consistent_unaligned
1588 					(scn,
1589 					 (nentries * desc_size +
1590 					  CE_DESC_RING_ALIGN),
1591 					 base_addr,
1592 					 ce_ring->hal_ring_type,
1593 					 &ce_ring->is_ring_prealloc);
1594 
1595 		if (!ce_ring->base_addr_owner_space_unaligned) {
1596 			hif_err("Failed to allocate DMA memory for ce ring id: %u",
1597 			       CE_id);
1598 			return QDF_STATUS_E_NOMEM;
1599 		}
1600 	}
1601 	return QDF_STATUS_SUCCESS;
1602 }
1603 
1604 /**
1605  * ce_free_desc_ring() - Frees copyengine descriptor ring
1606  * @scn: softc instance
1607  * @CE_id: ce in question
1608  * @ce_ring: copyengine instance
1609  * @desc_size: ce desc size
1610  *
1611  * Return: None
1612  */
1613 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1614 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
1615 {
1616 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
1617 	    !ce_srng_based(scn)) {
1618 		if (scn->ipa_ce_ring) {
1619 			qdf_mem_shared_mem_free(scn->qdf_dev,
1620 						scn->ipa_ce_ring);
1621 			scn->ipa_ce_ring = NULL;
1622 		}
1623 		ce_ring->base_addr_owner_space_unaligned = NULL;
1624 	} else {
1625 		hif_mem_free_consistent_unaligned
1626 			(scn,
1627 			 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
1628 			 ce_ring->base_addr_owner_space_unaligned,
1629 			 ce_ring->base_addr_CE_space, 0,
1630 			 ce_ring->is_ring_prealloc);
1631 		ce_ring->base_addr_owner_space_unaligned = NULL;
1632 	}
1633 }
1634 #else
1635 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1636 				     qdf_dma_addr_t *base_addr,
1637 				     struct CE_ring_state *ce_ring,
1638 				     unsigned int nentries, uint32_t desc_size)
1639 {
1640 	ce_ring->base_addr_owner_space_unaligned =
1641 			hif_mem_alloc_consistent_unaligned
1642 					(scn,
1643 					 (nentries * desc_size +
1644 					  CE_DESC_RING_ALIGN),
1645 					 base_addr,
1646 					 ce_ring->hal_ring_type,
1647 					 &ce_ring->is_ring_prealloc);
1648 
1649 	if (!ce_ring->base_addr_owner_space_unaligned) {
1650 		hif_err("Failed to allocate DMA memory for ce ring id: %u",
1651 		       CE_id);
1652 		return QDF_STATUS_E_NOMEM;
1653 	}
1654 	return QDF_STATUS_SUCCESS;
1655 }
1656 
1657 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1658 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
1659 {
1660 	hif_mem_free_consistent_unaligned
1661 		(scn,
1662 		 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
1663 		 ce_ring->base_addr_owner_space_unaligned,
1664 		 ce_ring->base_addr_CE_space, 0,
1665 		 ce_ring->is_ring_prealloc);
1666 	ce_ring->base_addr_owner_space_unaligned = NULL;
1667 }
1668 #endif /* IPA_OFFLOAD */
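
/*
 * Typical pairing of the two helpers above during ring setup/teardown,
 * sketched from the local callers (see ce_alloc_ring_state() below);
 * not a verbatim excerpt:
 *
 *	desc_size = ce_get_desc_size(scn, ring_type);
 *	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
 *			       ce_ring, nentries, desc_size) !=
 *	    QDF_STATUS_SUCCESS)
 *		goto fail;
 *	...
 *	ce_free_desc_ring(scn, CE_state->id, ce_ring, desc_size);
 */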
1669 
1670 /*
1671  * TODO: Need to explore the possibility of having this as part of a
1672  * target context instead of a global array.
1673  */
1674 static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);
1675 
1676 void ce_service_register_module(enum ce_target_type target_type,
1677 				struct ce_ops* (*ce_attach)(void))
1678 {
1679 	if (target_type < CE_MAX_TARGET_TYPE)
1680 		ce_attach_register[target_type] = ce_attach;
1681 }
1682 
1683 qdf_export_symbol(ce_service_register_module);
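
/*
 * Illustrative registration from a CE service backend.  The attach
 * function and ops-table names below are hypothetical; each backend
 * supplies its own constructor returning its struct ce_ops table:
 *
 *	static struct ce_ops *ce_services_srng_attach(void)
 *	{
 *		return &ce_service_srng_ops;  // hypothetical ops table
 *	}
 *
 *	ce_service_register_module(CE_SVC_SRNG, ce_services_srng_attach);
 */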
1684 
1685 /**
1686  * ce_srng_based() - Does this target use srng
1687  * @scn: pointer to the hif context
1688  *
1689  * Description:
1690  *   Checks the target type against the list of SRNG-based targets.
1691  *
1692  * Return:
1693  *  true  if the target is SRNG based
1694  *  false otherwise
1695  */
1696 bool ce_srng_based(struct hif_softc *scn)
1697 {
1698 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1699 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1700 
1701 	switch (tgt_info->target_type) {
1702 	case TARGET_TYPE_QCA8074:
1703 	case TARGET_TYPE_QCA8074V2:
1704 	case TARGET_TYPE_QCA6290:
1705 	case TARGET_TYPE_QCA6390:
1706 	case TARGET_TYPE_QCA6490:
1707 	case TARGET_TYPE_QCA6750:
1708 	case TARGET_TYPE_QCA6018:
1709 	case TARGET_TYPE_QCN9000:
1710 	case TARGET_TYPE_QCN6122:
1711 	case TARGET_TYPE_QCN9160:
1712 	case TARGET_TYPE_QCA5018:
1713 	case TARGET_TYPE_KIWI:
1714 	case TARGET_TYPE_MANGO:
1715 	case TARGET_TYPE_QCN9224:
1716 	case TARGET_TYPE_QCA9574:
1717 	case TARGET_TYPE_QCA5332:
1718 		return true;
1719 	default:
1720 		return false;
1721 	}
1722 	return false;
1723 }
1724 qdf_export_symbol(ce_srng_based);
1725 
1726 #ifdef QCA_WIFI_SUPPORT_SRNG
1727 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1728 {
1729 	struct ce_ops *ops = NULL;
1730 
1731 	if (ce_srng_based(scn)) {
1732 		if (ce_attach_register[CE_SVC_SRNG])
1733 			ops = ce_attach_register[CE_SVC_SRNG]();
1734 	} else if (ce_attach_register[CE_SVC_LEGACY]) {
1735 		ops = ce_attach_register[CE_SVC_LEGACY]();
1736 	}
1737 
1738 	return ops;
1739 }
1740 
1742 #else	/* QCA_WIFI_SUPPORT_SRNG */
1743 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1744 {
1745 	if (ce_attach_register[CE_SVC_LEGACY])
1746 		return ce_attach_register[CE_SVC_LEGACY]();
1747 
1748 	return NULL;
1749 }
1750 #endif /* QCA_WIFI_SUPPORT_SRNG */
1751 
1752 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
1753 		struct pld_shadow_reg_v2_cfg **shadow_config,
1754 		int *num_shadow_registers_configured)
1755 {
1756 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1757 
1758 	hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
1759 			scn, shadow_config, num_shadow_registers_configured);
1760 }
1762 
1763 #ifdef CONFIG_SHADOW_V3
1764 static inline void
1765 hif_prepare_hal_shadow_reg_cfg_v3(struct hif_softc *scn,
1766 				  struct pld_wlan_enable_cfg *cfg)
1767 {
1768 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1769 
1770 	if (!hif_state->ce_services->ce_prepare_shadow_register_v3_cfg)
1771 		return;
1772 
1773 	hif_state->ce_services->ce_prepare_shadow_register_v3_cfg(
1774 			scn, &cfg->shadow_reg_v3_cfg,
1775 			&cfg->num_shadow_reg_v3_cfg);
1776 }
1777 #else
1778 static inline void
1779 hif_prepare_hal_shadow_reg_cfg_v3(struct hif_softc *scn,
1780 				  struct pld_wlan_enable_cfg *cfg)
1781 {
1782 }
1783 #endif
1784 
1785 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
1786 						uint8_t ring_type)
1787 {
1788 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1789 
1790 	return hif_state->ce_services->ce_get_desc_size(ring_type);
1791 }
1792 
1793 #ifdef QCA_WIFI_SUPPORT_SRNG
1794 static inline int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
1795 {
1796 	switch (ce_ring_type) {
1797 	case CE_RING_SRC:
1798 		return CE_SRC;
1799 	case CE_RING_DEST:
1800 		return CE_DST;
1801 	case CE_RING_STATUS:
1802 		return CE_DST_STATUS;
1803 	default:
1804 		return -EINVAL;
1805 	}
1806 }
1807 #else
1808 static int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
1809 {
1810 	return 0;
1811 }
1812 #endif
1813 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
1814 		uint8_t ring_type, uint32_t nentries)
1815 {
1816 	uint32_t ce_nbytes;
1817 	char *ptr;
1818 	qdf_dma_addr_t base_addr;
1819 	struct CE_ring_state *ce_ring;
1820 	uint32_t desc_size;
1821 	struct hif_softc *scn = CE_state->scn;
1822 
1823 	ce_nbytes = sizeof(struct CE_ring_state)
1824 		+ (nentries * sizeof(void *));
1825 	ptr = qdf_mem_malloc(ce_nbytes);
1826 	if (!ptr)
1827 		return NULL;
1828 
1829 	ce_ring = (struct CE_ring_state *)ptr;
1830 	ptr += sizeof(struct CE_ring_state);
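	/*
	 * nentries is expected to be a power of two (callers round up via
	 * roundup_pwr2()), so nentries_mask gives cheap ring wraparound:
	 * next_idx = (idx + 1) & nentries_mask.
	 */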
1831 	ce_ring->nentries = nentries;
1832 	ce_ring->nentries_mask = nentries - 1;
1833 
1834 	ce_ring->low_water_mark_nentries = 0;
1835 	ce_ring->high_water_mark_nentries = nentries;
1836 	ce_ring->per_transfer_context = (void **)ptr;
1837 	ce_ring->hal_ring_type = ce_ring_type_to_hal_ring_type(ring_type);
1838 
1839 	desc_size = ce_get_desc_size(scn, ring_type);
1840 
1841 	/* Legacy platforms without cache-coherent DMA
1842 	 * support are not supported
1843 	 */
1844 	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
1845 			       ce_ring, nentries,
1846 			       desc_size) !=
1847 	    QDF_STATUS_SUCCESS) {
1848 		hif_err("ring has no DMA mem");
1849 		qdf_mem_free(ce_ring);
1850 		return NULL;
1851 	}
1852 	ce_ring->base_addr_CE_space_unaligned = base_addr;
1853 
1854 	/* Initialize memory to 0 to prevent
1855 	 * garbage data from crashing the system
1856 	 * when downloading firmware
1857 	 */
1858 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
1859 			nentries * desc_size +
1860 			CE_DESC_RING_ALIGN);
1861 
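	/*
	 * Align the ring base up to CE_DESC_RING_ALIGN. Illustration: with
	 * an alignment of 8, an unaligned base of 0x1004 becomes
	 * (0x1004 + 7) & ~7 = 0x1008; the owner (CPU) space address is
	 * offset by the same amount so both views stay in step.
	 */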
1862 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
1863 
1864 		ce_ring->base_addr_CE_space =
1865 			(ce_ring->base_addr_CE_space_unaligned +
1866 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
1867 
1868 		ce_ring->base_addr_owner_space = (void *)
1869 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
1870 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
1871 	} else {
1872 		ce_ring->base_addr_CE_space =
1873 				ce_ring->base_addr_CE_space_unaligned;
1874 		ce_ring->base_addr_owner_space =
1875 				ce_ring->base_addr_owner_space_unaligned;
1876 	}
1877 
1878 	return ce_ring;
1879 }
1880 
1881 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
1882 			uint32_t ce_id, struct CE_ring_state *ring,
1883 			struct CE_attr *attr)
1884 {
1885 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1886 
1887 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
1888 					      ring, attr);
1889 }
1890 
1891 static void ce_srng_cleanup(struct hif_softc *scn, struct CE_state *CE_state,
1892 			    uint8_t ring_type)
1893 {
1894 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1895 
1896 	if (hif_state->ce_services->ce_srng_cleanup)
1897 		hif_state->ce_services->ce_srng_cleanup(scn,
1898 					CE_state, ring_type);
1899 }
1900 
1901 int hif_ce_bus_early_suspend(struct hif_softc *scn)
1902 {
1903 	uint8_t ul_pipe, dl_pipe;
1904 	int ce_id, status, ul_is_polled, dl_is_polled;
1905 	struct CE_state *ce_state;
1906 
1907 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
1908 					 &ul_pipe, &dl_pipe,
1909 					 &ul_is_polled, &dl_is_polled);
1910 	if (status) {
1911 		hif_err("pipe_mapping failure");
1912 		return status;
1913 	}
1914 
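	/* Pause every CE except the WMI control pipes, which must remain
	 * usable so that control messages can still reach the target while
	 * the bus is suspending.
	 */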
1915 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1916 		if (ce_id == ul_pipe)
1917 			continue;
1918 		if (ce_id == dl_pipe)
1919 			continue;
1920 
1921 		ce_state = scn->ce_id_to_state[ce_id];
1922 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1923 		if (ce_state->state == CE_RUNNING)
1924 			ce_state->state = CE_PAUSED;
1925 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1926 	}
1927 
1928 	return status;
1929 }
1930 
1931 int hif_ce_bus_late_resume(struct hif_softc *scn)
1932 {
1933 	int ce_id;
1934 	struct CE_state *ce_state;
1935 	int write_index = 0;
1936 	bool index_updated;
1937 
1938 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1939 		ce_state = scn->ce_id_to_state[ce_id];
1940 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1941 		if (ce_state->state == CE_PENDING) {
1942 			write_index = ce_state->src_ring->write_index;
1943 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
1944 					write_index);
1945 			ce_state->state = CE_RUNNING;
1946 			index_updated = true;
1947 		} else {
1948 			index_updated = false;
1949 		}
1950 
1951 		if (ce_state->state == CE_PAUSED)
1952 			ce_state->state = CE_RUNNING;
1953 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1954 
1955 		if (index_updated)
1956 			hif_record_ce_desc_event(scn, ce_id,
1957 				RESUME_WRITE_INDEX_UPDATE,
1958 				NULL, NULL, write_index, 0);
1959 	}
1960 
1961 	return 0;
1962 }
1963 
1964 /**
1965  * ce_oom_recovery() - try to recover rx ce from oom condition
1966  * @context: CE_state of the CE with oom rx ring
1967  *
1968  * The executing work will continue to be rescheduled until
1969  * at least 1 descriptor is successfully posted to the rx ring.
1970  *
1971  * Return: None
1972  */
1973 static void ce_oom_recovery(void *context)
1974 {
1975 	struct CE_state *ce_state = context;
1976 	struct hif_softc *scn = ce_state->scn;
1977 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
1978 	struct HIF_CE_pipe_info *pipe_info =
1979 		&ce_softc->pipe_info[ce_state->id];
1980 
1981 	hif_post_recv_buffers_for_pipe(pipe_info);
1982 }
1983 
1984 #ifdef HIF_CE_DEBUG_DATA_BUF
1985 /**
1986  * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed to by
1987  * the CE descriptors.
1988  * Allocates HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each.
1989  * @scn: hif scn handle
1990  * @ce_id: Copy Engine Id
1991  *
1992  * Return: QDF_STATUS
1993  */
1994 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1995 {
1996 	struct hif_ce_desc_event *event = NULL;
1997 	struct hif_ce_desc_event *hist_ev = NULL;
1998 	uint32_t index = 0;
1999 
2000 	hist_ev =
2001 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
2002 
2003 	if (!hist_ev)
2004 		return QDF_STATUS_E_NOMEM;
2005 
2006 	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
2007 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
2008 		event = &hist_ev[index];
2009 		event->data =
2010 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
2011 		if (!event->data) {
2012 			hif_err_rl("ce debug data alloc failed");
2013 			scn->hif_ce_desc_hist.data_enable[ce_id] = false;
2014 			return QDF_STATUS_E_NOMEM;
2015 		}
2016 	}
2017 	return QDF_STATUS_SUCCESS;
2018 }
2019 
2020 /**
2021  * free_mem_ce_debug_hist_data() - Free mem of the data pointed to by
2022  * the CE descriptors.
2023  * @scn: hif scn handle
2024  * @ce_id: Copy Engine Id
2025  *
2026  * Return: None
2027  */
2028 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
2029 {
2030 	struct hif_ce_desc_event *event = NULL;
2031 	struct hif_ce_desc_event *hist_ev = NULL;
2032 	uint32_t index = 0;
2033 
2034 	hist_ev =
2035 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
2036 
2037 	if (!hist_ev)
2038 		return;
2039 
2040 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
2041 		event = &hist_ev[index];
2042 		if (event->data)
2043 			qdf_mem_free(event->data);
2044 		event->data = NULL;
2046 	}
2048 }
2049 #endif /* HIF_CE_DEBUG_DATA_BUF */
2050 
2051 #ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
2052 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2053 
2054 /* Define the variables below for crashscope parsing */
2055 struct hif_ce_desc_event *hif_ce_desc_history[CE_COUNT_MAX];
2056 uint32_t hif_ce_history_max = HIF_CE_HISTORY_MAX;
2057 
2058 /*
2059  * For a debug build, CE history is enabled for all CEs; for a perf
2060  * build (if CONFIG_SLUB_DEBUG_ON is N), it is enabled only for the
2061  * CE2 (WMI event), CE3 (WMI cmd) and CE7 histories.
2062  */
2063 #if defined(CONFIG_SLUB_DEBUG_ON)
2064 #define CE_DESC_HISTORY_BUFF_CNT  CE_COUNT_MAX
2065 #define IS_CE_DEBUG_ONLY_FOR_CRIT_CE  0
2066 #else
2067 /* CE2, CE3, CE7 */
2068 #define CE_DESC_HISTORY_BUFF_CNT  3
2069 #define IS_CE_DEBUG_ONLY_FOR_CRIT_CE (BIT(2) | BIT(3) | BIT(7))
2070 #endif
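/*
 * IS_CE_DEBUG_ONLY_FOR_CRIT_CE doubles as a boolean (non-zero only on
 * perf builds) and as a record of which CEs are tracked: BIT(2), BIT(3)
 * and BIT(7) for CE2, CE3 and CE7 respectively.
 */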
2071 struct hif_ce_desc_event
2072 	hif_ce_desc_history_buff[CE_DESC_HISTORY_BUFF_CNT][HIF_CE_HISTORY_MAX];
2073 
2074 static struct hif_ce_desc_event *
2075 	hif_ce_debug_history_buf_get(struct hif_softc *scn, unsigned int ce_id)
2076 {
2077 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2078 
2079 	hif_debug("get ce debug buffer ce_id %u, only_ce2/ce3=0x%x, idx=%u",
2080 		  ce_id, IS_CE_DEBUG_ONLY_FOR_CRIT_CE,
2081 		  ce_hist->ce_id_hist_map[ce_id]);
2082 	if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE &&
2083 	    (ce_id == CE_ID_2 || ce_id == CE_ID_3 || ce_id == CE_ID_7)) {
2084 		uint8_t idx = ce_hist->ce_id_hist_map[ce_id];
2085 
2086 		hif_ce_desc_history[ce_id] = hif_ce_desc_history_buff[idx];
2087 	} else {
2088 		hif_ce_desc_history[ce_id] =
2089 			hif_ce_desc_history_buff[ce_id];
2090 	}
2091 
2092 	return hif_ce_desc_history[ce_id];
2093 }
2094 
2095 /**
2096  * alloc_mem_ce_debug_history() - Allocate CE descriptor history
2097  * @scn: hif scn handle
2098  * @ce_id: Copy Engine Id
2099  * @src_nentries: source ce ring entries
2100  * Return: QDF_STATUS
2101  */
2102 static QDF_STATUS
2103 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
2104 			   uint32_t src_nentries)
2105 {
2106 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2107 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2108 
2109 	/* For a perf build, return directly for CEs other than CE2/CE3/CE7 */
2110 	if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE &&
2111 	    ce_id != CE_ID_2 &&
2112 	    ce_id != CE_ID_3 &&
2113 	    ce_id != CE_ID_7) {
2114 		ce_hist->enable[ce_id] = false;
2115 		ce_hist->data_enable[ce_id] = false;
2116 		return QDF_STATUS_SUCCESS;
2117 	}
2118 
2119 	ce_hist->hist_ev[ce_id] = hif_ce_debug_history_buf_get(scn, ce_id);
2120 	ce_hist->enable[ce_id] = true;
2121 
2122 	if (src_nentries) {
2123 		status = alloc_mem_ce_debug_hist_data(scn, ce_id);
2124 		if (status != QDF_STATUS_SUCCESS) {
2125 			ce_hist->enable[ce_id] = false;
2126 			ce_hist->hist_ev[ce_id] = NULL;
2127 			return status;
2128 		}
2129 	} else {
2130 		ce_hist->data_enable[ce_id] = false;
2131 	}
2132 
2133 	return QDF_STATUS_SUCCESS;
2134 }
2135 
2136 /**
2137  * free_mem_ce_debug_history() - Free CE descriptor history
2138  * @scn: hif scn handle
2139  * @ce_id: Copy Engine Id
2140  *
2141  * Return: None
2142  */
2143 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
2144 {
2145 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2146 
2147 	if (!ce_hist->enable[ce_id])
2148 		return;
2149 
2150 	ce_hist->enable[ce_id] = false;
2151 	if (ce_hist->data_enable[ce_id]) {
2152 		ce_hist->data_enable[ce_id] = false;
2153 		free_mem_ce_debug_hist_data(scn, ce_id);
2154 	}
2155 	ce_hist->hist_ev[ce_id] = NULL;
2156 }
2157 #else
2158 static inline QDF_STATUS
2159 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
2160 			   uint32_t src_nentries)
2161 {
2162 	return QDF_STATUS_SUCCESS;
2163 }
2164 
2165 static inline void
2166 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
2167 #endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
2168 #else
2169 #if defined(HIF_CE_DEBUG_DATA_BUF)
2170 
2171 static QDF_STATUS
2172 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
2173 			   uint32_t src_nentries)
2174 {
2175 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
2176 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
2177 
2178 	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
2179 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
2180 		return QDF_STATUS_E_NOMEM;
2181 	} else {
2182 		scn->hif_ce_desc_hist.enable[CE_id] = 1;
2183 		return QDF_STATUS_SUCCESS;
2184 	}
2185 }
2186 
2187 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
2188 {
2189 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2190 	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];
2191 
2192 	if (!hist_ev)
2193 		return;
2194 
2195 	if (ce_hist->data_enable[CE_id]) {
2196 		ce_hist->data_enable[CE_id] = false;
2197 		free_mem_ce_debug_hist_data(scn, CE_id);
2198 	}
2199 
2200 	ce_hist->enable[CE_id] = false;
2201 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
2202 	ce_hist->hist_ev[CE_id] = NULL;
2203 }
2204 
2205 #else
2206 
2207 static inline QDF_STATUS
2208 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
2209 			   uint32_t src_nentries)
2210 {
2211 	return QDF_STATUS_SUCCESS;
2212 }
2213 
2214 static inline void
2215 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
2216 #endif /* HIF_CE_DEBUG_DATA_BUF */
2217 #endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */
2218 
2219 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2220 /**
2221  * reset_ce_debug_history() - reset the index and ce id used for dumping the
2222  * CE records on the console using sysfs.
2223  * @scn: hif scn handle
2224  *
2225  * Return: None
2226  */
2227 static inline void reset_ce_debug_history(struct hif_softc *scn)
2228 {
2229 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2230 	/* Initialise the CE debug history sysfs interface inputs ce_id and
2231 	 * index. Disable data storing
2232 	 */
2233 	ce_hist->hist_index = 0;
2234 	ce_hist->hist_id = 0;
2235 }
2236 #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
2237 static inline void reset_ce_debug_history(struct hif_softc *scn) { }
2238 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
2239 
2240 void ce_enable_polling(void *cestate)
2241 {
2242 	struct CE_state *CE_state = (struct CE_state *)cestate;
2243 
2244 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
2245 		CE_state->timer_inited = true;
2246 }
2247 
2248 void ce_disable_polling(void *cestate)
2249 {
2250 	struct CE_state *CE_state = (struct CE_state *)cestate;
2251 
2252 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
2253 		CE_state->timer_inited = false;
2254 }
2255 
2256 /*
2257  * Initialize a Copy Engine based on caller-supplied attributes.
2258  * This may be called once to initialize both source and destination
2259  * rings or it may be called twice for separate source and destination
2260  * initialization. It may be that only one side or the other is
2261  * initialized by software/firmware.
2262  *
2263  * This should be called during the initialization sequence before
2264  * interrupts are enabled, so we don't have to worry about thread safety.
2265  */
2266 struct CE_handle *ce_init(struct hif_softc *scn,
2267 			  unsigned int CE_id, struct CE_attr *attr)
2268 {
2269 	struct CE_state *CE_state;
2270 	uint32_t ctrl_addr;
2271 	unsigned int nentries;
2272 	bool malloc_CE_state = false;
2273 	bool malloc_src_ring = false;
2274 	int status;
2275 	QDF_STATUS mem_status = QDF_STATUS_SUCCESS;
2276 
2277 	QDF_ASSERT(CE_id < scn->ce_count);
2278 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
2279 	CE_state = scn->ce_id_to_state[CE_id];
2280 
2281 	if (!CE_state) {
2282 		CE_state =
2283 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
2284 		if (!CE_state)
2285 			return NULL;
2286 
2287 		malloc_CE_state = true;
2288 		qdf_spinlock_create(&CE_state->ce_index_lock);
2289 #ifdef CE_TASKLET_SCHEDULE_ON_FULL
2290 		qdf_spinlock_create(&CE_state->ce_interrupt_lock);
2291 #endif
2292 
2293 		CE_state->id = CE_id;
2294 		CE_state->ctrl_addr = ctrl_addr;
2295 		CE_state->state = CE_RUNNING;
2296 		CE_state->attr_flags = attr->flags;
2297 	}
2298 	CE_state->scn = scn;
2299 	CE_state->service = ce_engine_service_reg;
2300 
2301 	qdf_atomic_init(&CE_state->rx_pending);
2302 	if (!attr) {
2303 		/* Already initialized; caller wants the handle */
2304 		return (struct CE_handle *)CE_state;
2305 	}
2306 
2307 	if (CE_state->src_sz_max)
2308 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
2309 	else
2310 		CE_state->src_sz_max = attr->src_sz_max;
2311 
2312 	ce_init_ce_desc_event_log(scn, CE_id,
2313 				  attr->src_nentries + attr->dest_nentries);
2314 
2315 	/* source ring setup */
2316 	nentries = attr->src_nentries;
2317 	if (nentries) {
2318 		struct CE_ring_state *src_ring;
2319 
2320 		nentries = roundup_pwr2(nentries);
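		/* e.g. a requested 100 entries is rounded up to 128 so
		 * that the nentries_mask wraparound arithmetic works
		 */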
2321 		if (CE_state->src_ring) {
2322 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
2323 		} else {
2324 			src_ring = CE_state->src_ring =
2325 				ce_alloc_ring_state(CE_state,
2326 						CE_RING_SRC,
2327 						nentries);
2328 			if (!src_ring) {
2329 				/* cannot allocate src ring. If the
2330 				 * CE_state is allocated locally free
2331 				 * CE_State and return error.
2332 				 */
2333 				hif_err("src ring has no mem");
2334 				if (malloc_CE_state) {
2335 					/* allocated CE_state locally */
2336 					qdf_mem_free(CE_state);
2337 					malloc_CE_state = false;
2338 				}
2339 				return NULL;
2340 			}
2341 			/* src ring allocated successfully; mark that the
2342 			 * src ring is allocated locally
2343 			 */
2344 			malloc_src_ring = true;
2345 
2346 			/*
2347 			 * Also allocate a shadow src ring in
2348 			 * regular mem to use for faster access.
2349 			 */
2350 			src_ring->shadow_base_unaligned =
2351 				qdf_mem_malloc(nentries *
2352 					       sizeof(struct CE_src_desc) +
2353 					       CE_DESC_RING_ALIGN);
2354 			if (!src_ring->shadow_base_unaligned)
2355 				goto error_no_dma_mem;
2356 
2357 			src_ring->shadow_base = (struct CE_src_desc *)
2358 				(((size_t) src_ring->shadow_base_unaligned +
2359 				CE_DESC_RING_ALIGN - 1) &
2360 				 ~(CE_DESC_RING_ALIGN - 1));
2361 
2362 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
2363 					       src_ring, attr);
2364 			if (status < 0)
2365 				goto error_target_access;
2366 			ce_ring_test_initial_indexes(CE_id, src_ring,
2367 						     "src_ring");
2368 		}
2369 	}
2370 
2371 	/* destination ring setup */
2372 	nentries = attr->dest_nentries;
2373 	if (nentries) {
2374 		struct CE_ring_state *dest_ring;
2375 
2376 		nentries = roundup_pwr2(nentries);
2377 		if (CE_state->dest_ring) {
2378 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
2379 		} else {
2380 			dest_ring = CE_state->dest_ring =
2381 				ce_alloc_ring_state(CE_state,
2382 						CE_RING_DEST,
2383 						nentries);
2384 			if (!dest_ring) {
2385 				/* cannot allocate dst ring. If the CE_state
2386 				 * or src ring is allocated locally free
2387 				 * CE_State and src ring and return error.
2388 				 */
2389 				hif_err("dest ring has no mem");
2390 				goto error_no_dma_mem;
2391 			}
2392 
2393 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
2394 				      dest_ring, attr);
2395 			if (status < 0)
2396 				goto error_target_access;
2397 
2398 			ce_ring_test_initial_indexes(CE_id, dest_ring,
2399 						     "dest_ring");
2400 
2401 			/* For srng based target, init status ring here */
2402 			if (ce_srng_based(CE_state->scn)) {
2403 				CE_state->status_ring =
2404 					ce_alloc_ring_state(CE_state,
2405 							CE_RING_STATUS,
2406 							nentries);
2407 				if (!CE_state->status_ring) {
2408 					/* Allocation failed. Cleanup */
2409 					qdf_mem_free(CE_state->dest_ring);
					CE_state->dest_ring = NULL;
2410 					if (malloc_src_ring) {
2411 						qdf_mem_free
2412 							(CE_state->src_ring);
2413 						CE_state->src_ring = NULL;
2414 						malloc_src_ring = false;
2415 					}
2416 					if (malloc_CE_state) {
2417 						/* allocated CE_state locally */
2418 						scn->ce_id_to_state[CE_id] =
2419 							NULL;
2420 						qdf_mem_free(CE_state);
2421 						malloc_CE_state = false;
2422 					}
2423 
2424 					return NULL;
2425 				}
2426 
2427 				status = ce_ring_setup(scn, CE_RING_STATUS,
2428 					       CE_id, CE_state->status_ring,
2429 					       attr);
2430 				if (status < 0)
2431 					goto error_target_access;
2432 
2433 			}
2434 
2435 			/* epping */
2436 			/* poll timer */
2437 			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
2438 				qdf_timer_init(scn->qdf_dev,
2439 						&CE_state->poll_timer,
2440 						ce_poll_timeout,
2441 						CE_state,
2442 						QDF_TIMER_TYPE_WAKE_APPS);
2443 				ce_enable_polling(CE_state);
2444 				qdf_timer_mod(&CE_state->poll_timer,
2445 						      CE_POLL_TIMEOUT);
2446 			}
2447 		}
2448 	}
2449 
2450 	if (!ce_srng_based(scn)) {
2451 		/* Enable CE error interrupts */
2452 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2453 			goto error_target_access;
2454 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
2455 		if (Q_TARGET_ACCESS_END(scn) < 0)
2456 			goto error_target_access;
2457 	}
2458 
2459 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
2460 			ce_oom_recovery, CE_state);
2461 
2462 	/* update the htt_data attribute */
2463 	ce_mark_datapath(CE_state);
2464 	scn->ce_id_to_state[CE_id] = CE_state;
2465 
2466 	mem_status = alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries);
2467 	if (mem_status != QDF_STATUS_SUCCESS)
2468 		goto error_target_access;
2469 
2470 	return (struct CE_handle *)CE_state;
2471 
2472 error_target_access:
2473 error_no_dma_mem:
2474 	ce_fini((struct CE_handle *)CE_state);
2475 	return NULL;
2476 }
2477 
2478 /**
2479  * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
2480  * @hif_ctx: HIF Context
2481  *
2482  * API to check if polling is enabled on all CEs. Returns true when polling
2483  * is enabled on all CEs.
2484  *
2485  * Return: bool
2486  */
2487 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
2488 {
2489 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2490 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2491 	struct CE_attr *attr;
2492 	int id;
2493 
2494 	for (id = 0; id < scn->ce_count; id++) {
2495 		attr = &hif_state->host_ce_config[id];
2496 		if (attr && (attr->dest_nentries) &&
2497 		    !(attr->flags & CE_ATTR_ENABLE_POLL))
2498 			return false;
2499 	}
2500 	return true;
2501 }
2502 qdf_export_symbol(hif_is_polled_mode_enabled);
2503 
2504 static int hif_get_pktlog_ce_num(struct hif_softc *scn)
2505 {
2506 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2507 	int id;
2508 
2509 	for (id = 0; id < hif_state->sz_tgt_svc_map; id++) {
2510 		if (hif_state->tgt_svc_map[id].service_id ==  PACKET_LOG_SVC)
2511 			return hif_state->tgt_svc_map[id].pipenum;
2512 	}
2513 	return -EINVAL;
2514 }
2515 
2516 #ifdef WLAN_FEATURE_FASTPATH
2517 /**
2518  * hif_enable_fastpath() - Update that we have enabled fastpath mode
2519  * @hif_ctx: HIF context
2520  *
2521  * For use in data path
2522  *
2523  * Return: void
2524  */
2525 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
2526 {
2527 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2528 
2529 	if (ce_srng_based(scn)) {
2530 		hif_warn("srng rings do not support fastpath");
2531 		return;
2532 	}
2533 	hif_debug("Enabling fastpath mode");
2534 	scn->fastpath_mode_on = true;
2535 }
2536 
2537 /**
2538  * hif_is_fastpath_mode_enabled() - API to query if fastpath mode is enabled
2539  * @hif_ctx: HIF Context
2540  *
2541  * For use in data path to skip HTC
2542  *
2543  * Return: bool
2544  */
2545 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
2546 {
2547 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2548 
2549 	return scn->fastpath_mode_on;
2550 }
2551 
2552 /**
2553  * hif_get_ce_handle() - API to get CE handle for FastPath mode
2554  * @hif_ctx: HIF Context
2555  * @id: CopyEngine Id
2556  *
2557  * API to return CE handle for fastpath mode
2558  *
2559  * Return: CE handle
2560  */
2561 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
2562 {
2563 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2564 
2565 	return scn->ce_id_to_state[id];
2566 }
2567 qdf_export_symbol(hif_get_ce_handle);
2568 
2569 /**
2570  * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup
2571  * @ce_hdl: Copy engine handle
2572  *
2573  * No processing is required inside this function. Using an assert,
2574  * it makes sure that the TX CE has been processed completely.
2575  *
2576  * This is called while dismantling CE structures. No other thread
2577  * should be using these structures while dismantling is occurring
2578  * therefore no locking is needed.
2579  *
2580  * Return: none
2581  */
2582 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
2583 {
2584 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
2585 	struct CE_ring_state *src_ring = ce_state->src_ring;
2586 	struct hif_softc *sc = ce_state->scn;
2587 	uint32_t sw_index, write_index;
2588 
2589 	if (hif_is_nss_wifi_enabled(sc))
2590 		return;
2591 
2592 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
2593 		hif_debug("Fastpath mode ON, Cleaning up HTT Tx CE");
2594 		sw_index = src_ring->sw_index;
2595 		write_index = src_ring->write_index;
2596 
2597 		/* At this point Tx CE should be clean */
2598 		qdf_assert_always(sw_index == write_index);
2599 	}
2600 }
2601 
2602 /**
2603  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
2604  * @ce_hdl: Handle to CE
2605  *
2606  * These buffers are never allocated on the fly, but
2607  * are allocated only once during HIF start and freed
2608  * only once during HIF stop.
2609  * NOTE:
2610  * The assumption here is there is no in-flight DMA in progress
2611  * currently, so that buffers can be freed up safely.
2612  *
2613  * Return: NONE
2614  */
2615 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
2616 {
2617 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
2618 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
2619 	qdf_nbuf_t nbuf;
2620 	int i;
2621 
2622 	if (!ce_state->scn->fastpath_mode_on)
2623 		return;
2624 
2625 	if (!ce_state->htt_rx_data)
2626 		return;
2627 
2628 	/*
2629 	 * When fastpath mode is on, this datapath CE is kept completely
2630 	 * full: unlike other CEs, no blank space is left to distinguish
2631 	 * an empty queue from a full one. So free all the entries.
2633 	 */
2634 	for (i = 0; i < dst_ring->nentries; i++) {
2635 		nbuf = dst_ring->per_transfer_context[i];
2636 
2637 		/*
2638 		 * The reasons for doing this check are:
2639 		 * 1) Protect against calling cleanup before allocating buffers
2640 		 * 2) In a corner case, fastpath_mode_on may be set, but we
2641 		 *    could have a partially filled ring, because of a memory
2642 		 *    allocation failure in the middle of allocating ring.
2643 		 *    This check accounts for that case, checking
2644 		 *    fastpath_mode_on flag or started flag would not have
2645 		 *    covered that case. This is not in performance path,
2646 		 *    so OK to do this.
2647 		 */
2648 		if (nbuf) {
2649 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
2650 					      QDF_DMA_FROM_DEVICE);
2651 			qdf_nbuf_free(nbuf);
2652 		}
2653 	}
2654 }
2655 
2656 /**
2657  * hif_update_fastpath_recv_bufs_cnt() - Bump Rx buf count for datapath CEs
2658  * @scn: HIF handle
2659  *
2660  * Datapath Rx CEs are a special case, where we reuse all the message buffers.
2661  * Hence we have to post all the entries in the pipe, even at the beginning,
2662  * unlike other CE pipes where one less than dest_nentries are filled at
2663  * the beginning.
2664  *
2665  * Return: None
2666  */
2667 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
2668 {
2669 	int pipe_num;
2670 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2671 
2672 	if (!scn->fastpath_mode_on)
2673 		return;
2674 
2675 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2676 		struct HIF_CE_pipe_info *pipe_info =
2677 			&hif_state->pipe_info[pipe_num];
2678 		struct CE_state *ce_state =
2679 			scn->ce_id_to_state[pipe_info->pipe_num];
2680 
2681 		if (ce_state->htt_rx_data)
2682 			atomic_inc(&pipe_info->recv_bufs_needed);
2683 	}
2684 }
2685 #else
2686 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
2687 {
2688 }
2689 
2690 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
2691 {
2692 	return false;
2693 }
2694 #endif /* WLAN_FEATURE_FASTPATH */
2695 
2696 void ce_fini(struct CE_handle *copyeng)
2697 {
2698 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2699 	unsigned int CE_id = CE_state->id;
2700 	struct hif_softc *scn = CE_state->scn;
2701 	uint32_t desc_size;
2702 
2703 	bool inited = CE_state->timer_inited;
2704 	CE_state->state = CE_UNUSED;
2705 	scn->ce_id_to_state[CE_id] = NULL;
2706 	/* Set the flag to false first to stop processing in ce_poll_timeout */
2707 	ce_disable_polling(CE_state);
2708 
2709 	qdf_lro_deinit(CE_state->lro_data);
2710 
2711 	if (CE_state->src_ring) {
2712 		/* Cleanup the datapath Tx ring */
2713 		ce_h2t_tx_ce_cleanup(copyeng);
2714 
2715 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
2716 		if (CE_state->src_ring->shadow_base_unaligned)
2717 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
2718 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
2719 			ce_free_desc_ring(scn, CE_state->id,
2720 					  CE_state->src_ring,
2721 					  desc_size);
2722 		ce_srng_cleanup(scn, CE_state, CE_RING_SRC);
2723 		qdf_mem_free(CE_state->src_ring);
2724 	}
2725 	if (CE_state->dest_ring) {
2726 		/* Cleanup the datapath Rx ring */
2727 		ce_t2h_msg_ce_cleanup(copyeng);
2728 
2729 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
2730 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
2731 			ce_free_desc_ring(scn, CE_state->id,
2732 					  CE_state->dest_ring,
2733 					  desc_size);
2734 		ce_srng_cleanup(scn, CE_state, CE_RING_DEST);
2735 		qdf_mem_free(CE_state->dest_ring);
2736 
2737 		/* epping */
2738 		if (inited)
2739 			qdf_timer_free(&CE_state->poll_timer);
2741 	}
2742 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
2743 		/* Cleanup the datapath Tx ring */
2744 		ce_h2t_tx_ce_cleanup(copyeng);
2745 
2746 		if (CE_state->status_ring->shadow_base_unaligned)
2747 			qdf_mem_free(
2748 				CE_state->status_ring->shadow_base_unaligned);
2749 
2750 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
2751 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
2752 			ce_free_desc_ring(scn, CE_state->id,
2753 					  CE_state->status_ring,
2754 					  desc_size);
2755 		ce_srng_cleanup(scn, CE_state, CE_RING_STATUS);
2756 		qdf_mem_free(CE_state->status_ring);
2757 	}
2758 
2759 	free_mem_ce_debug_history(scn, CE_id);
2760 	reset_ce_debug_history(scn);
2761 	ce_deinit_ce_desc_event_log(scn, CE_id);
2762 
2763 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
2764 #ifdef CE_TASKLET_SCHEDULE_ON_FULL
2765 	qdf_spinlock_destroy(&CE_state->ce_interrupt_lock);
2766 #endif
2767 	qdf_mem_free(CE_state);
2768 }
2769 
2770 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
2771 {
2772 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2773 
2774 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
2775 		  sizeof(hif_state->msg_callbacks_pending));
2776 	qdf_mem_zero(&hif_state->msg_callbacks_current,
2777 		  sizeof(hif_state->msg_callbacks_current));
2778 }
2779 
2780 /* Send the first nbytes bytes of the buffer */
2781 QDF_STATUS
2782 hif_send_head(struct hif_opaque_softc *hif_ctx,
2783 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
2784 	      qdf_nbuf_t nbuf, unsigned int data_attr)
2785 {
2786 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2787 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2788 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2789 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2790 	int bytes = nbytes, nfrags = 0;
2791 	struct ce_sendlist sendlist;
2792 	int i = 0;
2793 	QDF_STATUS status;
2794 	unsigned int mux_id = 0;
2795 
2796 	if (nbytes > qdf_nbuf_len(nbuf)) {
2797 		hif_err("nbytes: %d nbuf_len: %d", nbytes,
2798 		       (uint32_t)qdf_nbuf_len(nbuf));
2799 		QDF_ASSERT(0);
2800 	}
2801 
2802 	transfer_id =
2803 		(mux_id & MUX_ID_MASK) |
2804 		(transfer_id & TRANSACTION_ID_MASK);
2805 	data_attr &= DESC_DATA_FLAG_MASK;
2806 	/*
2807 	 * The common case involves sending multiple fragments within a
2808 	 * single download (the tx descriptor and the tx frame header).
2809 	 * So, optimize for the case of multiple fragments by not even
2810 	 * checking whether it's necessary to use a sendlist.
2811 	 * The overhead of using a sendlist for a single buffer download
2812 	 * is not a big deal, since it happens rarely (for WMI messages).
2813 	 */
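	/*
	 * Illustration: a typical two-fragment download (tx descriptor +
	 * frame header) results in two ce_sendlist_buf_add() calls in the
	 * loop below and consumes two send slots on the pipe.
	 */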
2814 	ce_sendlist_init(&sendlist);
2815 	do {
2816 		qdf_dma_addr_t frag_paddr;
2817 		int frag_bytes;
2818 
2819 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
2820 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
2821 		/*
2822 		 * Clear the packet offset for all but the first CE desc.
2823 		 */
2824 		if (i++ > 0)
2825 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
2826 
2827 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
2828 				    frag_bytes >
2829 				    bytes ? bytes : frag_bytes,
2830 				    qdf_nbuf_get_frag_is_wordstream
2831 				    (nbuf,
2832 				    nfrags) ? 0 :
2833 				    CE_SEND_FLAG_SWAP_DISABLE,
2834 				    data_attr);
2835 		if (status != QDF_STATUS_SUCCESS) {
2836 			hif_err("frag_num: %d larger than limit (status=%d)",
2837 			       nfrags, status);
2838 			return status;
2839 		}
2840 		bytes -= frag_bytes;
2841 		nfrags++;
2842 	} while (bytes > 0);
2843 
2844 	/* Make sure we have resources to handle this request */
2845 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2846 	if (pipe_info->num_sends_allowed < nfrags) {
2847 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2848 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
2849 		return QDF_STATUS_E_RESOURCES;
2850 	}
2851 	pipe_info->num_sends_allowed -= nfrags;
2852 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2853 
2854 	if (qdf_unlikely(!ce_hdl)) {
2855 		hif_err("CE handle is null");
2856 		return QDF_STATUS_E_FAILURE;
2857 	}
2858 
2859 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
2860 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
2861 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
2862 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
2863 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
2864 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
2865 
2866 	return status;
2867 }
2868 
2869 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
2870 								int force)
2871 {
2872 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2873 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2874 
2875 	if (!force) {
2876 		int resources;
2877 		/*
2878 		 * Decide whether to actually poll for completions, or just
2879 		 * wait for a later chance. If there seem to be plenty of
2880 		 * resources left, then just wait, since checking involves
2881 		 * reading a CE register, which is a relatively expensive
2882 		 * operation.
2883 		 */
2884 		resources = hif_get_free_queue_number(hif_ctx, pipe);
2885 		/*
2886 		 * If at least 50% of the total resources are still available,
2887 		 * don't bother checking again yet.
2888 		 */
2889 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
2890 									 1))
2891 			return;
2892 	}
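	/*
	 * Reap completed sends. With ATH_11AC_TXCOMPACT only the send side
	 * is reaped; otherwise the full per-engine service (send and
	 * receive completions) runs.
	 */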
2893 #ifdef ATH_11AC_TXCOMPACT
2894 	ce_per_engine_servicereap(scn, pipe);
2895 #else
2896 	ce_per_engine_service(scn, pipe);
2897 #endif
2898 }
2899 
2900 #if defined(CE_TASKLET_SCHEDULE_ON_FULL) && defined(CE_TASKLET_DEBUG_ENABLE)
2901 #define CE_RING_FULL_THRESHOLD_TIME 3000000
2902 #define CE_RING_FULL_THRESHOLD 1024
2903 /* This function is called from the htc_send path. If there is no resource to
2904  * send a packet via HTC, check whether interrupts have gone unprocessed on
2905  * that CE for the last 3 seconds; if so, schedule a tasklet to reap available
2906  * entries. Also schedule the tasklet if the queue has reached 1024 entries
2907  * within 3 seconds.
2908  */
2909 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2910 {
2911 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2912 	int64_t diff_time = qdf_get_log_timestamp_usecs() -
2913 			hif_state->stats.tasklet_sched_entry_ts[pipe];
2914 
2915 	hif_state->stats.ce_ring_full_count[pipe]++;
2916 
2917 	if (diff_time >= CE_RING_FULL_THRESHOLD_TIME ||
2918 	    hif_state->stats.ce_ring_full_count[pipe] >=
2919 	    CE_RING_FULL_THRESHOLD) {
2920 		hif_state->stats.ce_ring_full_count[pipe] = 0;
2921 		hif_state->stats.ce_manual_tasklet_schedule_count[pipe]++;
2922 		hif_state->stats.ce_last_manual_tasklet_schedule_ts[pipe] =
2923 			qdf_get_log_timestamp_usecs();
2924 		ce_dispatch_interrupt(pipe, &hif_state->tasklets[pipe]);
2925 	}
2926 }
2927 #else
2928 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2929 {
2930 }
2931 #endif
2932 
2933 uint16_t
2934 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2935 {
2936 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2937 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2938 	uint16_t rv;
2939 
2940 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2941 	rv = pipe_info->num_sends_allowed;
2942 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2943 	return rv;
2944 }
2945 
2946 /* Called by lower (CE) layer when a send to Target completes. */
2947 static void
2948 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
2949 		     void *transfer_context, qdf_dma_addr_t CE_data,
2950 		     unsigned int nbytes, unsigned int transfer_id,
2951 		     unsigned int sw_index, unsigned int hw_index,
2952 		     unsigned int toeplitz_hash_result)
2953 {
2954 	struct HIF_CE_pipe_info *pipe_info =
2955 		(struct HIF_CE_pipe_info *)ce_context;
2956 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
2957 	struct hif_msg_callbacks *msg_callbacks =
2958 		&pipe_info->pipe_callbacks;
2959 
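	/* Handle the completion passed in, then drain any further completed
	 * sends in the same pass so each invocation batches work.
	 */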
2960 	do {
2961 		/*
2962 		 * The upper layer callback will be triggered
2963 		 * when the last fragment is completed.
2964 		 */
2965 		if (transfer_context != CE_SENDLIST_ITEM_CTXT)
2966 			msg_callbacks->txCompletionHandler(
2967 				msg_callbacks->Context,
2968 				transfer_context, transfer_id,
2969 				toeplitz_hash_result);
2970 
2971 		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2972 		pipe_info->num_sends_allowed++;
2973 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2974 	} while (ce_completed_send_next(copyeng,
2975 			&ce_context, &transfer_context,
2976 			&CE_data, &nbytes, &transfer_id,
2977 			&sw_idx, &hw_idx,
2978 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
2979 }
2980 
2981 /**
2982  * hif_ce_do_recv(): send message from copy engine to upper layers
2983  * @msg_callbacks: structure containing callback and callback context
2984  * @netbuff: skb containing message
2985  * @nbytes: number of bytes in the message
2986  * @pipe_info: used for the pipe_number info
2987  *
2988  * Checks the packet length, configures the length in the netbuff,
2989  * and calls the upper layer callback.
2990  *
2991  * return: None
2992  */
2993 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
2994 		qdf_nbuf_t netbuf, int nbytes,
2995 		struct HIF_CE_pipe_info *pipe_info) {
2996 	if (nbytes <= pipe_info->buf_sz) {
2997 		qdf_nbuf_set_pktlen(netbuf, nbytes);
2998 		msg_callbacks->
2999 			rxCompletionHandler(msg_callbacks->Context,
3000 					netbuf, pipe_info->pipe_num);
3001 	} else {
3002 		hif_err("Invalid Rx msg buf: %pK nbytes: %d", netbuf, nbytes);
3003 		qdf_nbuf_free(netbuf);
3004 	}
3005 }
3006 
3007 /* Called by lower (CE) layer when data is received from the Target. */
3008 static void
3009 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
3010 		     void *transfer_context, qdf_dma_addr_t CE_data,
3011 		     unsigned int nbytes, unsigned int transfer_id,
3012 		     unsigned int flags)
3013 {
3014 	struct HIF_CE_pipe_info *pipe_info =
3015 		(struct HIF_CE_pipe_info *)ce_context;
3016 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
3017 	struct CE_state *ce_state = (struct CE_state *) copyeng;
3018 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3019 	struct hif_msg_callbacks *msg_callbacks = &pipe_info->pipe_callbacks;
3020 
3021 	do {
3022 		hif_rtpm_mark_last_busy(HIF_RTPM_ID_CE);
3023 		qdf_nbuf_unmap_single(scn->qdf_dev,
3024 				      (qdf_nbuf_t) transfer_context,
3025 				      QDF_DMA_FROM_DEVICE);
3026 
3027 		atomic_inc(&pipe_info->recv_bufs_needed);
3028 		hif_post_recv_buffers_for_pipe(pipe_info);
3029 		if (scn->target_status == TARGET_STATUS_RESET)
3030 			qdf_nbuf_free(transfer_context);
3031 		else
3032 			hif_ce_do_recv(msg_callbacks, transfer_context,
3033 				nbytes, pipe_info);
3034 
3035 		/* Set up force_break flag if num of receives reaches
3036 		 * MAX_NUM_OF_RECEIVES
3037 		 */
3038 		ce_state->receive_count++;
3039 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
3040 			ce_state->force_break = 1;
3041 			break;
3042 		}
3043 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
3044 					&CE_data, &nbytes, &transfer_id,
3045 					&flags) == QDF_STATUS_SUCCESS);
3046 }
3048 
3049 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
3050 
3051 void
3052 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
3053 	      struct hif_msg_callbacks *callbacks)
3054 {
3055 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
3056 
3057 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
3058 	spin_lock_init(&pcie_access_log_lock);
3059 #endif
3060 	/* Save callbacks for later installation */
3061 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
3062 		 sizeof(hif_state->msg_callbacks_pending));
3063 
3064 }
3065 
3066 static int hif_completion_thread_startup_by_ceid(struct HIF_CE_state *hif_state,
3067 						 int pipe_num)
3068 {
3069 	struct CE_attr attr;
3070 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3071 	struct hif_msg_callbacks *hif_msg_callbacks =
3072 		&hif_state->msg_callbacks_current;
3073 	struct HIF_CE_pipe_info *pipe_info;
3074 	struct CE_state *ce_state;
3075 
3076 	if (pipe_num >= CE_COUNT_MAX)
3077 		return -EINVAL;
3078 
3079 	pipe_info = &hif_state->pipe_info[pipe_num];
3080 	ce_state = scn->ce_id_to_state[pipe_num];
3081 
3082 	if (!hif_msg_callbacks ||
3083 	    !hif_msg_callbacks->rxCompletionHandler ||
3084 	    !hif_msg_callbacks->txCompletionHandler) {
3085 		hif_err("%s: no completion handler registered", __func__);
3086 		return -EFAULT;
3087 	}
3088 
3089 	attr = hif_state->host_ce_config[pipe_num];
3090 	if (attr.src_nentries) {
3091 		/* pipe used to send to target */
3092 		hif_debug("%s: pipe_num:%d pipe_info:0x%pK\n",
3093 			  __func__, pipe_num, pipe_info);
3094 		ce_send_cb_register(pipe_info->ce_hdl,
3095 				    hif_pci_ce_send_done, pipe_info,
3096 				    attr.flags & CE_ATTR_DISABLE_INTR);
3097 		pipe_info->num_sends_allowed = attr.src_nentries - 1;
3098 	}
3099 	if (attr.dest_nentries) {
3100 		hif_debug("%s: pipe_num:%d pipe_info:0x%pK\n",
3101 			  __func__, pipe_num, pipe_info);
3102 		/* pipe used to receive from target */
3103 		ce_recv_cb_register(pipe_info->ce_hdl,
3104 				    hif_pci_ce_recv_data, pipe_info,
3105 				    attr.flags & CE_ATTR_DISABLE_INTR);
3106 	}
3107 
3108 	if (attr.src_nentries)
3109 		qdf_spinlock_create(&pipe_info->completion_freeq_lock);
3110 
3111 	if (!(ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND))
3112 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
3113 			     sizeof(pipe_info->pipe_callbacks));
3114 
3115 	return 0;
3116 }
3117 
3118 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
3119 {
3120 	struct CE_handle *ce_diag = hif_state->ce_diag;
3121 	int pipe_num, ret;
3122 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3123 
3124 	/* daemonize("hif_compl_thread"); */
3125 
3126 	if (scn->ce_count == 0) {
3127 		hif_err("ce_count is 0");
3128 		return -EINVAL;
3129 	}
3130 
3132 	A_TARGET_ACCESS_LIKELY(scn);
3133 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3134 		struct HIF_CE_pipe_info *pipe_info;
3135 
3136 		pipe_info = &hif_state->pipe_info[pipe_num];
3137 		if (pipe_info->ce_hdl == ce_diag)
3138 			continue;       /* Handle Diagnostic CE specially */
3139 
3140 		ret = hif_completion_thread_startup_by_ceid(hif_state,
3141 							    pipe_num);
3142 		if (ret < 0)
3143 			return ret;
3144 
3145 	}
3146 
3147 	A_TARGET_ACCESS_UNLIKELY(scn);
3148 	return 0;
3149 }
3150 
3151 /*
3152  * Install pending msg callbacks.
3153  *
3154  * TBDXXX: This hack is needed because upper layers install msg callbacks
3155  * for use with HTC before BMI is done; yet this HIF implementation
3156  * needs to continue to use BMI msg callbacks. Really, upper layers
3157  * should not register HTC callbacks until AFTER BMI phase.
3158  */
3159 static void hif_msg_callbacks_install(struct hif_softc *scn)
3160 {
3161 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3162 
3163 	qdf_mem_copy(&hif_state->msg_callbacks_current,
3164 		 &hif_state->msg_callbacks_pending,
3165 		 sizeof(hif_state->msg_callbacks_pending));
3166 }
3167 
3168 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
3169 							uint8_t *DLPipe)
3170 {
3171 	int ul_is_polled, dl_is_polled;
3172 
3173 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
3174 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
3175 }
3176 
3177 /**
3178  * hif_dump_pipe_debug_count() - Log error count
3179  * @scn: hif_softc pointer.
3180  *
3181  * Output the pipe error counts of each pipe to log file
3182  *
3183  * Return: N/A
3184  */
3185 void hif_dump_pipe_debug_count(struct hif_softc *scn)
3186 {
3187 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3188 	int pipe_num;
3189 
3190 	if (!hif_state) {
3191 		hif_err("hif_state is NULL");
3192 		return;
3193 	}
3194 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3195 		struct HIF_CE_pipe_info *pipe_info;
3196 
3197 	pipe_info = &hif_state->pipe_info[pipe_num];
3198 
3199 	if (pipe_info->nbuf_alloc_err_count > 0 ||
3200 			pipe_info->nbuf_dma_err_count > 0 ||
3201 			pipe_info->nbuf_ce_enqueue_err_count)
3202 		hif_err(
3203 			"pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
3204 			pipe_info->pipe_num,
3205 			atomic_read(&pipe_info->recv_bufs_needed),
3206 			pipe_info->nbuf_alloc_err_count,
3207 			pipe_info->nbuf_dma_err_count,
3208 			pipe_info->nbuf_ce_enqueue_err_count);
3209 	}
3210 }
3211 
3212 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
3213 					  void *nbuf, uint32_t *error_cnt,
3214 					  enum hif_ce_event_type failure_type,
3215 					  const char *failure_type_string)
3216 {
3217 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
3218 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
3219 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
3220 	int ce_id = CE_state->id;
3221 	uint32_t error_cnt_tmp;
3222 
3223 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3224 	error_cnt_tmp = ++(*error_cnt);
3225 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3226 	hif_debug("pipe_num: %d, needed: %d, err_cnt: %u, fail_type: %s",
3227 		  pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
3228 		  failure_type_string);
3229 	hif_record_ce_desc_event(scn, ce_id, failure_type,
3230 				 NULL, nbuf, bufs_needed_tmp, 0);
3231 	/* if we fail to allocate the last buffer for an rx pipe,
3232 	 *	there is no trigger to refill the ce and we will
3233 	 *	eventually crash
3234 	 */
3235 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1 ||
3236 	    (ce_srng_based(scn) &&
3237 	     bufs_needed_tmp == CE_state->dest_ring->nentries - 2))
3238 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
3239 }
3241 
3245 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
3246 {
3247 	struct CE_handle *ce_hdl;
3248 	qdf_size_t buf_sz;
3249 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
3250 	QDF_STATUS status;
3251 	uint32_t bufs_posted = 0;
3252 	unsigned int ce_id;
3253 
3254 	buf_sz = pipe_info->buf_sz;
3255 	if (buf_sz == 0) {
3256 		/* Unused Copy Engine */
3257 		return QDF_STATUS_SUCCESS;
3258 	}
3259 
3260 	ce_hdl = pipe_info->ce_hdl;
3261 	ce_id = ((struct CE_state *)ce_hdl)->id;
3262 
3263 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3264 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
3265 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
3266 		qdf_nbuf_t nbuf;
3267 
3268 		atomic_dec(&pipe_info->recv_bufs_needed);
3269 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3270 
3271 		hif_record_ce_desc_event(scn, ce_id,
3272 					 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL,
3273 					 0, 0);
3274 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
3275 		if (!nbuf) {
3276 			hif_post_recv_buffers_failure(pipe_info, nbuf,
3277 					&pipe_info->nbuf_alloc_err_count,
3278 					 HIF_RX_NBUF_ALLOC_FAILURE,
3279 					"HIF_RX_NBUF_ALLOC_FAILURE");
3280 			return QDF_STATUS_E_NOMEM;
3281 		}
3282 
3283 		hif_record_ce_desc_event(scn, ce_id,
3284 					 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf,
3285 					 0, 0);
3286 		/*
3287 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
3288 		 * CE_data = dma_map_single(dev, data, buf_sz, );
3289 		 * DMA_FROM_DEVICE);
3290 		 */
3291 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
3292 					    QDF_DMA_FROM_DEVICE);
3293 
3294 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
3295 			hif_post_recv_buffers_failure(pipe_info, nbuf,
3296 					&pipe_info->nbuf_dma_err_count,
3297 					 HIF_RX_NBUF_MAP_FAILURE,
3298 					"HIF_RX_NBUF_MAP_FAILURE");
3299 			qdf_nbuf_free(nbuf);
3300 			return status;
3301 		}
3302 
3303 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
3304 		hif_record_ce_desc_event(scn, ce_id,
3305 					 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf,
3306 					 0, 0);
3307 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
3308 					       buf_sz, DMA_FROM_DEVICE);
3309 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
3310 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
3311 			hif_post_recv_buffers_failure(pipe_info, nbuf,
3312 					&pipe_info->nbuf_ce_enqueue_err_count,
3313 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
3314 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
3315 
3316 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
3317 						QDF_DMA_FROM_DEVICE);
3318 			qdf_nbuf_free(nbuf);
3319 			return status;
3320 		}
3321 
3322 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3323 		bufs_posted++;
3324 	}
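	/* Successful posts forgive earlier transient failures: each error
	 * counter below is reduced by the number of buffers just posted,
	 * floored at zero.
	 */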
3325 	pipe_info->nbuf_alloc_err_count =
3326 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
3327 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
3328 	pipe_info->nbuf_dma_err_count =
3329 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
3330 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
3331 	pipe_info->nbuf_ce_enqueue_err_count =
3332 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
3333 		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
3334 
3335 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3336 
3337 	return QDF_STATUS_SUCCESS;
3338 }
3339 
3340 /*
3341  * Try to post all desired receive buffers for all pipes.
3342  * Returns QDF_STATUS_SUCCESS for a non-fastpath rx copy engine, as
3343  * oom_allocation_work will be scheduled to recover any failures;
3344  * returns an error status if unable to completely replenish
3345  * receive buffers for a fastpath rx copy engine.
3346  */
3347 static QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
3348 {
3349 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3350 	int pipe_num;
3351 	struct CE_state *ce_state = NULL;
3352 	QDF_STATUS qdf_status;
3353 
3354 	A_TARGET_ACCESS_LIKELY(scn);
3355 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3356 		struct HIF_CE_pipe_info *pipe_info;
3357 
3358 		ce_state = scn->ce_id_to_state[pipe_num];
3359 		pipe_info = &hif_state->pipe_info[pipe_num];
3360 
3361 		if (!ce_state)
3362 			continue;
3363 
3364 		/* Do not init dynamic CEs during initial load */
3365 		if (ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND)
3366 			continue;
3367 
3368 		if (hif_is_nss_wifi_enabled(scn) &&
3369 		    ce_state && (ce_state->htt_rx_data))
3370 			continue;
3371 
3372 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
3373 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
3374 			ce_state->htt_rx_data &&
3375 			scn->fastpath_mode_on) {
3376 			A_TARGET_ACCESS_UNLIKELY(scn);
3377 			return qdf_status;
3378 		}
3379 	}
3380 
3381 	A_TARGET_ACCESS_UNLIKELY(scn);
3382 
3383 	return QDF_STATUS_SUCCESS;
3384 }
3385 
3386 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
3387 {
3388 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3389 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3390 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
3391 
3392 	hif_update_fastpath_recv_bufs_cnt(scn);
3393 
3394 	hif_msg_callbacks_install(scn);
3395 
3396 	if (hif_completion_thread_startup(hif_state))
3397 		return QDF_STATUS_E_FAILURE;
3398 
3399 	/* enable buffer cleanup */
3400 	hif_state->started = true;
3401 
3402 	/* Post buffers once to start things off. */
3403 	qdf_status = hif_post_recv_buffers(scn);
3404 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
3405 		/* cleanup is done in hif_ce_disable */
3406 		hif_err("Failed to post buffers");
3407 		return qdf_status;
3408 	}
3409 
3410 	return qdf_status;
3411 }
3412 
3413 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
3414 {
3415 	struct hif_softc *scn;
3416 	struct CE_handle *ce_hdl;
3417 	uint32_t buf_sz;
3418 	struct HIF_CE_state *hif_state;
3419 	qdf_nbuf_t netbuf;
3420 	qdf_dma_addr_t CE_data;
3421 	void *per_CE_context;
3422 
3423 	buf_sz = pipe_info->buf_sz;
3424 	/* Unused Copy Engine */
3425 	if (buf_sz == 0)
3426 		return;
3427 
3429 	hif_state = pipe_info->HIF_CE_state;
3430 	if (!hif_state->started)
3431 		return;
3432 
3433 	scn = HIF_GET_SOFTC(hif_state);
3434 	ce_hdl = pipe_info->ce_hdl;
3435 
3436 	if (!scn->qdf_dev)
3437 		return;
3438 	while (ce_revoke_recv_next
3439 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
3440 			&CE_data) == QDF_STATUS_SUCCESS) {
3441 		if (netbuf) {
3442 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
3443 					      QDF_DMA_FROM_DEVICE);
3444 			qdf_nbuf_free(netbuf);
3445 		}
3446 	}
3447 }
3448 
3449 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
3450 {
3451 	struct CE_handle *ce_hdl;
3452 	struct HIF_CE_state *hif_state;
3453 	struct hif_softc *scn;
3454 	qdf_nbuf_t netbuf;
3455 	void *per_CE_context;
3456 	qdf_dma_addr_t CE_data;
3457 	unsigned int nbytes;
3458 	unsigned int id;
3459 	uint32_t buf_sz;
3460 	uint32_t toeplitz_hash_result;
3461 
3462 	buf_sz = pipe_info->buf_sz;
3463 	if (buf_sz == 0) {
3464 		/* Unused Copy Engine */
3465 		return;
3466 	}
3467 
3468 	hif_state = pipe_info->HIF_CE_state;
3469 	if (!hif_state->started)
3470 		return;
3472 
3473 	scn = HIF_GET_SOFTC(hif_state);
3474 
3475 	ce_hdl = pipe_info->ce_hdl;
3476 
3477 	while (ce_cancel_send_next
3478 		       (ce_hdl, &per_CE_context,
3479 		       (void **)&netbuf, &CE_data, &nbytes,
3480 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
3481 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
3482 			/*
3483 			 * Packets enqueued by htt_h2t_ver_req_msg() and
3484 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
3485 			 * freed in htt_htc_misc_pkt_pool_free() in
3486 			 * wlantl_close(); do not free them here again.
3487 			 * Identify them by the endpoint on which they
3488 			 * were queued and skip them.
3489 			 */
3490 			if (id == scn->htc_htt_tx_endpoint)
3491 				return;
3492 			/* Indicate the completion to higher
3493 			 * layer to free the buffer
3494 			 */
3495 			if (pipe_info->pipe_callbacks.txCompletionHandler)
3496 				pipe_info->pipe_callbacks.
3497 				    txCompletionHandler(pipe_info->
3498 					    pipe_callbacks.Context,
3499 					    netbuf, id, toeplitz_hash_result);
3500 		}
3501 	}
3502 }
3503 
3504 /*
3505  * Cleanup residual buffers for device shutdown:
3506  *    buffers that were enqueued for receive
3507  *    buffers that were to be sent
3508  * Note: Buffers that had completed but which were
3509  * not yet processed are on a completion queue. They
3510  * are handled when the completion thread shuts down.
3511  */
3512 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
3513 {
3514 	int pipe_num;
3515 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3516 	struct CE_state *ce_state;
3517 
3518 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3519 		struct HIF_CE_pipe_info *pipe_info;
3520 
3521 		ce_state = scn->ce_id_to_state[pipe_num];
3522 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3523 				((ce_state->htt_tx_data) ||
3524 				 (ce_state->htt_rx_data))) {
3525 			continue;
3526 		}
3527 
3528 		pipe_info = &hif_state->pipe_info[pipe_num];
3529 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
3530 		hif_send_buffer_cleanup_on_pipe(pipe_info);
3531 	}
3532 }
3533 
3534 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
3535 {
3536 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3537 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3538 
3539 	hif_buffer_cleanup(hif_state);
3540 }
3541 
3542 static void hif_destroy_oom_work(struct hif_softc *scn)
3543 {
3544 	struct CE_state *ce_state;
3545 	int ce_id;
3546 
3547 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3548 		ce_state = scn->ce_id_to_state[ce_id];
3549 		if (ce_state)
3550 			qdf_destroy_work(scn->qdf_dev,
3551 					 &ce_state->oom_allocation_work);
3552 	}
3553 }
3554 
3555 void hif_ce_stop(struct hif_softc *scn)
3556 {
3557 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3558 	int pipe_num;
3559 
3560 	/*
3561 	 * before cleaning up any memory, ensure irq &
3562 	 * bottom half contexts will not be re-entered
3563 	 */
3564 	hif_disable_isr(&scn->osc);
3565 	hif_destroy_oom_work(scn);
3566 	scn->hif_init_done = false;
3567 
3568 	/*
3569 	 * At this point, asynchronous threads are stopped, the
3570 	 * Target should not DMA nor interrupt, and Host code may
3571 	 * not initiate anything more.  So we just need to clean
3572 	 * up Host-side state.
3573 	 */
3574 
3575 	if (scn->athdiag_procfs_inited) {
3576 		athdiag_procfs_remove();
3577 		scn->athdiag_procfs_inited = false;
3578 	}
3579 
3580 	hif_buffer_cleanup(hif_state);
3581 
3582 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3583 		struct HIF_CE_pipe_info *pipe_info;
3584 		struct CE_attr attr;
3585 		struct CE_handle *ce_diag = hif_state->ce_diag;
3586 
3587 		pipe_info = &hif_state->pipe_info[pipe_num];
3588 		if (pipe_info->ce_hdl) {
3589 			if (pipe_info->ce_hdl != ce_diag &&
3590 			    hif_state->started) {
3591 				attr = hif_state->host_ce_config[pipe_num];
3592 				if (attr.src_nentries)
3593 					qdf_spinlock_destroy(&pipe_info->
3594 							completion_freeq_lock);
3595 			}
3596 			ce_fini(pipe_info->ce_hdl);
3597 			pipe_info->ce_hdl = NULL;
3598 			pipe_info->buf_sz = 0;
3599 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
3600 		}
3601 	}
3602 
3603 	if (hif_state->sleep_timer_init) {
3604 		qdf_timer_stop(&hif_state->sleep_timer);
3605 		qdf_timer_free(&hif_state->sleep_timer);
3606 		hif_state->sleep_timer_init = false;
3607 	}
3608 
3609 	hif_state->started = false;
3610 }
3611 
3612 static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
3613 				   struct shadow_reg_cfg
3614 				   **target_shadow_reg_cfg_ret,
3615 				   uint32_t *shadow_cfg_sz_ret)
3616 {
3617 	if (target_shadow_reg_cfg_ret)
3618 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
3619 	if (shadow_cfg_sz_ret)
3620 		*shadow_cfg_sz_ret = shadow_cfg_sz;
3621 }
3622 
3623 /**
3624  * hif_get_target_ce_config() - get copy engine configuration
3625  * @scn: HIF context
3626  * @target_ce_config_ret: basic copy engine configuration
3627  * @target_ce_config_sz_ret: size of the basic configuration in bytes
3628  * @target_service_to_ce_map_ret: service mapping for the copy engines
3629  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
3630  * @target_shadow_reg_cfg_ret: shadow register configuration
3631  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
3632  *
3633  * Provides an accessor to these values outside of this file. These are
3634  * stored in static pointers to const sections, selected at compile time;
3635  * runtime selection would need to consider mode, target type and bus type.
3636  *
3637  * Return: return by parameter.
3638  */
3639 void hif_get_target_ce_config(struct hif_softc *scn,
3640 		struct CE_pipe_config **target_ce_config_ret,
3641 		uint32_t *target_ce_config_sz_ret,
3642 		struct service_to_pipe **target_service_to_ce_map_ret,
3643 		uint32_t *target_service_to_ce_map_sz_ret,
3644 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
3645 		uint32_t *shadow_cfg_sz_ret)
3646 {
3647 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3648 
3649 	*target_ce_config_ret = hif_state->target_ce_config;
3650 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
3651 
3652 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
3653 				       target_service_to_ce_map_sz_ret);
3654 	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
3655 			       shadow_cfg_sz_ret);
3656 }
3657 
3658 #ifdef CONFIG_SHADOW_V3
3659 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
3660 {
3661 	int i;
3662 
3663 	hif_err("v3: num_config %d", cfg->num_shadow_reg_v3_cfg);
3664 
3665 	for (i = 0; i < cfg->num_shadow_reg_v3_cfg; i++) {
3666 		hif_err("i %d, val %x", i, cfg->shadow_reg_v3_cfg[i].addr);
3667 	}
3668 }
3669 
3670 #elif defined(CONFIG_SHADOW_V2)
3671 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
3672 {
3673 	int i;
3674 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3675 		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
3676 
3677 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
3678 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3679 		     "%s: i %d, val %x", __func__, i,
3680 		     cfg->shadow_reg_v2_cfg[i].addr);
3681 	}
3682 }
3683 
3684 #else
3685 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
3686 {
3687 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3688 		  "%s: CONFIG_SHADOW V2/V3 not defined", __func__);
3689 }
3690 #endif
3691 
3692 #ifdef ADRASTEA_RRI_ON_DDR
3693 /**
3694  * hif_get_src_ring_read_index(): Called to get the SRRI
3695  *
3696  * @scn: hif_softc pointer
3697  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3698  *
3699  * This function returns the SRRI to the caller. For CEs that
3700  * don't have interrupts enabled, we look at the DDR based SRRI
3701  *
3702  * Return: SRRI
3703  */
3704 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
3705 		uint32_t CE_ctrl_addr)
3706 {
3707 	struct CE_attr attr;
3708 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3709 
3710 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3711 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3712 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3713 	} else {
3714 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3715 			return A_TARGET_READ(scn,
3716 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
3717 		else
3718 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
3719 					CE_ctrl_addr);
3720 	}
3721 }
3722 
3723 /**
3724  * hif_get_dst_ring_read_index(): Called to get the DRRI
3725  *
3726  * @scn: hif_softc pointer
3727  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3728  *
3729  * This function returns the DRRI to the caller. For CEs that
3730  * don't have interrupts enabled, we look at the DDR based DRRI
3731  *
3732  * Return: DRRI
3733  */
3734 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
3735 		uint32_t CE_ctrl_addr)
3736 {
3737 	struct CE_attr attr;
3738 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3739 
3740 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3741 
3742 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3743 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3744 	} else {
3745 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3746 			return A_TARGET_READ(scn,
3747 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
3748 		else
3749 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
3750 					CE_ctrl_addr);
3751 	}
3752 }
3753 
3754 /**
3755  * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
3756  * @scn: hif_softc pointer
3757  *
3758  * Return: qdf status
3759  */
3760 static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
3761 {
3762 	qdf_dma_addr_t paddr_rri_on_ddr = 0;
3763 
3764 	scn->vaddr_rri_on_ddr =
3765 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3766 		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
3767 		&paddr_rri_on_ddr);
3768 
3769 	if (!scn->vaddr_rri_on_ddr) {
3770 		hif_err("dmaable page alloc fail");
3771 		return QDF_STATUS_E_NOMEM;
3772 	}
3773 
3774 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
3775 
3776 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
3777 
3778 	return QDF_STATUS_SUCCESS;
3779 }
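
/*
 * Layout note (an assumption inferred from the allocation above and
 * the *_FROM_DDR_ADDR accessors used elsewhere, not a definition):
 * the table is CE_COUNT consecutive uint32_t slots, presumably one
 * word per copy engine, so CE n's read index word would live at
 * scn->vaddr_rri_on_ddr[n].
 */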
3780 #endif
3781 
3782 #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
3783 /**
3784  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3785  *
3786  * @scn: hif_softc pointer
3787  *
3788  * This function allocates non-cached memory on DDR and sends
3789  * the physical address of this memory to the CE hardware. The
3790  * hardware updates the RRI at this location.
3791  *
3792  * Return: None
3793  */
3794 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3795 {
3796 	unsigned int i;
3797 	uint32_t high_paddr, low_paddr;
3798 
3799 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3800 		return;
3801 
3802 	low_paddr  = BITS0_TO_31(scn->paddr_rri_on_ddr);
3803 	high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);
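
	/*
	 * Sketch of the expected split (an assumption from the macro
	 * names, not a definition): the 36-bit DDR physical address
	 * is programmed to the hardware as two halves, e.g.
	 *
	 *	low_paddr  = paddr & 0xffffffffULL;	bits [31:0]
	 *	high_paddr = (paddr >> 32) & 0xf;	bits [35:32]
	 */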
3804 
3805 	hif_debug("using srri and drri from DDR");
3806 
3807 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
3808 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
3809 
3810 	for (i = 0; i < CE_COUNT; i++)
3811 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
3812 }
3813 #else
3814 /**
3815  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3816  *
3817  * @scn: hif_softc pointer
3818  *
3819  * This is a dummy implementation for platforms that don't
3820  * support this functionality.
3821  *
3822  * Return: None
3823  */
3824 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3825 {
3826 }
3827 #endif
3828 
3829 /**
3830  * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
3831  *                                    QMI command
3832  * @scn: hif context
3833  * @cfg: wlan enable config
3834  *
3835  * In the case of Genoa, the rri_over_ddr memory configuration is
3836  * passed to the firmware through the QMI configure command.
3837  */
3838 #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
3839 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3840 					   struct pld_wlan_enable_cfg *cfg)
3841 {
3842 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3843 		return;
3844 
3845 	cfg->rri_over_ddr_cfg_valid = true;
3846 	cfg->rri_over_ddr_cfg.base_addr_low =
3847 		 BITS0_TO_31(scn->paddr_rri_on_ddr);
3848 	cfg->rri_over_ddr_cfg.base_addr_high =
3849 		 BITS32_TO_35(scn->paddr_rri_on_ddr);
3850 }
3851 #else
3852 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3853 					   struct pld_wlan_enable_cfg *cfg)
3854 {
3855 }
3856 #endif
3857 
3858 /**
3859  * hif_wlan_enable(): call the platform driver to enable wlan
3860  * @scn: HIF Context
3861  *
3862  * This function passes the con_mode and CE configuration to
3863  * platform driver to enable wlan.
3864  *
3865  * Return: linux error code
3866  */
3867 int hif_wlan_enable(struct hif_softc *scn)
3868 {
3869 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3870 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
3871 	struct pld_wlan_enable_cfg cfg = { 0 };
3872 	enum pld_driver_mode mode;
3873 	uint32_t con_mode = hif_get_conparam(scn);
3874 
3875 	hif_get_target_ce_config(scn,
3876 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
3877 			&cfg.num_ce_tgt_cfg,
3878 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
3879 			&cfg.num_ce_svc_pipe_cfg,
3880 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
3881 			&cfg.num_shadow_reg_cfg);
3882 
3883 	/* translate from structure size to array size */
3884 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
3885 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
3886 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
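
	/*
	 * e.g. (illustrative): num_ce_tgt_cfg arrives from
	 * hif_get_target_ce_config() as a byte count such as
	 * sizeof(target_ce_config_wlan); dividing by
	 * sizeof(struct CE_pipe_config) turns it into the element
	 * count the platform driver expects.
	 */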
3887 
3888 	switch (tgt_info->target_type) {
3889 	case TARGET_TYPE_KIWI:
3890 	case TARGET_TYPE_MANGO:
3891 		hif_prepare_hal_shadow_reg_cfg_v3(scn, &cfg);
3892 		break;
3893 	default:
3894 		hif_prepare_hal_shadow_register_cfg(scn,
3895 						    &cfg.shadow_reg_v2_cfg,
3896 						    &cfg.num_shadow_reg_v2_cfg);
3897 		break;
3898 	}
3899 
3900 	hif_print_hal_shadow_register_cfg(&cfg);
3901 
3902 	hif_update_rri_over_ddr_config(scn, &cfg);
3903 
3904 	if (QDF_GLOBAL_FTM_MODE == con_mode)
3905 		mode = PLD_FTM;
3906 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
3907 		mode = PLD_COLDBOOT_CALIBRATION;
3908 	else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode)
3909 		mode = PLD_FTM_COLDBOOT_CALIBRATION;
3910 	else if (QDF_IS_EPPING_ENABLED(con_mode))
3911 		mode = PLD_EPPING;
3912 	else
3913 		mode = PLD_MISSION;
3914 
3915 	if (BYPASS_QMI)
3916 		return 0;
3917 
3918 	return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode);
3919 }
3920 
3921 #ifdef WLAN_FEATURE_EPPING
3922 
3923 #define CE_EPPING_USES_IRQ true
3924 
3925 void hif_ce_prepare_epping_config(struct hif_softc *scn,
3926 				  struct HIF_CE_state *hif_state)
3927 {
3928 	if (CE_EPPING_USES_IRQ)
3929 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
3930 	else
3931 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
3932 	hif_state->target_ce_config = target_ce_config_wlan_epping;
3933 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
3934 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
3935 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
3936 	scn->ce_count = EPPING_HOST_CE_COUNT;
3937 }
3938 #endif
3939 
3940 #ifdef QCN7605_SUPPORT
3941 static inline
3942 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3943 			       struct HIF_CE_state *hif_state)
3944 {
3945 	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
3946 	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
3947 	hif_state->target_ce_config_sz =
3948 				 sizeof(target_ce_config_wlan_qcn7605);
3949 	target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
3950 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
3951 	scn->ce_count = QCN7605_CE_COUNT;
3952 }
3953 #else
3954 static inline
3955 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3956 			       struct HIF_CE_state *hif_state)
3957 {
3958 	hif_err("QCN7605 not supported");
3959 }
3960 #endif
3961 
3962 #ifdef CE_SVC_CMN_INIT
3963 #ifdef QCA_WIFI_SUPPORT_SRNG
3964 static inline void hif_ce_service_init(void)
3965 {
3966 	ce_service_srng_init();
3967 }
3968 #else
3969 static inline void hif_ce_service_init(void)
3970 {
3971 	ce_service_legacy_init();
3972 }
3973 #endif
3974 #else
3975 static inline void hif_ce_service_init(void)
3976 {
3977 }
3978 #endif
3979 
3981 /**
3982  * hif_ce_prepare_config() - load the correct static tables.
3983  * @scn: hif context
3984  *
3985  * Epping uses different static attribute tables than mission mode.
3986  */
3987 void hif_ce_prepare_config(struct hif_softc *scn)
3988 {
3989 	uint32_t mode = hif_get_conparam(scn);
3990 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3991 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
3992 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3993 	int ret;
3994 	int msi_data_count = 0;
3995 	int msi_data_start = 0;
3996 	int msi_irq_start = 0;
3997 
3998 	hif_ce_service_init();
3999 	hif_state->ce_services = ce_services_attach(scn);
4000 
4001 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
4002 					  &msi_data_count, &msi_data_start,
4003 					  &msi_irq_start);
4004 
4005 	scn->ce_count = HOST_CE_COUNT;
4006 	scn->int_assignment = &ce_int_context[msi_data_count];
4007 	scn->free_irq_done = false;
4008 	/* if epping is enabled we need to use the epping configuration. */
4009 	if (QDF_IS_EPPING_ENABLED(mode)) {
4010 		hif_ce_prepare_epping_config(scn, hif_state);
4011 		return;
4012 	}
4013 
4014 	switch (tgt_info->target_type) {
4015 	default:
4016 		hif_state->host_ce_config = host_ce_config_wlan;
4017 		hif_state->target_ce_config = target_ce_config_wlan;
4018 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
4019 		break;
4020 	case TARGET_TYPE_QCN7605:
4021 		hif_set_ce_config_qcn7605(scn, hif_state);
4022 		break;
4023 	case TARGET_TYPE_AR900B:
4024 	case TARGET_TYPE_QCA9984:
4025 	case TARGET_TYPE_QCA9888:
4026 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
4027 			hif_state->host_ce_config =
4028 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
4029 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
4030 			hif_state->host_ce_config =
4031 				host_lowdesc_ce_cfg_wlan_ar900b;
4032 		} else {
4033 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
4034 		}
4035 
4036 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
4037 		hif_state->target_ce_config_sz =
4038 				sizeof(target_ce_config_wlan_ar900b);
4039 
4040 		break;
4041 
4042 	case TARGET_TYPE_AR9888:
4043 	case TARGET_TYPE_AR9888V2:
4044 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG))
4045 			hif_state->host_ce_config =
4046 				host_lowdesc_ce_cfg_wlan_ar9888;
4047 		else
4048 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
4049 
4050 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
4051 		hif_state->target_ce_config_sz =
4052 					sizeof(target_ce_config_wlan_ar9888);
4053 
4054 		break;
4055 
4056 	case TARGET_TYPE_QCA8074:
4057 	case TARGET_TYPE_QCA8074V2:
4058 	case TARGET_TYPE_QCA6018:
4059 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
4060 			hif_state->host_ce_config =
4061 					host_ce_config_wlan_qca8074_pci;
4062 			hif_state->target_ce_config =
4063 				target_ce_config_wlan_qca8074_pci;
4064 			hif_state->target_ce_config_sz =
4065 				sizeof(target_ce_config_wlan_qca8074_pci);
4066 		} else {
4067 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
4068 			hif_state->target_ce_config =
4069 					target_ce_config_wlan_qca8074;
4070 			hif_state->target_ce_config_sz =
4071 				sizeof(target_ce_config_wlan_qca8074);
4072 		}
4073 		break;
4074 	case TARGET_TYPE_QCA6290:
4075 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
4076 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
4077 		hif_state->target_ce_config_sz =
4078 					sizeof(target_ce_config_wlan_qca6290);
4079 
4080 		scn->ce_count = QCA_6290_CE_COUNT;
4081 		break;
4082 	case TARGET_TYPE_QCN9000:
4083 		hif_state->host_ce_config = host_ce_config_wlan_qcn9000;
4084 		hif_state->target_ce_config = target_ce_config_wlan_qcn9000;
4085 		hif_state->target_ce_config_sz =
4086 					sizeof(target_ce_config_wlan_qcn9000);
4087 		scn->ce_count = QCN_9000_CE_COUNT;
4088 		scn->disable_wake_irq = 1;
4089 		break;
4090 	case TARGET_TYPE_QCN9224:
4091 		hif_set_ce_config_qcn9224(scn, hif_state);
4092 		break;
4093 	case TARGET_TYPE_QCA5332:
4094 		hif_state->host_ce_config = host_ce_config_wlan_qca5332;
4095 		hif_state->target_ce_config = target_ce_config_wlan_qca5332;
4096 		hif_state->target_ce_config_sz =
4097 					 sizeof(target_ce_config_wlan_qca5332);
4098 		scn->ce_count = QCA_5332_CE_COUNT;
4099 		break;
4100 	case TARGET_TYPE_QCN6122:
4101 		hif_state->host_ce_config = host_ce_config_wlan_qcn6122;
4102 		hif_state->target_ce_config = target_ce_config_wlan_qcn6122;
4103 		hif_state->target_ce_config_sz =
4104 					sizeof(target_ce_config_wlan_qcn6122);
4105 		scn->ce_count = QCN_6122_CE_COUNT;
4106 		scn->disable_wake_irq = 1;
4107 		break;
4108 	case TARGET_TYPE_QCN9160:
4109 		hif_state->host_ce_config = host_ce_config_wlan_qcn9160;
4110 		hif_state->target_ce_config = target_ce_config_wlan_qcn9160;
4111 		hif_state->target_ce_config_sz =
4112 					sizeof(target_ce_config_wlan_qcn9160);
4113 		scn->ce_count = QCN_9160_CE_COUNT;
4114 		scn->disable_wake_irq = 1;
4115 		break;
4116 	case TARGET_TYPE_QCA5018:
4117 		hif_state->host_ce_config = host_ce_config_wlan_qca5018;
4118 		hif_state->target_ce_config = target_ce_config_wlan_qca5018;
4119 		hif_state->target_ce_config_sz =
4120 					sizeof(target_ce_config_wlan_qca5018);
4121 		scn->ce_count = QCA_5018_CE_COUNT;
4122 		break;
4123 	case TARGET_TYPE_QCA9574:
4124 		hif_state->host_ce_config = host_ce_config_wlan_qca9574;
4125 		hif_state->target_ce_config = target_ce_config_wlan_qca9574;
4126 		hif_state->target_ce_config_sz =
4127 					sizeof(target_ce_config_wlan_qca9574);
4128 		break;
4129 	case TARGET_TYPE_QCA6390:
4130 		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
4131 		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
4132 		hif_state->target_ce_config_sz =
4133 					sizeof(target_ce_config_wlan_qca6390);
4134 
4135 		scn->ce_count = QCA_6390_CE_COUNT;
4136 		break;
4137 	case TARGET_TYPE_QCA6490:
4138 		hif_state->host_ce_config = host_ce_config_wlan_qca6490;
4139 		hif_state->target_ce_config = target_ce_config_wlan_qca6490;
4140 		hif_state->target_ce_config_sz =
4141 					sizeof(target_ce_config_wlan_qca6490);
4142 
4143 		scn->ce_count = QCA_6490_CE_COUNT;
4144 		break;
4145 	case TARGET_TYPE_QCA6750:
4146 		hif_state->host_ce_config = host_ce_config_wlan_qca6750;
4147 		hif_state->target_ce_config = target_ce_config_wlan_qca6750;
4148 		hif_state->target_ce_config_sz =
4149 					sizeof(target_ce_config_wlan_qca6750);
4150 
4151 		scn->ce_count = QCA_6750_CE_COUNT;
4152 		break;
4153 	case TARGET_TYPE_KIWI:
4154 	case TARGET_TYPE_MANGO:
4155 		hif_state->host_ce_config = host_ce_config_wlan_kiwi;
4156 		hif_state->target_ce_config = target_ce_config_wlan_kiwi;
4157 		hif_state->target_ce_config_sz =
4158 					sizeof(target_ce_config_wlan_kiwi);
4159 		scn->ce_count = KIWI_CE_COUNT;
4160 		break;
4161 	case TARGET_TYPE_ADRASTEA:
4162 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
4163 			hif_state->host_ce_config =
4164 				host_lowdesc_ce_config_wlan_adrastea_nopktlog;
4165 			hif_state->target_ce_config =
4166 			       target_lowdesc_ce_config_wlan_adrastea_nopktlog;
4167 			hif_state->target_ce_config_sz =
4168 			sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog);
4169 		} else {
4170 			hif_state->host_ce_config =
4171 				host_ce_config_wlan_adrastea;
4172 			hif_state->target_ce_config =
4173 					target_ce_config_wlan_adrastea;
4174 			hif_state->target_ce_config_sz =
4175 					sizeof(target_ce_config_wlan_adrastea);
4176 		}
4177 		break;
4178 
4179 	}
4180 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
4181 }
4182 
4183 /**
4184  * hif_ce_open() - do ce specific allocations
4185  * @hif_sc: pointer to hif context
4186  *
4187  * Return: QDF_STATUS_SUCCESS
4188  */
4189 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
4190 {
4191 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
4192 
4193 	qdf_spinlock_create(&hif_state->irq_reg_lock);
4194 	qdf_spinlock_create(&hif_state->keep_awake_lock);
4195 	return QDF_STATUS_SUCCESS;
4196 }
4197 
4198 /**
4199  * hif_ce_close() - do ce specific free
4200  * @hif_sc: pointer to hif context
4201  */
4202 void hif_ce_close(struct hif_softc *hif_sc)
4203 {
4204 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
4205 
4206 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
4207 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
4208 }
4209 
4210 /**
4211  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
4212  * @hif_sc: hif context
4213  *
4214  * uses state variables to support cleaning up when hif_config_ce fails.
4215  */
4216 void hif_unconfig_ce(struct hif_softc *hif_sc)
4217 {
4218 	int pipe_num;
4219 	struct HIF_CE_pipe_info *pipe_info;
4220 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
4221 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
4222 
4223 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
4224 		pipe_info = &hif_state->pipe_info[pipe_num];
4225 		if (pipe_info->ce_hdl)
4226 			ce_unregister_irq(hif_state, (1 << pipe_num));
4228 	}
4229 	deinit_tasklet_workers(hif_hdl);
4230 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
4231 		pipe_info = &hif_state->pipe_info[pipe_num];
4232 		if (pipe_info->ce_hdl) {
4233 			ce_fini(pipe_info->ce_hdl);
4234 			pipe_info->ce_hdl = NULL;
4235 			pipe_info->buf_sz = 0;
4236 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
4237 		}
4238 	}
4239 	if (hif_sc->athdiag_procfs_inited) {
4240 		athdiag_procfs_remove();
4241 		hif_sc->athdiag_procfs_inited = false;
4242 	}
4243 }
4244 
4245 #ifdef CONFIG_BYPASS_QMI
4246 #ifdef QCN7605_SUPPORT
4247 /**
4248  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
4249  * @scn: pointer to HIF structure
4250  *
4251  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
4252  *
4253  * Return: void
4254  */
4255 static void hif_post_static_buf_to_target(struct hif_softc *scn)
4256 {
4257 	phys_addr_t target_pa;
4258 	struct ce_info *ce_info_ptr;
4259 	uint32_t msi_data_start;
4260 	uint32_t msi_data_count;
4261 	uint32_t msi_irq_start;
4262 	uint32_t i = 0;
4263 	int ret;
4264 
4265 	scn->vaddr_qmi_bypass =
4266 			(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
4267 							     scn->qdf_dev->dev,
4268 							     FW_SHARED_MEM,
4269 							     &target_pa);
4270 	if (!scn->vaddr_qmi_bypass) {
4271 		hif_err("Memory allocation failed, could not post target buf");
4272 		return;
4273 	}
4274 
4275 	scn->paddr_qmi_bypass = target_pa;
4276 
4277 	ce_info_ptr = (struct ce_info *)scn->vaddr_qmi_bypass;
4278 
4279 	if (scn->vaddr_rri_on_ddr) {
4280 		ce_info_ptr->rri_over_ddr_low_paddr  =
4281 			 BITS0_TO_31(scn->paddr_rri_on_ddr);
4282 		ce_info_ptr->rri_over_ddr_high_paddr =
4283 			 BITS32_TO_35(scn->paddr_rri_on_ddr);
4284 	}
4285 
4286 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
4287 					  &msi_data_count, &msi_data_start,
4288 					  &msi_irq_start);
4289 	if (ret) {
4290 		hif_err("Failed to get CE msi config");
4291 		return;
4292 	}
4293 
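	/*
	 * Spread the CEs round-robin across the available MSI data
	 * vectors; e.g. (illustrative) with msi_data_count = 3 and
	 * msi_irq_start = 1, CEs 0..5 get vectors 1, 2, 3, 1, 2, 3.
	 */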
4294 	for (i = 0; i < CE_COUNT_MAX; i++) {
4295 		ce_info_ptr->cfg[i].ce_id = i;
4296 		ce_info_ptr->cfg[i].msi_vector =
4297 			 (i % msi_data_count) + msi_irq_start;
4298 	}
4299 
4300 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
4301 	hif_info("target va %pK target pa %pa", scn->vaddr_qmi_bypass,
4302 		 &target_pa);
4303 }
4304 
4305 /**
4306  * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW
4307  * @scn: pointer to HIF structure
4308  *
4310  * Return: void
4311  */
4312 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
4313 {
4314 	void *target_va = scn->vaddr_qmi_bypass;
4315 	phys_addr_t target_pa = scn->paddr_qmi_bypass;
4316 
4317 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
4318 				FW_SHARED_MEM, target_va,
4319 				target_pa, 0);
4320 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
4321 }
4322 #else
4323 /**
4324  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
4325  * @scn: pointer to HIF structure
4326  *
4327  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
4328  *
4329  * Return: void
4330  */
4331 static void hif_post_static_buf_to_target(struct hif_softc *scn)
4332 {
4333 	qdf_dma_addr_t target_pa;
4334 
4335 	scn->vaddr_qmi_bypass =
4336 			(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
4337 							     scn->qdf_dev->dev,
4338 							     FW_SHARED_MEM,
4339 							     &target_pa);
4340 	if (!scn->vaddr_qmi_bypass) {
4341 		hif_err("Memory allocation failed, could not post target buf");
4342 		return;
4343 	}
4344 
4345 	scn->paddr_qmi_bypass = target_pa;
4346 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
4347 }
4348 
4349 /**
4350  * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW
4351  * @scn: pointer to HIF structure
4352  *
4354  * Return: void
4355  */
4356 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
4357 {
4358 	void *target_va = scn->vaddr_qmi_bypass;
4359 	phys_addr_t target_pa = scn->paddr_qmi_bypass;
4360 
4361 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
4362 				FW_SHARED_MEM, target_va,
4363 				target_pa, 0);
4364 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
4365 }
4366 #endif
4367 
4368 #else
4369 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
4370 {
4371 }
4372 
4373 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
4374 {
4375 }
4376 #endif
4377 
4378 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
4379 				bool wait_for_it)
4380 {
4381 	/* todo */
4382 	return 0;
4383 }
4384 
4385 int hif_config_ce_by_id(struct hif_softc *scn, int pipe_num)
4386 {
4387 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4388 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
4389 	struct HIF_CE_pipe_info *pipe_info;
4390 	struct CE_state *ce_state = NULL;
4391 	struct CE_attr *attr;
4392 	int rv = 0;
4393 
4394 	if (pipe_num >= CE_COUNT_MAX)
4395 		return -EINVAL;
4396 
4397 	pipe_info = &hif_state->pipe_info[pipe_num];
4398 	pipe_info->pipe_num = pipe_num;
4399 	pipe_info->HIF_CE_state = hif_state;
4400 	attr = &hif_state->host_ce_config[pipe_num];
4401 	ce_state = scn->ce_id_to_state[pipe_num];
4402 
4403 	if (ce_state) {
4404 		/* Do not reinitialize the CE if its done already */
4405 		rv = QDF_STATUS_E_BUSY;
4406 		goto err;
4407 	}
4408 
4409 	pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
4410 	ce_state = scn->ce_id_to_state[pipe_num];
4411 	if (!ce_state) {
4412 		A_TARGET_ACCESS_UNLIKELY(scn);
4413 		rv = QDF_STATUS_E_FAILURE;
4414 		goto err;
4415 	}
4416 	qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
4417 	QDF_ASSERT(pipe_info->ce_hdl);
4418 	if (!pipe_info->ce_hdl) {
4419 		rv = QDF_STATUS_E_FAILURE;
4420 		A_TARGET_ACCESS_UNLIKELY(scn);
4421 		goto err;
4422 	}
4423 
4424 	ce_state->lro_data = qdf_lro_init();
4425 
4426 	if (attr->flags & CE_ATTR_DIAG) {
4427 		/* Reserve the last CE for
4428 		 * Diagnostic Window support
4429 		 */
4430 		hif_state->ce_diag = pipe_info->ce_hdl;
4431 		goto skip;
4432 	}
4433 
4434 	if (hif_is_nss_wifi_enabled(scn) && ce_state &&
4435 	    (ce_state->htt_rx_data))
4436 		goto skip;
4438 
4439 	pipe_info->buf_sz = (qdf_size_t)(attr->src_sz_max);
4440 	if (attr->dest_nentries > 0) {
4441 		atomic_set(&pipe_info->recv_bufs_needed,
4442 			   init_buffer_count(attr->dest_nentries - 1));
4443 		/* SRNG based CE has one entry less */
4444 		if (ce_srng_based(scn))
4445 			atomic_dec(&pipe_info->recv_bufs_needed);
4446 	} else {
4447 		atomic_set(&pipe_info->recv_bufs_needed, 0);
4448 	}
4449 	ce_tasklet_init(hif_state, (1 << pipe_num));
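
	/*
	 * e.g. (illustrative, assuming init_buffer_count() passes the
	 * value through in mission mode): dest_nentries = 512 leaves
	 * 511 receive buffers needed on a legacy CE and 510 on an
	 * SRNG based target.
	 */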
4450 	ce_register_irq(hif_state, (1 << pipe_num));
4451 
4452 	init_tasklet_worker_by_ceid(hif_hdl, pipe_num);
4453 skip:
4454 	return 0;
4455 err:
4456 	return rv;
4457 }
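
/*
 * Note: pipes marked CE_ATTR_INIT_ON_DEMAND are skipped by
 * hif_config_ce() below and brought up later with this helper; see
 * hif_config_ce_pktlog() for the full on-demand sequence (configure
 * the CE, hook up its irq, start its completion thread, then post
 * receive buffers).
 */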
4458 
4459 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
4460 static inline void hif_gen_ce_id_history_idx_mapping(struct hif_softc *scn)
4461 {
4462 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
4463 	uint8_t ce_id, hist_idx = 0;
4464 
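
	/*
	 * e.g. (illustrative): if IS_CE_DEBUG_ONLY_FOR_CRIT_CE covers
	 * only CE2 and CE3, the map becomes {-1, -1, 0, 1, -1, ...},
	 * i.e. only the critical CEs get a history slot.
	 */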
4465 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
4466 		if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE & (1 << ce_id))
4467 			ce_hist->ce_id_hist_map[ce_id] = hist_idx++;
4468 		else
4469 			ce_hist->ce_id_hist_map[ce_id] = -1;
4470 	}
4471 }
4472 #else
4473 static inline void hif_gen_ce_id_history_idx_mapping(struct hif_softc *scn)
4474 {
4475 }
4476 #endif
4477 
4478 /**
4479  * hif_config_ce() - configure copy engines
4480  * @scn: hif context
4481  *
4482  * Prepares fw, copy engine hardware and host sw according
4483  * to the attributes selected by hif_ce_prepare_config.
4484  *
4485  * also calls athdiag_procfs_init
4486  *
4487  * return: 0 for success nonzero for failure.
4488  */
4489 int hif_config_ce(struct hif_softc *scn)
4490 {
4491 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4492 	struct HIF_CE_pipe_info *pipe_info;
4493 	int pipe_num;
4494 
4495 #ifdef ADRASTEA_SHADOW_REGISTERS
4496 	int i;
4497 #endif
4498 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
4499 
4500 	scn->notice_send = true;
4501 	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
4502 
4503 	hif_post_static_buf_to_target(scn);
4504 
4505 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
4506 
4507 	hif_config_rri_on_ddr(scn);
4508 
4509 	if (ce_srng_based(scn))
4510 		scn->bus_ops.hif_target_sleep_state_adjust =
4511 			&hif_srng_sleep_state_adjust;
4512 
4513 	/* Initialise the CE debug history sysfs interface inputs (ce_id and
4514 	 * index) and disable data storing.
4515 	 */
4516 	reset_ce_debug_history(scn);
4517 	hif_gen_ce_id_history_idx_mapping(scn);
4518 
4519 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
4520 		struct CE_attr *attr;
4521 
4522 		pipe_info = &hif_state->pipe_info[pipe_num];
4523 		attr = &hif_state->host_ce_config[pipe_num];
4524 
4525 		if (attr->flags & CE_ATTR_INIT_ON_DEMAND)
4526 			continue;
4527 
4528 		if (hif_config_ce_by_id(scn, pipe_num))
4529 			goto err;
4530 	}
4531 
4532 	if (athdiag_procfs_init(scn) != 0) {
4533 		A_TARGET_ACCESS_UNLIKELY(scn);
4534 		goto err;
4535 	}
4536 	scn->athdiag_procfs_inited = true;
4537 
4538 	hif_debug("ce_init done");
4539 	hif_debug("%s: X, ret = %d", __func__, rv);
4540 
4541 #ifdef ADRASTEA_SHADOW_REGISTERS
4542 	hif_debug("Using Shadow Registers instead of CE Registers");
4543 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
4544 		hif_debug("Shadow Register%d is mapped to address %x",
4545 			  i,
4546 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
4547 	}
4548 #endif
4549 
4550 	return rv != QDF_STATUS_SUCCESS; /* 0 on success */
4551 err:
4552 	/* Failure, so clean up */
4553 	hif_unconfig_ce(scn);
4554 	hif_info("X, ret = %d", rv);
4555 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE; /* nonzero: failure */
4556 }
4557 
4558 /**
4559  * hif_config_ce_pktlog() - configure the packet log copy engine
4560  * @hif_hdl: hif context
4561  *
4562  * Brings up the pktlog CE on demand: configures the CE, hooks up
4563  * its irq, starts a completion thread for it and posts receive
4564  * buffers.
4565  *
4566  * return: 0 for success nonzero for failure.
4567  */
4569 int hif_config_ce_pktlog(struct hif_opaque_softc *hif_hdl)
4570 {
4571 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
4572 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4573 	int pipe_num;
4574 	QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
4575 	struct HIF_CE_pipe_info *pipe_info;
4576 
4577 	if (!scn)
4578 		goto err;
4579 
4580 	if (scn->pktlog_init)
4581 		return QDF_STATUS_SUCCESS;
4582 
4583 	pipe_num = hif_get_pktlog_ce_num(scn);
4584 	if (pipe_num < 0) {
4585 		qdf_status = QDF_STATUS_E_FAILURE;
4586 		goto err;
4587 	}
4588 
4589 	pipe_info = &hif_state->pipe_info[pipe_num];
4590 
4591 	qdf_status = hif_config_ce_by_id(scn, pipe_num);
4592 	/* CE already initialized. Do not try to reinitialize it again */
4593 	if (qdf_status == QDF_STATUS_E_BUSY)
4594 		return QDF_STATUS_SUCCESS;
4595 
4596 	qdf_status = hif_config_irq_by_ceid(scn, pipe_num);
4597 	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
4598 		goto err;
4599 
4600 	qdf_status = hif_completion_thread_startup_by_ceid(hif_state, pipe_num);
4601 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
4602 		hif_err("%s: failed to start hif thread", __func__);
4603 		goto err;
4604 	}
4605 
4606 	/* Post buffers for pktlog copy engine. */
4607 	qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
4608 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
4609 		/* cleanup is done in hif_ce_disable */
4610 		hif_err("%s: failed to post buffers", __func__);
4611 		return qdf_status;
4612 	}
4613 	scn->pktlog_init = true;
4614 	return qdf_status != QDF_STATUS_SUCCESS; /* 0 on success */
4615 
4616 err:
4617 	hif_debug("%s: X, ret = %d", __func__, qdf_status);
4618 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE; /* nonzero: failure */
4619 }
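
/*
 * Usage sketch (illustrative only): bringing up the pktlog pipe after
 * the main CE configuration, treating a nonzero return as "pktlog CE
 * not available on this target":
 *
 *	if (hif_config_ce_pktlog(hif_hdl))
 *		hif_debug("pktlog CE not configured");
 */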
4620 
4621 #ifdef IPA_OFFLOAD
4622 /**
4623  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
4624  * @scn: bus context
4625  * @ce_sr: copyengine source ring shared memory info
4626  * @ce_sr_ring_size: copyengine source ring size
4627  * @ce_reg_paddr: copyengine register physical address
4628  *
4629  * IPA micro controller data path offload feature enabled,
4630  * HIF should release copy engine related resource information to IPA UC
4631  * IPA UC will access hardware resource with released information
4632  *
4633  * Return: None
4634  */
4635 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
4636 			     qdf_shared_mem_t **ce_sr,
4637 			     uint32_t *ce_sr_ring_size,
4638 			     qdf_dma_addr_t *ce_reg_paddr)
4639 {
4640 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4641 	struct HIF_CE_pipe_info *pipe_info =
4642 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
4643 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
4644 
4645 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
4646 			    ce_reg_paddr);
4647 }
4648 #endif /* IPA_OFFLOAD */
4649 
4651 #ifdef ADRASTEA_SHADOW_REGISTERS
4652 
4653 /*
4654  * Current shadow register config
4655  *
4656  * -----------------------------------------------------------
4657  * Shadow Register      |     CE   |    src/dst write index
4658  * -----------------------------------------------------------
4659  *         0            |     0    |           src
4660  *         1     No Config - Doesn't point to anything
4661  *         2     No Config - Doesn't point to anything
4662  *         3            |     3    |           src
4663  *         4            |     4    |           src
4664  *         5            |     5    |           src
4665  *         6     No Config - Doesn't point to anything
4666  *         7            |     7    |           src
4667  *         8     No Config - Doesn't point to anything
4668  *         9     No Config - Doesn't point to anything
4669  *         10    No Config - Doesn't point to anything
4670  *         11    No Config - Doesn't point to anything
4671  * -----------------------------------------------------------
4672  *         12    No Config - Doesn't point to anything
4673  *         13           |     1    |           dst
4674  *         14           |     2    |           dst
4675  *         15    No Config - Doesn't point to anything
4676  *         16    No Config - Doesn't point to anything
4677  *         17    No Config - Doesn't point to anything
4678  *         18    No Config - Doesn't point to anything
4679  *         19           |     7    |           dst
4680  *         20           |     8    |           dst
4681  *         21    No Config - Doesn't point to anything
4682  *         22    No Config - Doesn't point to anything
4683  *         23    No Config - Doesn't point to anything
4684  * -----------------------------------------------------------
4685  *
4686  * ToDo - Move the shadow register config to the following layout in
4687  * the future. This helps free up a block of shadow registers towards
4688  * the end, which can be used for other purposes.
4690  *
4691  * -----------------------------------------------------------
4692  * Shadow Register      |     CE   |    src/dst write index
4693  * -----------------------------------------------------------
4694  *      0            |     0    |           src
4695  *      1            |     3    |           src
4696  *      2            |     4    |           src
4697  *      3            |     5    |           src
4698  *      4            |     7    |           src
4699  * -----------------------------------------------------------
4700  *      5            |     1    |           dst
4701  *      6            |     2    |           dst
4702  *      7            |     7    |           dst
4703  *      8            |     8    |           dst
4704  * -----------------------------------------------------------
4705  *      9     No Config - Doesn't point to anything
4706  *      12    No Config - Doesn't point to anything
4707  *      13    No Config - Doesn't point to anything
4708  *      14    No Config - Doesn't point to anything
4709  *      15    No Config - Doesn't point to anything
4710  *      16    No Config - Doesn't point to anything
4711  *      17    No Config - Doesn't point to anything
4712  *      18    No Config - Doesn't point to anything
4713  *      19    No Config - Doesn't point to anything
4714  *      20    No Config - Doesn't point to anything
4715  *      21    No Config - Doesn't point to anything
4716  *      22    No Config - Doesn't point to anything
4717  *      23    No Config - Doesn't point to anything
4718  * -----------------------------------------------------------
4719 */
4720 #ifndef QCN7605_SUPPORT
4721 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4722 {
4723 	u32 addr = 0;
4724 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4725 
4726 	switch (ce) {
4727 	case 0:
4728 		addr = SHADOW_VALUE0;
4729 		break;
4730 	case 3:
4731 		addr = SHADOW_VALUE3;
4732 		break;
4733 	case 4:
4734 		addr = SHADOW_VALUE4;
4735 		break;
4736 	case 5:
4737 		addr = SHADOW_VALUE5;
4738 		break;
4739 	case 7:
4740 		addr = SHADOW_VALUE7;
4741 		break;
4742 	default:
4743 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4744 		QDF_ASSERT(0);
4745 	}
4746 	return addr;
4748 }
4749 
4750 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4751 {
4752 	u32 addr = 0;
4753 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4754 
4755 	switch (ce) {
4756 	case 1:
4757 		addr = SHADOW_VALUE13;
4758 		break;
4759 	case 2:
4760 		addr = SHADOW_VALUE14;
4761 		break;
4762 	case 5:
4763 		addr = SHADOW_VALUE17;
4764 		break;
4765 	case 7:
4766 		addr = SHADOW_VALUE19;
4767 		break;
4768 	case 8:
4769 		addr = SHADOW_VALUE20;
4770 		break;
4771 	case 9:
4772 		addr = SHADOW_VALUE21;
4773 		break;
4774 	case 10:
4775 		addr = SHADOW_VALUE22;
4776 		break;
4777 	case 11:
4778 		addr = SHADOW_VALUE23;
4779 		break;
4780 	default:
4781 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4782 		QDF_ASSERT(0);
4783 	}
4784 
4785 	return addr;
4787 }
4788 #else
4789 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4790 {
4791 	u32 addr = 0;
4792 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4793 
4794 	switch (ce) {
4795 	case 0:
4796 		addr = SHADOW_VALUE0;
4797 		break;
4798 	case 3:
4799 		addr = SHADOW_VALUE3;
4800 		break;
4801 	case 4:
4802 		addr = SHADOW_VALUE4;
4803 		break;
4804 	case 5:
4805 		addr = SHADOW_VALUE5;
4806 		break;
4807 	default:
4808 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4809 		QDF_ASSERT(0);
4810 	}
4811 	return addr;
4812 }
4813 
4814 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4815 {
4816 	u32 addr = 0;
4817 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4818 
4819 	switch (ce) {
4820 	case 1:
4821 		addr = SHADOW_VALUE13;
4822 		break;
4823 	case 2:
4824 		addr = SHADOW_VALUE14;
4825 		break;
4826 	case 3:
4827 		addr = SHADOW_VALUE15;
4828 		break;
4829 	case 5:
4830 		addr = SHADOW_VALUE17;
4831 		break;
4832 	case 7:
4833 		addr = SHADOW_VALUE19;
4834 		break;
4835 	case 8:
4836 		addr = SHADOW_VALUE20;
4837 		break;
4838 	case 9:
4839 		addr = SHADOW_VALUE21;
4840 		break;
4841 	case 10:
4842 		addr = SHADOW_VALUE22;
4843 		break;
4844 	case 11:
4845 		addr = SHADOW_VALUE23;
4846 		break;
4847 	default:
4848 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4849 		QDF_ASSERT(0);
4850 	}
4851 
4852 	return addr;
4853 }
4854 #endif
4855 #endif
4856 
4857 #if defined(FEATURE_LRO)
4858 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
4859 {
4860 	struct CE_state *ce_state;
4861 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
4862 
4863 	ce_state = scn->ce_id_to_state[ctx_id];
4864 
4865 	return ce_state->lro_data;
4866 }
4867 #endif
4868 
4869 /**
4870  * hif_map_service_to_pipe() - returns the ce ids pertaining to
4871  * this service
4872  * @hif_hdl: hif context pointer.
4873  * @svc_id: Service ID for which the mapping is needed.
4874  * @ul_pipe: address of the container in which ul pipe is returned.
4875  * @dl_pipe: address of the container in which dl pipe is returned.
4876  * @ul_is_polled: address of the container in which a bool
4877  *			indicating if the UL CE for this service
4878  *			is polled is returned.
4879  * @dl_is_polled: address of the container in which a bool
4880  *			indicating if the DL CE for this service
4881  *			is polled is returned.
4882  *
4883  * Return: Indicates whether the service has been found in the table.
4884  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
4885  *         Debug logs are emitted if either leg has not been updated
4886  *         because its entry is missing from the table (this is not an error).
4887  */
4888 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
4889 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
4890 			int *dl_is_polled)
4891 {
4892 	int status = -EINVAL;
4893 	unsigned int i;
4894 	struct service_to_pipe element;
4895 	struct service_to_pipe *tgt_svc_map_to_use;
4896 	uint32_t sz_tgt_svc_map_to_use;
4897 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
4898 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4899 	bool dl_updated = false;
4900 	bool ul_updated = false;
4901 
4902 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
4903 				       &sz_tgt_svc_map_to_use);
4904 
4905 	*dl_is_polled = 0;  /* polling for received messages not supported */
4906 
4907 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
4908 
4909 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
4910 		if (element.service_id == svc_id) {
4911 			if (element.pipedir == PIPEDIR_OUT) {
4912 				*ul_pipe = element.pipenum;
4913 				*ul_is_polled =
4914 					(hif_state->host_ce_config[*ul_pipe].flags &
4915 					 CE_ATTR_DISABLE_INTR) != 0;
4916 				ul_updated = true;
4917 			} else if (element.pipedir == PIPEDIR_IN) {
4918 				*dl_pipe = element.pipenum;
4919 				dl_updated = true;
4920 			}
4921 			status = 0;
4922 		}
4923 	}
4924 	if (!ul_updated)
4925 		hif_debug("ul pipe is NOT updated for service %d", svc_id);
4926 	if (!dl_updated)
4927 		hif_debug("dl pipe is NOT updated for service %d", svc_id);
4928 
4929 	return status;
4930 }
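
/*
 * Usage sketch (illustrative; hif_get_wake_ce_id() below does the same
 * lookup for the wake CE):
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (!hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
 *				     &ul_pipe, &dl_pipe,
 *				     &ul_polled, &dl_polled))
 *		... dl_pipe now holds the wake CE id ...
 */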
4931 
4932 #ifdef SHADOW_REG_DEBUG
4933 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
4934 		uint32_t CE_ctrl_addr)
4935 {
4936 	uint32_t read_from_hw, srri_from_ddr = 0;
4937 
4938 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
4939 
4940 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
4941 
4942 	if (read_from_hw != srri_from_ddr) {
4943 		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
4944 		       srri_from_ddr, read_from_hw,
4945 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
4946 		QDF_ASSERT(0);
4947 	}
4948 	return srri_from_ddr;
4949 }
4950 
4952 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
4953 		uint32_t CE_ctrl_addr)
4954 {
4955 	uint32_t read_from_hw, drri_from_ddr = 0;
4956 
4957 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
4958 
4959 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
4960 
4961 	if (read_from_hw != drri_from_ddr) {
4962 		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
4963 		       drri_from_ddr, read_from_hw,
4964 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
4965 		QDF_ASSERT(0);
4966 	}
4967 	return drri_from_ddr;
4968 }
4969 
4970 #endif
4971 
4972 /**
4973  * hif_dump_ce_registers() - dump ce registers
4974  * @scn: hif_softc pointer.
4975  *
4976  * Output the copy engine registers
4977  *
4978  * Return: 0 for success or error code
4979  */
4980 int hif_dump_ce_registers(struct hif_softc *scn)
4981 {
4982 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
4983 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
4984 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
4985 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
4986 	uint16_t i;
4987 	QDF_STATUS status;
4988 
4989 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
4990 		if (!scn->ce_id_to_state[i]) {
4991 			hif_debug("CE%d not used", i);
4992 			continue;
4993 		}
4994 
4995 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
4996 					   (uint8_t *) &ce_reg_values[0],
4997 					   ce_reg_word_size * sizeof(uint32_t));
4998 
4999 		if (status != QDF_STATUS_SUCCESS) {
5000 			hif_err("Dumping CE register failed!");
5001 			return -EACCES;
5002 		}
5003 		hif_debug("CE%d=>", i);
5004 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
5005 				   (uint8_t *) &ce_reg_values[0],
5006 				   ce_reg_word_size * sizeof(uint32_t));
5007 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
5008 				+ SR_WR_INDEX_ADDRESS),
5009 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
5010 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
5011 				+ CURRENT_SRRI_ADDRESS),
5012 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
5013 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
5014 				+ DST_WR_INDEX_ADDRESS),
5015 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
5016 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
5017 				+ CURRENT_DRRI_ADDRESS),
5018 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
5019 		qdf_print("---");
5020 	}
5021 	return 0;
5022 }
5023 qdf_export_symbol(hif_dump_ce_registers);
5024 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
5025 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
5026 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
5027 {
5028 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
5029 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
5030 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
5031 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
5032 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
5033 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
5034 	struct CE_ring_state *src_ring = ce_state->src_ring;
5035 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
5036 
5037 	if (src_ring) {
5038 		hif_info->ul_pipe.nentries = src_ring->nentries;
5039 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
5040 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
5041 		hif_info->ul_pipe.write_index = src_ring->write_index;
5042 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
5043 		hif_info->ul_pipe.base_addr_CE_space =
5044 			src_ring->base_addr_CE_space;
5045 		hif_info->ul_pipe.base_addr_owner_space =
5046 			src_ring->base_addr_owner_space;
5047 	}
5048 
5050 	if (dest_ring) {
5051 		hif_info->dl_pipe.nentries = dest_ring->nentries;
5052 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
5053 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
5054 		hif_info->dl_pipe.write_index = dest_ring->write_index;
5055 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
5056 		hif_info->dl_pipe.base_addr_CE_space =
5057 			dest_ring->base_addr_CE_space;
5058 		hif_info->dl_pipe.base_addr_owner_space =
5059 			dest_ring->base_addr_owner_space;
5060 	}
5061 
5062 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
5063 	hif_info->ctrl_addr = ce_state->ctrl_addr;
5064 
5065 	return hif_info;
5066 }
5067 qdf_export_symbol(hif_get_addl_pipe_info);
5068 
5069 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
5070 {
5071 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
5072 
5073 	scn->nss_wifi_ol_mode = mode;
5074 	return 0;
5075 }
5076 qdf_export_symbol(hif_set_nss_wifiol_mode);
5077 #endif
5078 
5079 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
5080 {
5081 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
5082 	scn->hif_attribute = hif_attrib;
5083 }
5084 
5086 /* disable interrupts (only applicable to legacy copy engines currently) */
5087 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
5088 {
5089 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
5090 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
5091 	uint32_t ctrl_addr = CE_state->ctrl_addr;
5092 
5093 	Q_TARGET_ACCESS_BEGIN(scn);
5094 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
5095 	Q_TARGET_ACCESS_END(scn);
5096 }
5097 qdf_export_symbol(hif_disable_interrupt);
5098 
5099 /**
5100  * hif_fw_event_handler() - hif fw event handler
5101  * @hif_state: pointer to hif ce state structure
5102  *
5103  * Process fw events and raise HTC callback to process fw events.
5104  *
5105  * Return: none
5106  */
5107 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
5108 {
5109 	struct hif_msg_callbacks *msg_callbacks =
5110 		&hif_state->msg_callbacks_current;
5111 
5112 	if (!msg_callbacks->fwEventHandler)
5113 		return;
5114 
5115 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
5116 			QDF_STATUS_E_FAILURE);
5117 }
5118 
5119 #ifndef QCA_WIFI_3_0
5120 /**
5121  * hif_fw_interrupt_handler() - FW interrupt handler
5122  * @irq: irq number
5123  * @arg: the user pointer
5124  *
5125  * Called from the PCI interrupt handler when the Target raises a
5126  * firmware-generated interrupt to the Host.
5127  *
5128  * only registered for legacy ce devices
5129  *
5130  * Return: status of handled irq
5131  */
5132 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
5133 {
5134 	struct hif_softc *scn = arg;
5135 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5136 	uint32_t fw_indicator_address, fw_indicator;
5137 
5138 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
5139 		return ATH_ISR_NOSCHED;
5140 
5141 	fw_indicator_address = hif_state->fw_indicator_address;
5142 	/* On a sudden unplug this read returns ~0 */
5143 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
5144 
5145 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
5146 		/* ACK: clear Target-side pending event */
5147 		A_TARGET_WRITE(scn, fw_indicator_address,
5148 			       fw_indicator & ~FW_IND_EVENT_PENDING);
5149 		if (Q_TARGET_ACCESS_END(scn) < 0)
5150 			return ATH_ISR_SCHED;
5151 
5152 		if (hif_state->started) {
5153 			hif_fw_event_handler(hif_state);
5154 		} else {
5155 			/*
5156 			 * Probable Target failure before we're prepared
5157 			 * to handle it.  Generally unexpected.
5158 			 * fw_indicator used as bitmap, and defined as below:
5159 			 * fw_indicator is used as a bitmap, defined as follows:
5160 			 *     FW_IND_INITIALIZED      0x2
5161 			 *     FW_IND_NEEDRECOVER      0x4
5162 			 */
5163 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
5164 				("%s: Early firmware event indicated 0x%x\n",
5165 				 __func__, fw_indicator));
5166 		}
5167 	} else {
5168 		if (Q_TARGET_ACCESS_END(scn) < 0)
5169 			return ATH_ISR_SCHED;
5170 	}
5171 
5172 	return ATH_ISR_SCHED;
5173 }
5174 #else
5175 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
5176 {
5177 	return ATH_ISR_SCHED;
5178 }
5179 #endif /* #ifndef QCA_WIFI_3_0 */
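
/*
 * Example (sketch): on legacy CE parts the bus layer registers the
 * handler above roughly as follows; the irq number and name are
 * placeholder values, not the driver's actual ones.
 *
 *	ret = request_irq(fw_irq, hif_fw_interrupt_handler, IRQF_SHARED,
 *			  "wlan_firmware", scn);
 *	if (ret)
 *		hif_err("request_irq failed: %d", ret);
 */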
5180 
5182 /**
5183  * hif_wlan_disable() - call the platform driver to disable wlan
5184  * @scn: HIF Context
5185  *
5186  * This function passes the con_mode to the platform driver to
5187  * disable wlan.
5188  *
5189  * Return: void
5190  */
5191 void hif_wlan_disable(struct hif_softc *scn)
5192 {
5193 	enum pld_driver_mode mode;
5194 	uint32_t con_mode = hif_get_conparam(scn);
5195 
5196 	if (scn->target_status == TARGET_STATUS_RESET)
5197 		return;
5198 
5199 	if (QDF_GLOBAL_FTM_MODE == con_mode)
5200 		mode = PLD_FTM;
5201 	else if (QDF_IS_EPPING_ENABLED(con_mode))
5202 		mode = PLD_EPPING;
5203 	else
5204 		mode = PLD_MISSION;
5205 
5206 	pld_wlan_disable(scn->qdf_dev->dev, mode);
5207 }
5208 
5209 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
5210 {
5211 	int status;
5212 	uint8_t ul_pipe, dl_pipe;
5213 	int ul_is_polled, dl_is_polled;
5214 
5215 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
5216 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
5217 					 HTC_CTRL_RSVD_SVC,
5218 					 &ul_pipe, &dl_pipe,
5219 					 &ul_is_polled, &dl_is_polled);
5220 	if (status) {
5221 		hif_err("Failed to map pipe: %d", status);
5222 		return status;
5223 	}
5224 
5225 	*ce_id = dl_pipe;
5226 
5227 	return 0;
5228 }
5229 
5230 int hif_get_fw_diag_ce_id(struct hif_softc *scn, uint8_t *ce_id)
5231 {
5232 	int status;
5233 	uint8_t ul_pipe, dl_pipe;
5234 	int ul_is_polled, dl_is_polled;
5235 
5236 	/* DL pipe for WMI_CONTROL_DIAG_SVC should map to the FW DIAG CE_ID */
5237 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
5238 					 WMI_CONTROL_DIAG_SVC,
5239 					 &ul_pipe, &dl_pipe,
5240 					 &ul_is_polled, &dl_is_polled);
5241 	if (status) {
5242 		hif_err("Failed to map pipe: %d", status);
5243 		return status;
5244 	}
5245 
5246 	*ce_id = dl_pipe;
5247 
5248 	return 0;
5249 }
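
/*
 * Example (sketch): both lookups above follow the same pattern.  A
 * suspend path might resolve the wake CE before arming its interrupt:
 *
 *	uint8_t wake_ce_id;
 *
 *	if (!hif_get_wake_ce_id(scn, &wake_ce_id))
 *		hif_info("wake events arrive on CE %u", wake_ce_id);
 */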
5250 
5251 #ifdef HIF_CE_LOG_INFO
5252 /**
5253  * ce_get_index_info() - Get CE index info
5254  * @scn: HIF Context
5255  * @ce_state: CE opaque handle
5256  * @info: CE info
5257  *
5258  * Return: 0 for success and non-zero for failure
5259  */
5260 static
5261 int ce_get_index_info(struct hif_softc *scn, void *ce_state,
5262 		      struct ce_index *info)
5263 {
5264 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5265 
5266 	return hif_state->ce_services->ce_get_index_info(scn, ce_state, info);
5267 }
5268 
5269 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
5270 		     unsigned int *offset)
5271 {
5272 	struct hang_event_info info = {0};
5273 	static uint32_t tracked_ce = BIT(CE_ID_1) | BIT(CE_ID_2) |
5274 		BIT(CE_ID_3) | BIT(CE_ID_4) | BIT(CE_ID_9) | BIT(CE_ID_10);
5275 	uint8_t curr_index = 0;
5276 	uint8_t i;
5277 	uint16_t size;
5278 
5279 	info.active_tasklet_count = qdf_atomic_read(&scn->active_tasklet_cnt);
5280 	info.active_grp_tasklet_cnt =
5281 				qdf_atomic_read(&scn->active_grp_tasklet_cnt);
5282 
5283 	for (i = 0; i < scn->ce_count; i++) {
5284 		if (!(tracked_ce & BIT(i)) || !scn->ce_id_to_state[i])
5285 			continue;
5286 
5287 		if (ce_get_index_info(scn, scn->ce_id_to_state[i],
5288 				      &info.ce_info[curr_index]))
5289 			continue;
5290 
5291 		curr_index++;
5292 	}
5293 
5294 	info.ce_count = curr_index;
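	/* Trim the copy to the populated ce_index slots, not all CE_COUNT_MAX */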
5295 	size = sizeof(info) -
5296 		(CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index);
5297 
5298 	if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
5299 		return;
5300 
5301 	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_CE_INFO,
5302 			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);
5303 
5304 	qdf_mem_copy(data + *offset, &info, size);
5305 	*offset = *offset + size;
5306 }
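
/*
 * Example (sketch): a hang-event notifier can append this TLV to the
 * hang data blob.  The notifier-data layout shown here (hang_data,
 * offset) and the qdf_notifer_data type name are assumptions for
 * illustration only.
 *
 *	static int my_hang_notifier_cb(struct notifier_block *nb,
 *				       unsigned long state, void *data)
 *	{
 *		struct qdf_notifer_data *nd = data;
 *
 *		hif_log_ce_info(my_scn, nd->hang_data, &nd->offset);
 *		return NOTIFY_OK;
 *	}
 */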
5307 #endif
5308