xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#include "qdf_module.h"

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"
#ifdef HIF_CE_LOG_INFO
#include "qdf_hang_event_notifier.h"
#endif

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
	defined(QCA_WIFI_QCA6018) || defined(QCA_WIFI_QCA5018) || \
	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCA9574)) && \
	!defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

#ifdef QCA_WIFI_SUPPORT_SRNG
#include <hal_api.h>
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived, rather
 * than waiting only for an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef ENABLE_10_4_FW_HDR
#if (ENABLE_10_4_FW_HDR == 1)
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR == 1 */
#endif /* ENABLE_10_4_FW_HDR */

static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump access log
 *
 * Dump the recorded target register accesses.
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

/*
 * This structure contains the interrupt index for each Copy Engine
 * for the various numbers of MSIs available in the system.
 */
static struct ce_int_assignment ce_int_context[NUM_CE_CONTEXT] = {
	/* Default configuration */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(12),
	  CE_INTERRUPT_IDX(13),
	  CE_INTERRUPT_IDX(14),
	  CE_INTERRUPT_IDX(15),
#endif
	} },
	/* Interrupt assignment for 1 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 2 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 3 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 4 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 5 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 6 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 7 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 8 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 9 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 10 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 11 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 12 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
#ifdef QCA_WIFI_QCN9224
	/* Interrupt assignment for 13 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
	  CE_INTERRUPT_IDX(12),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	} },
	/* Interrupt assignment for 14 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
	  CE_INTERRUPT_IDX(12),
	  CE_INTERRUPT_IDX(13),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	} },
	/* Interrupt assignment for 15 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
	  CE_INTERRUPT_IDX(12),
	  CE_INTERRUPT_IDX(13),
	  CE_INTERRUPT_IDX(14),
	  CE_INTERRUPT_IDX(0),
	} },
	/* Interrupt assignment for 16 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
	  CE_INTERRUPT_IDX(12),
	  CE_INTERRUPT_IDX(13),
	  CE_INTERRUPT_IDX(14),
	  CE_INTERRUPT_IDX(15),
	} },
#endif
};
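
/*
 * Illustration only (not part of the driver): a minimal sketch of how the
 * table above is meant to be indexed. Given the number of MSIs actually
 * granted and a copy engine id, the per-CE interrupt index is a direct
 * two-dimensional lookup. The member name 'msi_idx' is an assumption here;
 * see struct ce_int_assignment for the real layout.
 *
 *	// row 0 is the default (one interrupt per CE) assignment
 *	uint8_t irq_idx = ce_int_context[msi_count].msi_idx[ce_id];
 */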

void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		hif_err("Invalid htc dump command: %d", cmd_id);
		break;
	}
}
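
/*
 * Example usage of hif_trigger_dump() (illustrative only), with the
 * command ids defined near the top of this file:
 *
 *	hif_trigger_dump(hif_ctx, AGC_DUMP, true);	// start AGC capture
 *	...
 *	hif_trigger_dump(hif_ctx, AGC_DUMP, false);	// dump captured data
 *
 * BB_WATCHDOG_DUMP ignores 'start' and dumps immediately.
 */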

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}
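
/*
 * A minimal sketch of how the polling fallback above would be armed for a
 * CE that has no usable interrupt (assumption: the standard qdf_timer API
 * is used, as in other QDF timer clients):
 *
 *	qdf_timer_init(scn->qdf_dev, &CE_state->poll_timer,
 *		       ce_poll_timeout, CE_state, QDF_TIMER_TYPE_WAKE_APPS);
 *	CE_state->timer_inited = true;
 *	qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
 *
 * ce_poll_timeout() then re-arms itself every CE_POLL_TIMEOUT ms until
 * timer_inited is cleared.
 */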

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}
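
/*
 * Worked examples for roundup_pwr2(): 1024 -> 1024 (already a power of 2),
 * 1000 -> 1024, 3 -> 4. Note that n = 0 also passes the power-of-2 test
 * and is returned unchanged; callers are expected to pass a non-zero
 * entry count.
 */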

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

#ifdef QCN7605_SUPPORT
static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the
 * actual mapping of HTC services to HIF pipes.
 */
/*
 * This table defines the Copy Engine configuration and the mapping of
 * services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering the BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
   ============================================================================
   Purpose     | Service / Endpoint   | CE   | Dir  | Xfer     | Xfer
               |                      |      |      | Size     | Frequency
   ============================================================================
   tx          | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
   descriptor  |                      |      |      | O(100B)  | and regular
   download    |                      |      |      |          |
   ----------------------------------------------------------------------------
   rx          | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
   indication  |                      |      |      | O(10B)   | regular
   upload      |                      |      |      |          |
   ----------------------------------------------------------------------------
   MSDU        | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
   upload      |                      |      |      | O(1000B) | (frequent
   e.g. noise  |                      |      |      |          | during IP1.0
   packets     |                      |      |      |          | testing)
   ----------------------------------------------------------------------------
   MSDU        | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
   download    |                      |      |      | O(1000B) | (frequent
   e.g.        |                      |      |      |          | during IP1.0
   misdirected |                      |      |      |          | testing)
   EAPOL       |                      |      |      |          |
   packets     |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a         | DATA_BE, DATA_VI,    | CE 2 | t->h |          | never(?)
               | DATA_VO (uplink)     |      |      |          |
   ----------------------------------------------------------------------------
   n/a         | DATA_BE, DATA_VI,    | CE 3 | h->t |          | never(?)
               | DATA_VO (downlink)   |      |      |          |
   ----------------------------------------------------------------------------
   WMI events  | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
               |                      |      |      | O(100B)  |
   ----------------------------------------------------------------------------
   WMI         | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
   messages    | (downlink)           |      |      | O(100B)  |
   ----------------------------------------------------------------------------
   n/a         | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
               | HTC_RAW_STREAMS      |      |      |          |
               | (uplink)             |      |      |          |
   ----------------------------------------------------------------------------
   n/a         | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
               | HTC_RAW_STREAMS      |      |      |          |
               | (downlink)           |      |      |          |
   ----------------------------------------------------------------------------
   diag        | none (raw CE)        | CE 7 | t<>h | 4        | Diag Window
               |                      |      |      |          | infrequent
   ============================================================================
 */
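
/*
 * Illustration only: at HTC connect time the host resolves a service id to
 * its UL/DL pipes via hif_map_service_to_pipe() (declared in hif.h; the
 * exact signature is assumed here from its callers):
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	hif_map_service_to_pipe(hif_ctx, WMI_CONTROL_SVC,
 *				&ul_pipe, &dl_pipe, &ul_polled, &dl_polled);
 *	// with the default wlan map below: ul_pipe == 3, dl_pipe == 2
 */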

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,    /* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,    /* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,    /* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{                       /* Must be last */
		0,
		0,
		0,
	},
};

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA9574))
static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
};
#endif

#if (defined(QCA_WIFI_QCA8074V2))
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6018))
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
};
#endif

#if (defined(QCA_WIFI_QCN9000))
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
};
#endif

#if (defined(QCA_WIFI_QCN9224))
static struct service_to_pipe target_service_to_ce_map_qcn9224[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 14, },
	{ WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 14, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA5018))
static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
};
#endif

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#ifdef QCN7605_SUPPORT
static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
#ifdef IPA_OFFLOAD
	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#else
	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
#endif
	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA6290))
#ifdef QCA_6290_AP_MODE
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6390))
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};

#if (defined(QCA_WIFI_QCA6750))
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
};
#endif

#if (defined(QCA_WIFI_KIWI))
static struct service_to_pipe target_service_to_ce_map_kiwi[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
#ifdef FEATURE_XPAN
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 4, },
#else
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
#endif
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
#ifdef FEATURE_XPAN
	{ LPASS_DATA_MSG_SVC, PIPEDIR_OUT, 0, },
	{ LPASS_DATA_MSG_SVC, PIPEDIR_IN, 5, },
#endif
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_kiwi[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
#ifdef WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,  /* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{                       /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},             /* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	hif_err("QCN7605 not supported");
}
#endif

#ifdef QCA_WIFI_QCN9224
static
void hif_set_ce_config_qcn9224(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	hif_state->host_ce_config = host_ce_config_wlan_qcn9224;
	hif_state->target_ce_config = target_ce_config_wlan_qcn9224;
	hif_state->target_ce_config_sz =
				 sizeof(target_ce_config_wlan_qcn9224);
	scn->ce_count = QCN_9224_CE_COUNT;
	scn->disable_wake_irq = 1;
}

static
void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn9224;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn9224);
}
#else
static inline
void hif_set_ce_config_qcn9224(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	hif_err("QCN9224 not supported");
}

static inline
void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	hif_err("QCN9224 not supported");
}
#endif

static void hif_select_service_to_pipe_map(struct hif_softc *scn,
				    struct service_to_pipe **tgt_svc_map_to_use,
				    uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_QCN7605:
			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA6390:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6390);
			break;
		case TARGET_TYPE_QCA6490:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6490);
			break;
		case TARGET_TYPE_QCA6750:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6750;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6750);
			break;
		case TARGET_TYPE_KIWI:
		case TARGET_TYPE_MANGO:
			*tgt_svc_map_to_use = target_service_to_ce_map_kiwi;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_kiwi);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		case TARGET_TYPE_QCA8074V2:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca8074_v2;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074_v2);
			break;
		case TARGET_TYPE_QCA9574:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca9574;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca9574);
			break;
		case TARGET_TYPE_QCA6018:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca6018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6018);
			break;
		case TARGET_TYPE_QCN9000:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qcn9000;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qcn9000);
			break;
		case TARGET_TYPE_QCN9224:
			hif_select_ce_map_qcn9224(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_QCA5018:
		case TARGET_TYPE_QCN6122:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca5018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca5018);
			break;
		}
	}
	hif_state->tgt_svc_map = *tgt_svc_map_to_use;
	hif_state->sz_tgt_svc_map = *sz_tgt_svc_map_to_use /
					sizeof(struct service_to_pipe);
}

/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data/htt_tx_data attribute of the state structure if
 *   the CE serves one of the HTT DATA services.
 *
 * Return: true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int    i;
	bool   rc = false;

	if (ce_state) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
 * @hif_ctx: hif opaque handle
 *
 * Description:
 *   Gets the number of WMI EPs configured in the target svc map. Since the
 *   EP map includes both IN and OUT direction pipes, count only OUT pipes
 *   to get the number of EPs configured for the WMI service.
 *
 * Return:
 *  uint8_t: count for WMI eps in target svc map
 */
uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int    i;
	uint8_t   wmi_ep_count = 0;

	hif_select_service_to_pipe_map(scn, &svc_map,
				       &map_sz);
	map_len = map_sz / sizeof(struct service_to_pipe);

	for (i = 0; i < map_len; i++) {
		/* Count number of WMI EPs based on out direction */
		if ((svc_map[i].pipedir == PIPEDIR_OUT) &&
		    ((svc_map[i].service_id == WMI_CONTROL_SVC)  ||
		    (svc_map[i].service_id == WMI_CONTROL_SVC_WMAC1) ||
		    (svc_map[i].service_id == WMI_CONTROL_SVC_WMAC2))) {
			wmi_ep_count++;
		}
	}

	return wmi_ep_count;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		hif_err("ce %d, %s, initial sw_index = %d, initial write_index = %d",
			ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to allocate
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (!scn->ipa_ce_ring) {
			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
				scn->qdf_dev,
				nentries * desc_size + CE_DESC_RING_ALIGN);
			if (!scn->ipa_ce_ring) {
				hif_err(
				"Failed to allocate memory for IPA ce ring");
				return QDF_STATUS_E_NOMEM;
			}
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						&scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
						scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			hif_mem_alloc_consistent_unaligned
					(scn,
					 (nentries * desc_size +
					  CE_DESC_RING_ALIGN),
					 base_addr,
					 ce_ring->hal_ring_type,
					 &ce_ring->is_ring_prealloc);

		if (!ce_ring->base_addr_owner_space_unaligned) {
			hif_err("Failed to allocate DMA memory for ce ring id: %u",
			       CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (scn->ipa_ce_ring) {
			qdf_mem_shared_mem_free(scn->qdf_dev,
						scn->ipa_ce_ring);
			scn->ipa_ce_ring = NULL;
		}
		ce_ring->base_addr_owner_space_unaligned = NULL;
	} else {
		hif_mem_free_consistent_unaligned
			(scn,
			 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
			 ce_ring->base_addr_owner_space_unaligned,
			 ce_ring->base_addr_CE_space, 0,
			 ce_ring->is_ring_prealloc);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	}
}
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	ce_ring->base_addr_owner_space_unaligned =
			hif_mem_alloc_consistent_unaligned
					(scn,
					 (nentries * desc_size +
					  CE_DESC_RING_ALIGN),
					 base_addr,
					 ce_ring->hal_ring_type,
					 &ce_ring->is_ring_prealloc);

	if (!ce_ring->base_addr_owner_space_unaligned) {
		hif_err("Failed to allocate DMA memory for ce ring id: %u",
		       CE_id);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	hif_mem_free_consistent_unaligned
		(scn,
		 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
		 ce_ring->base_addr_owner_space_unaligned,
		 ce_ring->base_addr_CE_space, 0,
		 ce_ring->is_ring_prealloc);
	ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

/*
 * TODO: Need to explore the possibility of having this as part of a
 * target context instead of a global array.
 */
static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);

void ce_service_register_module(enum ce_target_type target_type,
				struct ce_ops* (*ce_attach)(void))
{
	if (target_type < CE_MAX_TARGET_TYPE)
		ce_attach_register[target_type] = ce_attach;
}

qdf_export_symbol(ce_service_register_module);
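
/*
 * A minimal sketch (names assumed, not actual driver code) of how a CE
 * service implementation registers itself through the hook above; the SRNG
 * and legacy services live in ce_service_srng.c and ce_service_legacy.c:
 *
 *	static struct ce_ops *my_ce_attach(void)
 *	{
 *		return &my_ce_ops;	// hypothetical struct ce_ops instance
 *	}
 *
 *	ce_service_register_module(CE_SVC_SRNG, my_ce_attach);
 *
 * ce_services_attach() below then invokes the registered hook selected by
 * ce_srng_based().
 */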

/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the hif context
 *
 * Description:
 *   returns true if the target is SRNG based
 *
 * Return: true if the target is SRNG based, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCN9000:
	case TARGET_TYPE_QCN6122:
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_KIWI:
	case TARGET_TYPE_MANGO:
	case TARGET_TYPE_QCN9224:
	case TARGET_TYPE_QCA9574:
		return true;
	default:
		return false;
	}
	return false;
}
qdf_export_symbol(ce_srng_based);

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	struct ce_ops *ops = NULL;

	if (ce_srng_based(scn)) {
		if (ce_attach_register[CE_SVC_SRNG])
			ops = ce_attach_register[CE_SVC_SRNG]();
	} else if (ce_attach_register[CE_SVC_LEGACY]) {
		ops = ce_attach_register[CE_SVC_LEGACY]();
	}

	return ops;
}

#else	/* QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_attach_register[CE_SVC_LEGACY])
		return ce_attach_register[CE_SVC_LEGACY]();

	return NULL;
}
#endif /* QCA_WIFI_SUPPORT_SRNG */
1715 
1716 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
1717 		struct pld_shadow_reg_v2_cfg **shadow_config,
1718 		int *num_shadow_registers_configured) {
1719 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1720 
1721 	hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
1722 			scn, shadow_config, num_shadow_registers_configured);
1725 }
1726 
1727 #ifdef CONFIG_SHADOW_V3
1728 static inline void
1729 hif_prepare_hal_shadow_reg_cfg_v3(struct hif_softc *scn,
1730 				  struct pld_wlan_enable_cfg *cfg)
1731 {
1732 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1733 
1734 	if (!hif_state->ce_services->ce_prepare_shadow_register_v3_cfg)
1735 		return;
1736 
1737 	hif_state->ce_services->ce_prepare_shadow_register_v3_cfg(
1738 			scn, &cfg->shadow_reg_v3_cfg,
1739 			&cfg->num_shadow_reg_v3_cfg);
1740 }
1741 #else
1742 static inline void
1743 hif_prepare_hal_shadow_reg_cfg_v3(struct hif_softc *scn,
1744 				  struct pld_wlan_enable_cfg *cfg)
1745 {
1746 }
1747 #endif
1748 
1749 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
1750 						uint8_t ring_type)
1751 {
1752 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1753 
1754 	return hif_state->ce_services->ce_get_desc_size(ring_type);
1755 }
1756 
1757 #ifdef QCA_WIFI_SUPPORT_SRNG
1758 static inline int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
1759 {
1760 	switch (ce_ring_type) {
1761 	case CE_RING_SRC:
1762 		return CE_SRC;
1763 	case CE_RING_DEST:
1764 		return CE_DST;
1765 	case CE_RING_STATUS:
1766 		return CE_DST_STATUS;
1767 	default:
1768 		return -EINVAL;
1769 	}
1770 }
1771 #else
1772 static int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
1773 {
1774 	return 0;
1775 }
1776 #endif
1777 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
1778 		uint8_t ring_type, uint32_t nentries)
1779 {
1780 	uint32_t ce_nbytes;
1781 	char *ptr;
1782 	qdf_dma_addr_t base_addr;
1783 	struct CE_ring_state *ce_ring;
1784 	uint32_t desc_size;
1785 	struct hif_softc *scn = CE_state->scn;
1786 
1787 	ce_nbytes = sizeof(struct CE_ring_state)
1788 		+ (nentries * sizeof(void *));
1789 	ptr = qdf_mem_malloc(ce_nbytes);
1790 	if (!ptr)
1791 		return NULL;
1792 
1793 	ce_ring = (struct CE_ring_state *)ptr;
1794 	ptr += sizeof(struct CE_ring_state);
1795 	ce_ring->nentries = nentries;
1796 	ce_ring->nentries_mask = nentries - 1;
1797 
1798 	ce_ring->low_water_mark_nentries = 0;
1799 	ce_ring->high_water_mark_nentries = nentries;
1800 	ce_ring->per_transfer_context = (void **)ptr;
1801 	ce_ring->hal_ring_type = ce_ring_type_to_hal_ring_type(ring_type);
1802 
1803 	desc_size = ce_get_desc_size(scn, ring_type);
1804 
1805 	/* This allocation assumes cache-coherent DMA; legacy
1806 	 * platforms without it are not supported
1807 	 */
1808 	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
1809 			       ce_ring, nentries,
1810 			       desc_size) !=
1811 	    QDF_STATUS_SUCCESS) {
1812 		hif_err("ring has no DMA mem");
1813 		qdf_mem_free(ce_ring);
1814 		return NULL;
1815 	}
1816 	ce_ring->base_addr_CE_space_unaligned = base_addr;
1817 
1818 	/* Initialize the descriptor memory to 0 so that
1819 	 * garbage data cannot crash the system while
1820 	 * downloading firmware
1821 	 */
1822 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
1823 			nentries * desc_size +
1824 			CE_DESC_RING_ALIGN);
1825 
1826 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
1827 
1828 		ce_ring->base_addr_CE_space =
1829 			(ce_ring->base_addr_CE_space_unaligned +
1830 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
1831 
1832 		ce_ring->base_addr_owner_space = (void *)
1833 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
1834 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
1835 	} else {
1836 		ce_ring->base_addr_CE_space =
1837 				ce_ring->base_addr_CE_space_unaligned;
1838 		ce_ring->base_addr_owner_space =
1839 				ce_ring->base_addr_owner_space_unaligned;
1840 	}
1841 
1842 	return ce_ring;
1843 }
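
/*
 * For illustration: if CE_DESC_RING_ALIGN were 8, an unaligned base of
 * 0x1004 would be rounded up as (0x1004 + 7) & ~7 = 0x1008, while an
 * already aligned base is left unchanged. The same adjustment is applied
 * to both the CE-space (DMA) and owner-space (CPU) addresses above, so a
 * given descriptor index refers to the same descriptor in both views.
 */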
1844 
1845 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
1846 			uint32_t ce_id, struct CE_ring_state *ring,
1847 			struct CE_attr *attr)
1848 {
1849 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1850 
1851 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
1852 					      ring, attr);
1853 }
1854 
1855 static void ce_srng_cleanup(struct hif_softc *scn, struct CE_state *CE_state,
1856 			    uint8_t ring_type)
1857 {
1858 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1859 
1860 	if (hif_state->ce_services->ce_srng_cleanup)
1861 		hif_state->ce_services->ce_srng_cleanup(scn,
1862 					CE_state, ring_type);
1863 }
1864 
1865 int hif_ce_bus_early_suspend(struct hif_softc *scn)
1866 {
1867 	uint8_t ul_pipe, dl_pipe;
1868 	int ce_id, status, ul_is_polled, dl_is_polled;
1869 	struct CE_state *ce_state;
1870 
1871 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
1872 					 &ul_pipe, &dl_pipe,
1873 					 &ul_is_polled, &dl_is_polled);
1874 	if (status) {
1875 		hif_err("pipe_mapping failure");
1876 		return status;
1877 	}
1878 
1879 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1880 		if (ce_id == ul_pipe)
1881 			continue;
1882 		if (ce_id == dl_pipe)
1883 			continue;
1884 
1885 		ce_state = scn->ce_id_to_state[ce_id];
1886 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1887 		if (ce_state->state == CE_RUNNING)
1888 			ce_state->state = CE_PAUSED;
1889 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1890 	}
1891 
1892 	return status;
1893 }
1894 
1895 int hif_ce_bus_late_resume(struct hif_softc *scn)
1896 {
1897 	int ce_id;
1898 	struct CE_state *ce_state;
1899 	int write_index = 0;
1900 	bool index_updated;
1901 
1902 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1903 		ce_state = scn->ce_id_to_state[ce_id];
1904 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1905 		if (ce_state->state == CE_PENDING) {
1906 			write_index = ce_state->src_ring->write_index;
1907 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
1908 					write_index);
1909 			ce_state->state = CE_RUNNING;
1910 			index_updated = true;
1911 		} else {
1912 			index_updated = false;
1913 		}
1914 
1915 		if (ce_state->state == CE_PAUSED)
1916 			ce_state->state = CE_RUNNING;
1917 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1918 
1919 		if (index_updated)
1920 			hif_record_ce_desc_event(scn, ce_id,
1921 				RESUME_WRITE_INDEX_UPDATE,
1922 				NULL, NULL, write_index, 0);
1923 	}
1924 
1925 	return 0;
1926 }
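
/*
 * Call-order sketch (illustrative only, compiled out): the bus layer is
 * expected to pause the copy engines before suspend and resume them
 * afterwards. A send attempted while a CE is CE_PAUSED can leave it in
 * CE_PENDING, which is why hif_ce_bus_late_resume() flushes the stored
 * write index back to hardware. The wrapper below is hypothetical.
 */
#if 0
static int example_bus_suspend_cycle(struct hif_softc *scn)
{
	int ret = hif_ce_bus_early_suspend(scn);

	if (ret)
		return ret;

	/* ... platform suspend and resume happen here ... */

	return hif_ce_bus_late_resume(scn);
}
#endif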
1927 
1928 /**
1929  * ce_oom_recovery() - try to recover rx ce from oom condition
1930  * @context: CE_state of the CE with oom rx ring
1931  *
1932  * The executing work will continue to be rescheduled until
1933  * at least 1 descriptor is successfully posted to the rx ring.
1934  *
1935  * Return: none
1936  */
1937 static void ce_oom_recovery(void *context)
1938 {
1939 	struct CE_state *ce_state = context;
1940 	struct hif_softc *scn = ce_state->scn;
1941 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
1942 	struct HIF_CE_pipe_info *pipe_info =
1943 		&ce_softc->pipe_info[ce_state->id];
1944 
1945 	hif_post_recv_buffers_for_pipe(pipe_info);
1946 }
1947 
1948 #ifdef HIF_CE_DEBUG_DATA_BUF
1949 /**
1950  * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed to by
1951  * the CE descriptors.
1952  * Allocates HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE bytes each.
1953  * @scn: hif scn handle
1954  * @ce_id: Copy Engine Id
1955  *
1956  * Return: QDF_STATUS
1957  */
1958 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1959 {
1960 	struct hif_ce_desc_event *event = NULL;
1961 	struct hif_ce_desc_event *hist_ev = NULL;
1962 	uint32_t index = 0;
1963 
1964 	hist_ev =
1965 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1966 
1967 	if (!hist_ev)
1968 		return QDF_STATUS_E_NOMEM;
1969 
1970 	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
1971 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1972 		event = &hist_ev[index];
1973 		event->data =
1974 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
1975 		if (!event->data) {
1976 			hif_err_rl("ce debug data alloc failed");
1977 			scn->hif_ce_desc_hist.data_enable[ce_id] = false;
1978 			return QDF_STATUS_E_NOMEM;
1979 		}
1980 	}
1981 	return QDF_STATUS_SUCCESS;
1982 }
1983 
1984 /**
1985  * free_mem_ce_debug_hist_data() - Free mem of the data pointed to by
1986  * the CE descriptors.
1987  * @scn: hif scn handle
1988  * @ce_id: Copy Engine Id
1989  *
1990  * Return: None
1991  */
1992 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1993 {
1994 	struct hif_ce_desc_event *event = NULL;
1995 	struct hif_ce_desc_event *hist_ev = NULL;
1996 	uint32_t index = 0;
1997 
1998 	hist_ev =
1999 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
2000 
2001 	if (!hist_ev)
2002 		return;
2003 
2004 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
2005 		event = &hist_ev[index];
2006 		if (event->data)
2007 			qdf_mem_free(event->data);
2008 		event->data = NULL;
2010 	}
2012 }
2013 #endif /* HIF_CE_DEBUG_DATA_BUF */
2014 
2015 #ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
2016 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2017 
2018 /* define below variables for crashscope parse */
2019 struct hif_ce_desc_event *hif_ce_desc_history[CE_COUNT_MAX];
2020 uint32_t hif_ce_history_max = HIF_CE_HISTORY_MAX;
2021 
2022 /*
2023  * For debug builds, CE history is enabled for all CEs; for perf
2024  * builds (if CONFIG_SLUB_DEBUG_ON is n), it is enabled only for
2025  * CE2 (WMI event), CE3 (WMI cmd) and CE7 history.
2026  */
2027 #if defined(CONFIG_SLUB_DEBUG_ON)
2028 #define CE_DESC_HISTORY_BUFF_CNT  CE_COUNT_MAX
2029 #define IS_CE_DEBUG_ONLY_FOR_CRIT_CE  0
2030 #else
2031 /* CE2, CE3, CE7 */
2032 #define CE_DESC_HISTORY_BUFF_CNT  3
2033 #define IS_CE_DEBUG_ONLY_FOR_CRIT_CE (BIT(2) | BIT(3) | BIT(7))
2034 #endif
2035 struct hif_ce_desc_event
2036 	hif_ce_desc_history_buff[CE_DESC_HISTORY_BUFF_CNT][HIF_CE_HISTORY_MAX];
2037 
2038 static struct hif_ce_desc_event *
2039 	hif_ce_debug_history_buf_get(struct hif_softc *scn, unsigned int ce_id)
2040 {
2041 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2042 
2043 	hif_debug("get ce debug buffer ce_id %u, crit_ce_map=0x%x, idx=%u",
2044 		  ce_id, IS_CE_DEBUG_ONLY_FOR_CRIT_CE,
2045 		  ce_hist->ce_id_hist_map[ce_id]);
2046 	if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE &&
2047 	    (ce_id == CE_ID_2 || ce_id == CE_ID_3 || ce_id == CE_ID_7)) {
2048 		uint8_t idx = ce_hist->ce_id_hist_map[ce_id];
2049 
2050 		hif_ce_desc_history[ce_id] = hif_ce_desc_history_buff[idx];
2051 	} else {
2052 		hif_ce_desc_history[ce_id] =
2053 			hif_ce_desc_history_buff[ce_id];
2054 	}
2055 
2056 	return hif_ce_desc_history[ce_id];
2057 }
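
/*
 * Note (illustrative only, compiled out): since
 * IS_CE_DEBUG_ONLY_FOR_CRIT_CE is BIT(2) | BIT(3) | BIT(7) on perf
 * builds, the explicit ce_id comparisons above could equivalently be
 * written as a bitmask test:
 */
#if 0
static bool example_is_crit_ce(unsigned int ce_id)
{
	return !!(IS_CE_DEBUG_ONLY_FOR_CRIT_CE & BIT(ce_id));
}
#endif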
2058 
2059 /**
2060  * alloc_mem_ce_debug_history() - Allocate CE descriptor history
2061  * @scn: hif scn handle
2062  * @ce_id: Copy Engine Id
2063  * @src_nentries: source ce ring entries
2064  * Return: QDF_STATUS
2065  */
2066 static QDF_STATUS
2067 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
2068 			   uint32_t src_nentries)
2069 {
2070 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2071 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2072 
2073 	/* For perf build, return directly for non ce2/ce3/ce7 */
2074 	if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE &&
2075 	    ce_id != CE_ID_2 &&
2076 	    ce_id != CE_ID_3 &&
2077 	    ce_id != CE_ID_7) {
2078 		ce_hist->enable[ce_id] = false;
2079 		ce_hist->data_enable[ce_id] = false;
2080 		return QDF_STATUS_SUCCESS;
2081 	}
2082 
2083 	ce_hist->hist_ev[ce_id] = hif_ce_debug_history_buf_get(scn, ce_id);
2084 	ce_hist->enable[ce_id] = true;
2085 
2086 	if (src_nentries) {
2087 		status = alloc_mem_ce_debug_hist_data(scn, ce_id);
2088 		if (status != QDF_STATUS_SUCCESS) {
2089 			ce_hist->enable[ce_id] = false;
2090 			ce_hist->hist_ev[ce_id] = NULL;
2091 			return status;
2092 		}
2093 	} else {
2094 		ce_hist->data_enable[ce_id] = false;
2095 	}
2096 
2097 	return QDF_STATUS_SUCCESS;
2098 }
2099 
2100 /**
2101  * free_mem_ce_debug_history() - Free CE descriptor history
2102  * @scn: hif scn handle
2103  * @ce_id: Copy Engine Id
2104  *
2105  * Return: None
2106  */
2107 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
2108 {
2109 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2110 
2111 	if (!ce_hist->enable[ce_id])
2112 		return;
2113 
2114 	ce_hist->enable[ce_id] = false;
2115 	if (ce_hist->data_enable[ce_id]) {
2116 		ce_hist->data_enable[ce_id] = false;
2117 		free_mem_ce_debug_hist_data(scn, ce_id);
2118 	}
2119 	ce_hist->hist_ev[ce_id] = NULL;
2120 }
2121 #else
2122 static inline QDF_STATUS
2123 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
2124 			   uint32_t src_nentries)
2125 {
2126 	return QDF_STATUS_SUCCESS;
2127 }
2128 
2129 static inline void
2130 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
2131 #endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
2132 #else
2133 #if defined(HIF_CE_DEBUG_DATA_BUF)
2134 
2135 static QDF_STATUS
2136 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
2137 			   uint32_t src_nentries)
2138 {
2139 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
2140 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
2141 
2142 	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
2143 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
2144 		return QDF_STATUS_E_NOMEM;
2145 	} else {
2146 		scn->hif_ce_desc_hist.enable[CE_id] = 1;
2147 		return QDF_STATUS_SUCCESS;
2148 	}
2149 }
2150 
2151 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
2152 {
2153 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2154 	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];
2155 
2156 	if (!hist_ev)
2157 		return;
2158 
2159 	if (ce_hist->data_enable[CE_id]) {
2160 		ce_hist->data_enable[CE_id] = false;
2161 		free_mem_ce_debug_hist_data(scn, CE_id);
2162 	}
2163 
2164 	ce_hist->enable[CE_id] = false;
2165 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
2166 	ce_hist->hist_ev[CE_id] = NULL;
2167 }
2168 
2169 #else
2170 
2171 static inline QDF_STATUS
2172 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
2173 			   uint32_t src_nentries)
2174 {
2175 	return QDF_STATUS_SUCCESS;
2176 }
2177 
2178 static inline void
2179 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
2180 #endif /* HIF_CE_DEBUG_DATA_BUF */
2181 #endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */
2182 
2183 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2184 /**
2185  * reset_ce_debug_history() - reset the index and ce id used for dumping the
2186  * CE records on the console using sysfs.
2187  * @scn: hif scn handle
2188  *
2189  * Return:
2190  */
2191 static inline void reset_ce_debug_history(struct hif_softc *scn)
2192 {
2193 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2194 	/* Initialise the CE debug history sysfs interface inputs,
2195 	 * ce_id and index
2196 	 */
2197 	ce_hist->hist_index = 0;
2198 	ce_hist->hist_id = 0;
2199 }
2200 #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
2201 static inline void reset_ce_debug_history(struct hif_softc *scn) { }
2202 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
2203 
2204 void ce_enable_polling(void *cestate)
2205 {
2206 	struct CE_state *CE_state = (struct CE_state *)cestate;
2207 
2208 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
2209 		CE_state->timer_inited = true;
2210 }
2211 
2212 void ce_disable_polling(void *cestate)
2213 {
2214 	struct CE_state *CE_state = (struct CE_state *)cestate;
2215 
2216 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
2217 		CE_state->timer_inited = false;
2218 }
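
/*
 * Configuration sketch (illustrative only, compiled out): polling is
 * opted into per CE by setting CE_ATTR_ENABLE_POLL in that CE's host
 * attributes; ce_init() then arms poll_timer with CE_POLL_TIMEOUT, and
 * ce_enable_polling()/ce_disable_polling() gate the timer handler. The
 * attribute values below are hypothetical.
 */
#if 0
static struct CE_attr example_polled_ce_attr = {
	.flags = CE_ATTR_ENABLE_POLL,	/* serviced by poll_timer */
	.src_nentries = 0,		/* target-to-host pipe */
	.dest_nentries = 512,
	.src_sz_max = 2048,
};
#endif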
2219 
2220 /*
2221  * Initialize a Copy Engine based on caller-supplied attributes.
2222  * This may be called once to initialize both source and destination
2223  * rings or it may be called twice for separate source and destination
2224  * initialization. It may be that only one side or the other is
2225  * initialized by software/firmware.
2226  *
2227  * This should be called during the initialization sequence before
2228  * interrupts are enabled, so we don't have to worry about thread safety.
2229  */
2230 struct CE_handle *ce_init(struct hif_softc *scn,
2231 			  unsigned int CE_id, struct CE_attr *attr)
2232 {
2233 	struct CE_state *CE_state;
2234 	uint32_t ctrl_addr;
2235 	unsigned int nentries;
2236 	bool malloc_CE_state = false;
2237 	bool malloc_src_ring = false;
2238 	int status;
2239 	QDF_STATUS mem_status = QDF_STATUS_SUCCESS;
2240 
2241 	QDF_ASSERT(CE_id < scn->ce_count);
2242 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
2243 	CE_state = scn->ce_id_to_state[CE_id];
2244 
2245 	if (!CE_state) {
2246 		CE_state =
2247 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
2248 		if (!CE_state)
2249 			return NULL;
2250 
2251 		malloc_CE_state = true;
2252 		qdf_spinlock_create(&CE_state->ce_index_lock);
2253 
2254 		CE_state->id = CE_id;
2255 		CE_state->ctrl_addr = ctrl_addr;
2256 		CE_state->state = CE_RUNNING;
2257 		CE_state->attr_flags = attr->flags;
2258 	}
2259 	CE_state->scn = scn;
2260 	CE_state->service = ce_engine_service_reg;
2261 
2262 	qdf_atomic_init(&CE_state->rx_pending);
2263 	if (!attr) {
2264 		/* Already initialized; caller wants the handle */
2265 		return (struct CE_handle *)CE_state;
2266 	}
2267 
2268 	if (CE_state->src_sz_max)
2269 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
2270 	else
2271 		CE_state->src_sz_max = attr->src_sz_max;
2272 
2273 	ce_init_ce_desc_event_log(scn, CE_id,
2274 				  attr->src_nentries + attr->dest_nentries);
2275 
2276 	/* source ring setup */
2277 	nentries = attr->src_nentries;
2278 	if (nentries) {
2279 		struct CE_ring_state *src_ring;
2280 
2281 		nentries = roundup_pwr2(nentries);
2282 		if (CE_state->src_ring) {
2283 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
2284 		} else {
2285 			src_ring = CE_state->src_ring =
2286 				ce_alloc_ring_state(CE_state,
2287 						CE_RING_SRC,
2288 						nentries);
2289 			if (!src_ring) {
2290 				/* cannot allocate src ring. If the
2291 				 * CE_state is allocated locally, free
2292 				 * CE_state and return error.
2293 				 */
2294 				hif_err("src ring has no mem");
2295 				if (malloc_CE_state) {
2296 					/* allocated CE_state locally */
2297 					qdf_mem_free(CE_state);
2298 					malloc_CE_state = false;
2299 				}
2300 				return NULL;
2301 			}
2302 			/* we can allocate src ring. Mark that the src ring is
2303 			 * allocated locally
2304 			 */
2305 			malloc_src_ring = true;
2306 
2307 			/*
2308 			 * Also allocate a shadow src ring in
2309 			 * regular mem to use for faster access.
2310 			 */
2311 			src_ring->shadow_base_unaligned =
2312 				qdf_mem_malloc(nentries *
2313 					       sizeof(struct CE_src_desc) +
2314 					       CE_DESC_RING_ALIGN);
2315 			if (!src_ring->shadow_base_unaligned)
2316 				goto error_no_dma_mem;
2317 
2318 			src_ring->shadow_base = (struct CE_src_desc *)
2319 				(((size_t) src_ring->shadow_base_unaligned +
2320 				CE_DESC_RING_ALIGN - 1) &
2321 				 ~(CE_DESC_RING_ALIGN - 1));
2322 
2323 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
2324 					       src_ring, attr);
2325 			if (status < 0)
2326 				goto error_target_access;
2327 
2328 			ce_ring_test_initial_indexes(CE_id, src_ring,
2329 						     "src_ring");
2330 		}
2331 	}
2332 
2333 	/* destination ring setup */
2334 	nentries = attr->dest_nentries;
2335 	if (nentries) {
2336 		struct CE_ring_state *dest_ring;
2337 
2338 		nentries = roundup_pwr2(nentries);
2339 		if (CE_state->dest_ring) {
2340 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
2341 		} else {
2342 			dest_ring = CE_state->dest_ring =
2343 				ce_alloc_ring_state(CE_state,
2344 						CE_RING_DEST,
2345 						nentries);
2346 			if (!dest_ring) {
2347 				/* cannot allocate dst ring. If the CE_state
2348 				 * or src ring is allocated locally, free
2349 				 * CE_state and src ring and return error.
2350 				 */
2351 				hif_err("dest ring has no mem");
2352 				goto error_no_dma_mem;
2353 			}
2354 
2355 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
2356 				      dest_ring, attr);
2357 			if (status < 0)
2358 				goto error_target_access;
2359 
2360 			ce_ring_test_initial_indexes(CE_id, dest_ring,
2361 						     "dest_ring");
2362 
2363 			/* For srng based target, init status ring here */
2364 			if (ce_srng_based(CE_state->scn)) {
2365 				CE_state->status_ring =
2366 					ce_alloc_ring_state(CE_state,
2367 							CE_RING_STATUS,
2368 							nentries);
2369 				if (!CE_state->status_ring) {
2370 					/* Allocation failed. Cleanup */
2371 					qdf_mem_free(CE_state->dest_ring);
2372 					if (malloc_src_ring) {
2373 						qdf_mem_free
2374 							(CE_state->src_ring);
2375 						CE_state->src_ring = NULL;
2376 						malloc_src_ring = false;
2377 					}
2378 					if (malloc_CE_state) {
2379 						/* allocated CE_state locally */
2380 						scn->ce_id_to_state[CE_id] =
2381 							NULL;
2382 						qdf_mem_free(CE_state);
2383 						malloc_CE_state = false;
2384 					}
2385 
2386 					return NULL;
2387 				}
2388 
2389 				status = ce_ring_setup(scn, CE_RING_STATUS,
2390 					       CE_id, CE_state->status_ring,
2391 					       attr);
2392 				if (status < 0)
2393 					goto error_target_access;
2394 
2395 			}
2396 
2397 			/* epping */
2398 			/* poll timer */
2399 			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
2400 				qdf_timer_init(scn->qdf_dev,
2401 						&CE_state->poll_timer,
2402 						ce_poll_timeout,
2403 						CE_state,
2404 						QDF_TIMER_TYPE_WAKE_APPS);
2405 				ce_enable_polling(CE_state);
2406 				qdf_timer_mod(&CE_state->poll_timer,
2407 						      CE_POLL_TIMEOUT);
2408 			}
2409 		}
2410 	}
2411 
2412 	if (!ce_srng_based(scn)) {
2413 		/* Enable CE error interrupts */
2414 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2415 			goto error_target_access;
2416 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
2417 		if (Q_TARGET_ACCESS_END(scn) < 0)
2418 			goto error_target_access;
2419 	}
2420 
2421 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
2422 			ce_oom_recovery, CE_state);
2423 
2424 	/* update the htt_data attribute */
2425 	ce_mark_datapath(CE_state);
2426 	scn->ce_id_to_state[CE_id] = CE_state;
2427 
2428 	mem_status = alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries);
2429 	if (mem_status != QDF_STATUS_SUCCESS)
2430 		goto error_target_access;
2431 
2432 	return (struct CE_handle *)CE_state;
2433 
2434 error_target_access:
2435 error_no_dma_mem:
2436 	ce_fini((struct CE_handle *)CE_state);
2437 	return NULL;
2438 }
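
/*
 * Usage sketch (illustrative only, compiled out): ce_init() is called
 * with a populated attr to allocate and set up the rings; a later call
 * with attr == NULL simply returns the existing handle. The CE id and
 * attribute values below are hypothetical.
 */
#if 0
static struct CE_handle *example_bring_up_ce(struct hif_softc *scn)
{
	struct CE_attr attr = {
		.flags = 0,
		.src_nentries = 16,	/* host-to-target ring */
		.dest_nentries = 0,
		.src_sz_max = 2048,
	};

	return ce_init(scn, 0, &attr);
}
#endif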
2439 
2440 /**
2441  * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
2442  * @hif_ctx: HIF Context
2443  *
2444  * Returns true only when CE_ATTR_ENABLE_POLL is set on every CE
2445  * that has a destination ring configured.
2446  *
2447  * Return: bool
2448  */
2449 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
2450 {
2451 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2452 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2453 	struct CE_attr *attr;
2454 	int id;
2455 
2456 	for (id = 0; id < scn->ce_count; id++) {
2457 		attr = &hif_state->host_ce_config[id];
2458 		if (attr && (attr->dest_nentries) &&
2459 		    !(attr->flags & CE_ATTR_ENABLE_POLL))
2460 			return false;
2461 	}
2462 	return true;
2463 }
2464 qdf_export_symbol(hif_is_polled_mode_enabled);
2465 
2466 static int hif_get_pktlog_ce_num(struct hif_softc *scn)
2467 {
2468 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2469 	int id;
2470 
2471 	for (id = 0; id < hif_state->sz_tgt_svc_map; id++) {
2472 		if (hif_state->tgt_svc_map[id].service_id ==  PACKET_LOG_SVC)
2473 			return hif_state->tgt_svc_map[id].pipenum;
2474 	}
2475 	return -EINVAL;
2476 }
2477 
2478 #ifdef WLAN_FEATURE_FASTPATH
2479 /**
2480  * hif_enable_fastpath() - update that we have enabled fastpath mode
2481  * @hif_ctx: HIF context
2482  *
2483  * For use in data path
2484  *
2485  * Return: void
2486  */
2487 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
2488 {
2489 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2490 
2491 	if (ce_srng_based(scn)) {
2492 		hif_warn("srng rings do not support fastpath");
2493 		return;
2494 	}
2495 	hif_debug("Enabling fastpath mode");
2496 	scn->fastpath_mode_on = true;
2497 }
2498 
2499 /**
2500  * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
2501  * @hif_ctx: HIF Context
2502  *
2503  * For use in data path to skip HTC
2504  *
2505  * Return: bool
2506  */
2507 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
2508 {
2509 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2510 
2511 	return scn->fastpath_mode_on;
2512 }
2513 
2514 /**
2515  * hif_get_ce_handle - API to get CE handle for FastPath mode
2516  * @hif_ctx: HIF Context
2517  * @id: CopyEngine Id
2518  *
2519  * API to return CE handle for fastpath mode
2520  *
2521  * Return: CE handle for the given id
2522  */
2523 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
2524 {
2525 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2526 
2527 	return scn->ce_id_to_state[id];
2528 }
2529 qdf_export_symbol(hif_get_ce_handle);
2530 
2531 /**
2532  * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup.
2533  * No processing is required inside this function.
2534  * @ce_hdl: Copy engine handle
2535  * Using an assert, this function makes sure that
2536  * the TX CE has been processed completely.
2537  *
2538  * This is called while dismantling CE structures. No other thread
2539  * should be using these structures while dismantling is occurring,
2540  * therefore no locking is needed.
2541  *
2542  * Return: none
2543  */
2544 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
2545 {
2546 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
2547 	struct CE_ring_state *src_ring = ce_state->src_ring;
2548 	struct hif_softc *sc = ce_state->scn;
2549 	uint32_t sw_index, write_index;
2550 
2551 	if (hif_is_nss_wifi_enabled(sc))
2552 		return;
2553 
2554 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
2555 		hif_debug("Fastpath mode ON, Cleaning up HTT Tx CE");
2556 		sw_index = src_ring->sw_index;
2557 		write_index = src_ring->write_index;
2558 
2559 		/* At this point Tx CE should be clean */
2560 		qdf_assert_always(sw_index == write_index);
2561 	}
2562 }
2563 
2564 /**
2565  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
2566  * @ce_hdl: Handle to CE
2567  *
2568  * These buffers are never allocated on the fly, but
2569  * are allocated only once during HIF start and freed
2570  * only once during HIF stop.
2571  * NOTE:
2572  * The assumption here is there is no in-flight DMA in progress
2573  * currently, so that buffers can be freed up safely.
2574  *
2575  * Return: NONE
2576  */
2577 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
2578 {
2579 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
2580 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
2581 	qdf_nbuf_t nbuf;
2582 	int i;
2583 
2584 	if (!ce_state->scn->fastpath_mode_on)
2585 		return;
2586 
2587 	if (!ce_state->htt_rx_data)
2588 		return;
2589 
2590 	/*
2591 	 * When fastpath mode is on, datapath CEs are kept completely
2592 	 * full: unlike other CEs, no blank entry is left to distinguish
2593 	 * an empty queue from a full queue. So free all the
2594 	 * entries.
2595 	 */
2596 	for (i = 0; i < dst_ring->nentries; i++) {
2597 		nbuf = dst_ring->per_transfer_context[i];
2598 
2599 		/*
2600 		 * The reasons for doing this check are:
2601 		 * 1) Protect against calling cleanup before allocating buffers
2602 		 * 2) In a corner case, fastpath_mode_on may be set, but we
2603 		 *    could have a partially filled ring, because of a memory
2604 		 *    allocation failure in the middle of allocating ring.
2605 		 *    This check accounts for that case, checking
2606 		 *    fastpath_mode_on flag or started flag would not have
2607 		 *    covered that case. This is not in performance path,
2608 		 *    so OK to do this.
2609 		 */
2610 		if (nbuf) {
2611 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
2612 					      QDF_DMA_FROM_DEVICE);
2613 			qdf_nbuf_free(nbuf);
2614 		}
2615 	}
2616 }
2617 
2618 /**
2619  * hif_update_fastpath_recv_bufs_cnt() - Increment fastpath Rx buf counts by 1
2620  * @scn: HIF handle
2621  *
2622  * Datapath Rx CEs are special case, where we reuse all the message buffers.
2623  * Hence we have to post all the entries in the pipe, even, in the beginning
2624  * unlike for other CE pipes where one less than dest_nentries are filled in
2625  * the beginning.
2626  *
2627  * Return: None
2628  */
2629 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
2630 {
2631 	int pipe_num;
2632 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2633 
2634 	if (!scn->fastpath_mode_on)
2635 		return;
2636 
2637 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2638 		struct HIF_CE_pipe_info *pipe_info =
2639 			&hif_state->pipe_info[pipe_num];
2640 		struct CE_state *ce_state =
2641 			scn->ce_id_to_state[pipe_info->pipe_num];
2642 
2643 		if (ce_state->htt_rx_data)
2644 			atomic_inc(&pipe_info->recv_bufs_needed);
2645 	}
2646 }
2647 #else
2648 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
2649 {
2650 }
2651 
2652 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
2653 {
2654 	return false;
2655 }
2656 #endif /* WLAN_FEATURE_FASTPATH */
2657 
2658 void ce_fini(struct CE_handle *copyeng)
2659 {
2660 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2661 	unsigned int CE_id = CE_state->id;
2662 	struct hif_softc *scn = CE_state->scn;
2663 	uint32_t desc_size;
2664 
2665 	bool inited = CE_state->timer_inited;
2666 	CE_state->state = CE_UNUSED;
2667 	scn->ce_id_to_state[CE_id] = NULL;
2668 	/* Set the flag to false first to stop processing in ce_poll_timeout */
2669 	ce_disable_polling(CE_state);
2670 
2671 	qdf_lro_deinit(CE_state->lro_data);
2672 
2673 	if (CE_state->src_ring) {
2674 		/* Cleanup the datapath Tx ring */
2675 		ce_h2t_tx_ce_cleanup(copyeng);
2676 
2677 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
2678 		if (CE_state->src_ring->shadow_base_unaligned)
2679 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
2680 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
2681 			ce_free_desc_ring(scn, CE_state->id,
2682 					  CE_state->src_ring,
2683 					  desc_size);
2684 		ce_srng_cleanup(scn, CE_state, CE_RING_SRC);
2685 		qdf_mem_free(CE_state->src_ring);
2686 	}
2687 	if (CE_state->dest_ring) {
2688 		/* Cleanup the datapath Rx ring */
2689 		ce_t2h_msg_ce_cleanup(copyeng);
2690 
2691 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
2692 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
2693 			ce_free_desc_ring(scn, CE_state->id,
2694 					  CE_state->dest_ring,
2695 					  desc_size);
2696 		ce_srng_cleanup(scn, CE_state, CE_RING_DEST);
2697 		qdf_mem_free(CE_state->dest_ring);
2698 
2699 		/* epping */
2700 		if (inited) {
2701 			qdf_timer_free(&CE_state->poll_timer);
2702 		}
2703 	}
2704 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
2705 		/* Cleanup the datapath Tx ring */
2706 		ce_h2t_tx_ce_cleanup(copyeng);
2707 
2708 		if (CE_state->status_ring->shadow_base_unaligned)
2709 			qdf_mem_free(
2710 				CE_state->status_ring->shadow_base_unaligned);
2711 
2712 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
2713 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
2714 			ce_free_desc_ring(scn, CE_state->id,
2715 					  CE_state->status_ring,
2716 					  desc_size);
2717 		ce_srng_cleanup(scn, CE_state, CE_RING_STATUS);
2718 		qdf_mem_free(CE_state->status_ring);
2719 	}
2720 
2721 	free_mem_ce_debug_history(scn, CE_id);
2722 	reset_ce_debug_history(scn);
2723 	ce_deinit_ce_desc_event_log(scn, CE_id);
2724 
2725 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
2726 	qdf_mem_free(CE_state);
2727 }
2728 
2729 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
2730 {
2731 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2732 
2733 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
2734 		  sizeof(hif_state->msg_callbacks_pending));
2735 	qdf_mem_zero(&hif_state->msg_callbacks_current,
2736 		  sizeof(hif_state->msg_callbacks_current));
2737 }
2738 
2739 /* Send the first nbytes bytes of the buffer */
2740 QDF_STATUS
2741 hif_send_head(struct hif_opaque_softc *hif_ctx,
2742 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
2743 	      qdf_nbuf_t nbuf, unsigned int data_attr)
2744 {
2745 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2746 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2747 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2748 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2749 	int bytes = nbytes, nfrags = 0;
2750 	struct ce_sendlist sendlist;
2751 	int i = 0;
2752 	QDF_STATUS status;
2753 	unsigned int mux_id = 0;
2754 
2755 	if (nbytes > qdf_nbuf_len(nbuf)) {
2756 		hif_err("nbytes: %d nbuf_len: %d", nbytes,
2757 		       (uint32_t)qdf_nbuf_len(nbuf));
2758 		QDF_ASSERT(0);
2759 	}
2760 
2761 	transfer_id =
2762 		(mux_id & MUX_ID_MASK) |
2763 		(transfer_id & TRANSACTION_ID_MASK);
2764 	data_attr &= DESC_DATA_FLAG_MASK;
2765 	/*
2766 	 * The common case involves sending multiple fragments within a
2767 	 * single download (the tx descriptor and the tx frame header).
2768 	 * So, optimize for the case of multiple fragments by not even
2769 	 * checking whether it's necessary to use a sendlist.
2770 	 * The overhead of using a sendlist for a single buffer download
2771 	 * is not a big deal, since it happens rarely (for WMI messages).
2772 	 */
2773 	ce_sendlist_init(&sendlist);
2774 	do {
2775 		qdf_dma_addr_t frag_paddr;
2776 		int frag_bytes;
2777 
2778 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
2779 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
2780 		/*
2781 		 * Clear the packet offset for all but the first CE desc.
2782 		 */
2783 		if (i++ > 0)
2784 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
2785 
2786 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
2787 				    frag_bytes >
2788 				    bytes ? bytes : frag_bytes,
2789 				    qdf_nbuf_get_frag_is_wordstream
2790 				    (nbuf,
2791 				    nfrags) ? 0 :
2792 				    CE_SEND_FLAG_SWAP_DISABLE,
2793 				    data_attr);
2794 		if (status != QDF_STATUS_SUCCESS) {
2795 			hif_err("frag_num: %d larger than limit (status=%d)",
2796 			       nfrags, status);
2797 			return status;
2798 		}
2799 		bytes -= frag_bytes;
2800 		nfrags++;
2801 	} while (bytes > 0);
2802 
2803 	/* Make sure we have resources to handle this request */
2804 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2805 	if (pipe_info->num_sends_allowed < nfrags) {
2806 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2807 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
2808 		return QDF_STATUS_E_RESOURCES;
2809 	}
2810 	pipe_info->num_sends_allowed -= nfrags;
2811 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2812 
2813 	if (qdf_unlikely(!ce_hdl)) {
2814 		hif_err("CE handle is null");
2815 		return A_ERROR;
2816 	}
2817 
2818 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
2819 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
2820 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
2821 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
2822 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
2823 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
2824 
2825 	return status;
2826 }
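
/*
 * Usage sketch (illustrative only, compiled out): a typical download is
 * an nbuf whose mapped fragments (e.g. tx descriptor + frame header)
 * become the sendlist entries built above. The pipe number and transfer
 * id below are hypothetical.
 */
#if 0
static QDF_STATUS example_send(struct hif_opaque_softc *hif_ctx,
			       qdf_nbuf_t nbuf)
{
	/* nbuf fragments are assumed to be DMA-mapped already */
	return hif_send_head(hif_ctx, 4, 1, qdf_nbuf_len(nbuf), nbuf, 0);
}
#endif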
2827 
2828 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
2829 								int force)
2830 {
2831 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2832 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2833 
2834 	if (!force) {
2835 		int resources;
2836 		/*
2837 		 * Decide whether to actually poll for completions, or just
2838 		 * wait for a later chance. If there seem to be plenty of
2839 		 * resources left, then just wait, since checking involves
2840 		 * reading a CE register, which is a relatively expensive
2841 		 * operation.
2842 		 */
2843 		resources = hif_get_free_queue_number(hif_ctx, pipe);
2844 		/*
2845 		 * If at least 50% of the total resources are still available,
2846 		 * don't bother checking again yet.
2847 		 */
2848 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
2849 									 1))
2850 			return;
2851 	}
2852 #ifdef ATH_11AC_TXCOMPACT
2853 	ce_per_engine_servicereap(scn, pipe);
2854 #else
2855 	ce_per_engine_service(scn, pipe);
2856 #endif
2857 }
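
/*
 * For example, with src_nentries = 512 the register read above is
 * skipped while more than 256 sends are still allowed; completions are
 * only polled once at least half of the ring's resources are consumed.
 */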
2858 
2859 #if defined(CE_TASKLET_SCHEDULE_ON_FULL) && defined(CE_TASKLET_DEBUG_ENABLE)
2860 #define CE_RING_FULL_THRESHOLD_TIME 3000000
2861 #define CE_RING_FULL_THRESHOLD 1024
2862 /* This function is called from the htc_send path. If there is no resource
2863  * to send a packet via HTC, check whether interrupts have not been processed
2864  * from that CE for the last 3 seconds. If so, schedule a tasklet to reap
2865  * available entries. Also schedule the tasklet if the queue has reached
2866  * 1024 entries within 3 seconds.
2867  */
2868 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2869 {
2870 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2871 	uint64_t diff_time = qdf_get_log_timestamp_usecs() -
2872 			hif_state->stats.tasklet_sched_entry_ts[pipe];
2873 
2874 	hif_state->stats.ce_ring_full_count[pipe]++;
2875 
2876 	if (diff_time >= CE_RING_FULL_THRESHOLD_TIME ||
2877 	    hif_state->stats.ce_ring_full_count[pipe] >=
2878 	    CE_RING_FULL_THRESHOLD) {
2879 		hif_state->stats.ce_ring_full_count[pipe] = 0;
2880 		hif_state->stats.ce_manual_tasklet_schedule_count[pipe]++;
2881 		hif_state->stats.ce_last_manual_tasklet_schedule_ts[pipe] =
2882 			qdf_get_log_timestamp_usecs();
2883 		ce_dispatch_interrupt(pipe, &hif_state->tasklets[pipe]);
2884 	}
2885 }
2886 #else
2887 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2888 {
2889 }
2890 #endif
2891 
2892 uint16_t
2893 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2894 {
2895 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2896 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2897 	uint16_t rv;
2898 
2899 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2900 	rv = pipe_info->num_sends_allowed;
2901 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2902 	return rv;
2903 }
2904 
2905 /* Called by lower (CE) layer when a send to Target completes. */
2906 static void
2907 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
2908 		     void *transfer_context, qdf_dma_addr_t CE_data,
2909 		     unsigned int nbytes, unsigned int transfer_id,
2910 		     unsigned int sw_index, unsigned int hw_index,
2911 		     unsigned int toeplitz_hash_result)
2912 {
2913 	struct HIF_CE_pipe_info *pipe_info =
2914 		(struct HIF_CE_pipe_info *)ce_context;
2915 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
2916 	struct hif_msg_callbacks *msg_callbacks =
2917 		&pipe_info->pipe_callbacks;
2918 
2919 	do {
2920 		/*
2921 		 * The upper layer callback will be triggered
2922 		 * when the last fragment is completed.
2923 		 */
2924 		if (transfer_context != CE_SENDLIST_ITEM_CTXT)
2925 			msg_callbacks->txCompletionHandler(
2926 				msg_callbacks->Context,
2927 				transfer_context, transfer_id,
2928 				toeplitz_hash_result);
2929 
2930 		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2931 		pipe_info->num_sends_allowed++;
2932 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2933 	} while (ce_completed_send_next(copyeng,
2934 			&ce_context, &transfer_context,
2935 			&CE_data, &nbytes, &transfer_id,
2936 			&sw_idx, &hw_idx,
2937 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
2938 }
2939 
2940 /**
2941  * hif_ce_do_recv(): send message from copy engine to upper layers
2942  * @msg_callbacks: structure containing callback and callback context
2943  * @netbuf: skb containing message
2944  * @nbytes: number of bytes in the message
2945  * @pipe_info: used for the pipe_number info
2946  *
2947  * Checks the packet length, configures the length in the netbuf,
2948  * and calls the upper layer callback.
2949  *
2950  * Return: None
2951  */
2952 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
2953 		qdf_nbuf_t netbuf, int nbytes,
2954 		struct HIF_CE_pipe_info *pipe_info) {
2955 	if (nbytes <= pipe_info->buf_sz) {
2956 		qdf_nbuf_set_pktlen(netbuf, nbytes);
2957 		msg_callbacks->
2958 			rxCompletionHandler(msg_callbacks->Context,
2959 					netbuf, pipe_info->pipe_num);
2960 	} else {
2961 		hif_err("Invalid Rx msg buf: %pK nbytes: %d", netbuf, nbytes);
2962 		qdf_nbuf_free(netbuf);
2963 	}
2964 }
2965 
2966 /* Called by lower (CE) layer when data is received from the Target. */
2967 static void
2968 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
2969 		     void *transfer_context, qdf_dma_addr_t CE_data,
2970 		     unsigned int nbytes, unsigned int transfer_id,
2971 		     unsigned int flags)
2972 {
2973 	struct HIF_CE_pipe_info *pipe_info =
2974 		(struct HIF_CE_pipe_info *)ce_context;
2975 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
2976 	struct CE_state *ce_state = (struct CE_state *) copyeng;
2977 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2978 	struct hif_msg_callbacks *msg_callbacks = &pipe_info->pipe_callbacks;
2979 
2980 	do {
2981 		hif_rtpm_mark_last_busy(HIF_RTPM_ID_CE);
2982 		qdf_nbuf_unmap_single(scn->qdf_dev,
2983 				      (qdf_nbuf_t) transfer_context,
2984 				      QDF_DMA_FROM_DEVICE);
2985 
2986 		atomic_inc(&pipe_info->recv_bufs_needed);
2987 		hif_post_recv_buffers_for_pipe(pipe_info);
2988 		if (scn->target_status == TARGET_STATUS_RESET)
2989 			qdf_nbuf_free(transfer_context);
2990 		else
2991 			hif_ce_do_recv(msg_callbacks, transfer_context,
2992 				nbytes, pipe_info);
2993 
2994 		/* Set up force_break flag if num of receives reaches
2995 		 * MAX_NUM_OF_RECEIVES
2996 		 */
2997 		ce_state->receive_count++;
2998 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
2999 			ce_state->force_break = 1;
3000 			break;
3001 		}
3002 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
3003 					&CE_data, &nbytes, &transfer_id,
3004 					&flags) == QDF_STATUS_SUCCESS);
3005 
3006 }
3007 
3008 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
3009 
3010 void
3011 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
3012 	      struct hif_msg_callbacks *callbacks)
3013 {
3014 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
3015 
3016 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
3017 	spin_lock_init(&pcie_access_log_lock);
3018 #endif
3019 	/* Save callbacks for later installation */
3020 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
3021 		 sizeof(hif_state->msg_callbacks_pending));
3022 
3023 }
3024 
3025 static int hif_completion_thread_startup_by_ceid(struct HIF_CE_state *hif_state,
3026 						 int pipe_num)
3027 {
3028 	struct CE_attr attr;
3029 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3030 	struct hif_msg_callbacks *hif_msg_callbacks =
3031 		&hif_state->msg_callbacks_current;
3032 	struct HIF_CE_pipe_info *pipe_info;
3033 	struct CE_state *ce_state;
3034 
3035 	if (pipe_num >= CE_COUNT_MAX)
3036 		return -EINVAL;
3037 
3038 	pipe_info = &hif_state->pipe_info[pipe_num];
3039 	ce_state = scn->ce_id_to_state[pipe_num];
3040 
3041 	if (!hif_msg_callbacks ||
3042 	    !hif_msg_callbacks->rxCompletionHandler ||
3043 	    !hif_msg_callbacks->txCompletionHandler) {
3044 		hif_err("%s: no completion handler registered", __func__);
3045 		return -EFAULT;
3046 	}
3047 
3048 	attr = hif_state->host_ce_config[pipe_num];
3049 	if (attr.src_nentries) {
3050 		/* pipe used to send to target */
3051 		hif_debug("%s: pipe_num:%d pipe_info:0x%pK\n",
3052 			  __func__, pipe_num, pipe_info);
3053 		ce_send_cb_register(pipe_info->ce_hdl,
3054 				    hif_pci_ce_send_done, pipe_info,
3055 				    attr.flags & CE_ATTR_DISABLE_INTR);
3056 		pipe_info->num_sends_allowed = attr.src_nentries - 1;
3057 	}
3058 	if (attr.dest_nentries) {
3059 		hif_debug("%s: pipe_num:%d pipe_info:0x%pK\n",
3060 			  __func__, pipe_num, pipe_info);
3061 		/* pipe used to receive from target */
3062 		ce_recv_cb_register(pipe_info->ce_hdl,
3063 				    hif_pci_ce_recv_data, pipe_info,
3064 				    attr.flags & CE_ATTR_DISABLE_INTR);
3065 	}
3066 
3067 	if (attr.src_nentries)
3068 		qdf_spinlock_create(&pipe_info->completion_freeq_lock);
3069 
3070 	if (!(ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND))
3071 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
3072 			     sizeof(pipe_info->pipe_callbacks));
3073 
3074 	return 0;
3075 }
3076 
3077 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
3078 {
3079 	struct CE_handle *ce_diag = hif_state->ce_diag;
3080 	int pipe_num, ret;
3081 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3082 
3083 	/* daemonize("hif_compl_thread"); */
3084 
3085 	if (scn->ce_count == 0) {
3086 		hif_err("ce_count is 0");
3087 		return -EINVAL;
3088 	}
3089 
3090 
3091 	A_TARGET_ACCESS_LIKELY(scn);
3092 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3093 		struct HIF_CE_pipe_info *pipe_info;
3094 
3095 		pipe_info = &hif_state->pipe_info[pipe_num];
3096 		if (pipe_info->ce_hdl == ce_diag)
3097 			continue;       /* Handle Diagnostic CE specially */
3098 
3099 		ret = hif_completion_thread_startup_by_ceid(hif_state,
3100 							    pipe_num);
3101 		if (ret < 0)
3102 			return ret;
3103 
3104 	}
3105 
3106 	A_TARGET_ACCESS_UNLIKELY(scn);
3107 	return 0;
3108 }
3109 
3110 /*
3111  * Install pending msg callbacks.
3112  *
3113  * TBDXXX: This hack is needed because upper layers install msg callbacks
3114  * for use with HTC before BMI is done; yet this HIF implementation
3115  * needs to continue to use BMI msg callbacks. Really, upper layers
3116  * should not register HTC callbacks until AFTER BMI phase.
3117  */
3118 static void hif_msg_callbacks_install(struct hif_softc *scn)
3119 {
3120 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3121 
3122 	qdf_mem_copy(&hif_state->msg_callbacks_current,
3123 		 &hif_state->msg_callbacks_pending,
3124 		 sizeof(hif_state->msg_callbacks_pending));
3125 }
3126 
3127 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
3128 							uint8_t *DLPipe)
3129 {
3130 	int ul_is_polled, dl_is_polled;
3131 
3132 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
3133 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
3134 }
3135 
3136 /**
3137  * hif_dump_pipe_debug_count() - Log error count
3138  * @scn: hif_softc pointer.
3139  *
3140  * Output the pipe error counts of each pipe to log file
3141  *
3142  * Return: N/A
3143  */
3144 void hif_dump_pipe_debug_count(struct hif_softc *scn)
3145 {
3146 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3147 	int pipe_num;
3148 
3149 	if (!hif_state) {
3150 		hif_err("hif_state is NULL");
3151 		return;
3152 	}
3153 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3154 		struct HIF_CE_pipe_info *pipe_info;
3155 
3156 		pipe_info = &hif_state->pipe_info[pipe_num];
3157 
3158 		if (pipe_info->nbuf_alloc_err_count > 0 ||
3159 		    pipe_info->nbuf_dma_err_count > 0 ||
3160 		    pipe_info->nbuf_ce_enqueue_err_count)
3161 			hif_err(
3162 				"pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
3163 				pipe_info->pipe_num,
3164 				atomic_read(&pipe_info->recv_bufs_needed),
3165 				pipe_info->nbuf_alloc_err_count,
3166 				pipe_info->nbuf_dma_err_count,
3167 				pipe_info->nbuf_ce_enqueue_err_count);
3168 	}
3169 }
3170 
3171 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
3172 					  void *nbuf, uint32_t *error_cnt,
3173 					  enum hif_ce_event_type failure_type,
3174 					  const char *failure_type_string)
3175 {
3176 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
3177 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
3178 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
3179 	int ce_id = CE_state->id;
3180 	uint32_t error_cnt_tmp;
3181 
3182 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3183 	error_cnt_tmp = ++(*error_cnt);
3184 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3185 	hif_debug("pipe_num: %d, needed: %d, err_cnt: %u, fail_type: %s",
3186 		  pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
3187 		  failure_type_string);
3188 	hif_record_ce_desc_event(scn, ce_id, failure_type,
3189 				 NULL, nbuf, bufs_needed_tmp, 0);
3190 	/* if we fail to allocate the last buffer for an rx pipe,
3191 	 * there is no trigger to refill the ce and we will
3192 	 * eventually crash
3193 	 */
3194 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1 ||
3195 	    (ce_srng_based(scn) &&
3196 	     bufs_needed_tmp == CE_state->dest_ring->nentries - 2))
3197 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
3198 
3199 }
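
/*
 * For example, a legacy dest ring with 512 entries holds at most 511
 * posted buffers, so bufs_needed_tmp == 511 means nothing is posted and
 * only the OOM work can refill the ring; SRNG dest rings post one fewer
 * buffer, hence the nentries - 2 check above.
 */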
3200 
3201 
3202 
3203 
3204 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
3205 {
3206 	struct CE_handle *ce_hdl;
3207 	qdf_size_t buf_sz;
3208 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
3209 	QDF_STATUS status;
3210 	uint32_t bufs_posted = 0;
3211 	unsigned int ce_id;
3212 
3213 	buf_sz = pipe_info->buf_sz;
3214 	if (buf_sz == 0) {
3215 		/* Unused Copy Engine */
3216 		return QDF_STATUS_SUCCESS;
3217 	}
3218 
3219 	ce_hdl = pipe_info->ce_hdl;
3220 	ce_id = ((struct CE_state *)ce_hdl)->id;
3221 
3222 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3223 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
3224 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
3225 		qdf_nbuf_t nbuf;
3226 
3227 		atomic_dec(&pipe_info->recv_bufs_needed);
3228 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3229 
3230 		hif_record_ce_desc_event(scn, ce_id,
3231 					 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL,
3232 					 0, 0);
3233 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
3234 		if (!nbuf) {
3235 			hif_post_recv_buffers_failure(pipe_info, nbuf,
3236 					&pipe_info->nbuf_alloc_err_count,
3237 					 HIF_RX_NBUF_ALLOC_FAILURE,
3238 					"HIF_RX_NBUF_ALLOC_FAILURE");
3239 			return QDF_STATUS_E_NOMEM;
3240 		}
3241 
3242 		hif_record_ce_desc_event(scn, ce_id,
3243 					 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf,
3244 					 0, 0);
3245 		/*
3246 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
3247 		 * CE_data = dma_map_single(dev, data, buf_sz, );
3248 		 * DMA_FROM_DEVICE);
3249 		 */
3250 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
3251 					    QDF_DMA_FROM_DEVICE);
3252 
3253 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
3254 			hif_post_recv_buffers_failure(pipe_info, nbuf,
3255 					&pipe_info->nbuf_dma_err_count,
3256 					 HIF_RX_NBUF_MAP_FAILURE,
3257 					"HIF_RX_NBUF_MAP_FAILURE");
3258 			qdf_nbuf_free(nbuf);
3259 			return status;
3260 		}
3261 
3262 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
3263 		hif_record_ce_desc_event(scn, ce_id,
3264 					 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf,
3265 					 0, 0);
3266 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
3267 					       buf_sz, DMA_FROM_DEVICE);
3268 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
3269 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
3270 			hif_post_recv_buffers_failure(pipe_info, nbuf,
3271 					&pipe_info->nbuf_ce_enqueue_err_count,
3272 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
3273 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
3274 
3275 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
3276 						QDF_DMA_FROM_DEVICE);
3277 			qdf_nbuf_free(nbuf);
3278 			return status;
3279 		}
3280 
3281 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3282 		bufs_posted++;
3283 	}
3284 	pipe_info->nbuf_alloc_err_count =
3285 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
3286 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
3287 	pipe_info->nbuf_dma_err_count =
3288 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
3289 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
3290 	pipe_info->nbuf_ce_enqueue_err_count =
3291 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
3292 	pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
3293 
3294 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3295 
3296 	return QDF_STATUS_SUCCESS;
3297 }
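
/*
 * For example, if nbuf_alloc_err_count was 3 and 5 buffers were then
 * posted successfully, the subtraction above saturates the counter back
 * to 0 rather than letting it go negative.
 */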
3298 
3299 /*
3300  * Try to post all desired receive buffers for all pipes.
3301  * Returns success for a non-fastpath rx copy engine, since
3302  * oom_allocation_work will be scheduled to recover any
3303  * failures; returns an error status if unable to completely
3304  * replenish receive buffers for a fastpath rx copy engine.
3305  */
3306 static QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
3307 {
3308 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3309 	int pipe_num;
3310 	struct CE_state *ce_state = NULL;
3311 	QDF_STATUS qdf_status;
3312 
3313 	A_TARGET_ACCESS_LIKELY(scn);
3314 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3315 		struct HIF_CE_pipe_info *pipe_info;
3316 
3317 		ce_state = scn->ce_id_to_state[pipe_num];
3318 		pipe_info = &hif_state->pipe_info[pipe_num];
3319 
3320 		if (!ce_state)
3321 			continue;
3322 
3323 		/* Do not init dynamic CEs during initial load */
3324 		if (ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND)
3325 			continue;
3326 
3327 		if (hif_is_nss_wifi_enabled(scn) &&
3328 		    ce_state && (ce_state->htt_rx_data))
3329 			continue;
3330 
3331 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
3332 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
3333 			ce_state->htt_rx_data &&
3334 			scn->fastpath_mode_on) {
3335 			A_TARGET_ACCESS_UNLIKELY(scn);
3336 			return qdf_status;
3337 		}
3338 	}
3339 
3340 	A_TARGET_ACCESS_UNLIKELY(scn);
3341 
3342 	return QDF_STATUS_SUCCESS;
3343 }
3344 
3345 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
3346 {
3347 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3348 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3349 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
3350 
3351 	hif_update_fastpath_recv_bufs_cnt(scn);
3352 
3353 	hif_msg_callbacks_install(scn);
3354 
3355 	if (hif_completion_thread_startup(hif_state))
3356 		return QDF_STATUS_E_FAILURE;
3357 
3358 	/* enable buffer cleanup */
3359 	hif_state->started = true;
3360 
3361 	/* Post buffers once to start things off. */
3362 	qdf_status = hif_post_recv_buffers(scn);
3363 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
3364 		/* cleanup is done in hif_ce_disable */
3365 		hif_err("Failed to post buffers");
3366 		return qdf_status;
3367 	}
3368 
3369 	return qdf_status;
3370 }
3371 
3372 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
3373 {
3374 	struct hif_softc *scn;
3375 	struct CE_handle *ce_hdl;
3376 	uint32_t buf_sz;
3377 	struct HIF_CE_state *hif_state;
3378 	qdf_nbuf_t netbuf;
3379 	qdf_dma_addr_t CE_data;
3380 	void *per_CE_context;
3381 
3382 	buf_sz = pipe_info->buf_sz;
3383 	/* Unused Copy Engine */
3384 	if (buf_sz == 0)
3385 		return;
3386 
3387 
3388 	hif_state = pipe_info->HIF_CE_state;
3389 	if (!hif_state->started)
3390 		return;
3391 
3392 	scn = HIF_GET_SOFTC(hif_state);
3393 	ce_hdl = pipe_info->ce_hdl;
3394 
3395 	if (!scn->qdf_dev)
3396 		return;
3397 	while (ce_revoke_recv_next
3398 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
3399 			&CE_data) == QDF_STATUS_SUCCESS) {
3400 		if (netbuf) {
3401 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
3402 					      QDF_DMA_FROM_DEVICE);
3403 			qdf_nbuf_free(netbuf);
3404 		}
3405 	}
3406 }
3407 
3408 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
3409 {
3410 	struct CE_handle *ce_hdl;
3411 	struct HIF_CE_state *hif_state;
3412 	struct hif_softc *scn;
3413 	qdf_nbuf_t netbuf;
3414 	void *per_CE_context;
3415 	qdf_dma_addr_t CE_data;
3416 	unsigned int nbytes;
3417 	unsigned int id;
3418 	uint32_t buf_sz;
3419 	uint32_t toeplitz_hash_result;
3420 
3421 	buf_sz = pipe_info->buf_sz;
3422 	if (buf_sz == 0) {
3423 		/* Unused Copy Engine */
3424 		return;
3425 	}
3426 
3427 	hif_state = pipe_info->HIF_CE_state;
3428 	if (!hif_state->started) {
3429 		return;
3430 	}
3431 
3432 	scn = HIF_GET_SOFTC(hif_state);
3433 
3434 	ce_hdl = pipe_info->ce_hdl;
3435 
3436 	while (ce_cancel_send_next
3437 		       (ce_hdl, &per_CE_context,
3438 		       (void **)&netbuf, &CE_data, &nbytes,
3439 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
3440 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
3441 			/*
3442 			 * Packets enqueued by htt_h2t_ver_req_msg() and
3443 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
3444 			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again;
			 * instead, skip them by checking the endpoint on
			 * which they were queued.
3448 			 */
3449 			if (id == scn->htc_htt_tx_endpoint)
3450 				return;
3451 			/* Indicate the completion to higher
3452 			 * layer to free the buffer
3453 			 */
3454 			if (pipe_info->pipe_callbacks.txCompletionHandler)
3455 				pipe_info->pipe_callbacks.
3456 				    txCompletionHandler(pipe_info->
3457 					    pipe_callbacks.Context,
3458 					    netbuf, id, toeplitz_hash_result);
3459 		}
3460 	}
3461 }
3462 
3463 /*
3464  * Cleanup residual buffers for device shutdown:
3465  *    buffers that were enqueued for receive
3466  *    buffers that were to be sent
3467  * Note: Buffers that had completed but which were
3468  * not yet processed are on a completion queue. They
3469  * are handled when the completion thread shuts down.
3470  */
3471 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
3472 {
3473 	int pipe_num;
3474 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3475 	struct CE_state *ce_state;
3476 
3477 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3478 		struct HIF_CE_pipe_info *pipe_info;
3479 
3480 		ce_state = scn->ce_id_to_state[pipe_num];
3481 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3482 				((ce_state->htt_tx_data) ||
3483 				 (ce_state->htt_rx_data))) {
3484 			continue;
3485 		}
3486 
3487 		pipe_info = &hif_state->pipe_info[pipe_num];
3488 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
3489 		hif_send_buffer_cleanup_on_pipe(pipe_info);
3490 	}
3491 }
3492 
3493 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
3494 {
3495 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3496 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3497 
3498 	hif_buffer_cleanup(hif_state);
3499 }
3500 
3501 static void hif_destroy_oom_work(struct hif_softc *scn)
3502 {
3503 	struct CE_state *ce_state;
3504 	int ce_id;
3505 
3506 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3507 		ce_state = scn->ce_id_to_state[ce_id];
3508 		if (ce_state)
3509 			qdf_destroy_work(scn->qdf_dev,
3510 					 &ce_state->oom_allocation_work);
3511 	}
3512 }
3513 
3514 void hif_ce_stop(struct hif_softc *scn)
3515 {
3516 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3517 	int pipe_num;
3518 
3519 	/*
3520 	 * before cleaning up any memory, ensure irq &
3521 	 * bottom half contexts will not be re-entered
3522 	 */
3523 	hif_disable_isr(&scn->osc);
3524 	hif_destroy_oom_work(scn);
3525 	scn->hif_init_done = false;
3526 
3527 	/*
	 * At this point, asynchronous threads are stopped, the
	 * Target will neither DMA nor interrupt, and Host code may
	 * not initiate anything more.  So we just need to clean
3531 	 * up Host-side state.
3532 	 */
3533 
3534 	if (scn->athdiag_procfs_inited) {
3535 		athdiag_procfs_remove();
3536 		scn->athdiag_procfs_inited = false;
3537 	}
3538 
3539 	hif_buffer_cleanup(hif_state);
3540 
3541 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3542 		struct HIF_CE_pipe_info *pipe_info;
3543 		struct CE_attr attr;
3544 		struct CE_handle *ce_diag = hif_state->ce_diag;
3545 
3546 		pipe_info = &hif_state->pipe_info[pipe_num];
3547 		if (pipe_info->ce_hdl) {
3548 			if (pipe_info->ce_hdl != ce_diag &&
3549 			    hif_state->started) {
3550 				attr = hif_state->host_ce_config[pipe_num];
3551 				if (attr.src_nentries)
3552 					qdf_spinlock_destroy(&pipe_info->
3553 							completion_freeq_lock);
3554 			}
3555 			ce_fini(pipe_info->ce_hdl);
3556 			pipe_info->ce_hdl = NULL;
3557 			pipe_info->buf_sz = 0;
3558 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
3559 		}
3560 	}
3561 
3562 	if (hif_state->sleep_timer_init) {
3563 		qdf_timer_stop(&hif_state->sleep_timer);
3564 		qdf_timer_free(&hif_state->sleep_timer);
3565 		hif_state->sleep_timer_init = false;
3566 	}
3567 
3568 	hif_state->started = false;
3569 }
3570 
3571 static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
3572 				   struct shadow_reg_cfg
3573 				   **target_shadow_reg_cfg_ret,
3574 				   uint32_t *shadow_cfg_sz_ret)
3575 {
3576 	if (target_shadow_reg_cfg_ret)
3577 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
3578 	if (shadow_cfg_sz_ret)
3579 		*shadow_cfg_sz_ret = shadow_cfg_sz;
3580 }
3581 
3582 /**
 * hif_get_target_ce_config() - get copy engine configuration
 * @scn: HIF context
 * @target_ce_config_ret: basic copy engine configuration
 * @target_ce_config_sz_ret: size of the basic configuration in bytes
 * @target_service_to_ce_map_ret: service mapping for the copy engines
 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
 * @target_shadow_reg_cfg_ret: shadow register configuration
 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
 *
 * Provides an accessor to these values outside of this file.
 * Currently these are stored in static pointers to const sections;
 * there are multiple configurations that are selected from at compile time.
3594  * Runtime selection would need to consider mode, target type and bus type.
3595  *
3596  * Return: return by parameter.
3597  */
3598 void hif_get_target_ce_config(struct hif_softc *scn,
3599 		struct CE_pipe_config **target_ce_config_ret,
3600 		uint32_t *target_ce_config_sz_ret,
3601 		struct service_to_pipe **target_service_to_ce_map_ret,
3602 		uint32_t *target_service_to_ce_map_sz_ret,
3603 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
3604 		uint32_t *shadow_cfg_sz_ret)
3605 {
3606 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3607 
3608 	*target_ce_config_ret = hif_state->target_ce_config;
3609 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
3610 
3611 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
3612 				       target_service_to_ce_map_sz_ret);
3613 	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
3614 			       shadow_cfg_sz_ret);
3615 }
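/*
 * Usage sketch for hif_get_target_ce_config() (illustrative only,
 * mirroring hif_wlan_enable() below): the *_sz_ret values are byte
 * counts, so callers divide by the element size to get array lengths:
 *
 *	hif_get_target_ce_config(scn, &ce_cfg, &ce_cfg_sz,
 *				 &svc_map, &svc_map_sz,
 *				 &shadow_cfg, &shadow_cfg_sz);
 *	num_pipes = ce_cfg_sz / sizeof(struct CE_pipe_config);
 */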
3616 
3617 #ifdef CONFIG_SHADOW_V3
3618 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
3619 {
3620 	int i;
3621 
3622 	hif_err("v3: num_config %d", cfg->num_shadow_reg_v3_cfg);
3623 
3624 	for (i = 0; i < cfg->num_shadow_reg_v3_cfg; i++) {
3625 		hif_err("i %d, val %x", i, cfg->shadow_reg_v3_cfg[i].addr);
3626 	}
3627 }
3628 
3629 #elif defined(CONFIG_SHADOW_V2)
3630 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
3631 {
3632 	int i;
3633 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3634 		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
3635 
3636 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
3637 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3638 		     "%s: i %d, val %x", __func__, i,
3639 		     cfg->shadow_reg_v2_cfg[i].addr);
3640 	}
3641 }
3642 
3643 #else
3644 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
3645 {
3646 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3647 		  "%s: CONFIG_SHADOW V2/V3 not defined", __func__);
3648 }
3649 #endif
3650 
3651 #ifdef ADRASTEA_RRI_ON_DDR
3652 /**
3653  * hif_get_src_ring_read_index(): Called to get the SRRI
3654  *
3655  * @scn: hif_softc pointer
3656  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3657  *
3658  * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR-based SRRI
3660  *
3661  * Return: SRRI
3662  */
3663 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
3664 		uint32_t CE_ctrl_addr)
3665 {
3666 	struct CE_attr attr;
3667 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3668 
3669 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3670 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3671 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3672 	} else {
3673 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3674 			return A_TARGET_READ(scn,
3675 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
3676 		else
3677 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
3678 					CE_ctrl_addr);
3679 	}
3680 }
3681 
3682 /**
3683  * hif_get_dst_ring_read_index(): Called to get the DRRI
3684  *
3685  * @scn: hif_softc pointer
3686  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3687  *
3688  * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR-based DRRI
3690  *
3691  * Return: DRRI
3692  */
3693 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
3694 		uint32_t CE_ctrl_addr)
3695 {
3696 	struct CE_attr attr;
3697 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3698 
3699 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3700 
3701 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3702 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3703 	} else {
3704 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3705 			return A_TARGET_READ(scn,
3706 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
3707 		else
3708 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
3709 					CE_ctrl_addr);
3710 	}
3711 }
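/*
 * Both read-index accessors above share the same policy: polled CEs
 * (CE_ATTR_DISABLE_INTR) always use the DDR copy that the hardware
 * maintains, while interrupt-driven CEs read the register directly
 * only when target register access is allowed and otherwise fall
 * back to the DDR copy.
 */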
3712 
3713 /**
3714  * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
3715  * @scn: hif_softc pointer
3716  *
3717  * Return: qdf status
3718  */
3719 static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
3720 {
3721 	qdf_dma_addr_t paddr_rri_on_ddr = 0;
3722 
3723 	scn->vaddr_rri_on_ddr =
3724 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3725 		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
3726 		&paddr_rri_on_ddr);
3727 
3728 	if (!scn->vaddr_rri_on_ddr) {
3729 		hif_err("dmaable page alloc fail");
3730 		return QDF_STATUS_E_NOMEM;
3731 	}
3732 
3733 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
3734 
3735 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
3736 
3737 	return QDF_STATUS_SUCCESS;
3738 }
3739 #endif
3740 
3741 #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
3742 /**
3743  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3744  *
3745  * @scn: hif_softc pointer
3746  *
 * This function allocates non-cached memory on DDR and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI at this location.
3750  *
3751  * Return: None
3752  */
3753 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3754 {
3755 	unsigned int i;
3756 	uint32_t high_paddr, low_paddr;
3757 
3758 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3759 		return;
3760 
3761 	low_paddr  = BITS0_TO_31(scn->paddr_rri_on_ddr);
3762 	high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);
3763 
3764 	hif_debug("using srri and drri from DDR");
3765 
3766 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
3767 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
3768 
3769 	for (i = 0; i < CE_COUNT; i++)
3770 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
3771 }
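/*
 * A worked example of the split above (the address value is assumed
 * for illustration): for paddr_rri_on_ddr == 0x812345000, a 36-bit
 * DDR physical address, BITS0_TO_31() yields low_paddr = 0x12345000
 * and BITS32_TO_35() yields high_paddr = 0x8.
 */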
3772 #else
3773 /**
3774  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3775  *
3776  * @scn: hif_softc pointer
3777  *
3778  * This is a dummy implementation for platforms that don't
3779  * support this functionality.
3780  *
3781  * Return: None
3782  */
3783 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3784 {
3785 }
3786 #endif
3787 
3788 /**
3789  * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
3790  *                                    QMI command
3791  * @scn: hif context
3792  * @cfg: wlan enable config
3793  *
3794  * In case of Genoa, rri_over_ddr memory configuration is passed
3795  * to firmware through QMI configure command.
3796  */
3797 #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
3798 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3799 					   struct pld_wlan_enable_cfg *cfg)
3800 {
3801 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3802 		return;
3803 
3804 	cfg->rri_over_ddr_cfg_valid = true;
3805 	cfg->rri_over_ddr_cfg.base_addr_low =
3806 		 BITS0_TO_31(scn->paddr_rri_on_ddr);
3807 	cfg->rri_over_ddr_cfg.base_addr_high =
3808 		 BITS32_TO_35(scn->paddr_rri_on_ddr);
3809 }
3810 #else
3811 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3812 					   struct pld_wlan_enable_cfg *cfg)
3813 {
3814 }
3815 #endif
3816 
3817 /**
3818  * hif_wlan_enable(): call the platform driver to enable wlan
3819  * @scn: HIF Context
3820  *
3821  * This function passes the con_mode and CE configuration to
3822  * platform driver to enable wlan.
3823  *
3824  * Return: linux error code
3825  */
3826 int hif_wlan_enable(struct hif_softc *scn)
3827 {
3828 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3829 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
3830 	struct pld_wlan_enable_cfg cfg = { 0 };
3831 	enum pld_driver_mode mode;
3832 	uint32_t con_mode = hif_get_conparam(scn);
3833 
3834 	hif_get_target_ce_config(scn,
3835 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
3836 			&cfg.num_ce_tgt_cfg,
3837 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
3838 			&cfg.num_ce_svc_pipe_cfg,
3839 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
3840 			&cfg.num_shadow_reg_cfg);
3841 
3842 	/* translate from structure size to array size */
3843 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
3844 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
3845 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
3846 
3847 	switch (tgt_info->target_type) {
3848 	case TARGET_TYPE_KIWI:
3849 	case TARGET_TYPE_MANGO:
3850 		hif_prepare_hal_shadow_reg_cfg_v3(scn, &cfg);
3851 		break;
3852 	default:
3853 		hif_prepare_hal_shadow_register_cfg(scn,
3854 						    &cfg.shadow_reg_v2_cfg,
3855 						    &cfg.num_shadow_reg_v2_cfg);
3856 		break;
3857 	}
3858 
3859 	hif_print_hal_shadow_register_cfg(&cfg);
3860 
3861 	hif_update_rri_over_ddr_config(scn, &cfg);
3862 
3863 	if (QDF_GLOBAL_FTM_MODE == con_mode)
3864 		mode = PLD_FTM;
3865 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
3866 		mode = PLD_COLDBOOT_CALIBRATION;
3867 	else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode)
3868 		mode = PLD_FTM_COLDBOOT_CALIBRATION;
3869 	else if (QDF_IS_EPPING_ENABLED(con_mode))
3870 		mode = PLD_EPPING;
3871 	else
3872 		mode = PLD_MISSION;
3873 
	if (BYPASS_QMI)
		return 0;

	return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode);
3878 }
3879 
3880 #ifdef WLAN_FEATURE_EPPING
3881 
3882 #define CE_EPPING_USES_IRQ true
3883 
3884 void hif_ce_prepare_epping_config(struct hif_softc *scn,
3885 				  struct HIF_CE_state *hif_state)
3886 {
3887 	if (CE_EPPING_USES_IRQ)
3888 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
3889 	else
3890 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
3891 	hif_state->target_ce_config = target_ce_config_wlan_epping;
3892 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
3893 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
3894 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
3895 	scn->ce_count = EPPING_HOST_CE_COUNT;
3896 }
3897 #endif
3898 
3899 #ifdef QCN7605_SUPPORT
3900 static inline
3901 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3902 			       struct HIF_CE_state *hif_state)
3903 {
3904 	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
3905 	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
3906 	hif_state->target_ce_config_sz =
3907 				 sizeof(target_ce_config_wlan_qcn7605);
3908 	target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
3909 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
3910 	scn->ce_count = QCN7605_CE_COUNT;
3911 }
3912 #else
3913 static inline
3914 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3915 			       struct HIF_CE_state *hif_state)
3916 {
3917 	hif_err("QCN7605 not supported");
3918 }
3919 #endif
3920 
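/*
 * hif_ce_service_init() resolves at compile time which CE service
 * implementation backs this build: SRNG-based targets register
 * ce_service_srng_init(), legacy targets register
 * ce_service_legacy_init(), and builds without CE_SVC_CMN_INIT do
 * nothing here.
 */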
3921 #ifdef CE_SVC_CMN_INIT
3922 #ifdef QCA_WIFI_SUPPORT_SRNG
3923 static inline void hif_ce_service_init(void)
3924 {
3925 	ce_service_srng_init();
3926 }
3927 #else
3928 static inline void hif_ce_service_init(void)
3929 {
3930 	ce_service_legacy_init();
3931 }
3932 #endif
3933 #else
3934 static inline void hif_ce_service_init(void)
3935 {
3936 }
3937 #endif
3938 
3939 
3940 /**
3941  * hif_ce_prepare_config() - load the correct static tables.
3942  * @scn: hif context
3943  *
3944  * Epping uses different static attribute tables than mission mode.
3945  */
3946 void hif_ce_prepare_config(struct hif_softc *scn)
3947 {
3948 	uint32_t mode = hif_get_conparam(scn);
3949 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3950 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
3951 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3952 	int ret;
3953 	int msi_data_count = 0;
3954 	int msi_data_start = 0;
3955 	int msi_irq_start = 0;
3956 
3957 	hif_ce_service_init();
3958 	hif_state->ce_services = ce_services_attach(scn);
3959 
3960 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3961 					  &msi_data_count, &msi_data_start,
3962 					  &msi_irq_start);
3963 
3964 	scn->ce_count = HOST_CE_COUNT;
3965 	scn->int_assignment = &ce_int_context[msi_data_count];
3966 	scn->free_irq_done = false;
3967 	/* if epping is enabled we need to use the epping configuration. */
3968 	if (QDF_IS_EPPING_ENABLED(mode)) {
3969 		hif_ce_prepare_epping_config(scn, hif_state);
3970 		return;
3971 	}
3972 
3973 	switch (tgt_info->target_type) {
3974 	default:
3975 		hif_state->host_ce_config = host_ce_config_wlan;
3976 		hif_state->target_ce_config = target_ce_config_wlan;
3977 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
3978 		break;
3979 	case TARGET_TYPE_QCN7605:
3980 		hif_set_ce_config_qcn7605(scn, hif_state);
3981 		break;
3982 	case TARGET_TYPE_AR900B:
3983 	case TARGET_TYPE_QCA9984:
3984 	case TARGET_TYPE_QCA9888:
3985 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3986 			hif_state->host_ce_config =
3987 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
3988 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3989 			hif_state->host_ce_config =
3990 				host_lowdesc_ce_cfg_wlan_ar900b;
3991 		} else {
3992 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
3993 		}
3994 
3995 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
3996 		hif_state->target_ce_config_sz =
3997 				sizeof(target_ce_config_wlan_ar900b);
3998 
3999 		break;
4000 
4001 	case TARGET_TYPE_AR9888:
4002 	case TARGET_TYPE_AR9888V2:
4003 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
4004 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
4005 		} else {
4006 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
4007 		}
4008 
4009 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
4010 		hif_state->target_ce_config_sz =
4011 					sizeof(target_ce_config_wlan_ar9888);
4012 
4013 		break;
4014 
4015 	case TARGET_TYPE_QCA8074:
4016 	case TARGET_TYPE_QCA8074V2:
4017 	case TARGET_TYPE_QCA6018:
4018 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
4019 			hif_state->host_ce_config =
4020 					host_ce_config_wlan_qca8074_pci;
4021 			hif_state->target_ce_config =
4022 				target_ce_config_wlan_qca8074_pci;
4023 			hif_state->target_ce_config_sz =
4024 				sizeof(target_ce_config_wlan_qca8074_pci);
4025 		} else {
4026 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
4027 			hif_state->target_ce_config =
4028 					target_ce_config_wlan_qca8074;
4029 			hif_state->target_ce_config_sz =
4030 				sizeof(target_ce_config_wlan_qca8074);
4031 		}
4032 		break;
4033 	case TARGET_TYPE_QCA6290:
4034 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
4035 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
4036 		hif_state->target_ce_config_sz =
4037 					sizeof(target_ce_config_wlan_qca6290);
4038 
4039 		scn->ce_count = QCA_6290_CE_COUNT;
4040 		break;
4041 	case TARGET_TYPE_QCN9000:
4042 		hif_state->host_ce_config = host_ce_config_wlan_qcn9000;
4043 		hif_state->target_ce_config = target_ce_config_wlan_qcn9000;
4044 		hif_state->target_ce_config_sz =
4045 					sizeof(target_ce_config_wlan_qcn9000);
4046 		scn->ce_count = QCN_9000_CE_COUNT;
4047 		scn->disable_wake_irq = 1;
4048 		break;
4049 	case TARGET_TYPE_QCN9224:
4050 		hif_set_ce_config_qcn9224(scn, hif_state);
4051 		break;
4052 	case TARGET_TYPE_QCN6122:
4053 		hif_state->host_ce_config = host_ce_config_wlan_qcn6122;
4054 		hif_state->target_ce_config = target_ce_config_wlan_qcn6122;
4055 		hif_state->target_ce_config_sz =
4056 					sizeof(target_ce_config_wlan_qcn6122);
4057 		scn->ce_count = QCN_6122_CE_COUNT;
4058 		scn->disable_wake_irq = 1;
4059 		break;
4060 	case TARGET_TYPE_QCA5018:
4061 		hif_state->host_ce_config = host_ce_config_wlan_qca5018;
4062 		hif_state->target_ce_config = target_ce_config_wlan_qca5018;
4063 		hif_state->target_ce_config_sz =
4064 					sizeof(target_ce_config_wlan_qca5018);
4065 		scn->ce_count = QCA_5018_CE_COUNT;
4066 		break;
4067 	case TARGET_TYPE_QCA9574:
4068 		hif_state->host_ce_config = host_ce_config_wlan_qca9574;
4069 		hif_state->target_ce_config = target_ce_config_wlan_qca9574;
4070 		hif_state->target_ce_config_sz =
4071 					sizeof(target_ce_config_wlan_qca9574);
4072 		break;
4073 	case TARGET_TYPE_QCA6390:
4074 		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
4075 		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
4076 		hif_state->target_ce_config_sz =
4077 					sizeof(target_ce_config_wlan_qca6390);
4078 
4079 		scn->ce_count = QCA_6390_CE_COUNT;
4080 		break;
4081 	case TARGET_TYPE_QCA6490:
4082 		hif_state->host_ce_config = host_ce_config_wlan_qca6490;
4083 		hif_state->target_ce_config = target_ce_config_wlan_qca6490;
4084 		hif_state->target_ce_config_sz =
4085 					sizeof(target_ce_config_wlan_qca6490);
4086 
4087 		scn->ce_count = QCA_6490_CE_COUNT;
4088 		break;
4089 	case TARGET_TYPE_QCA6750:
4090 		hif_state->host_ce_config = host_ce_config_wlan_qca6750;
4091 		hif_state->target_ce_config = target_ce_config_wlan_qca6750;
4092 		hif_state->target_ce_config_sz =
4093 					sizeof(target_ce_config_wlan_qca6750);
4094 
4095 		scn->ce_count = QCA_6750_CE_COUNT;
4096 		break;
4097 	case TARGET_TYPE_KIWI:
4098 	case TARGET_TYPE_MANGO:
4099 		hif_state->host_ce_config = host_ce_config_wlan_kiwi;
4100 		hif_state->target_ce_config = target_ce_config_wlan_kiwi;
4101 		hif_state->target_ce_config_sz =
4102 					sizeof(target_ce_config_wlan_kiwi);
4103 		scn->ce_count = KIWI_CE_COUNT;
4104 		break;
4105 	case TARGET_TYPE_ADRASTEA:
4106 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
4107 			hif_state->host_ce_config =
4108 				host_lowdesc_ce_config_wlan_adrastea_nopktlog;
4109 			hif_state->target_ce_config =
4110 			       target_lowdesc_ce_config_wlan_adrastea_nopktlog;
4111 			hif_state->target_ce_config_sz =
4112 			sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog);
4113 		} else {
4114 			hif_state->host_ce_config =
4115 				host_ce_config_wlan_adrastea;
4116 			hif_state->target_ce_config =
4117 					target_ce_config_wlan_adrastea;
4118 			hif_state->target_ce_config_sz =
4119 					sizeof(target_ce_config_wlan_adrastea);
4120 		}
4121 		break;
4122 
4123 	}
4124 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
4125 }
4126 
4127 /**
4128  * hif_ce_open() - do ce specific allocations
4129  * @hif_sc: pointer to hif context
4130  *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_NOMEM on allocation failure
4132  */
4133 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
4134 {
4135 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
4136 
4137 	qdf_spinlock_create(&hif_state->irq_reg_lock);
4138 	qdf_spinlock_create(&hif_state->keep_awake_lock);
4139 	return QDF_STATUS_SUCCESS;
4140 }
4141 
4142 /**
4143  * hif_ce_close() - do ce specific free
4144  * @hif_sc: pointer to hif context
4145  */
4146 void hif_ce_close(struct hif_softc *hif_sc)
4147 {
4148 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
4149 
4150 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
4151 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
4152 }
4153 
4154 /**
4155  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
4156  * @hif_sc: hif context
4157  *
4158  * uses state variables to support cleaning up when hif_config_ce fails.
4159  */
4160 void hif_unconfig_ce(struct hif_softc *hif_sc)
4161 {
4162 	int pipe_num;
4163 	struct HIF_CE_pipe_info *pipe_info;
4164 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
4165 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
4166 
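	/*
	 * Teardown order matters here: first unregister every CE
	 * interrupt, then stop the tasklet workers, and only then free
	 * the rings with ce_fini(), so no bottom half can run against
	 * a freed copy engine.
	 */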
4167 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
4168 		pipe_info = &hif_state->pipe_info[pipe_num];
4169 		if (pipe_info->ce_hdl) {
4170 			ce_unregister_irq(hif_state, (1 << pipe_num));
4171 		}
4172 	}
4173 	deinit_tasklet_workers(hif_hdl);
4174 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
4175 		pipe_info = &hif_state->pipe_info[pipe_num];
4176 		if (pipe_info->ce_hdl) {
4177 			ce_fini(pipe_info->ce_hdl);
4178 			pipe_info->ce_hdl = NULL;
4179 			pipe_info->buf_sz = 0;
4180 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
4181 		}
4182 	}
4183 	if (hif_sc->athdiag_procfs_inited) {
4184 		athdiag_procfs_remove();
4185 		hif_sc->athdiag_procfs_inited = false;
4186 	}
4187 }
4188 
4189 #ifdef CONFIG_BYPASS_QMI
4190 #ifdef QCN7605_SUPPORT
4191 /**
4192  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
4193  * @scn: pointer to HIF structure
4194  *
4195  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
4196  *
4197  * Return: void
4198  */
4199 static void hif_post_static_buf_to_target(struct hif_softc *scn)
4200 {
4201 	phys_addr_t target_pa;
4202 	struct ce_info *ce_info_ptr;
4203 	uint32_t msi_data_start;
4204 	uint32_t msi_data_count;
4205 	uint32_t msi_irq_start;
4206 	uint32_t i = 0;
4207 	int ret;
4208 
4209 	scn->vaddr_qmi_bypass =
4210 			(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
4211 							     scn->qdf_dev->dev,
4212 							     FW_SHARED_MEM,
4213 							     &target_pa);
4214 	if (!scn->vaddr_qmi_bypass) {
		hif_err("Memory allocation failed, could not post target buf");
4216 		return;
4217 	}
4218 
4219 	scn->paddr_qmi_bypass = target_pa;
4220 
4221 	ce_info_ptr = (struct ce_info *)scn->vaddr_qmi_bypass;
4222 
4223 	if (scn->vaddr_rri_on_ddr) {
4224 		ce_info_ptr->rri_over_ddr_low_paddr  =
4225 			 BITS0_TO_31(scn->paddr_rri_on_ddr);
4226 		ce_info_ptr->rri_over_ddr_high_paddr =
4227 			 BITS32_TO_35(scn->paddr_rri_on_ddr);
4228 	}
4229 
4230 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
4231 					  &msi_data_count, &msi_data_start,
4232 					  &msi_irq_start);
4233 	if (ret) {
4234 		hif_err("Failed to get CE msi config");
4235 		return;
4236 	}
4237 
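	/*
	 * Distribute the CEs round-robin across the available MSI data
	 * vectors; when there are fewer vectors than CEs, several CEs
	 * share one vector.
	 */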
4238 	for (i = 0; i < CE_COUNT_MAX; i++) {
4239 		ce_info_ptr->cfg[i].ce_id = i;
4240 		ce_info_ptr->cfg[i].msi_vector =
4241 			 (i % msi_data_count) + msi_irq_start;
4242 	}
4243 
4244 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
4245 	hif_info("target va %pK target pa %pa", scn->vaddr_qmi_bypass,
4246 		 &target_pa);
4247 }
4248 
4249 /**
4250  * hif_cleanup_static_buf_to_target() -  clean up static buffer to WLAN FW
4251  * @scn: pointer to HIF structure
4252  *
4254  * Return: void
4255  */
4256 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
4257 {
4258 	void *target_va = scn->vaddr_qmi_bypass;
4259 	phys_addr_t target_pa = scn->paddr_qmi_bypass;
4260 
4261 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
4262 				FW_SHARED_MEM, target_va,
4263 				target_pa, 0);
4264 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
4265 }
4266 #else
4267 /**
4268  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
4269  * @scn: pointer to HIF structure
4270  *
4271  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
4272  *
4273  * Return: void
4274  */
4275 static void hif_post_static_buf_to_target(struct hif_softc *scn)
4276 {
4277 	qdf_dma_addr_t target_pa;
4278 
4279 	scn->vaddr_qmi_bypass =
4280 			(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
4281 							     scn->qdf_dev->dev,
4282 							     FW_SHARED_MEM,
4283 							     &target_pa);
4284 	if (!scn->vaddr_qmi_bypass) {
		hif_err("Memory allocation failed, could not post target buf");
4286 		return;
4287 	}
4288 
4289 	scn->paddr_qmi_bypass = target_pa;
4290 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
4291 }
4292 
4293 /**
4294  * hif_cleanup_static_buf_to_target() -  clean up static buffer to WLAN FW
4295  * @scn: pointer to HIF structure
4296  *
4298  * Return: void
4299  */
4300 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
4301 {
4302 	void *target_va = scn->vaddr_qmi_bypass;
4303 	phys_addr_t target_pa = scn->paddr_qmi_bypass;
4304 
4305 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
4306 				FW_SHARED_MEM, target_va,
4307 				target_pa, 0);
	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
4309 }
4310 #endif
4311 
4312 #else
4313 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
4314 {
4315 }
4316 
4317 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
4318 {
4319 }
4320 #endif
4321 
4322 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
4323 				bool wait_for_it)
4324 {
4325 	/* todo */
4326 	return 0;
4327 }
4328 
4329 int hif_config_ce_by_id(struct hif_softc *scn, int pipe_num)
4330 {
4331 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4332 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
4333 	struct HIF_CE_pipe_info *pipe_info;
4334 	struct CE_state *ce_state = NULL;
4335 	struct CE_attr *attr;
4336 	int rv = 0;
4337 
4338 	if (pipe_num >= CE_COUNT_MAX)
4339 		return -EINVAL;
4340 
4341 	pipe_info = &hif_state->pipe_info[pipe_num];
4342 	pipe_info->pipe_num = pipe_num;
4343 	pipe_info->HIF_CE_state = hif_state;
4344 	attr = &hif_state->host_ce_config[pipe_num];
4345 	ce_state = scn->ce_id_to_state[pipe_num];
4346 
4347 	if (ce_state) {
	/* Do not reinitialize the CE if it has already been initialized */
4349 		rv = QDF_STATUS_E_BUSY;
4350 		goto err;
4351 	}
4352 
4353 	pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
4354 	ce_state = scn->ce_id_to_state[pipe_num];
4355 	if (!ce_state) {
4356 		A_TARGET_ACCESS_UNLIKELY(scn);
4357 		rv = QDF_STATUS_E_FAILURE;
4358 		goto err;
4359 	}
4360 	qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
4361 	QDF_ASSERT(pipe_info->ce_hdl);
4362 	if (!pipe_info->ce_hdl) {
4363 		rv = QDF_STATUS_E_FAILURE;
4364 		A_TARGET_ACCESS_UNLIKELY(scn);
4365 		goto err;
4366 	}
4367 
4368 	ce_state->lro_data = qdf_lro_init();
4369 
4370 	if (attr->flags & CE_ATTR_DIAG) {
4371 		/* Reserve the ultimate CE for
4372 		 * Diagnostic Window support
4373 		 */
4374 		hif_state->ce_diag = pipe_info->ce_hdl;
4375 		goto skip;
4376 	}
4377 
4378 	if (hif_is_nss_wifi_enabled(scn) && ce_state &&
4379 	    (ce_state->htt_rx_data)) {
4380 		goto skip;
4381 	}
4382 
4383 	pipe_info->buf_sz = (qdf_size_t)(attr->src_sz_max);
4384 	if (attr->dest_nentries > 0) {
4385 		atomic_set(&pipe_info->recv_bufs_needed,
4386 			   init_buffer_count(attr->dest_nentries - 1));
4387 		/*SRNG based CE has one entry less */
4388 		if (ce_srng_based(scn))
4389 			atomic_dec(&pipe_info->recv_bufs_needed);
4390 	} else {
4391 		atomic_set(&pipe_info->recv_bufs_needed, 0);
4392 	}
4393 	ce_tasklet_init(hif_state, (1 << pipe_num));
4394 	ce_register_irq(hif_state, (1 << pipe_num));
4395 
4396 	init_tasklet_worker_by_ceid(hif_hdl, pipe_num);
4397 skip:
4398 	return 0;
4399 err:
4400 	return rv;
4401 }
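/*
 * hif_config_ce_by_id() serves both hif_config_ce() below, for the
 * statically configured pipes, and later on-demand setup such as the
 * pktlog pipe in hif_config_ce_pktlog(); calling it again for an
 * already-initialized pipe returns QDF_STATUS_E_BUSY.
 */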
4402 
4403 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
4404 static inline void hif_gen_ce_id_history_idx_mapping(struct hif_softc *scn)
4405 {
4406 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
4407 	uint8_t ce_id, hist_idx = 0;
4408 
4409 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
4410 		if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE & (1 << ce_id))
4411 			ce_hist->ce_id_hist_map[ce_id] = hist_idx++;
4412 		else
4413 			ce_hist->ce_id_hist_map[ce_id] = -1;
4414 	}
4415 }
4416 #else
4417 static inline void hif_gen_ce_id_history_idx_mapping(struct hif_softc *scn)
4418 {
4419 }
4420 #endif
4421 
4422 /**
4423  * hif_config_ce() - configure copy engines
4424  * @scn: hif context
4425  *
4426  * Prepares fw, copy engine hardware and host sw according
4427  * to the attributes selected by hif_ce_prepare_config.
4428  *
 * Also calls athdiag_procfs_init().
 *
 * Return: 0 for success, nonzero for failure.
4432  */
4433 int hif_config_ce(struct hif_softc *scn)
4434 {
4435 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4436 	struct HIF_CE_pipe_info *pipe_info;
4437 	int pipe_num;
4438 
4439 #ifdef ADRASTEA_SHADOW_REGISTERS
4440 	int i;
4441 #endif
4442 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
4443 
4444 	scn->notice_send = true;
4445 	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
4446 
4447 	hif_post_static_buf_to_target(scn);
4448 
4449 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
4450 
4451 	hif_config_rri_on_ddr(scn);
4452 
4453 	if (ce_srng_based(scn))
4454 		scn->bus_ops.hif_target_sleep_state_adjust =
4455 			&hif_srng_sleep_state_adjust;
4456 
	/* Initialise the CE debug history sysfs interface inputs (ce_id and
	 * index) and disable data storing.
4459 	 */
4460 	reset_ce_debug_history(scn);
4461 	hif_gen_ce_id_history_idx_mapping(scn);
4462 
4463 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
4464 		struct CE_attr *attr;
4465 
4466 		pipe_info = &hif_state->pipe_info[pipe_num];
4467 		attr = &hif_state->host_ce_config[pipe_num];
4468 
4469 		if (attr->flags & CE_ATTR_INIT_ON_DEMAND)
4470 			continue;
4471 
4472 		if (hif_config_ce_by_id(scn, pipe_num))
4473 			goto err;
4474 	}
4475 
4476 	if (athdiag_procfs_init(scn) != 0) {
4477 		A_TARGET_ACCESS_UNLIKELY(scn);
4478 		goto err;
4479 	}
4480 	scn->athdiag_procfs_inited = true;
4481 
4482 	hif_debug("ce_init done");
4483 	hif_debug("%s: X, ret = %d", __func__, rv);
4484 
4485 #ifdef ADRASTEA_SHADOW_REGISTERS
4486 	hif_debug("Using Shadow Registers instead of CE Registers");
4487 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
4488 		hif_debug("Shadow Register%d is mapped to address %x",
4489 			  i,
4490 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
4491 	}
4492 #endif
4493 
4494 	return rv != QDF_STATUS_SUCCESS;
4495 err:
4496 	/* Failure, so clean up */
4497 	hif_unconfig_ce(scn);
4498 	hif_info("X, ret = %d", rv);
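	/* always evaluates to true (1): report the failure as nonzero */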
4499 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
4500 }
4501 
4502 /**
 * hif_config_ce_pktlog() - configure the copy engine for the pktlog pipe
 * @hif_hdl: hif opaque context
 *
 * Initializes the pktlog copy engine on demand, hooks up its
 * interrupt and completion handling, and posts receive buffers
 * for it.
 *
 * Return: 0 for success, nonzero for failure.
4512  */
4513 int hif_config_ce_pktlog(struct hif_opaque_softc *hif_hdl)
4514 {
4515 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
4516 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4517 	int pipe_num;
4518 	QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
4519 	struct HIF_CE_pipe_info *pipe_info;
4520 
4521 	if (!scn)
4522 		goto err;
4523 
4524 	if (scn->pktlog_init)
4525 		return QDF_STATUS_SUCCESS;
4526 
4527 	pipe_num =  hif_get_pktlog_ce_num(scn);
4528 	if (pipe_num < 0) {
4529 		qdf_status = QDF_STATUS_E_FAILURE;
4530 		goto err;
4531 	}
4532 
4533 	pipe_info = &hif_state->pipe_info[pipe_num];
4534 
4535 	qdf_status = hif_config_ce_by_id(scn, pipe_num);
	/* CE already initialized; do not try to reinitialize it again */
4537 	if (qdf_status == QDF_STATUS_E_BUSY)
4538 		return QDF_STATUS_SUCCESS;
4539 
4540 	qdf_status = hif_config_irq_by_ceid(scn, pipe_num);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
4542 		goto err;
4543 
4544 	qdf_status = hif_completion_thread_startup_by_ceid(hif_state, pipe_num);
4545 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
		hif_err("%s: failed to start hif thread", __func__);
4547 		goto err;
4548 	}
4549 
4550 	/* Post buffers for pktlog copy engine. */
4551 	qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
4552 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
4553 		/* cleanup is done in hif_ce_disable */
		hif_err("%s: failed to post buffers", __func__);
4555 		return qdf_status;
4556 	}
4557 	scn->pktlog_init = true;
4558 	return qdf_status != QDF_STATUS_SUCCESS;
4559 
4560 err:
4561 	hif_debug("%s: X, ret = %d", __func__, qdf_status);
4562 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
4563 }
4564 
4565 #ifdef IPA_OFFLOAD
4566 /**
4567  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
 * @scn: bus context
 * @ce_sr: copyengine source ring base information (shared memory)
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * When the IPA micro controller data path offload feature is enabled,
 * HIF releases the copy engine resource information to the IPA uC,
 * and the IPA uC accesses the hardware resources with it.
4576  *
4577  * Return: None
4578  */
4579 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
4580 			     qdf_shared_mem_t **ce_sr,
4581 			     uint32_t *ce_sr_ring_size,
4582 			     qdf_dma_addr_t *ce_reg_paddr)
4583 {
4584 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4585 	struct HIF_CE_pipe_info *pipe_info =
4586 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
4587 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
4588 
4589 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
4590 			    ce_reg_paddr);
4591 }
4592 #endif /* IPA_OFFLOAD */
4593 
4594 
4595 #ifdef ADRASTEA_SHADOW_REGISTERS
4596 
4597 /*
4598  * Current shadow register config
4599  *
4600  * -----------------------------------------------------------
4601  * Shadow Register      |     CE   |    src/dst write index
4602  * -----------------------------------------------------------
4603  *         0            |     0    |           src
4604  *         1     No Config - Doesn't point to anything
4605  *         2     No Config - Doesn't point to anything
4606  *         3            |     3    |           src
4607  *         4            |     4    |           src
4608  *         5            |     5    |           src
4609  *         6     No Config - Doesn't point to anything
4610  *         7            |     7    |           src
4611  *         8     No Config - Doesn't point to anything
4612  *         9     No Config - Doesn't point to anything
4613  *         10    No Config - Doesn't point to anything
4614  *         11    No Config - Doesn't point to anything
4615  * -----------------------------------------------------------
4616  *         12    No Config - Doesn't point to anything
4617  *         13           |     1    |           dst
4618  *         14           |     2    |           dst
4619  *         15    No Config - Doesn't point to anything
4620  *         16    No Config - Doesn't point to anything
4621  *         17    No Config - Doesn't point to anything
4622  *         18    No Config - Doesn't point to anything
4623  *         19           |     7    |           dst
4624  *         20           |     8    |           dst
4625  *         21    No Config - Doesn't point to anything
4626  *         22    No Config - Doesn't point to anything
4627  *         23    No Config - Doesn't point to anything
4628  * -----------------------------------------------------------
4629  *
4630  *
4631  * ToDo - Move shadow register config to following in the future
4632  * This helps free up a block of shadow registers towards the end.
4633  * Can be used for other purposes
4634  *
4635  * -----------------------------------------------------------
4636  * Shadow Register      |     CE   |    src/dst write index
4637  * -----------------------------------------------------------
4638  *      0            |     0    |           src
4639  *      1            |     3    |           src
4640  *      2            |     4    |           src
4641  *      3            |     5    |           src
4642  *      4            |     7    |           src
4643  * -----------------------------------------------------------
4644  *      5            |     1    |           dst
4645  *      6            |     2    |           dst
4646  *      7            |     7    |           dst
4647  *      8            |     8    |           dst
4648  * -----------------------------------------------------------
4649  *      9     No Config - Doesn't point to anything
4650  *      12    No Config - Doesn't point to anything
4651  *      13    No Config - Doesn't point to anything
4652  *      14    No Config - Doesn't point to anything
4653  *      15    No Config - Doesn't point to anything
4654  *      16    No Config - Doesn't point to anything
4655  *      17    No Config - Doesn't point to anything
4656  *      18    No Config - Doesn't point to anything
4657  *      19    No Config - Doesn't point to anything
4658  *      20    No Config - Doesn't point to anything
4659  *      21    No Config - Doesn't point to anything
4660  *      22    No Config - Doesn't point to anything
4661  *      23    No Config - Doesn't point to anything
4662  * -----------------------------------------------------------
4663 */
4664 #ifndef QCN7605_SUPPORT
4665 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4666 {
4667 	u32 addr = 0;
4668 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4669 
4670 	switch (ce) {
4671 	case 0:
4672 		addr = SHADOW_VALUE0;
4673 		break;
4674 	case 3:
4675 		addr = SHADOW_VALUE3;
4676 		break;
4677 	case 4:
4678 		addr = SHADOW_VALUE4;
4679 		break;
4680 	case 5:
4681 		addr = SHADOW_VALUE5;
4682 		break;
4683 	case 7:
4684 		addr = SHADOW_VALUE7;
4685 		break;
4686 	default:
4687 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4688 		QDF_ASSERT(0);
4689 	}
4690 	return addr;
4691 
4692 }
4693 
4694 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4695 {
4696 	u32 addr = 0;
4697 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4698 
4699 	switch (ce) {
4700 	case 1:
4701 		addr = SHADOW_VALUE13;
4702 		break;
4703 	case 2:
4704 		addr = SHADOW_VALUE14;
4705 		break;
4706 	case 5:
4707 		addr = SHADOW_VALUE17;
4708 		break;
4709 	case 7:
4710 		addr = SHADOW_VALUE19;
4711 		break;
4712 	case 8:
4713 		addr = SHADOW_VALUE20;
4714 		break;
4715 	case 9:
4716 		addr = SHADOW_VALUE21;
4717 		break;
4718 	case 10:
4719 		addr = SHADOW_VALUE22;
4720 		break;
4721 	case 11:
4722 		addr = SHADOW_VALUE23;
4723 		break;
4724 	default:
4725 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4726 		QDF_ASSERT(0);
4727 	}
4728 
4729 	return addr;
4730 
4731 }
4732 #else
4733 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4734 {
4735 	u32 addr = 0;
4736 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4737 
4738 	switch (ce) {
4739 	case 0:
4740 		addr = SHADOW_VALUE0;
4741 		break;
4742 	case 3:
4743 		addr = SHADOW_VALUE3;
4744 		break;
4745 	case 4:
4746 		addr = SHADOW_VALUE4;
4747 		break;
4748 	case 5:
4749 		addr = SHADOW_VALUE5;
4750 		break;
4751 	default:
4752 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4753 		QDF_ASSERT(0);
4754 	}
4755 	return addr;
4756 }
4757 
4758 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4759 {
4760 	u32 addr = 0;
4761 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4762 
4763 	switch (ce) {
4764 	case 1:
4765 		addr = SHADOW_VALUE13;
4766 		break;
4767 	case 2:
4768 		addr = SHADOW_VALUE14;
4769 		break;
4770 	case 3:
4771 		addr = SHADOW_VALUE15;
4772 		break;
4773 	case 5:
4774 		addr = SHADOW_VALUE17;
4775 		break;
4776 	case 7:
4777 		addr = SHADOW_VALUE19;
4778 		break;
4779 	case 8:
4780 		addr = SHADOW_VALUE20;
4781 		break;
4782 	case 9:
4783 		addr = SHADOW_VALUE21;
4784 		break;
4785 	case 10:
4786 		addr = SHADOW_VALUE22;
4787 		break;
4788 	case 11:
4789 		addr = SHADOW_VALUE23;
4790 		break;
4791 	default:
4792 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4793 		QDF_ASSERT(0);
4794 	}
4795 
4796 	return addr;
4797 }
4798 #endif
4799 #endif
4800 
4801 #if defined(FEATURE_LRO)
4802 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
4803 {
4804 	struct CE_state *ce_state;
4805 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
4806 
4807 	ce_state = scn->ce_id_to_state[ctx_id];
4808 
4809 	return ce_state->lro_data;
4810 }
4811 #endif
4812 
4813 /**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif opaque handle
4817  * @svc_id: Service ID for which the mapping is needed.
4818  * @ul_pipe: address of the container in which ul pipe is returned.
4819  * @dl_pipe: address of the container in which dl pipe is returned.
4820  * @ul_is_polled: address of the container in which a bool
4821  *			indicating if the UL CE for this service
4822  *			is polled is returned.
4823  * @dl_is_polled: address of the container in which a bool
4824  *			indicating if the DL CE for this service
4825  *			is polled is returned.
4826  *
4827  * Return: Indicates whether the service has been found in the table.
4828  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
 *         Debug logs are emitted if either leg has not been updated
 *         because it missed the entry in the table (but this is not an error).
4831  */
4832 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
4833 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
4834 			int *dl_is_polled)
4835 {
4836 	int status = -EINVAL;
4837 	unsigned int i;
4838 	struct service_to_pipe element;
4839 	struct service_to_pipe *tgt_svc_map_to_use;
4840 	uint32_t sz_tgt_svc_map_to_use;
4841 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
4842 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4843 	bool dl_updated = false;
4844 	bool ul_updated = false;
4845 
4846 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
4847 				       &sz_tgt_svc_map_to_use);
4848 
4849 	*dl_is_polled = 0;  /* polling for received messages not supported */
4850 
4851 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
4852 
4853 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
4854 		if (element.service_id == svc_id) {
4855 			if (element.pipedir == PIPEDIR_OUT) {
4856 				*ul_pipe = element.pipenum;
4857 				*ul_is_polled =
4858 					(hif_state->host_ce_config[*ul_pipe].flags &
4859 					 CE_ATTR_DISABLE_INTR) != 0;
4860 				ul_updated = true;
4861 			} else if (element.pipedir == PIPEDIR_IN) {
4862 				*dl_pipe = element.pipenum;
4863 				dl_updated = true;
4864 			}
4865 			status = 0;
4866 		}
4867 	}
	if (!ul_updated)
		hif_debug("ul pipe is NOT updated for service %d", svc_id);
	if (!dl_updated)
		hif_debug("dl pipe is NOT updated for service %d", svc_id);
4872 
4873 	return status;
4874 }
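/*
 * Example use of hif_map_service_to_pipe() (illustrative only; the
 * actual pipe numbers come from the selected service table):
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (!hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
 *				     &ul_pipe, &dl_pipe,
 *				     &ul_polled, &dl_polled))
 *		... ul_pipe/dl_pipe now hold the CE ids ...
 */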
4875 
4876 #ifdef SHADOW_REG_DEBUG
4877 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
4878 		uint32_t CE_ctrl_addr)
4879 {
4880 	uint32_t read_from_hw, srri_from_ddr = 0;
4881 
4882 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
4883 
4884 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
4885 
4886 	if (read_from_hw != srri_from_ddr) {
4887 		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
4888 		       srri_from_ddr, read_from_hw,
4889 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
4890 		QDF_ASSERT(0);
4891 	}
4892 	return srri_from_ddr;
4893 }
4894 
4895 
4896 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
4897 		uint32_t CE_ctrl_addr)
4898 {
4899 	uint32_t read_from_hw, drri_from_ddr = 0;
4900 
4901 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
4902 
4903 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
4904 
4905 	if (read_from_hw != drri_from_ddr) {
4906 		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
4907 		       drri_from_ddr, read_from_hw,
4908 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
4909 		QDF_ASSERT(0);
4910 	}
4911 	return drri_from_ddr;
4912 }
4913 
4914 #endif
4915 
4916 /**
4917  * hif_dump_ce_registers() - dump ce registers
4918  * @scn: hif_opaque_softc pointer.
4919  *
4920  * Output the copy engine registers
4921  *
4922  * Return: 0 for success or error code
4923  */
4924 int hif_dump_ce_registers(struct hif_softc *scn)
4925 {
4926 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
4927 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
4928 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
4929 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
4930 	uint16_t i;
4931 	QDF_STATUS status;
4932 
4933 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
4934 		if (!scn->ce_id_to_state[i]) {
4935 			hif_debug("CE%d not used", i);
4936 			continue;
4937 		}
4938 
4939 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
4940 					   (uint8_t *) &ce_reg_values[0],
4941 					   ce_reg_word_size * sizeof(uint32_t));
4942 
4943 		if (status != QDF_STATUS_SUCCESS) {
4944 			hif_err("Dumping CE register failed!");
4945 			return -EACCES;
4946 		}
4947 		hif_debug("CE%d=>", i);
4948 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
4949 				   (uint8_t *) &ce_reg_values[0],
4950 				   ce_reg_word_size * sizeof(uint32_t));
4951 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
4952 				+ SR_WR_INDEX_ADDRESS),
4953 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
4954 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
4955 				+ CURRENT_SRRI_ADDRESS),
4956 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
4957 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
4958 				+ DST_WR_INDEX_ADDRESS),
4959 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
4960 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
4961 				+ CURRENT_DRRI_ADDRESS),
4962 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
4963 		qdf_print("---");
4964 	}
4965 	return 0;
4966 }
4967 qdf_export_symbol(hif_dump_ce_registers);
4968 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
4969 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
4970 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
4971 {
4972 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4973 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
4974 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
4975 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
4976 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
4977 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
4978 	struct CE_ring_state *src_ring = ce_state->src_ring;
4979 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
4980 
4981 	if (src_ring) {
4982 		hif_info->ul_pipe.nentries = src_ring->nentries;
4983 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
4984 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
4985 		hif_info->ul_pipe.write_index = src_ring->write_index;
4986 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
4987 		hif_info->ul_pipe.base_addr_CE_space =
4988 			src_ring->base_addr_CE_space;
4989 		hif_info->ul_pipe.base_addr_owner_space =
4990 			src_ring->base_addr_owner_space;
4991 	}
4992 
4993 
4994 	if (dest_ring) {
4995 		hif_info->dl_pipe.nentries = dest_ring->nentries;
4996 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
4997 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
4998 		hif_info->dl_pipe.write_index = dest_ring->write_index;
4999 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
5000 		hif_info->dl_pipe.base_addr_CE_space =
5001 			dest_ring->base_addr_CE_space;
5002 		hif_info->dl_pipe.base_addr_owner_space =
5003 			dest_ring->base_addr_owner_space;
5004 	}
5005 
5006 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
5007 	hif_info->ctrl_addr = ce_state->ctrl_addr;
5008 
5009 	return hif_info;
5010 }
5011 qdf_export_symbol(hif_get_addl_pipe_info);
5012 
5013 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
5014 {
5015 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
5016 
5017 	scn->nss_wifi_ol_mode = mode;
5018 	return 0;
5019 }
5020 qdf_export_symbol(hif_set_nss_wifiol_mode);
5021 #endif
5022 
5023 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
5024 {
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
5027 }
5028 
5029 
/* disable interrupts (only applicable for legacy copy engine currently) */
5031 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
5032 {
5033 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
5034 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
5035 	uint32_t ctrl_addr = CE_state->ctrl_addr;
5036 
5037 	Q_TARGET_ACCESS_BEGIN(scn);
5038 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
5039 	Q_TARGET_ACCESS_END(scn);
5040 }
5041 qdf_export_symbol(hif_disable_interrupt);
5042 
5043 /**
5044  * hif_fw_event_handler() - hif fw event handler
5045  * @hif_state: pointer to hif ce state structure
5046  *
5047  * Process fw events and raise HTC callback to process fw events.
5048  *
5049  * Return: none
5050  */
5051 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
5052 {
5053 	struct hif_msg_callbacks *msg_callbacks =
5054 		&hif_state->msg_callbacks_current;
5055 
5056 	if (!msg_callbacks->fwEventHandler)
5057 		return;
5058 
5059 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
5060 			QDF_STATUS_E_FAILURE);
5061 }
5062 
5063 #ifndef QCA_WIFI_3_0
5064 /**
5065  * hif_fw_interrupt_handler() - FW interrupt handler
5066  * @irq: irq number
5067  * @arg: the user pointer
5068  *
5069  * Called from the PCI interrupt handler when a
 * Called from the PCI interrupt handler when the firmware
 * raises an interrupt to the Host.
5072  * only registered for legacy ce devices
5073  *
5074  * Return: status of handled irq
5075  */
5076 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
5077 {
5078 	struct hif_softc *scn = arg;
5079 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5080 	uint32_t fw_indicator_address, fw_indicator;
5081 
5082 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
5083 		return ATH_ISR_NOSCHED;
5084 
5085 	fw_indicator_address = hif_state->fw_indicator_address;
5086 	/* For sudden unplug this will return ~0 */
5087 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
5088 
5089 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
5090 		/* ACK: clear Target-side pending event */
5091 		A_TARGET_WRITE(scn, fw_indicator_address,
5092 			       fw_indicator & ~FW_IND_EVENT_PENDING);
5093 		if (Q_TARGET_ACCESS_END(scn) < 0)
5094 			return ATH_ISR_SCHED;
5095 
5096 		if (hif_state->started) {
5097 			hif_fw_event_handler(hif_state);
5098 		} else {
5099 			/*
5100 			 * Probable Target failure before we're prepared
5101 			 * to handle it.  Generally unexpected.
5102 			 * fw_indicator used as bitmap, and defined as below:
5103 			 *     FW_IND_EVENT_PENDING    0x1
5104 			 *     FW_IND_INITIALIZED      0x2
5105 			 *     FW_IND_NEEDRECOVER      0x4
5106 			 */
5107 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
5108 				("%s: Early firmware event indicated 0x%x\n",
5109 				 __func__, fw_indicator));
5110 		}
5111 	} else {
5112 		if (Q_TARGET_ACCESS_END(scn) < 0)
5113 			return ATH_ISR_SCHED;
5114 	}
5115 
5116 	return ATH_ISR_SCHED;
5117 }
5118 #else
5119 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
5120 {
5121 	return ATH_ISR_SCHED;
5122 }
#endif /* #ifndef QCA_WIFI_3_0 */
5124 
5125 
5126 /**
5127  * hif_wlan_disable(): call the platform driver to disable wlan
5128  * @scn: HIF Context
5129  *
5130  * This function passes the con_mode to platform driver to disable
5131  * wlan.
5132  *
5133  * Return: void
5134  */
5135 void hif_wlan_disable(struct hif_softc *scn)
5136 {
5137 	enum pld_driver_mode mode;
5138 	uint32_t con_mode = hif_get_conparam(scn);
5139 
5140 	if (scn->target_status == TARGET_STATUS_RESET)
5141 		return;
5142 
5143 	if (QDF_GLOBAL_FTM_MODE == con_mode)
5144 		mode = PLD_FTM;
5145 	else if (QDF_IS_EPPING_ENABLED(con_mode))
5146 		mode = PLD_EPPING;
5147 	else
5148 		mode = PLD_MISSION;
5149 
5150 	pld_wlan_disable(scn->qdf_dev->dev, mode);
5151 }
5152 
5153 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
5154 {
5155 	int status;
5156 	uint8_t ul_pipe, dl_pipe;
5157 	int ul_is_polled, dl_is_polled;
5158 
5159 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
5160 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
5161 					 HTC_CTRL_RSVD_SVC,
5162 					 &ul_pipe, &dl_pipe,
5163 					 &ul_is_polled, &dl_is_polled);
5164 	if (status) {
5165 		hif_err("Failed to map pipe: %d", status);
5166 		return status;
5167 	}
5168 
5169 	*ce_id = dl_pipe;
5170 
5171 	return 0;
5172 }
5173 
5174 int hif_get_fw_diag_ce_id(struct hif_softc *scn, uint8_t *ce_id)
5175 {
5176 	int status;
5177 	uint8_t ul_pipe, dl_pipe;
5178 	int ul_is_polled, dl_is_polled;
5179 
5180 	/* DL pipe for WMI_CONTROL_DIAG_SVC should map to the FW DIAG CE_ID */
5181 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
5182 					 WMI_CONTROL_DIAG_SVC,
5183 					 &ul_pipe, &dl_pipe,
5184 					 &ul_is_polled, &dl_is_polled);
5185 	if (status) {
5186 		hif_err("Failed to map pipe: %d", status);
5187 		return status;
5188 	}
5189 
5190 	*ce_id = dl_pipe;
5191 
5192 	return 0;
5193 }
5194 
5195 #ifdef HIF_CE_LOG_INFO
5196 /**
5197  * ce_get_index_info(): Get CE index info
5198  * @scn: HIF Context
5199  * @ce_state: CE opaque handle
5200  * @info: CE info
5201  *
5202  * Return: 0 for success and non zero for failure
5203  */
5204 static
5205 int ce_get_index_info(struct hif_softc *scn, void *ce_state,
5206 		      struct ce_index *info)
5207 {
5208 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5209 
5210 	return hif_state->ce_services->ce_get_index_info(scn, ce_state, info);
5211 }
5212 
5213 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
5214 		     unsigned int *offset)
5215 {
5216 	struct hang_event_info info = {0};
5217 	static uint32_t tracked_ce = BIT(CE_ID_1) | BIT(CE_ID_2) |
5218 		BIT(CE_ID_3) | BIT(CE_ID_4) | BIT(CE_ID_9) | BIT(CE_ID_10);
5219 	uint8_t curr_index = 0;
5220 	uint8_t i;
5221 	uint16_t size;
5222 
5223 	info.active_tasklet_count = qdf_atomic_read(&scn->active_tasklet_cnt);
5224 	info.active_grp_tasklet_cnt =
5225 				qdf_atomic_read(&scn->active_grp_tasklet_cnt);
5226 
5227 	for (i = 0; i < scn->ce_count; i++) {
5228 		if (!(tracked_ce & BIT(i)) || !scn->ce_id_to_state[i])
5229 			continue;
5230 
5231 		if (ce_get_index_info(scn, scn->ce_id_to_state[i],
5232 				      &info.ce_info[curr_index]))
5233 			continue;
5234 
5235 		curr_index++;
5236 	}
5237 
5238 	info.ce_count = curr_index;
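	/*
	 * Trim the unused tail of ce_info[] so that only the populated
	 * entries contribute to the hang-event record size.
	 */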
5239 	size = sizeof(info) -
5240 		(CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index);
5241 
5242 	if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
5243 		return;
5244 
5245 	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_CE_INFO,
5246 			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);
5247 
5248 	qdf_mem_copy(data + *offset, &info, size);
5249 	*offset = *offset + size;
5250 }
5251 #endif
5252