/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#include "qdf_module.h"

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"
#ifdef HIF_CE_LOG_INFO
#include "qdf_hang_event_notifier.h"
#endif

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
	defined(QCA_WIFI_QCA6018) || defined(QCA_WIFI_QCA5018) || \
	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCA5332) || \
	defined(QCA_WIFI_QCA9574)) && !defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

#ifdef QCA_WIFI_SUPPORT_SRNG
#include <hal_api.h>
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived rather
 * than waiting only for the interrupt, which may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef ENABLE_10_4_FW_HDR
#if (ENABLE_10_4_FW_HDR == 1)
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR == 1 */
#endif /* ENABLE_10_4_FW_HDR */

static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump the target access log
 *
 * Dumps the recorded PCIe target register accesses.
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

/*
 * This structure contains the interrupt index for each Copy engine
 * for various numbers of MSIs available in the system.
 */
static struct ce_int_assignment ce_int_context[NUM_CE_CONTEXT] = {
	/* Default configuration */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(12),
	  CE_INTERRUPT_IDX(13),
	  CE_INTERRUPT_IDX(14),
	  CE_INTERRUPT_IDX(15),
#endif
	} },
	/* Interrupt assignment for 1 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 2 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 3 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 4 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 5 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 6 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 7 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 8 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 9 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 10 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 11 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(0),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
	/* Interrupt assignment for 12 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
#ifdef QCA_WIFI_QCN9224
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
#endif
	} },
#ifdef QCA_WIFI_QCN9224
	/* Interrupt assignment for 13 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
	  CE_INTERRUPT_IDX(12),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	} },
	/* Interrupt assignment for 14 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
	  CE_INTERRUPT_IDX(12),
	  CE_INTERRUPT_IDX(13),
	  CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(0),
	} },
	/* Interrupt assignment for 15 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
	  CE_INTERRUPT_IDX(12),
	  CE_INTERRUPT_IDX(13),
	  CE_INTERRUPT_IDX(14),
	  CE_INTERRUPT_IDX(0),
	} },
	/* Interrupt assignment for 16 MSI combination */
	{{ CE_INTERRUPT_IDX(0),
	  CE_INTERRUPT_IDX(1),
	  CE_INTERRUPT_IDX(2),
	  CE_INTERRUPT_IDX(3),
	  CE_INTERRUPT_IDX(4),
	  CE_INTERRUPT_IDX(5),
	  CE_INTERRUPT_IDX(6),
	  CE_INTERRUPT_IDX(7),
	  CE_INTERRUPT_IDX(8),
	  CE_INTERRUPT_IDX(9),
	  CE_INTERRUPT_IDX(10),
	  CE_INTERRUPT_IDX(11),
	  CE_INTERRUPT_IDX(12),
	  CE_INTERRUPT_IDX(13),
	  CE_INTERRUPT_IDX(14),
	  CE_INTERRUPT_IDX(15),
	} },
#endif
};
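
/*
 * Illustrative lookup (editorial sketch, not driver code): with the table
 * above, an MSI configuration index selects a row and the CE id selects a
 * column. The helper below is hypothetical and only shows the indexing:
 *
 *	static inline uint8_t
 *	ce_irq_idx_for(unsigned int msi_cfg, unsigned int ce_id)
 *	{
 *		return ce_int_context[msi_cfg].msi_idx[ce_id];
 *	}
 *
 * The member name msi_idx is assumed here; see struct ce_int_assignment
 * for the actual field.
 */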

void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		hif_err("Invalid htc dump command: %d", cmd_id);
		break;
	}
}

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 (or 0) */

	/* n is not a power of 2; return the next power of 2 above it */
	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

#ifdef QCN7605_SUPPORT
static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This table defines the Copy Engine configuration and the mapping of
 * services/endpoints to CEs. A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
   ============================================================================
   Purpose    | Service / Endpoint   | CE   | Dir  | Xfer     | Xfer
              |                      |      |      | Size     | Frequency
   ============================================================================
   tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
   descriptor |                      |      |      | O(100B)  | and regular
   download   |                      |      |      |          |
   ----------------------------------------------------------------------------
   rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
   indication |                      |      |      | O(10B)   | regular
   upload     |                      |      |      |          |
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
   upload     |                      |      |      | O(1000B) | (frequent
   e.g. noise |                      |      |      |          | during IP1.0
   packets    |                      |      |      |          | testing)
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
   download   |                      |      |      | O(1000B) | (frequent
   e.g.       |                      |      |      |          | during IP1.0
   misdirected|                      |      |      |          | testing)
   EAPOL      |                      |      |      |          |
   packets    |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
              | DATA_VO (uplink)     |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
              | DATA_VO (downlink)   |      |      |          |
   ----------------------------------------------------------------------------
   WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
              |                      |      |      | O(100B)  |
   ----------------------------------------------------------------------------
   WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
   messages   | (downlink)           |      |      | O(100B)  |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (uplink)             |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (downlink)           |      |      |          |
   ----------------------------------------------------------------------------
   diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
              |                      |      |      |          | infrequent
   ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,    /* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,    /* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,    /* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{                       /* Must be last */
		0,
		0,
		0,
	},
};
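
/*
 * Reading the table above (editorial note): each entry is a
 * { service id, pipe direction, CE/pipe number } triple. For example,
 * WMI_CONTROL_SVC maps to pipe 3 for host->target traffic and pipe 2 for
 * target->host traffic, which is what hif_map_service_to_pipe() reports
 * back to HTC when the service is connected.
 */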

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA9574))
static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
};
#endif

#if (defined(QCA_WIFI_QCA8074V2))
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9, },
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6018))
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
};
#endif

#if (defined(QCA_WIFI_QCN9000))
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
};
#endif

#if (defined(QCA_WIFI_QCA5332))
static struct service_to_pipe target_service_to_ce_map_qca5332[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
#ifdef WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 9, },
	{ WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 9, },
#endif
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca5332[] = {
};
#endif

#if (defined(QCA_WIFI_QCN9224))
static struct service_to_pipe target_service_to_ce_map_qcn9224[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 14, },
	{ WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 14, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

#if defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCN9160)
static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
};
#endif

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#ifdef QCN7605_SUPPORT
static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
#ifdef IPA_OFFLOAD
	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#else
	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
#endif
	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA6290))
#ifdef QCA_6290_AP_MODE
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6390))
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};

#if (defined(QCA_WIFI_QCA6750))
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
};
#endif

#if (defined(QCA_WIFI_KIWI))
#ifdef FEATURE_DIRECT_LINK
static struct service_to_pipe target_service_to_ce_map_kiwi_direct_link[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 4, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
	{ LPASS_DATA_MSG_SVC, PIPEDIR_OUT, 0, },
	{ LPASS_DATA_MSG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

static struct service_to_pipe target_service_to_ce_map_kiwi[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
#endif
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_kiwi[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
#ifdef WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,  /* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{                       /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},             /* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	hif_err("QCN7605 not supported");
}
#endif

#ifdef QCA_WIFI_QCN9224
static
void hif_set_ce_config_qcn9224(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	hif_state->host_ce_config = host_ce_config_wlan_qcn9224;
	hif_state->target_ce_config = target_ce_config_wlan_qcn9224;
	hif_state->target_ce_config_sz =
				 sizeof(target_ce_config_wlan_qcn9224);
	scn->ce_count = QCN_9224_CE_COUNT;
	scn->ini_cfg.disable_wake_irq = 1;
}

static
void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn9224;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn9224);
}
#else
static inline
void hif_set_ce_config_qcn9224(struct hif_softc *scn,
			       struct HIF_CE_state *hif_state)
{
	hif_err("QCN9224 not supported");
}

static inline
void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	hif_err("QCN9224 not supported");
}
#endif

#ifdef FEATURE_DIRECT_LINK
/**
 * hif_select_service_to_pipe_map_kiwi() - Select service to CE map
 *  configuration for Kiwi
 * @scn: HIF context
 * @tgt_svc_map_to_use: returned service map
 * @sz_tgt_svc_map_to_use: returned length of the service map
 *
 * Return: None
 */
static inline void
hif_select_service_to_pipe_map_kiwi(struct hif_softc *scn,
				    struct service_to_pipe **tgt_svc_map_to_use,
				    uint32_t *sz_tgt_svc_map_to_use)
{
	if (pld_is_direct_link_supported(scn->qdf_dev->dev)) {
		*tgt_svc_map_to_use = target_service_to_ce_map_kiwi_direct_link;
		*sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_kiwi_direct_link);
	} else {
		*tgt_svc_map_to_use = target_service_to_ce_map_kiwi;
		*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_kiwi);
	}
}
#else
static inline void
hif_select_service_to_pipe_map_kiwi(struct hif_softc *scn,
				    struct service_to_pipe **tgt_svc_map_to_use,
				    uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_kiwi;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_kiwi);
}
#endif

static void hif_select_service_to_pipe_map(struct hif_softc *scn,
				    struct service_to_pipe **tgt_svc_map_to_use,
				    uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_QCN7605:
			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA6390:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6390);
			break;
		case TARGET_TYPE_QCA6490:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6490);
			break;
		case TARGET_TYPE_QCA6750:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6750;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6750);
			break;
		case TARGET_TYPE_KIWI:
		case TARGET_TYPE_MANGO:
		case TARGET_TYPE_PEACH:
			hif_select_service_to_pipe_map_kiwi(scn,
							 tgt_svc_map_to_use,
							 sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		case TARGET_TYPE_QCA8074V2:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca8074_v2;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074_v2);
			break;
		case TARGET_TYPE_QCA9574:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca9574;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca9574);
			break;
		case TARGET_TYPE_QCA6018:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca6018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6018);
			break;
		case TARGET_TYPE_QCN9000:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qcn9000;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qcn9000);
			break;
		case TARGET_TYPE_QCN9224:
			hif_select_ce_map_qcn9224(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_QCA5332:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca5332;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca5332);
			break;
		case TARGET_TYPE_QCA5018:
		case TARGET_TYPE_QCN6122:
		case TARGET_TYPE_QCN9160:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca5018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca5018);
			break;
		}
	}
	hif_state->tgt_svc_map = *tgt_svc_map_to_use;
	hif_state->sz_tgt_svc_map = *sz_tgt_svc_map_to_use /
					sizeof(struct service_to_pipe);
}

/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state : pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data (or htt_tx_data) attribute of the state structure
 *   if the CE serves one of the HTT DATA services.
 *
 * Return: true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	bool rc = false;

	if (ce_state) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
 * @hif_ctx: hif opaque handle
 *
 * Description:
 *   Gets the number of WMI EPs configured in the target svc map. Since the
 *   EP map includes both IN and OUT direction pipes, count only OUT pipes
 *   to get the number of EPs configured for WMI services.
 *
 * Return:
 *  uint8_t: count of WMI EPs in the target svc map
 */
uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int i;
	uint8_t wmi_ep_count = 0;

	hif_select_service_to_pipe_map(scn, &svc_map,
				       &map_sz);
	map_len = map_sz / sizeof(struct service_to_pipe);

	for (i = 0; i < map_len; i++) {
		/* Count number of WMI EPs based on out direction */
		if ((svc_map[i].pipedir == PIPEDIR_OUT) &&
		    ((svc_map[i].service_id == WMI_CONTROL_SVC)  ||
		    (svc_map[i].service_id == WMI_CONTROL_SVC_WMAC1) ||
		    (svc_map[i].service_id == WMI_CONTROL_SVC_WMAC2))) {
			wmi_ep_count++;
		}
	}

	return wmi_ep_count;
}
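
/*
 * Worked example (editorial note): with target_service_to_ce_map_qca8074
 * defined earlier in this file, the PIPEDIR_OUT entries for WMI_CONTROL_SVC
 * and WMI_CONTROL_SVC_WMAC1 give hif_get_max_wmi_ep() a count of 2 on that
 * target.
 */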

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		hif_err("ce %d, %s, initial sw_index = %d, initial write_index = %d",
			ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to be allocated
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS - for success
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (!scn->ipa_ce_ring) {
			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
				scn->qdf_dev,
				nentries * desc_size + CE_DESC_RING_ALIGN);
			if (!scn->ipa_ce_ring) {
				hif_err("Failed to allocate memory for IPA ce ring");
				return QDF_STATUS_E_NOMEM;
			}
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						  &scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
						scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			hif_mem_alloc_consistent_unaligned
					(scn,
					 (nentries * desc_size +
					  CE_DESC_RING_ALIGN),
					 base_addr,
					 ce_ring->hal_ring_type,
					 &ce_ring->is_ring_prealloc);

		if (!ce_ring->base_addr_owner_space_unaligned) {
			hif_err("Failed to allocate DMA memory for ce ring id: %u",
				CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (scn->ipa_ce_ring) {
			qdf_mem_shared_mem_free(scn->qdf_dev,
						scn->ipa_ce_ring);
			scn->ipa_ce_ring = NULL;
		}
		ce_ring->base_addr_owner_space_unaligned = NULL;
	} else {
		hif_mem_free_consistent_unaligned
			(scn,
			 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
			 ce_ring->base_addr_owner_space_unaligned,
			 ce_ring->base_addr_CE_space, 0,
			 ce_ring->is_ring_prealloc);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	}
}
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	ce_ring->base_addr_owner_space_unaligned =
			hif_mem_alloc_consistent_unaligned
					(scn,
					 (nentries * desc_size +
					  CE_DESC_RING_ALIGN),
					 base_addr,
					 ce_ring->hal_ring_type,
					 &ce_ring->is_ring_prealloc);

	if (!ce_ring->base_addr_owner_space_unaligned) {
		hif_err("Failed to allocate DMA memory for ce ring id: %u",
			CE_id);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	hif_mem_free_consistent_unaligned
		(scn,
		 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
		 ce_ring->base_addr_owner_space_unaligned,
		 ce_ring->base_addr_CE_space, 0,
		 ce_ring->is_ring_prealloc);
	ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */
1726 
1727 /*
1728  * TODO: Need to explore the possibility of having this as part of a
1729  * target context instead of a global array.
1730  */
1731 static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);
1732 
1733 void ce_service_register_module(enum ce_target_type target_type,
1734 				struct ce_ops* (*ce_attach)(void))
1735 {
1736 	if (target_type < CE_MAX_TARGET_TYPE)
1737 		ce_attach_register[target_type] = ce_attach;
1738 }
1739 
1740 qdf_export_symbol(ce_service_register_module);
1741 
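/*
 * Illustrative registration sketch (not part of this file): a CE
 * service implementation is expected to register its attach callback
 * from its own init path, along the lines of:
 *
 *	static struct ce_ops *my_ce_service_attach(void)
 *	{
 *		return &my_ce_service_ops;
 *	}
 *
 *	ce_service_register_module(CE_SVC_LEGACY, my_ce_service_attach);
 *
 * ce_services_attach() below then resolves the registered ops table at
 * attach time based on ce_srng_based(). The names my_ce_service_attach
 * and my_ce_service_ops are placeholders for this example.
 */
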
1742 /**
1743  * ce_srng_based() - Does this target use srng
1744  * @scn: pointer to the state context of the CE
1745  *
1746  * Description:
1747  *   Checks the target type against the list of SRNG based targets.
1748  *
1749  * Return:
1750  *  true if the target is SRNG based
1751  *  false otherwise
1752  */
1753 bool ce_srng_based(struct hif_softc *scn)
1754 {
1755 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1756 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1757 
1758 	switch (tgt_info->target_type) {
1759 	case TARGET_TYPE_QCA8074:
1760 	case TARGET_TYPE_QCA8074V2:
1761 	case TARGET_TYPE_QCA6290:
1762 	case TARGET_TYPE_QCA6390:
1763 	case TARGET_TYPE_QCA6490:
1764 	case TARGET_TYPE_QCA6750:
1765 	case TARGET_TYPE_QCA6018:
1766 	case TARGET_TYPE_QCN9000:
1767 	case TARGET_TYPE_QCN6122:
1768 	case TARGET_TYPE_QCN9160:
1769 	case TARGET_TYPE_QCA5018:
1770 	case TARGET_TYPE_KIWI:
1771 	case TARGET_TYPE_MANGO:
1772 	case TARGET_TYPE_PEACH:
1773 	case TARGET_TYPE_QCN9224:
1774 	case TARGET_TYPE_QCA9574:
1775 	case TARGET_TYPE_QCA5332:
1776 		return true;
1777 	default:
1778 		return false;
1779 	}
1781 }
1782 qdf_export_symbol(ce_srng_based);
1783 
1784 #ifdef QCA_WIFI_SUPPORT_SRNG
1785 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1786 {
1787 	struct ce_ops *ops = NULL;
1788 
1789 	if (ce_srng_based(scn)) {
1790 		if (ce_attach_register[CE_SVC_SRNG])
1791 			ops = ce_attach_register[CE_SVC_SRNG]();
1792 	} else if (ce_attach_register[CE_SVC_LEGACY]) {
1793 		ops = ce_attach_register[CE_SVC_LEGACY]();
1794 	}
1795 
1796 	return ops;
1797 }
1798 
1799 
1800 #else	/* QCA_WIFI_SUPPORT_SRNG */
1801 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1802 {
1803 	if (ce_attach_register[CE_SVC_LEGACY])
1804 		return ce_attach_register[CE_SVC_LEGACY]();
1805 
1806 	return NULL;
1807 }
1808 #endif /* QCA_WIFI_SUPPORT_SRNG */
1809 
1810 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
1811 		struct pld_shadow_reg_v2_cfg **shadow_config,
1812 		int *num_shadow_registers_configured)
1813 {
1814 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1815 
1816 	hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
1817 			scn, shadow_config, num_shadow_registers_configured);
1818 }
1820 
1821 #ifdef CONFIG_SHADOW_V3
1822 static inline void
1823 hif_prepare_hal_shadow_reg_cfg_v3(struct hif_softc *scn,
1824 				  struct pld_wlan_enable_cfg *cfg)
1825 {
1826 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1827 
1828 	if (!hif_state->ce_services->ce_prepare_shadow_register_v3_cfg)
1829 		return;
1830 
1831 	hif_state->ce_services->ce_prepare_shadow_register_v3_cfg(
1832 			scn, &cfg->shadow_reg_v3_cfg,
1833 			&cfg->num_shadow_reg_v3_cfg);
1834 }
1835 #else
1836 static inline void
1837 hif_prepare_hal_shadow_reg_cfg_v3(struct hif_softc *scn,
1838 				  struct pld_wlan_enable_cfg *cfg)
1839 {
1840 }
1841 #endif
1842 
1843 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
1844 						uint8_t ring_type)
1845 {
1846 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1847 
1848 	return hif_state->ce_services->ce_get_desc_size(ring_type);
1849 }
1850 
1851 #ifdef QCA_WIFI_SUPPORT_SRNG
1852 static inline int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
1853 {
1854 	switch (ce_ring_type) {
1855 	case CE_RING_SRC:
1856 		return CE_SRC;
1857 	case CE_RING_DEST:
1858 		return CE_DST;
1859 	case CE_RING_STATUS:
1860 		return CE_DST_STATUS;
1861 	default:
1862 		return -EINVAL;
1863 	}
1864 }
1865 #else
1866 static int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
1867 {
1868 	return 0;
1869 }
1870 #endif
1871 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
1872 		uint8_t ring_type, uint32_t nentries)
1873 {
1874 	uint32_t ce_nbytes;
1875 	char *ptr;
1876 	qdf_dma_addr_t base_addr;
1877 	struct CE_ring_state *ce_ring;
1878 	uint32_t desc_size;
1879 	struct hif_softc *scn = CE_state->scn;
1880 
1881 	ce_nbytes = sizeof(struct CE_ring_state)
1882 		+ (nentries * sizeof(void *));
1883 	ptr = qdf_mem_malloc(ce_nbytes);
1884 	if (!ptr)
1885 		return NULL;
1886 
1887 	ce_ring = (struct CE_ring_state *)ptr;
1888 	ptr += sizeof(struct CE_ring_state);
1889 	ce_ring->nentries = nentries;
1890 	ce_ring->nentries_mask = nentries - 1;
1891 
1892 	ce_ring->low_water_mark_nentries = 0;
1893 	ce_ring->high_water_mark_nentries = nentries;
1894 	ce_ring->per_transfer_context = (void **)ptr;
1895 	ce_ring->hal_ring_type = ce_ring_type_to_hal_ring_type(ring_type);
1896 
1897 	desc_size = ce_get_desc_size(scn, ring_type);
1898 
1899 	/* Legacy platforms that do not support cache
1900 	 * coherent DMA are unsupported
1901 	 */
1902 	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
1903 			       ce_ring, nentries,
1904 			       desc_size) !=
1905 	    QDF_STATUS_SUCCESS) {
1906 		hif_err("ring has no DMA mem");
1907 		qdf_mem_free(ce_ring);
1908 		return NULL;
1909 	}
1910 	ce_ring->base_addr_CE_space_unaligned = base_addr;
1911 
1912 	/* Initialize the memory to 0 to prevent
1913 	 * garbage data from crashing the system
1914 	 * during firmware download
1915 	 */
1916 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
1917 			nentries * desc_size +
1918 			CE_DESC_RING_ALIGN);
1919 
1920 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
1921 
1922 		ce_ring->base_addr_CE_space =
1923 			(ce_ring->base_addr_CE_space_unaligned +
1924 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
1925 
1926 		ce_ring->base_addr_owner_space = (void *)
1927 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
1928 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
1929 	} else {
1930 		ce_ring->base_addr_CE_space =
1931 				ce_ring->base_addr_CE_space_unaligned;
1932 		ce_ring->base_addr_owner_space =
1933 				ce_ring->base_addr_owner_space_unaligned;
1934 	}
1935 
1936 	return ce_ring;
1937 }
1938 
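/*
 * Worked example for the alignment fix-up above (illustrative,
 * assuming CE_DESC_RING_ALIGN is 8): with an unaligned DMA base of
 * 0x1003, (0x1003 + 8 - 1) & ~(8 - 1) = 0x1008, the next 8-byte
 * aligned address. The owner-space (CPU) pointer is rounded up by the
 * same amount, so the CPU and CE views index the same descriptor
 * slots, and the extra CE_DESC_RING_ALIGN bytes in the allocation
 * guarantee the rounded-up ring still fits.
 */
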
1939 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
1940 			uint32_t ce_id, struct CE_ring_state *ring,
1941 			struct CE_attr *attr)
1942 {
1943 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1944 
1945 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
1946 					      ring, attr);
1947 }
1948 
1949 static void ce_srng_cleanup(struct hif_softc *scn, struct CE_state *CE_state,
1950 			    uint8_t ring_type)
1951 {
1952 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1953 
1954 	if (hif_state->ce_services->ce_srng_cleanup)
1955 		hif_state->ce_services->ce_srng_cleanup(scn,
1956 					CE_state, ring_type);
1957 }
1958 
1959 int hif_ce_bus_early_suspend(struct hif_softc *scn)
1960 {
1961 	uint8_t ul_pipe, dl_pipe;
1962 	int ce_id, status, ul_is_polled, dl_is_polled;
1963 	struct CE_state *ce_state;
1964 
1965 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
1966 					 &ul_pipe, &dl_pipe,
1967 					 &ul_is_polled, &dl_is_polled);
1968 	if (status) {
1969 		hif_err("pipe_mapping failure");
1970 		return status;
1971 	}
1972 
1973 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1974 		if (ce_id == ul_pipe)
1975 			continue;
1976 		if (ce_id == dl_pipe)
1977 			continue;
1978 
1979 		ce_state = scn->ce_id_to_state[ce_id];
1980 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1981 		if (ce_state->state == CE_RUNNING)
1982 			ce_state->state = CE_PAUSED;
1983 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1984 	}
1985 
1986 	return status;
1987 }
1988 
1989 int hif_ce_bus_late_resume(struct hif_softc *scn)
1990 {
1991 	int ce_id;
1992 	struct CE_state *ce_state;
1993 	int write_index = 0;
1994 	bool index_updated;
1995 
1996 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1997 		ce_state = scn->ce_id_to_state[ce_id];
1998 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1999 		if (ce_state->state == CE_PENDING) {
2000 			write_index = ce_state->src_ring->write_index;
2001 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
2002 					write_index);
2003 			ce_state->state = CE_RUNNING;
2004 			index_updated = true;
2005 		} else {
2006 			index_updated = false;
2007 		}
2008 
2009 		if (ce_state->state == CE_PAUSED)
2010 			ce_state->state = CE_RUNNING;
2011 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
2012 
2013 		if (index_updated)
2014 			hif_record_ce_desc_event(scn, ce_id,
2015 				RESUME_WRITE_INDEX_UPDATE,
2016 				NULL, NULL, write_index, 0);
2017 	}
2018 
2019 	return 0;
2020 }
2021 
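/*
 * CE state flow implied by the two functions above (summary):
 *
 *	CE_RUNNING --early_suspend--> CE_PAUSED --late_resume--> CE_RUNNING
 *
 * A CE found in CE_PENDING at late resume had a send queued while the
 * bus was down; its cached src ring write index is flushed to hardware
 * via CE_SRC_RING_WRITE_IDX_SET() before the CE is marked CE_RUNNING
 * again. The WMI control pipes are skipped at early suspend so that
 * they remain usable.
 */
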
2022 /**
2023  * ce_oom_recovery() - try to recover rx ce from oom condition
2024  * @context: CE_state of the CE with oom rx ring
2025  *
2026  * the executing work Will continue to be rescheduled until
2027  * at least 1 descriptor is successfully posted to the rx ring.
2028  *
2029  * return: none
2030  */
2031 static void ce_oom_recovery(void *context)
2032 {
2033 	struct CE_state *ce_state = context;
2034 	struct hif_softc *scn = ce_state->scn;
2035 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
2036 	struct HIF_CE_pipe_info *pipe_info =
2037 		&ce_softc->pipe_info[ce_state->id];
2038 
2039 	hif_post_recv_buffers_for_pipe(pipe_info);
2040 }
2041 
2042 #ifdef HIF_CE_DEBUG_DATA_BUF
2043 /**
2044  * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed to
2045  * by the CE descriptors.
2046  * Allocates HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each
2047  * @scn: hif scn handle
2048  * @ce_id: Copy Engine Id
2049  *
2050  * Return: QDF_STATUS
2051  */
2052 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
2053 {
2054 	struct hif_ce_desc_event *event = NULL;
2055 	struct hif_ce_desc_event *hist_ev = NULL;
2056 	uint32_t index = 0;
2057 
2058 	hist_ev =
2059 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
2060 
2061 	if (!hist_ev)
2062 		return QDF_STATUS_E_NOMEM;
2063 
2064 	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
2065 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
2066 		event = &hist_ev[index];
2067 		event->data =
2068 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
2069 		if (!event->data) {
2070 			hif_err_rl("ce debug data alloc failed");
2071 			scn->hif_ce_desc_hist.data_enable[ce_id] = false;
2072 			return QDF_STATUS_E_NOMEM;
2073 		}
2074 	}
2075 	return QDF_STATUS_SUCCESS;
2076 }
2077 
2078 /**
2079  * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
2080  * the CE descriptors.
2081  * @scn: hif scn handle
2082  * @ce_id: Copy Engine Id
2083  *
2084  * Return: None
2085  */
2086 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
2087 {
2088 	struct hif_ce_desc_event *event = NULL;
2089 	struct hif_ce_desc_event *hist_ev = NULL;
2090 	uint32_t index = 0;
2091 
2092 	hist_ev =
2093 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
2094 
2095 	if (!hist_ev)
2096 		return;
2097 
2098 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
2099 		event = &hist_ev[index];
2100 		if (event->data)
2101 			qdf_mem_free(event->data);
2102 		event->data = NULL;
2103 		event = NULL;
2104 	}
2105 
2106 }
2107 #endif /* HIF_CE_DEBUG_DATA_BUF */
2108 
2109 #ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
2110 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2111 
2112 /* Define the variables below for crashscope parsing */
2113 struct hif_ce_desc_event *hif_ce_desc_history[CE_COUNT_MAX];
2114 uint32_t hif_ce_history_max = HIF_CE_HISTORY_MAX;
2115 
2116 /*
2117  * For debug builds, CE history is enabled for all CEs; for perf
2118  * builds (if CONFIG_SLUB_DEBUG_ON is N), it is enabled only for
2119  * CE2 (WMI event), CE3 (WMI cmd) and CE7 history.
2120  */
2121 #if defined(CONFIG_SLUB_DEBUG_ON)
2122 #define CE_DESC_HISTORY_BUFF_CNT  CE_COUNT_MAX
2123 #define IS_CE_DEBUG_ONLY_FOR_CRIT_CE  0
2124 #else
2125 /* CE2, CE3, CE7 */
2126 #define CE_DESC_HISTORY_BUFF_CNT  3
2127 #define IS_CE_DEBUG_ONLY_FOR_CRIT_CE (BIT(2) | BIT(3) | BIT(7))
2128 #endif
2129 struct hif_ce_desc_event
2130 	hif_ce_desc_history_buff[CE_DESC_HISTORY_BUFF_CNT][HIF_CE_HISTORY_MAX];
2131 
2132 static struct hif_ce_desc_event *
2133 	hif_ce_debug_history_buf_get(struct hif_softc *scn, unsigned int ce_id)
2134 {
2135 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2136 
2137 	hif_debug("get ce debug buffer ce_id %u, only_ce2/ce3=0x%x, idx=%u",
2138 		  ce_id, IS_CE_DEBUG_ONLY_FOR_CRIT_CE,
2139 		  ce_hist->ce_id_hist_map[ce_id]);
2140 	if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE &&
2141 	    (ce_id == CE_ID_2 || ce_id == CE_ID_3 || ce_id == CE_ID_7)) {
2142 		uint8_t idx = ce_hist->ce_id_hist_map[ce_id];
2143 
2144 		hif_ce_desc_history[ce_id] = hif_ce_desc_history_buff[idx];
2145 	} else {
2146 		hif_ce_desc_history[ce_id] =
2147 			hif_ce_desc_history_buff[ce_id];
2148 	}
2149 
2150 	return hif_ce_desc_history[ce_id];
2151 }
2152 
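/*
 * Example (hedged): on a perf build hif_ce_desc_history_buff has only
 * CE_DESC_HISTORY_BUFF_CNT = 3 rows, and ce_id_hist_map is assumed to
 * map CE2/CE3/CE7 onto rows 0/1/2, so e.g. CE3 records land in
 * hif_ce_desc_history_buff[ce_id_hist_map[3]]. On a debug build every
 * CE indexes its own row directly, hif_ce_desc_history_buff[ce_id].
 */
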
2153 /**
2154  * alloc_mem_ce_debug_history() - Allocate CE descriptor history
2155  * @scn: hif scn handle
2156  * @ce_id: Copy Engine Id
2157  * @src_nentries: source ce ring entries
2158  * Return: QDF_STATUS
2159  */
2160 static QDF_STATUS
2161 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
2162 			   uint32_t src_nentries)
2163 {
2164 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2165 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2166 
2167 	/* For perf builds, return directly for CEs other than CE2/CE3/CE7 */
2168 	if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE &&
2169 	    ce_id != CE_ID_2 &&
2170 	    ce_id != CE_ID_3 &&
2171 	    ce_id != CE_ID_7) {
2172 		ce_hist->enable[ce_id] = false;
2173 		ce_hist->data_enable[ce_id] = false;
2174 		return QDF_STATUS_SUCCESS;
2175 	}
2176 
2177 	ce_hist->hist_ev[ce_id] = hif_ce_debug_history_buf_get(scn, ce_id);
2178 	ce_hist->enable[ce_id] = true;
2179 
2180 	if (src_nentries) {
2181 		status = alloc_mem_ce_debug_hist_data(scn, ce_id);
2182 		if (status != QDF_STATUS_SUCCESS) {
2183 			ce_hist->enable[ce_id] = false;
2184 			ce_hist->hist_ev[ce_id] = NULL;
2185 			return status;
2186 		}
2187 	} else {
2188 		ce_hist->data_enable[ce_id] = false;
2189 	}
2190 
2191 	return QDF_STATUS_SUCCESS;
2192 }
2193 
2194 /**
2195  * free_mem_ce_debug_history() - Free CE descriptor history
2196  * @scn: hif scn handle
2197  * @ce_id: Copy Engine Id
2198  *
2199  * Return: None
2200  */
2201 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
2202 {
2203 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2204 
2205 	if (!ce_hist->enable[ce_id])
2206 		return;
2207 
2208 	ce_hist->enable[ce_id] = false;
2209 	if (ce_hist->data_enable[ce_id]) {
2210 		ce_hist->data_enable[ce_id] = false;
2211 		free_mem_ce_debug_hist_data(scn, ce_id);
2212 	}
2213 	ce_hist->hist_ev[ce_id] = NULL;
2214 }
2215 #else
2216 static inline QDF_STATUS
2217 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
2218 			   uint32_t src_nentries)
2219 {
2220 	return QDF_STATUS_SUCCESS;
2221 }
2222 
2223 static inline void
2224 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
2225 #endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
2226 #else
2227 #if defined(HIF_CE_DEBUG_DATA_BUF)
2228 
2229 static QDF_STATUS
2230 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
2231 			   uint32_t src_nentries)
2232 {
2233 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
2234 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
2235 
2236 	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
2237 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
2238 		return QDF_STATUS_E_NOMEM;
2239 	} else {
2240 		scn->hif_ce_desc_hist.enable[CE_id] = 1;
2241 		return QDF_STATUS_SUCCESS;
2242 	}
2243 }
2244 
2245 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
2246 {
2247 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2248 	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];
2249 
2250 	if (!hist_ev)
2251 		return;
2252 
2253 	if (ce_hist->data_enable[CE_id]) {
2254 		ce_hist->data_enable[CE_id] = false;
2255 		free_mem_ce_debug_hist_data(scn, CE_id);
2256 	}
2257 
2258 	ce_hist->enable[CE_id] = false;
2259 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
2260 	ce_hist->hist_ev[CE_id] = NULL;
2261 }
2262 
2263 #else
2264 
2265 static inline QDF_STATUS
2266 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
2267 			   uint32_t src_nentries)
2268 {
2269 	return QDF_STATUS_SUCCESS;
2270 }
2271 
2272 static inline void
2273 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
2274 #endif /* HIF_CE_DEBUG_DATA_BUF */
2275 #endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */
2276 
2277 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2278 /**
2279  * reset_ce_debug_history() - reset the index and ce id used for dumping the
2280  * CE records on the console using sysfs.
2281  * @scn: hif scn handle
2282  *
2283  * Return:
2284  */
2285 static inline void reset_ce_debug_history(struct hif_softc *scn)
2286 {
2287 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2288 	/* Initialise the CE debug history sysfs interface inputs ce_id and
2289 	 * index to zero
2290 	 */
2291 	ce_hist->hist_index = 0;
2292 	ce_hist->hist_id = 0;
2293 }
2294 #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
2295 static inline void reset_ce_debug_history(struct hif_softc *scn) { }
2296 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
2297 
2298 void ce_enable_polling(void *cestate)
2299 {
2300 	struct CE_state *CE_state = (struct CE_state *)cestate;
2301 
2302 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
2303 		CE_state->timer_inited = true;
2304 }
2305 
2306 void ce_disable_polling(void *cestate)
2307 {
2308 	struct CE_state *CE_state = (struct CE_state *)cestate;
2309 
2310 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
2311 		CE_state->timer_inited = false;
2312 }
2313 
2314 /*
2315  * Initialize a Copy Engine based on caller-supplied attributes.
2316  * This may be called once to initialize both source and destination
2317  * rings or it may be called twice for separate source and destination
2318  * initialization. It may be that only one side or the other is
2319  * initialized by software/firmware.
2320  *
2321  * This should be called during the initialization sequence before
2322  * interrupts are enabled, so we don't have to worry about thread safety.
2323  */
2324 struct CE_handle *ce_init(struct hif_softc *scn,
2325 			  unsigned int CE_id, struct CE_attr *attr)
2326 {
2327 	struct CE_state *CE_state;
2328 	uint32_t ctrl_addr;
2329 	unsigned int nentries;
2330 	bool malloc_CE_state = false;
2331 	bool malloc_src_ring = false;
2332 	int status;
2333 	QDF_STATUS mem_status = QDF_STATUS_SUCCESS;
2334 
2335 	QDF_ASSERT(CE_id < scn->ce_count);
2336 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
2337 	CE_state = scn->ce_id_to_state[CE_id];
2338 
2339 	if (!CE_state) {
2340 		CE_state =
2341 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
2342 		if (!CE_state)
2343 			return NULL;
2344 
2345 		malloc_CE_state = true;
2346 		qdf_spinlock_create(&CE_state->ce_index_lock);
2347 #ifdef CE_TASKLET_SCHEDULE_ON_FULL
2348 		qdf_spinlock_create(&CE_state->ce_interrupt_lock);
2349 #endif
2350 
2351 		CE_state->id = CE_id;
2352 		CE_state->ctrl_addr = ctrl_addr;
2353 		CE_state->state = CE_RUNNING;
2354 		CE_state->attr_flags = attr->flags;
2355 	}
2356 	CE_state->scn = scn;
2357 	CE_state->service = ce_engine_service_reg;
2358 
2359 	qdf_atomic_init(&CE_state->rx_pending);
2360 	if (!attr) {
2361 		/* Already initialized; caller wants the handle */
2362 		return (struct CE_handle *)CE_state;
2363 	}
2364 
2365 	if (CE_state->src_sz_max)
2366 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
2367 	else
2368 		CE_state->src_sz_max = attr->src_sz_max;
2369 
2370 	ce_init_ce_desc_event_log(scn, CE_id,
2371 				  attr->src_nentries + attr->dest_nentries);
2372 
2373 	/* source ring setup */
2374 	nentries = attr->src_nentries;
2375 	if (nentries) {
2376 		struct CE_ring_state *src_ring;
2377 
2378 		nentries = roundup_pwr2(nentries);
2379 		if (CE_state->src_ring) {
2380 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
2381 		} else {
2382 			src_ring = CE_state->src_ring =
2383 				ce_alloc_ring_state(CE_state,
2384 						CE_RING_SRC,
2385 						nentries);
2386 			if (!src_ring) {
2387 			/* Cannot allocate the src ring. If the
2388 			 * CE_state was allocated locally, free
2389 			 * it and return an error.
2390 				 */
2391 				hif_err("src ring has no mem");
2392 				if (malloc_CE_state) {
2393 					/* allocated CE_state locally */
2394 					qdf_mem_free(CE_state);
2395 					malloc_CE_state = false;
2396 				}
2397 				return NULL;
2398 			}
2399 			/* we can allocate src ring. Mark that the src ring is
2400 			 * allocated locally
2401 			 */
2402 			malloc_src_ring = true;
2403 
2404 			/*
2405 			 * Also allocate a shadow src ring in
2406 			 * regular mem to use for faster access.
2407 			 */
2408 			src_ring->shadow_base_unaligned =
2409 				qdf_mem_malloc(nentries *
2410 					       sizeof(struct CE_src_desc) +
2411 					       CE_DESC_RING_ALIGN);
2412 			if (!src_ring->shadow_base_unaligned)
2413 				goto error_no_dma_mem;
2414 
2415 			src_ring->shadow_base = (struct CE_src_desc *)
2416 				(((size_t) src_ring->shadow_base_unaligned +
2417 				CE_DESC_RING_ALIGN - 1) &
2418 				 ~(CE_DESC_RING_ALIGN - 1));
2419 
2420 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
2421 					       src_ring, attr);
2422 			if (status < 0)
2423 				goto error_target_access;
2424 			ce_ring_test_initial_indexes(CE_id, src_ring,
2425 						     "src_ring");
2426 		}
2427 	}
2428 
2429 	/* destination ring setup */
2430 	nentries = attr->dest_nentries;
2431 	if (nentries) {
2432 		struct CE_ring_state *dest_ring;
2433 
2434 		nentries = roundup_pwr2(nentries);
2435 		if (CE_state->dest_ring) {
2436 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
2437 		} else {
2438 			dest_ring = CE_state->dest_ring =
2439 				ce_alloc_ring_state(CE_state,
2440 						CE_RING_DEST,
2441 						nentries);
2442 			if (!dest_ring) {
2443 				/* Cannot allocate the dest ring. If the
2444 				 * CE_state or src ring was allocated locally,
2445 				 * free them and return an error.
2446 				 */
2447 				hif_err("dest ring has no mem");
2448 				goto error_no_dma_mem;
2449 			}
2450 
2451 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
2452 				      dest_ring, attr);
2453 			if (status < 0)
2454 				goto error_target_access;
2455 
2456 			ce_ring_test_initial_indexes(CE_id, dest_ring,
2457 						     "dest_ring");
2458 
2459 			/* For srng based target, init status ring here */
2460 			if (ce_srng_based(CE_state->scn)) {
2461 				CE_state->status_ring =
2462 					ce_alloc_ring_state(CE_state,
2463 							CE_RING_STATUS,
2464 							nentries);
2465 				if (!CE_state->status_ring) {
2466 					/* Allocation failed. Cleanup */
2467 					qdf_mem_free(CE_state->dest_ring);
2468 					if (malloc_src_ring) {
2469 						qdf_mem_free
2470 							(CE_state->src_ring);
2471 						CE_state->src_ring = NULL;
2472 						malloc_src_ring = false;
2473 					}
2474 					if (malloc_CE_state) {
2475 						/* allocated CE_state locally */
2476 						scn->ce_id_to_state[CE_id] =
2477 							NULL;
2478 						qdf_mem_free(CE_state);
2479 						malloc_CE_state = false;
2480 					}
2481 
2482 					return NULL;
2483 				}
2484 
2485 				status = ce_ring_setup(scn, CE_RING_STATUS,
2486 					       CE_id, CE_state->status_ring,
2487 					       attr);
2488 				if (status < 0)
2489 					goto error_target_access;
2490 
2491 			}
2492 
2493 			/* epping */
2494 			/* poll timer */
2495 			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
2496 				qdf_timer_init(scn->qdf_dev,
2497 						&CE_state->poll_timer,
2498 						ce_poll_timeout,
2499 						CE_state,
2500 						QDF_TIMER_TYPE_WAKE_APPS);
2501 				ce_enable_polling(CE_state);
2502 				qdf_timer_mod(&CE_state->poll_timer,
2503 						      CE_POLL_TIMEOUT);
2504 			}
2505 		}
2506 	}
2507 
2508 	if (!ce_srng_based(scn)) {
2509 		/* Enable CE error interrupts */
2510 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2511 			goto error_target_access;
2512 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
2513 		if (Q_TARGET_ACCESS_END(scn) < 0)
2514 			goto error_target_access;
2515 	}
2516 
2517 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
2518 			ce_oom_recovery, CE_state);
2519 
2520 	/* update the htt_data attribute */
2521 	ce_mark_datapath(CE_state);
2522 	scn->ce_id_to_state[CE_id] = CE_state;
2523 
2524 	mem_status = alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries);
2525 	if (mem_status != QDF_STATUS_SUCCESS)
2526 		goto error_target_access;
2527 
2528 	return (struct CE_handle *)CE_state;
2529 
2530 error_target_access:
2531 error_no_dma_mem:
2532 	ce_fini((struct CE_handle *)CE_state);
2533 	return NULL;
2534 }
2535 
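/*
 * Minimal usage sketch for ce_init() (illustrative, assuming a
 * host_ce_config-style attribute table):
 *
 *	struct CE_attr *attr = &hif_state->host_ce_config[CE_id];
 *	struct CE_handle *ce_hdl = ce_init(scn, CE_id, attr);
 *
 *	if (!ce_hdl)
 *		return QDF_STATUS_E_NOMEM;
 *
 * Calling ce_init() again for an already-initialized CE_id with
 * attr == NULL simply returns the existing handle.
 */
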
2536 /**
2537  * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
2538  * @hif_ctx: HIF Context
2539  *
2540  * API to check if polling is enabled on all CEs. Returns true when polling
2541  * is enabled on all CEs.
2542  *
2543  * Return: bool
2544  */
2545 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
2546 {
2547 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2548 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2549 	struct CE_attr *attr;
2550 	int id;
2551 
2552 	for (id = 0; id < scn->ce_count; id++) {
2553 		attr = &hif_state->host_ce_config[id];
2554 		if (attr && (attr->dest_nentries) &&
2555 		    !(attr->flags & CE_ATTR_ENABLE_POLL))
2556 			return false;
2557 	}
2558 	return true;
2559 }
2560 qdf_export_symbol(hif_is_polled_mode_enabled);
2561 
2562 static int hif_get_pktlog_ce_num(struct hif_softc *scn)
2563 {
2564 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2565 	int id;
2566 
2567 	for (id = 0; id < hif_state->sz_tgt_svc_map; id++) {
2568 		if (hif_state->tgt_svc_map[id].service_id ==  PACKET_LOG_SVC)
2569 			return hif_state->tgt_svc_map[id].pipenum;
2570 	}
2571 	return -EINVAL;
2572 }
2573 
2574 #ifdef WLAN_FEATURE_FASTPATH
2575 /**
2576  * hif_enable_fastpath() - Update that we have enabled fastpath mode
2577  * @hif_ctx: HIF context
2578  *
2579  * For use in data path
2580  *
2581  * Return: void
2582  */
2583 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
2584 {
2585 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2586 
2587 	if (ce_srng_based(scn)) {
2588 		hif_warn("srng rings do not support fastpath");
2589 		return;
2590 	}
2591 	hif_debug("Enabling fastpath mode");
2592 	scn->fastpath_mode_on = true;
2593 }
2594 
2595 /**
2596  * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
2597  * @hif_ctx: HIF Context
2598  *
2599  * For use in data path to skip HTC
2600  *
2601  * Return: bool
2602  */
2603 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
2604 {
2605 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2606 
2607 	return scn->fastpath_mode_on;
2608 }
2609 
2610 /**
2611  * hif_get_ce_handle - API to get CE handle for FastPath mode
2612  * @hif_ctx: HIF Context
2613  * @id: CopyEngine Id
2614  *
2615  * API to return CE handle for fastpath mode
2616  *
2617  * Return: CE handle
2618  */
2619 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
2620 {
2621 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2622 
2623 	return scn->ce_id_to_state[id];
2624 }
2625 qdf_export_symbol(hif_get_ce_handle);
2626 
2627 /**
2628  * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
2629  * No processing is required inside this function.
2630  * @ce_hdl: Copy engine handle
2631  * Using an assert, this function makes sure that
2632  * the TX CE has been processed completely.
2633  *
2634  * This is called while dismantling CE structures. No other thread
2635  * should be using these structures while dismantling is occurring
2636  * therefore no locking is needed.
2637  *
2638  * Return: none
2639  */
2640 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
2641 {
2642 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
2643 	struct CE_ring_state *src_ring = ce_state->src_ring;
2644 	struct hif_softc *sc = ce_state->scn;
2645 	uint32_t sw_index, write_index;
2646 
2647 	if (hif_is_nss_wifi_enabled(sc))
2648 		return;
2649 
2650 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
2651 		hif_debug("Fastpath mode ON, Cleaning up HTT Tx CE");
2652 		sw_index = src_ring->sw_index;
2653 		write_index = src_ring->write_index;
2654 
2655 		/* At this point Tx CE should be clean */
2656 		qdf_assert_always(sw_index == write_index);
2657 	}
2658 }
2659 
2660 /**
2661  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
2662  * @ce_hdl: Handle to CE
2663  *
2664  * These buffers are never allocated on the fly, but
2665  * are allocated only once during HIF start and freed
2666  * only once during HIF stop.
2667  * NOTE:
2668  * The assumption here is there is no in-flight DMA in progress
2669  * currently, so that buffers can be freed up safely.
2670  *
2671  * Return: NONE
2672  */
2673 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
2674 {
2675 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
2676 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
2677 	qdf_nbuf_t nbuf;
2678 	int i;
2679 
2680 	if (ce_state->scn->fastpath_mode_on == false)
2681 		return;
2682 
2683 	if (!ce_state->htt_rx_data)
2684 		return;
2685 
2686 	/*
2687 	 * When fastpath mode is on, this datapath CE is kept completely
2688 	 * full: unlike other CEs, it does not leave one blank space to
2689 	 * distinguish between an empty queue and a full queue. So free
2690 	 * all the entries.
2691 	 */
2692 	for (i = 0; i < dst_ring->nentries; i++) {
2693 		nbuf = dst_ring->per_transfer_context[i];
2694 
2695 		/*
2696 		 * The reasons for doing this check are:
2697 		 * 1) Protect against calling cleanup before allocating buffers
2698 		 * 2) In a corner case, fastpath_mode_on may be set, but we
2699 		 *    could have a partially filled ring, because of a memory
2700 		 *    allocation failure in the middle of allocating the ring.
2701 		 *    This check accounts for that case, checking
2702 		 *    fastpath_mode_on flag or started flag would not have
2703 		 *    covered that case. This is not in performance path,
2704 		 *    so OK to do this.
2705 		 */
2706 		if (nbuf) {
2707 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
2708 					      QDF_DMA_FROM_DEVICE);
2709 			qdf_nbuf_free(nbuf);
2710 		}
2711 	}
2712 }
2713 
2714 /**
2715  * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
2716  * @scn: HIF handle
2717  *
2718  * Datapath Rx CEs are a special case, where we reuse all the message buffers.
2719  * Hence we have to post all the entries in the pipe, even in the beginning,
2720  * unlike other CE pipes where one less than dest_nentries entries are filled
2721  * in the beginning.
2722  *
2723  * Return: None
2724  */
2725 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
2726 {
2727 	int pipe_num;
2728 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2729 
2730 	if (scn->fastpath_mode_on == false)
2731 		return;
2732 
2733 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2734 		struct HIF_CE_pipe_info *pipe_info =
2735 			&hif_state->pipe_info[pipe_num];
2736 		struct CE_state *ce_state =
2737 			scn->ce_id_to_state[pipe_info->pipe_num];
2738 
2739 		if (ce_state->htt_rx_data)
2740 			atomic_inc(&pipe_info->recv_bufs_needed);
2741 	}
2742 }
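
/*
 * Example (illustrative): with dest_nentries = 512, a normal pipe
 * posts 511 receive buffers, leaving one slot empty to distinguish a
 * full ring from an empty one. The increment above makes an HTT Rx
 * fastpath pipe post all 512, since fastpath reuses every buffer in
 * place.
 */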
2743 #else
2744 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
2745 {
2746 }
2747 
2748 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
2749 {
2750 	return false;
2751 }
2752 #endif /* WLAN_FEATURE_FASTPATH */
2753 
2754 void ce_fini(struct CE_handle *copyeng)
2755 {
2756 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2757 	unsigned int CE_id = CE_state->id;
2758 	struct hif_softc *scn = CE_state->scn;
2759 	uint32_t desc_size;
2760 
2761 	bool inited = CE_state->timer_inited;
2762 	CE_state->state = CE_UNUSED;
2763 	scn->ce_id_to_state[CE_id] = NULL;
2764 	/* Set the flag to false first to stop processing in ce_poll_timeout */
2765 	ce_disable_polling(CE_state);
2766 
2767 	qdf_lro_deinit(CE_state->lro_data);
2768 
2769 	if (CE_state->src_ring) {
2770 		/* Cleanup the datapath Tx ring */
2771 		ce_h2t_tx_ce_cleanup(copyeng);
2772 
2773 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
2774 		if (CE_state->src_ring->shadow_base_unaligned)
2775 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
2776 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
2777 			ce_free_desc_ring(scn, CE_state->id,
2778 					  CE_state->src_ring,
2779 					  desc_size);
2780 		ce_srng_cleanup(scn, CE_state, CE_RING_SRC);
2781 		qdf_mem_free(CE_state->src_ring);
2782 	}
2783 	if (CE_state->dest_ring) {
2784 		/* Cleanup the datapath Rx ring */
2785 		ce_t2h_msg_ce_cleanup(copyeng);
2786 
2787 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
2788 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
2789 			ce_free_desc_ring(scn, CE_state->id,
2790 					  CE_state->dest_ring,
2791 					  desc_size);
2792 		ce_srng_cleanup(scn, CE_state, CE_RING_DEST);
2793 		qdf_mem_free(CE_state->dest_ring);
2794 
2795 		/* epping */
2796 		if (inited) {
2797 			qdf_timer_free(&CE_state->poll_timer);
2798 		}
2799 	}
2800 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
2801 		/* Cleanup the datapath Tx ring */
2802 		ce_h2t_tx_ce_cleanup(copyeng);
2803 
2804 		if (CE_state->status_ring->shadow_base_unaligned)
2805 			qdf_mem_free(
2806 				CE_state->status_ring->shadow_base_unaligned);
2807 
2808 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
2809 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
2810 			ce_free_desc_ring(scn, CE_state->id,
2811 					  CE_state->status_ring,
2812 					  desc_size);
2813 		ce_srng_cleanup(scn, CE_state, CE_RING_STATUS);
2814 		qdf_mem_free(CE_state->status_ring);
2815 	}
2816 
2817 	free_mem_ce_debug_history(scn, CE_id);
2818 	reset_ce_debug_history(scn);
2819 	ce_deinit_ce_desc_event_log(scn, CE_id);
2820 
2821 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
2822 #ifdef CE_TASKLET_SCHEDULE_ON_FULL
2823 	qdf_spinlock_destroy(&CE_state->ce_interrupt_lock);
2824 #endif
2825 	qdf_mem_free(CE_state);
2826 }
2827 
2828 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
2829 {
2830 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2831 
2832 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
2833 		  sizeof(hif_state->msg_callbacks_pending));
2834 	qdf_mem_zero(&hif_state->msg_callbacks_current,
2835 		  sizeof(hif_state->msg_callbacks_current));
2836 }
2837 
2838 /* Send the first nbytes bytes of the buffer */
2839 QDF_STATUS
2840 hif_send_head(struct hif_opaque_softc *hif_ctx,
2841 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
2842 	      qdf_nbuf_t nbuf, unsigned int data_attr)
2843 {
2844 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2845 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2846 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2847 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2848 	int bytes = nbytes, nfrags = 0;
2849 	struct ce_sendlist sendlist;
2850 	int i = 0;
2851 	QDF_STATUS status;
2852 	unsigned int mux_id = 0;
2853 
2854 	if (nbytes > qdf_nbuf_len(nbuf)) {
2855 		hif_err("nbytes: %d nbuf_len: %d", nbytes,
2856 		       (uint32_t)qdf_nbuf_len(nbuf));
2857 		QDF_ASSERT(0);
2858 	}
2859 
2860 	transfer_id =
2861 		(mux_id & MUX_ID_MASK) |
2862 		(transfer_id & TRANSACTION_ID_MASK);
2863 	data_attr &= DESC_DATA_FLAG_MASK;
2864 	/*
2865 	 * The common case involves sending multiple fragments within a
2866 	 * single download (the tx descriptor and the tx frame header).
2867 	 * So, optimize for the case of multiple fragments by not even
2868 	 * checking whether it's necessary to use a sendlist.
2869 	 * The overhead of using a sendlist for a single buffer download
2870 	 * is not a big deal, since it happens rarely (for WMI messages).
2871 	 */
2872 	ce_sendlist_init(&sendlist);
2873 	do {
2874 		qdf_dma_addr_t frag_paddr;
2875 		int frag_bytes;
2876 
2877 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
2878 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
2879 		/*
2880 		 * Clear the packet offset for all but the first CE desc.
2881 		 */
2882 		if (i++ > 0)
2883 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
2884 
2885 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
2886 				    frag_bytes >
2887 				    bytes ? bytes : frag_bytes,
2888 				    qdf_nbuf_get_frag_is_wordstream
2889 				    (nbuf,
2890 				    nfrags) ? 0 :
2891 				    CE_SEND_FLAG_SWAP_DISABLE,
2892 				    data_attr);
2893 		if (status != QDF_STATUS_SUCCESS) {
2894 			hif_err("frag_num: %d larger than limit (status=%d)",
2895 			       nfrags, status);
2896 			return status;
2897 		}
2898 		bytes -= frag_bytes;
2899 		nfrags++;
2900 	} while (bytes > 0);
2901 
2902 	/* Make sure we have resources to handle this request */
2903 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2904 	if (pipe_info->num_sends_allowed < nfrags) {
2905 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2906 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
2907 		return QDF_STATUS_E_RESOURCES;
2908 	}
2909 	pipe_info->num_sends_allowed -= nfrags;
2910 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2911 
2912 	if (qdf_unlikely(!ce_hdl)) {
2913 		hif_err("CE handle is null");
2914 		return A_ERROR;
2915 	}
2916 
2917 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
2918 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
2919 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
2920 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
2921 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
2922 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
2923 
2924 	return status;
2925 }
2926 
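/*
 * Example (illustrative): a typical two-fragment download (fragment 0
 * carrying the tx descriptor / frame header, fragment 1 the payload)
 * builds a sendlist with nfrags = 2, consumes two src ring entries,
 * and debits num_sends_allowed by 2 until the completions come back.
 */
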
2927 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
2928 								int force)
2929 {
2930 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2931 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2932 
2933 	if (!force) {
2934 		int resources;
2935 		/*
2936 		 * Decide whether to actually poll for completions, or just
2937 		 * wait for a later chance. If there seem to be plenty of
2938 		 * resources left, then just wait, since checking involves
2939 		 * reading a CE register, which is a relatively expensive
2940 		 * operation.
2941 		 */
2942 		resources = hif_get_free_queue_number(hif_ctx, pipe);
2943 		/*
2944 		 * If at least 50% of the total resources are still available,
2945 		 * don't bother checking again yet.
2946 		 */
2947 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
2948 									 1))
2949 			return;
2950 	}
2951 #ifdef ATH_11AC_TXCOMPACT
2952 	ce_per_engine_servicereap(scn, pipe);
2953 #else
2954 	ce_per_engine_service(scn, pipe);
2955 #endif
2956 }
2957 
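/*
 * Worked example for the threshold above (illustrative): with
 * src_nentries = 1024 for the pipe, the CE register read is skipped
 * while resources > (1024 >> 1) = 512, i.e. while more than half of
 * the send slots are still free.
 */
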
2958 #if defined(CE_TASKLET_SCHEDULE_ON_FULL) && defined(CE_TASKLET_DEBUG_ENABLE)
2959 #define CE_RING_FULL_THRESHOLD_TIME 3000000
2960 #define CE_RING_FULL_THRESHOLD 1024
2961 /* This function is called from the htc_send path. If there is no resource
2962  * to send a packet via HTC, check whether interrupts have not been processed
2963  * for that CE for the last 3 seconds. If so, schedule a tasklet to reap the
2964  * available entries. Also, if the queue has reached 1024 entries within
2965  * 3 seconds, schedule a tasklet as well.
2966  */
2967 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2968 {
2969 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2970 	int64_t diff_time = qdf_get_log_timestamp_usecs() -
2971 			hif_state->stats.tasklet_sched_entry_ts[pipe];
2972 
2973 	hif_state->stats.ce_ring_full_count[pipe]++;
2974 
2975 	if (diff_time >= CE_RING_FULL_THRESHOLD_TIME ||
2976 	    hif_state->stats.ce_ring_full_count[pipe] >=
2977 	    CE_RING_FULL_THRESHOLD) {
2978 		hif_state->stats.ce_ring_full_count[pipe] = 0;
2979 		hif_state->stats.ce_manual_tasklet_schedule_count[pipe]++;
2980 		hif_state->stats.ce_last_manual_tasklet_schedule_ts[pipe] =
2981 			qdf_get_log_timestamp_usecs();
2982 		ce_dispatch_interrupt(pipe, &hif_state->tasklets[pipe]);
2983 	}
2984 }
2985 #else
2986 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2987 {
2988 }
2989 #endif
2990 
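/*
 * Threshold example (from the constants above): with
 * CE_RING_FULL_THRESHOLD_TIME = 3000000 us (3 s) and
 * CE_RING_FULL_THRESHOLD = 1024, the tasklet is dispatched manually
 * once a ring-full event arrives either more than 3 s after the last
 * tasklet entry on that pipe, or after 1024 accumulated ring-full
 * events, whichever comes first; the counter then resets.
 */
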
2991 uint16_t
2992 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2993 {
2994 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2995 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2996 	uint16_t rv;
2997 
2998 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2999 	rv = pipe_info->num_sends_allowed;
3000 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
3001 	return rv;
3002 }
3003 
3004 /* Called by lower (CE) layer when a send to Target completes. */
3005 static void
3006 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
3007 		     void *transfer_context, qdf_dma_addr_t CE_data,
3008 		     unsigned int nbytes, unsigned int transfer_id,
3009 		     unsigned int sw_index, unsigned int hw_index,
3010 		     unsigned int toeplitz_hash_result)
3011 {
3012 	struct HIF_CE_pipe_info *pipe_info =
3013 		(struct HIF_CE_pipe_info *)ce_context;
3014 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
3015 	struct hif_msg_callbacks *msg_callbacks =
3016 		&pipe_info->pipe_callbacks;
3017 
3018 	do {
3019 		/*
3020 		 * The upper layer callback will be triggered
3021 		 * when last fragment is complteted.
3022 		 */
3023 		if (transfer_context != CE_SENDLIST_ITEM_CTXT)
3024 			msg_callbacks->txCompletionHandler(
3025 				msg_callbacks->Context,
3026 				transfer_context, transfer_id,
3027 				toeplitz_hash_result);
3028 
3029 		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
3030 		pipe_info->num_sends_allowed++;
3031 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
3032 	} while (ce_completed_send_next(copyeng,
3033 			&ce_context, &transfer_context,
3034 			&CE_data, &nbytes, &transfer_id,
3035 			&sw_idx, &hw_idx,
3036 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
3037 }
3038 
3039 /**
3040  * hif_ce_do_recv(): send message from copy engine to upper layers
3041  * @msg_callbacks: structure containing callback and callback context
3042  * @netbuf: skb containing message
3043  * @nbytes: number of bytes in the message
3044  * @pipe_info: used for the pipe_number info
3045  *
3046  * Checks the packet length, configures the length in the netbuff,
3047  * and calls the upper layer callback.
3048  *
3049  * Return: None
3050  */
3051 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
3052 		qdf_nbuf_t netbuf, int nbytes,
3053 		struct HIF_CE_pipe_info *pipe_info) {
3054 	if (nbytes <= pipe_info->buf_sz) {
3055 		qdf_nbuf_set_pktlen(netbuf, nbytes);
3056 		msg_callbacks->
3057 			rxCompletionHandler(msg_callbacks->Context,
3058 					netbuf, pipe_info->pipe_num);
3059 	} else {
3060 		hif_err("Invalid Rx msg buf: %pK nbytes: %d", netbuf, nbytes);
3061 		qdf_nbuf_free(netbuf);
3062 	}
3063 }
3064 
3065 /* Called by lower (CE) layer when data is received from the Target. */
3066 static void
3067 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
3068 		     void *transfer_context, qdf_dma_addr_t CE_data,
3069 		     unsigned int nbytes, unsigned int transfer_id,
3070 		     unsigned int flags)
3071 {
3072 	struct HIF_CE_pipe_info *pipe_info =
3073 		(struct HIF_CE_pipe_info *)ce_context;
3074 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
3075 	struct CE_state *ce_state = (struct CE_state *) copyeng;
3076 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3077 	struct hif_msg_callbacks *msg_callbacks = &pipe_info->pipe_callbacks;
3078 
3079 	do {
3080 		hif_rtpm_record_ce_last_busy_evt(scn, ce_state->id);
3081 		hif_rtpm_mark_last_busy(HIF_RTPM_ID_CE);
3082 		qdf_nbuf_unmap_single(scn->qdf_dev,
3083 				      (qdf_nbuf_t) transfer_context,
3084 				      QDF_DMA_FROM_DEVICE);
3085 
3086 		atomic_inc(&pipe_info->recv_bufs_needed);
3087 		hif_post_recv_buffers_for_pipe(pipe_info);
3088 		if (scn->target_status == TARGET_STATUS_RESET)
3089 			qdf_nbuf_free(transfer_context);
3090 		else
3091 			hif_ce_do_recv(msg_callbacks, transfer_context,
3092 				nbytes, pipe_info);
3093 
3094 		/* Set the force_break flag if the number of receives reaches
3095 		 * MAX_NUM_OF_RECEIVES
3096 		 */
3097 		ce_state->receive_count++;
3098 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
3099 			ce_state->force_break = 1;
3100 			break;
3101 		}
3102 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
3103 					&CE_data, &nbytes, &transfer_id,
3104 					&flags) == QDF_STATUS_SUCCESS);
3105 
3106 }
3107 
3108 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
3109 
3110 void
3111 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
3112 	      struct hif_msg_callbacks *callbacks)
3113 {
3114 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
3115 
3116 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
3117 	spin_lock_init(&pcie_access_log_lock);
3118 #endif
3119 	/* Save callbacks for later installation */
3120 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
3121 		 sizeof(hif_state->msg_callbacks_pending));
3122 
3123 }
3124 
3125 static int hif_completion_thread_startup_by_ceid(struct HIF_CE_state *hif_state,
3126 						 int pipe_num)
3127 {
3128 	struct CE_attr attr;
3129 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3130 	struct hif_msg_callbacks *hif_msg_callbacks =
3131 		&hif_state->msg_callbacks_current;
3132 	struct HIF_CE_pipe_info *pipe_info;
3133 	struct CE_state *ce_state;
3134 
3135 	if (pipe_num >= CE_COUNT_MAX)
3136 		return -EINVAL;
3137 
3138 	pipe_info = &hif_state->pipe_info[pipe_num];
3139 	ce_state = scn->ce_id_to_state[pipe_num];
3140 
3141 	if (!hif_msg_callbacks ||
3142 	    !hif_msg_callbacks->rxCompletionHandler ||
3143 	    !hif_msg_callbacks->txCompletionHandler) {
3144 		hif_err("%s: no completion handler registered", __func__);
3145 		return -EFAULT;
3146 	}
3147 
3148 	attr = hif_state->host_ce_config[pipe_num];
3149 	if (attr.src_nentries) {
3150 		/* pipe used to send to target */
3151 		hif_debug("%s: pipe_num:%d pipe_info:0x%pK\n",
3152 			  __func__, pipe_num, pipe_info);
3153 		ce_send_cb_register(pipe_info->ce_hdl,
3154 				    hif_pci_ce_send_done, pipe_info,
3155 				    attr.flags & CE_ATTR_DISABLE_INTR);
3156 		pipe_info->num_sends_allowed = attr.src_nentries - 1;
3157 	}
3158 	if (attr.dest_nentries) {
3159 		hif_debug("%s: pipe_num:%d pipe_info:0x%pK\n",
3160 			  __func__, pipe_num, pipe_info);
3161 		/* pipe used to receive from target */
3162 		ce_recv_cb_register(pipe_info->ce_hdl,
3163 				    hif_pci_ce_recv_data, pipe_info,
3164 				    attr.flags & CE_ATTR_DISABLE_INTR);
3165 	}
3166 
3167 	if (attr.src_nentries)
3168 		qdf_spinlock_create(&pipe_info->completion_freeq_lock);
3169 
3170 	if (!(ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND))
3171 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
3172 			     sizeof(pipe_info->pipe_callbacks));
3173 
3174 	return 0;
3175 }
3176 
3177 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
3178 {
3179 	struct CE_handle *ce_diag = hif_state->ce_diag;
3180 	int pipe_num, ret;
3181 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3182 
3183 	/* daemonize("hif_compl_thread"); */
3184 
3185 	if (scn->ce_count == 0) {
3186 		hif_err("ce_count is 0");
3187 		return -EINVAL;
3188 	}
3189 
3190 
3191 	A_TARGET_ACCESS_LIKELY(scn);
3192 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3193 		struct HIF_CE_pipe_info *pipe_info;
3194 
3195 		pipe_info = &hif_state->pipe_info[pipe_num];
3196 		if (pipe_info->ce_hdl == ce_diag)
3197 			continue;       /* Handle Diagnostic CE specially */
3198 
3199 		ret = hif_completion_thread_startup_by_ceid(hif_state,
3200 							    pipe_num);
3201 		if (ret < 0)
3202 			return ret;
3203 
3204 	}
3205 
3206 	A_TARGET_ACCESS_UNLIKELY(scn);
3207 	return 0;
3208 }
3209 
3210 /*
3211  * Install pending msg callbacks.
3212  *
3213  * TBDXXX: This hack is needed because upper layers install msg callbacks
3214  * for use with HTC before BMI is done; yet this HIF implementation
3215  * needs to continue to use BMI msg callbacks. Really, upper layers
3216  * should not register HTC callbacks until AFTER BMI phase.
3217  */
3218 static void hif_msg_callbacks_install(struct hif_softc *scn)
3219 {
3220 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3221 
3222 	qdf_mem_copy(&hif_state->msg_callbacks_current,
3223 		 &hif_state->msg_callbacks_pending,
3224 		 sizeof(hif_state->msg_callbacks_pending));
3225 }
3226 
3227 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
3228 							uint8_t *DLPipe)
3229 {
3230 	int ul_is_polled, dl_is_polled;
3231 
3232 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
3233 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
3234 }
3235 
3236 /**
3237  * hif_dump_pipe_debug_count() - Log error count
3238  * @scn: hif_softc pointer.
3239  *
3240  * Output the pipe error counts of each pipe to log file
3241  *
3242  * Return: N/A
3243  */
3244 void hif_dump_pipe_debug_count(struct hif_softc *scn)
3245 {
3246 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3247 	int pipe_num;
3248 
3249 	if (!hif_state) {
3250 		hif_err("hif_state is NULL");
3251 		return;
3252 	}
3253 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3254 		struct HIF_CE_pipe_info *pipe_info;
3255 
3256 		pipe_info = &hif_state->pipe_info[pipe_num];
3257 
3258 		if (pipe_info->nbuf_alloc_err_count > 0 ||
3259 		    pipe_info->nbuf_dma_err_count > 0 ||
3260 		    pipe_info->nbuf_ce_enqueue_err_count)
3261 			hif_err(
3262 				"pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
3263 				pipe_info->pipe_num,
3264 				atomic_read(&pipe_info->recv_bufs_needed),
3265 				pipe_info->nbuf_alloc_err_count,
3266 				pipe_info->nbuf_dma_err_count,
3267 				pipe_info->nbuf_ce_enqueue_err_count);
3268 	}
3269 }
3270 
3271 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
3272 					  void *nbuf, uint32_t *error_cnt,
3273 					  enum hif_ce_event_type failure_type,
3274 					  const char *failure_type_string)
3275 {
3276 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
3277 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
3278 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
3279 	int ce_id = CE_state->id;
3280 	uint32_t error_cnt_tmp;
3281 
3282 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3283 	error_cnt_tmp = ++(*error_cnt);
3284 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3285 	hif_debug("pipe_num: %d, needed: %d, err_cnt: %u, fail_type: %s",
3286 		  pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
3287 		  failure_type_string);
3288 	hif_record_ce_desc_event(scn, ce_id, failure_type,
3289 				 NULL, nbuf, bufs_needed_tmp, 0);
3290 	/* If we fail to allocate the last buffer for an rx pipe,
3291 	 * there is no trigger to refill the CE and we will
3292 	 * eventually crash
3293 	 */
3294 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1 ||
3295 	    (ce_srng_based(scn) &&
3296 	     bufs_needed_tmp == CE_state->dest_ring->nentries - 2))
3297 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
3298 
3299 }
3300 
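/*
 * Trigger example (illustrative): with dest_ring->nentries = 512, the
 * OOM recovery work is scheduled once bufs_needed_tmp reaches
 * nentries - 1 (511), or already at nentries - 2 (510) on an
 * SRNG-based target, i.e. exactly when the last normally-postable
 * buffer has failed and nothing else would refill the ring.
 */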
3301 
3302 
3303 
3304 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
3305 {
3306 	struct CE_handle *ce_hdl;
3307 	qdf_size_t buf_sz;
3308 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
3309 	QDF_STATUS status;
3310 	uint32_t bufs_posted = 0;
3311 	unsigned int ce_id;
3312 
3313 	buf_sz = pipe_info->buf_sz;
3314 	if (buf_sz == 0) {
3315 		/* Unused Copy Engine */
3316 		return QDF_STATUS_SUCCESS;
3317 	}
3318 
3319 	ce_hdl = pipe_info->ce_hdl;
3320 	ce_id = ((struct CE_state *)ce_hdl)->id;
3321 
3322 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3323 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
3324 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
3325 		qdf_nbuf_t nbuf;
3326 
3327 		atomic_dec(&pipe_info->recv_bufs_needed);
3328 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3329 
3330 		hif_record_ce_desc_event(scn, ce_id,
3331 					 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL,
3332 					 0, 0);
3333 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
3334 		if (!nbuf) {
3335 			hif_post_recv_buffers_failure(pipe_info, nbuf,
3336 					&pipe_info->nbuf_alloc_err_count,
3337 					 HIF_RX_NBUF_ALLOC_FAILURE,
3338 					"HIF_RX_NBUF_ALLOC_FAILURE");
3339 			return QDF_STATUS_E_NOMEM;
3340 		}
3341 
3342 		hif_record_ce_desc_event(scn, ce_id,
3343 					 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf,
3344 					 0, 0);
3345 		/*
3346 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
3347 		 * CE_data = dma_map_single(dev, data, buf_sz,
3348 		 *			    DMA_FROM_DEVICE);
3349 		 */
3350 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
3351 					    QDF_DMA_FROM_DEVICE);
3352 
3353 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
3354 			hif_post_recv_buffers_failure(pipe_info, nbuf,
3355 					&pipe_info->nbuf_dma_err_count,
3356 					 HIF_RX_NBUF_MAP_FAILURE,
3357 					"HIF_RX_NBUF_MAP_FAILURE");
3358 			qdf_nbuf_free(nbuf);
3359 			return status;
3360 		}
3361 
3362 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
3363 		hif_record_ce_desc_event(scn, ce_id,
3364 					 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf,
3365 					 0, 0);
3366 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
3367 					       buf_sz, DMA_FROM_DEVICE);
3368 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
3369 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
3370 			hif_post_recv_buffers_failure(pipe_info, nbuf,
3371 					&pipe_info->nbuf_ce_enqueue_err_count,
3372 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
3373 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
3374 
3375 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
3376 						QDF_DMA_FROM_DEVICE);
3377 			qdf_nbuf_free(nbuf);
3378 			return status;
3379 		}
3380 
3381 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3382 		bufs_posted++;
3383 	}
3384 	pipe_info->nbuf_alloc_err_count =
3385 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
3386 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
3387 	pipe_info->nbuf_dma_err_count =
3388 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
3389 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
3390 	pipe_info->nbuf_ce_enqueue_err_count =
3391 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
3392 	pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
3393 
3394 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3395 
3396 	return QDF_STATUS_SUCCESS;
3397 }
3398 
3399 /*
3400  * Try to post all desired receive buffers for all pipes.
3401  * Returns QDF_STATUS_SUCCESS for a non-fastpath rx copy engine,
3402  * since oom_allocation_work will be scheduled to recover any
3403  * failures, and an error status if receive buffers cannot be
3404  * completely replenished for the fastpath rx copy engine.
3405  */
3406 static QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
3407 {
3408 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3409 	int pipe_num;
3410 	struct CE_state *ce_state = NULL;
3411 	QDF_STATUS qdf_status;
3412 
3413 	A_TARGET_ACCESS_LIKELY(scn);
3414 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3415 		struct HIF_CE_pipe_info *pipe_info;
3416 
3417 		ce_state = scn->ce_id_to_state[pipe_num];
3418 		pipe_info = &hif_state->pipe_info[pipe_num];
3419 
3420 		if (!ce_state)
3421 			continue;
3422 
3423 		/* Do not init dynamic CEs during initial load */
3424 		if (ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND)
3425 			continue;
3426 
3427 		if (hif_is_nss_wifi_enabled(scn) && ce_state->htt_rx_data)
3428 			continue;
3430 
3431 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
3432 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) &&
3433 		    ce_state->htt_rx_data && scn->fastpath_mode_on) {
3435 			A_TARGET_ACCESS_UNLIKELY(scn);
3436 			return qdf_status;
3437 		}
3438 	}
3439 
3440 	A_TARGET_ACCESS_UNLIKELY(scn);
3441 
3442 	return QDF_STATUS_SUCCESS;
3443 }
3444 
3445 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
3446 {
3447 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3448 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3449 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
3450 
3451 	hif_update_fastpath_recv_bufs_cnt(scn);
3452 
3453 	hif_msg_callbacks_install(scn);
3454 
3455 	if (hif_completion_thread_startup(hif_state))
3456 		return QDF_STATUS_E_FAILURE;
3457 
3458 	/* enable buffer cleanup */
3459 	hif_state->started = true;
3460 
3461 	/* Post buffers once to start things off. */
3462 	qdf_status = hif_post_recv_buffers(scn);
3463 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
3464 		/* cleanup is done in hif_ce_disable */
3465 		hif_err("Failed to post buffers");
3466 		return qdf_status;
3467 	}
3468 
3469 	return qdf_status;
3470 }
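
/*
 * Hedged bring-up sketch (the surrounding bus-layer calls are
 * illustrative, not mandated by this file): hif_start() runs once the
 * copy engines are configured, and a failure is unwound with
 * hif_ce_stop():
 *
 *	if (hif_config_ce(scn))
 *		return QDF_STATUS_E_FAILURE;
 *	status = hif_start(GET_HIF_OPAQUE_HDL(scn));
 *	if (!QDF_IS_STATUS_SUCCESS(status))
 *		hif_ce_stop(scn);
 */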
3471 
3472 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
3473 {
3474 	struct hif_softc *scn;
3475 	struct CE_handle *ce_hdl;
3476 	uint32_t buf_sz;
3477 	struct HIF_CE_state *hif_state;
3478 	qdf_nbuf_t netbuf;
3479 	qdf_dma_addr_t CE_data;
3480 	void *per_CE_context;
3481 
3482 	buf_sz = pipe_info->buf_sz;
3483 	/* Unused Copy Engine */
3484 	if (buf_sz == 0)
3485 		return;
3486 
3488 	hif_state = pipe_info->HIF_CE_state;
3489 	if (!hif_state->started)
3490 		return;
3491 
3492 	scn = HIF_GET_SOFTC(hif_state);
3493 	ce_hdl = pipe_info->ce_hdl;
3494 
3495 	if (!scn->qdf_dev)
3496 		return;
3497 	while (ce_revoke_recv_next
3498 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
3499 			&CE_data) == QDF_STATUS_SUCCESS) {
3500 		if (netbuf) {
3501 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
3502 					      QDF_DMA_FROM_DEVICE);
3503 			qdf_nbuf_free(netbuf);
3504 		}
3505 	}
3506 }
3507 
3508 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
3509 {
3510 	struct CE_handle *ce_hdl;
3511 	struct HIF_CE_state *hif_state;
3512 	struct hif_softc *scn;
3513 	qdf_nbuf_t netbuf;
3514 	void *per_CE_context;
3515 	qdf_dma_addr_t CE_data;
3516 	unsigned int nbytes;
3517 	unsigned int id;
3518 	uint32_t buf_sz;
3519 	uint32_t toeplitz_hash_result;
3520 
3521 	buf_sz = pipe_info->buf_sz;
3522 	if (buf_sz == 0) {
3523 		/* Unused Copy Engine */
3524 		return;
3525 	}
3526 
3527 	hif_state = pipe_info->HIF_CE_state;
3528 	if (!hif_state->started)
3529 		return;
3531 
3532 	scn = HIF_GET_SOFTC(hif_state);
3533 
3534 	ce_hdl = pipe_info->ce_hdl;
3535 
3536 	while (ce_cancel_send_next
3537 		       (ce_hdl, &per_CE_context,
3538 		       (void **)&netbuf, &CE_data, &nbytes,
3539 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
3540 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
3541 			/*
3542 			 * Packets enqueued by htt_h2t_ver_req_msg() and
3543 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
3544 			 * freed in htt_htc_misc_pkt_pool_free() in
3545 			 * wlantl_close(); check the endpoint on which
3546 			 * they were queued and skip them so they are
3547 			 * not freed here a second time.
3548 			 */
3549 			if (id == scn->htc_htt_tx_endpoint)
3550 				return;
3551 			/* Indicate the completion to the higher
3552 			 * layer so it can free the buffer.
3553 			 */
3554 			if (pipe_info->pipe_callbacks.txCompletionHandler)
3555 				pipe_info->pipe_callbacks.
3556 				    txCompletionHandler(pipe_info->
3557 					    pipe_callbacks.Context,
3558 					    netbuf, id, toeplitz_hash_result);
3559 		}
3560 	}
3561 }
3562 
3563 /*
3564  * Clean up residual buffers for device shutdown:
3565  *    buffers that were enqueued for receive
3566  *    buffers that were to be sent
3567  * Note: Buffers that had completed but which were
3568  * not yet processed are on a completion queue. They
3569  * are handled when the completion thread shuts down.
3570  */
3571 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
3572 {
3573 	int pipe_num;
3574 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3575 	struct CE_state *ce_state;
3576 
3577 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3578 		struct HIF_CE_pipe_info *pipe_info;
3579 
3580 		ce_state = scn->ce_id_to_state[pipe_num];
3581 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3582 				((ce_state->htt_tx_data) ||
3583 				 (ce_state->htt_rx_data))) {
3584 			continue;
3585 		}
3586 
3587 		pipe_info = &hif_state->pipe_info[pipe_num];
3588 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
3589 		hif_send_buffer_cleanup_on_pipe(pipe_info);
3590 	}
3591 }
3592 
3593 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
3594 {
3595 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3596 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3597 
3598 	hif_buffer_cleanup(hif_state);
3599 }
3600 
3601 static void hif_destroy_oom_work(struct hif_softc *scn)
3602 {
3603 	struct CE_state *ce_state;
3604 	int ce_id;
3605 
3606 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3607 		ce_state = scn->ce_id_to_state[ce_id];
3608 		if (ce_state)
3609 			qdf_destroy_work(scn->qdf_dev,
3610 					 &ce_state->oom_allocation_work);
3611 	}
3612 }
3613 
3614 void hif_ce_stop(struct hif_softc *scn)
3615 {
3616 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3617 	int pipe_num;
3618 
3619 	/*
3620 	 * before cleaning up any memory, ensure irq &
3621 	 * bottom half contexts will not be re-entered
3622 	 */
3623 	hif_disable_isr(&scn->osc);
3624 	hif_destroy_oom_work(scn);
3625 	scn->hif_init_done = false;
3626 
3627 	/*
3628 	 * At this point, asynchronous threads are stopped,
3629 	 * The Target should not DMA nor interrupt, Host code may
3630 	 * not initiate anything more.  So we just need to clean
3631 	 * up Host-side state.
3632 	 */
3633 
3634 	if (scn->athdiag_procfs_inited) {
3635 		athdiag_procfs_remove();
3636 		scn->athdiag_procfs_inited = false;
3637 	}
3638 
3639 	hif_buffer_cleanup(hif_state);
3640 
3641 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3642 		struct HIF_CE_pipe_info *pipe_info;
3643 		struct CE_attr attr;
3644 		struct CE_handle *ce_diag = hif_state->ce_diag;
3645 
3646 		pipe_info = &hif_state->pipe_info[pipe_num];
3647 		if (pipe_info->ce_hdl) {
3648 			if (pipe_info->ce_hdl != ce_diag &&
3649 			    hif_state->started) {
3650 				attr = hif_state->host_ce_config[pipe_num];
3651 				if (attr.src_nentries)
3652 					qdf_spinlock_destroy(&pipe_info->
3653 							completion_freeq_lock);
3654 			}
3655 			ce_fini(pipe_info->ce_hdl);
3656 			pipe_info->ce_hdl = NULL;
3657 			pipe_info->buf_sz = 0;
3658 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
3659 		}
3660 	}
3661 
3662 	if (hif_state->sleep_timer_init) {
3663 		qdf_timer_stop(&hif_state->sleep_timer);
3664 		qdf_timer_free(&hif_state->sleep_timer);
3665 		hif_state->sleep_timer_init = false;
3666 	}
3667 
3668 	hif_state->started = false;
3669 }
3670 
3671 static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
3672 				   struct shadow_reg_cfg
3673 				   **target_shadow_reg_cfg_ret,
3674 				   uint32_t *shadow_cfg_sz_ret)
3675 {
3676 	if (target_shadow_reg_cfg_ret)
3677 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
3678 	if (shadow_cfg_sz_ret)
3679 		*shadow_cfg_sz_ret = shadow_cfg_sz;
3680 }
3681 
3682 /**
3683  * hif_get_target_ce_config() - get copy engine configuration
3684  * @scn: HIF context
3685  * @target_ce_config_ret: basic copy engine configuration
3686  * @target_ce_config_sz_ret: size of the basic configuration in bytes
3687  * @target_service_to_ce_map_ret: service mapping for the copy engines
3688  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
3689  * @target_shadow_reg_cfg_ret: shadow register configuration
3690  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
3691  *
3692  * Provides an accessor to these values outside of this file.
3693  * Currently these are stored in static pointers to const sections,
3694  * selected from one of several configurations at compile time.
3695  * Runtime selection would need to consider mode, target type and bus type.
3696  *
3697  * Return: return by parameter.
3698  */
3699 void hif_get_target_ce_config(struct hif_softc *scn,
3700 		struct CE_pipe_config **target_ce_config_ret,
3701 		uint32_t *target_ce_config_sz_ret,
3702 		struct service_to_pipe **target_service_to_ce_map_ret,
3703 		uint32_t *target_service_to_ce_map_sz_ret,
3704 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
3705 		uint32_t *shadow_cfg_sz_ret)
3706 {
3707 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3708 
3709 	*target_ce_config_ret = hif_state->target_ce_config;
3710 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
3711 
3712 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
3713 				       target_service_to_ce_map_sz_ret);
3714 	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
3715 			       shadow_cfg_sz_ret);
3716 }
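
/*
 * Illustrative caller pattern (see hif_wlan_enable() below): the sizes
 * returned above are byte counts, so a caller derives element counts by
 * dividing by the element size:
 *
 *	hif_get_target_ce_config(scn, &ce_cfg, &ce_cfg_sz, &svc_map,
 *				 &svc_map_sz, &shadow_cfg, &shadow_sz);
 *	num_ce_tgt_cfg = ce_cfg_sz / sizeof(struct CE_pipe_config);
 */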
3717 
3718 #ifdef CONFIG_SHADOW_V3
3719 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
3720 {
3721 	int i;
3722 
3723 	hif_err("v3: num_config %d", cfg->num_shadow_reg_v3_cfg);
3724 
3725 	for (i = 0; i < cfg->num_shadow_reg_v3_cfg; i++)
3726 		hif_err("i %d, val %x", i, cfg->shadow_reg_v3_cfg[i].addr);
3728 }
3729 
3730 #elif defined(CONFIG_SHADOW_V2)
3731 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
3732 {
3733 	int i;
3734 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3735 		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
3736 
3737 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
3738 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3739 		     "%s: i %d, val %x", __func__, i,
3740 		     cfg->shadow_reg_v2_cfg[i].addr);
3741 	}
3742 }
3743 
3744 #else
3745 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
3746 {
3747 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3748 		  "%s: CONFIG_SHADOW V2/V3 not defined", __func__);
3749 }
3750 #endif
3751 
3752 #ifdef ADRASTEA_RRI_ON_DDR
3753 /**
3754  * hif_get_src_ring_read_index(): Called to get the SRRI
3755  *
3756  * @scn: hif_softc pointer
3757  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3758  *
3759  * This function returns the SRRI to the caller. For CEs that
3760  * don't have interrupts enabled, we look at the DDR-based SRRI
3761  *
3762  * Return: SRRI
3763  */
3764 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
3765 		uint32_t CE_ctrl_addr)
3766 {
3767 	struct CE_attr attr;
3768 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3769 
3770 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3771 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3772 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3773 	} else {
3774 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3775 			return A_TARGET_READ(scn,
3776 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
3777 		else
3778 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
3779 					CE_ctrl_addr);
3780 	}
3781 }
3782 
3783 /**
3784  * hif_get_dst_ring_read_index(): Called to get the DRRI
3785  *
3786  * @scn: hif_softc pointer
3787  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3788  *
3789  * This function returns the DRRI to the caller. For CEs that
3790  * don't have interrupts enabled, we look at the DDR-based DRRI
3791  *
3792  * Return: DRRI
3793  */
3794 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
3795 		uint32_t CE_ctrl_addr)
3796 {
3797 	struct CE_attr attr;
3798 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3799 
3800 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3801 
3802 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3803 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3804 	} else {
3805 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3806 			return A_TARGET_READ(scn,
3807 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
3808 		else
3809 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
3810 					CE_ctrl_addr);
3811 	}
3812 }
3813 
3814 /**
3815  * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
3816  * @scn: hif_softc pointer
3817  *
3818  * Return: qdf status
3819  */
3820 static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
3821 {
3822 	qdf_dma_addr_t paddr_rri_on_ddr = 0;
3823 
3824 	scn->vaddr_rri_on_ddr =
3825 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3826 		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
3827 		&paddr_rri_on_ddr);
3828 
3829 	if (!scn->vaddr_rri_on_ddr) {
3830 		hif_err("dmaable page alloc fail");
3831 		return QDF_STATUS_E_NOMEM;
3832 	}
3833 
3834 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
3835 
3836 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
3837 
3838 	return QDF_STATUS_SUCCESS;
3839 }
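
/*
 * Layout note (hedged): the allocation above is a flat array of
 * CE_COUNT uint32_t slots, one read-index word per copy engine:
 *
 *	vaddr_rri_on_ddr[0] -> CE0 read index word
 *	vaddr_rri_on_ddr[1] -> CE1 read index word
 *	...
 *
 * How SRRI and DRRI are packed within each word is defined by the
 * CE_SRC/DEST_RING_READ_IDX_GET_FROM_DDR macros, not by this function.
 */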
3840 #endif
3841 
3842 #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
3843 /**
3844  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3845  *
3846  * @scn: hif_softc pointer
3847  *
3848  * This function allocates non-cached memory on DDR and sends
3849  * the physical address of this memory to the CE hardware. The
3850  * hardware updates the RRI at this location.
3851  *
3852  * Return: None
3853  */
3854 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3855 {
3856 	unsigned int i;
3857 	uint32_t high_paddr, low_paddr;
3858 
3859 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3860 		return;
3861 
3862 	low_paddr  = BITS0_TO_31(scn->paddr_rri_on_ddr);
3863 	high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);
3864 
3865 	hif_debug("using srri and drri from DDR");
3866 
3867 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
3868 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
3869 
3870 	for (i = 0; i < CE_COUNT; i++)
3871 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
3872 }
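
/*
 * Worked example of the split above, assuming BITS0_TO_31() and
 * BITS32_TO_35() extract the named bit ranges of a 36-bit physical
 * address (the value is illustrative only):
 *
 *	paddr_rri_on_ddr = 0x840001000
 *	low_paddr        = 0x40001000	(bits  0..31)
 *	high_paddr       = 0x8		(bits 32..35)
 *
 * Two register writes are needed because the CE register file is
 * 32 bits wide.
 */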
3873 #else
3874 /**
3875  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3876  *
3877  * @scn: hif_softc pointer
3878  *
3879  * This is a dummy implementation for platforms that don't
3880  * support this functionality.
3881  *
3882  * Return: None
3883  */
3884 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3885 {
3886 }
3887 #endif
3888 
3889 /**
3890  * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
3891  *                                    QMI command
3892  * @scn: hif context
3893  * @cfg: wlan enable config
3894  *
3895  * In case of Genoa, rri_over_ddr memory configuration is passed
3896  * to firmware through QMI configure command.
3897  */
3898 #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
3899 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3900 					   struct pld_wlan_enable_cfg *cfg)
3901 {
3902 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3903 		return;
3904 
3905 	cfg->rri_over_ddr_cfg_valid = true;
3906 	cfg->rri_over_ddr_cfg.base_addr_low =
3907 		 BITS0_TO_31(scn->paddr_rri_on_ddr);
3908 	cfg->rri_over_ddr_cfg.base_addr_high =
3909 		 BITS32_TO_35(scn->paddr_rri_on_ddr);
3910 }
3911 #else
3912 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3913 					   struct pld_wlan_enable_cfg *cfg)
3914 {
3915 }
3916 #endif
3917 
3918 /**
3919  * hif_wlan_enable(): call the platform driver to enable wlan
3920  * @scn: HIF Context
3921  *
3922  * This function passes the con_mode and CE configuration to
3923  * platform driver to enable wlan.
3924  *
3925  * Return: linux error code
3926  */
3927 int hif_wlan_enable(struct hif_softc *scn)
3928 {
3929 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3930 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
3931 	struct pld_wlan_enable_cfg cfg = { 0 };
3932 	enum pld_driver_mode mode;
3933 	uint32_t con_mode = hif_get_conparam(scn);
3934 
3935 	hif_get_target_ce_config(scn,
3936 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
3937 			&cfg.num_ce_tgt_cfg,
3938 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
3939 			&cfg.num_ce_svc_pipe_cfg,
3940 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
3941 			&cfg.num_shadow_reg_cfg);
3942 
3943 	/* translate from structure size to array size */
3944 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
3945 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
3946 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
3947 
3948 	switch (tgt_info->target_type) {
3949 	case TARGET_TYPE_KIWI:
3950 	case TARGET_TYPE_MANGO:
3951 	case TARGET_TYPE_PEACH:
3952 		hif_prepare_hal_shadow_reg_cfg_v3(scn, &cfg);
3953 		break;
3954 	default:
3955 		hif_prepare_hal_shadow_register_cfg(scn,
3956 						    &cfg.shadow_reg_v2_cfg,
3957 						    &cfg.num_shadow_reg_v2_cfg);
3958 		break;
3959 	}
3960 
3961 	hif_print_hal_shadow_register_cfg(&cfg);
3962 
3963 	hif_update_rri_over_ddr_config(scn, &cfg);
3964 
3965 	if (QDF_GLOBAL_FTM_MODE == con_mode)
3966 		mode = PLD_FTM;
3967 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
3968 		mode = PLD_COLDBOOT_CALIBRATION;
3969 	else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode)
3970 		mode = PLD_FTM_COLDBOOT_CALIBRATION;
3971 	else if (QDF_IS_EPPING_ENABLED(con_mode))
3972 		mode = PLD_EPPING;
3973 	else
3974 		mode = PLD_MISSION;
3975 
3976 	if (BYPASS_QMI)
3977 		return 0;
3978 	else
3979 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode);
3980 }
3981 
3982 #ifdef WLAN_FEATURE_EPPING
3983 
3984 #define CE_EPPING_USES_IRQ true
3985 
3986 void hif_ce_prepare_epping_config(struct hif_softc *scn,
3987 				  struct HIF_CE_state *hif_state)
3988 {
3989 	if (CE_EPPING_USES_IRQ)
3990 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
3991 	else
3992 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
3993 	hif_state->target_ce_config = target_ce_config_wlan_epping;
3994 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
3995 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
3996 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
3997 	scn->ce_count = EPPING_HOST_CE_COUNT;
3998 }
3999 #endif
4000 
4001 #ifdef QCN7605_SUPPORT
4002 static inline
4003 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
4004 			       struct HIF_CE_state *hif_state)
4005 {
4006 	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
4007 	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
4008 	hif_state->target_ce_config_sz =
4009 				 sizeof(target_ce_config_wlan_qcn7605);
4010 	target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
4011 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
4012 	scn->ce_count = QCN7605_CE_COUNT;
4013 }
4014 #else
4015 static inline
4016 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
4017 			       struct HIF_CE_state *hif_state)
4018 {
4019 	hif_err("QCN7605 not supported");
4020 }
4021 #endif
4022 
4023 #ifdef CE_SVC_CMN_INIT
4024 #ifdef QCA_WIFI_SUPPORT_SRNG
4025 static inline void hif_ce_service_init(void)
4026 {
4027 	ce_service_srng_init();
4028 }
4029 #else
4030 static inline void hif_ce_service_init(void)
4031 {
4032 	ce_service_legacy_init();
4033 }
4034 #endif
4035 #else
4036 static inline void hif_ce_service_init(void)
4037 {
4038 }
4039 #endif
4040 
4041 #ifdef FEATURE_DIRECT_LINK
4042 /**
4043  * hif_ce_select_config_kiwi() - Select the host and target CE
4044  *  configuration for Kiwi
4045  * @hif_state: HIF CE context
4046  *
4047  * Return: None
4048  */
4049 static inline
4050 void hif_ce_select_config_kiwi(struct HIF_CE_state *hif_state)
4051 {
4052 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif_state);
4053 
4054 	if (pld_is_direct_link_supported(hif_ctx->qdf_dev->dev)) {
4055 		hif_state->host_ce_config =
4056 				host_ce_config_wlan_kiwi_direct_link;
4057 		hif_state->target_ce_config =
4058 				target_ce_config_wlan_kiwi_direct_link;
4059 		hif_state->target_ce_config_sz =
4060 				sizeof(target_ce_config_wlan_kiwi_direct_link);
4061 	} else {
4062 		hif_state->host_ce_config = host_ce_config_wlan_kiwi;
4063 		hif_state->target_ce_config = target_ce_config_wlan_kiwi;
4064 		hif_state->target_ce_config_sz =
4065 				sizeof(target_ce_config_wlan_kiwi);
4066 	}
4067 }
4068 #else
4069 static inline
4070 void hif_ce_select_config_kiwi(struct HIF_CE_state *hif_state)
4071 {
4072 	hif_state->host_ce_config = host_ce_config_wlan_kiwi;
4073 	hif_state->target_ce_config = target_ce_config_wlan_kiwi;
4074 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_kiwi);
4075 }
4076 #endif
4077 
4078 /**
4079  * hif_ce_prepare_config() - load the correct static tables.
4080  * @scn: hif context
4081  *
4082  * Epping uses different static attribute tables than mission mode.
4083  */
4084 void hif_ce_prepare_config(struct hif_softc *scn)
4085 {
4086 	uint32_t mode = hif_get_conparam(scn);
4087 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
4088 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
4089 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4090 	int ret;
4091 	int msi_data_count = 0;
4092 	int msi_data_start = 0;
4093 	int msi_irq_start = 0;
4094 
4095 	hif_ce_service_init();
4096 	hif_state->ce_services = ce_services_attach(scn);
4097 
4098 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
4099 					  &msi_data_count, &msi_data_start,
4100 					  &msi_irq_start);
4101 
4102 	scn->ce_count = HOST_CE_COUNT;
4103 	scn->int_assignment = &ce_int_context[msi_data_count];
4104 	scn->free_irq_done = false;
4105 	/* if epping is enabled, we need to use the epping configuration. */
4106 	if (QDF_IS_EPPING_ENABLED(mode)) {
4107 		hif_ce_prepare_epping_config(scn, hif_state);
4108 		return;
4109 	}
4110 
4111 	switch (tgt_info->target_type) {
4112 	default:
4113 		hif_state->host_ce_config = host_ce_config_wlan;
4114 		hif_state->target_ce_config = target_ce_config_wlan;
4115 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
4116 		break;
4117 	case TARGET_TYPE_QCN7605:
4118 		hif_set_ce_config_qcn7605(scn, hif_state);
4119 		break;
4120 	case TARGET_TYPE_AR900B:
4121 	case TARGET_TYPE_QCA9984:
4122 	case TARGET_TYPE_QCA9888:
4123 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
4124 			hif_state->host_ce_config =
4125 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
4126 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
4127 			hif_state->host_ce_config =
4128 				host_lowdesc_ce_cfg_wlan_ar900b;
4129 		} else {
4130 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
4131 		}
4132 
4133 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
4134 		hif_state->target_ce_config_sz =
4135 				sizeof(target_ce_config_wlan_ar900b);
4136 
4137 		break;
4138 
4139 	case TARGET_TYPE_AR9888:
4140 	case TARGET_TYPE_AR9888V2:
4141 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
4142 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
4143 		} else {
4144 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
4145 		}
4146 
4147 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
4148 		hif_state->target_ce_config_sz =
4149 					sizeof(target_ce_config_wlan_ar9888);
4150 
4151 		break;
4152 
4153 	case TARGET_TYPE_QCA8074:
4154 	case TARGET_TYPE_QCA8074V2:
4155 	case TARGET_TYPE_QCA6018:
4156 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
4157 			hif_state->host_ce_config =
4158 					host_ce_config_wlan_qca8074_pci;
4159 			hif_state->target_ce_config =
4160 				target_ce_config_wlan_qca8074_pci;
4161 			hif_state->target_ce_config_sz =
4162 				sizeof(target_ce_config_wlan_qca8074_pci);
4163 		} else {
4164 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
4165 			hif_state->target_ce_config =
4166 					target_ce_config_wlan_qca8074;
4167 			hif_state->target_ce_config_sz =
4168 				sizeof(target_ce_config_wlan_qca8074);
4169 		}
4170 		break;
4171 	case TARGET_TYPE_QCA6290:
4172 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
4173 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
4174 		hif_state->target_ce_config_sz =
4175 					sizeof(target_ce_config_wlan_qca6290);
4176 
4177 		scn->ce_count = QCA_6290_CE_COUNT;
4178 		break;
4179 	case TARGET_TYPE_QCN9000:
4180 		hif_state->host_ce_config = host_ce_config_wlan_qcn9000;
4181 		hif_state->target_ce_config = target_ce_config_wlan_qcn9000;
4182 		hif_state->target_ce_config_sz =
4183 					sizeof(target_ce_config_wlan_qcn9000);
4184 		scn->ce_count = QCN_9000_CE_COUNT;
4185 		scn->ini_cfg.disable_wake_irq = 1;
4186 		break;
4187 	case TARGET_TYPE_QCN9224:
4188 		hif_set_ce_config_qcn9224(scn, hif_state);
4189 		break;
4190 	case TARGET_TYPE_QCA5332:
4191 		hif_state->host_ce_config = host_ce_config_wlan_qca5332;
4192 		hif_state->target_ce_config = target_ce_config_wlan_qca5332;
4193 		hif_state->target_ce_config_sz =
4194 					 sizeof(target_ce_config_wlan_qca5332);
4195 		scn->ce_count = QCA_5332_CE_COUNT;
4196 		break;
4197 	case TARGET_TYPE_QCN6122:
4198 		hif_state->host_ce_config = host_ce_config_wlan_qcn6122;
4199 		hif_state->target_ce_config = target_ce_config_wlan_qcn6122;
4200 		hif_state->target_ce_config_sz =
4201 					sizeof(target_ce_config_wlan_qcn6122);
4202 		scn->ce_count = QCN_6122_CE_COUNT;
4203 		scn->ini_cfg.disable_wake_irq = 1;
4204 		break;
4205 	case TARGET_TYPE_QCN9160:
4206 		hif_state->host_ce_config = host_ce_config_wlan_qcn9160;
4207 		hif_state->target_ce_config = target_ce_config_wlan_qcn9160;
4208 		hif_state->target_ce_config_sz =
4209 					sizeof(target_ce_config_wlan_qcn9160);
4210 		scn->ce_count = QCN_9160_CE_COUNT;
4211 		scn->ini_cfg.disable_wake_irq = 1;
4212 		break;
4213 	case TARGET_TYPE_QCA5018:
4214 		hif_state->host_ce_config = host_ce_config_wlan_qca5018;
4215 		hif_state->target_ce_config = target_ce_config_wlan_qca5018;
4216 		hif_state->target_ce_config_sz =
4217 					sizeof(target_ce_config_wlan_qca5018);
4218 		scn->ce_count = QCA_5018_CE_COUNT;
4219 		break;
4220 	case TARGET_TYPE_QCA9574:
4221 		hif_state->host_ce_config = host_ce_config_wlan_qca9574;
4222 		hif_state->target_ce_config = target_ce_config_wlan_qca9574;
4223 		hif_state->target_ce_config_sz =
4224 					sizeof(target_ce_config_wlan_qca9574);
4225 		break;
4226 	case TARGET_TYPE_QCA6390:
4227 		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
4228 		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
4229 		hif_state->target_ce_config_sz =
4230 					sizeof(target_ce_config_wlan_qca6390);
4231 
4232 		scn->ce_count = QCA_6390_CE_COUNT;
4233 		break;
4234 	case TARGET_TYPE_QCA6490:
4235 		hif_state->host_ce_config = host_ce_config_wlan_qca6490;
4236 		hif_state->target_ce_config = target_ce_config_wlan_qca6490;
4237 		hif_state->target_ce_config_sz =
4238 					sizeof(target_ce_config_wlan_qca6490);
4239 
4240 		scn->ce_count = QCA_6490_CE_COUNT;
4241 		break;
4242 	case TARGET_TYPE_QCA6750:
4243 		hif_state->host_ce_config = host_ce_config_wlan_qca6750;
4244 		hif_state->target_ce_config = target_ce_config_wlan_qca6750;
4245 		hif_state->target_ce_config_sz =
4246 					sizeof(target_ce_config_wlan_qca6750);
4247 
4248 		scn->ce_count = QCA_6750_CE_COUNT;
4249 		break;
4250 	case TARGET_TYPE_KIWI:
4251 	case TARGET_TYPE_MANGO:
4252 	case TARGET_TYPE_PEACH:
4253 		hif_ce_select_config_kiwi(hif_state);
4254 		scn->ce_count = KIWI_CE_COUNT;
4255 		break;
4256 	case TARGET_TYPE_ADRASTEA:
4257 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
4258 			hif_state->host_ce_config =
4259 				host_lowdesc_ce_config_wlan_adrastea_nopktlog;
4260 			hif_state->target_ce_config =
4261 			       target_lowdesc_ce_config_wlan_adrastea_nopktlog;
4262 			hif_state->target_ce_config_sz =
4263 			sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog);
4264 		} else {
4265 			hif_state->host_ce_config =
4266 				host_ce_config_wlan_adrastea;
4267 			hif_state->target_ce_config =
4268 					target_ce_config_wlan_adrastea;
4269 			hif_state->target_ce_config_sz =
4270 					sizeof(target_ce_config_wlan_adrastea);
4271 		}
4272 		break;
4273 
4274 	}
4275 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
4276 }
4277 
4278 /**
4279  * hif_ce_open() - do ce specific allocations
4280  * @hif_sc: pointer to hif context
4281  *
4282  * return: 0 for success or QDF_STATUS_E_NOMEM
4283  */
4284 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
4285 {
4286 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
4287 
4288 	qdf_spinlock_create(&hif_state->irq_reg_lock);
4289 	qdf_spinlock_create(&hif_state->keep_awake_lock);
4290 	return QDF_STATUS_SUCCESS;
4291 }
4292 
4293 /**
4294  * hif_ce_close() - do ce specific free
4295  * @hif_sc: pointer to hif context
4296  */
4297 void hif_ce_close(struct hif_softc *hif_sc)
4298 {
4299 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
4300 
4301 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
4302 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
4303 }
4304 
4305 /**
4306  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
4307  * @hif_sc: hif context
4308  *
4309  * uses state variables to support cleaning up when hif_config_ce fails.
4310  */
4311 void hif_unconfig_ce(struct hif_softc *hif_sc)
4312 {
4313 	int pipe_num;
4314 	struct HIF_CE_pipe_info *pipe_info;
4315 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
4316 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
4317 
4318 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
4319 		pipe_info = &hif_state->pipe_info[pipe_num];
4320 		if (pipe_info->ce_hdl) {
4321 			ce_unregister_irq(hif_state, (1 << pipe_num));
4322 		}
4323 	}
4324 	deinit_tasklet_workers(hif_hdl);
4325 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
4326 		pipe_info = &hif_state->pipe_info[pipe_num];
4327 		if (pipe_info->ce_hdl) {
4328 			ce_fini(pipe_info->ce_hdl);
4329 			pipe_info->ce_hdl = NULL;
4330 			pipe_info->buf_sz = 0;
4331 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
4332 		}
4333 	}
4334 	if (hif_sc->athdiag_procfs_inited) {
4335 		athdiag_procfs_remove();
4336 		hif_sc->athdiag_procfs_inited = false;
4337 	}
4338 }
4339 
4340 #ifdef CONFIG_BYPASS_QMI
4341 #ifdef QCN7605_SUPPORT
4342 /**
4343  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
4344  * @scn: pointer to HIF structure
4345  *
4346  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
4347  *
4348  * Return: void
4349  */
4350 static void hif_post_static_buf_to_target(struct hif_softc *scn)
4351 {
4352 	phys_addr_t target_pa;
4353 	struct ce_info *ce_info_ptr;
4354 	uint32_t msi_data_start;
4355 	uint32_t msi_data_count;
4356 	uint32_t msi_irq_start;
4357 	uint32_t i = 0;
4358 	int ret;
4359 
4360 	scn->vaddr_qmi_bypass =
4361 			(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
4362 							     scn->qdf_dev->dev,
4363 							     FW_SHARED_MEM,
4364 							     &target_pa);
4365 	if (!scn->vaddr_qmi_bypass) {
4366 		hif_err("Memory allocation failed, could not post target buf");
4367 		return;
4368 	}
4369 
4370 	scn->paddr_qmi_bypass = target_pa;
4371 
4372 	ce_info_ptr = (struct ce_info *)scn->vaddr_qmi_bypass;
4373 
4374 	if (scn->vaddr_rri_on_ddr) {
4375 		ce_info_ptr->rri_over_ddr_low_paddr  =
4376 			 BITS0_TO_31(scn->paddr_rri_on_ddr);
4377 		ce_info_ptr->rri_over_ddr_high_paddr =
4378 			 BITS32_TO_35(scn->paddr_rri_on_ddr);
4379 	}
4380 
4381 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
4382 					  &msi_data_count, &msi_data_start,
4383 					  &msi_irq_start);
4384 	if (ret) {
4385 		hif_err("Failed to get CE msi config");
4386 		return;
4387 	}
4388 
4389 	for (i = 0; i < CE_COUNT_MAX; i++) {
4390 		ce_info_ptr->cfg[i].ce_id = i;
4391 		ce_info_ptr->cfg[i].msi_vector =
4392 			 (i % msi_data_count) + msi_irq_start;
4393 	}
4394 
4395 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
4396 	hif_info("target va %pK target pa %pa", scn->vaddr_qmi_bypass,
4397 		 &target_pa);
4398 }
4399 
4400 /**
4401  * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW
4402  * @scn: pointer to HIF structure
4403  *
4405  * Return: void
4406  */
4407 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
4408 {
4409 	void *target_va = scn->vaddr_qmi_bypass;
4410 	phys_addr_t target_pa = scn->paddr_qmi_bypass;
4411 
4412 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
4413 				FW_SHARED_MEM, target_va,
4414 				target_pa, 0);
4415 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
4416 }
4417 #else
4418 /**
4419  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
4420  * @scn: pointer to HIF structure
4421  *
4422  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
4423  *
4424  * Return: void
4425  */
4426 static void hif_post_static_buf_to_target(struct hif_softc *scn)
4427 {
4428 	qdf_dma_addr_t target_pa;
4429 
4430 	scn->vaddr_qmi_bypass =
4431 			(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
4432 							     scn->qdf_dev->dev,
4433 							     FW_SHARED_MEM,
4434 							     &target_pa);
4435 	if (!scn->vaddr_qmi_bypass) {
4436 		hif_err("Memory allocation failed, could not post target buf");
4437 		return;
4438 	}
4439 
4440 	scn->paddr_qmi_bypass = target_pa;
4441 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
4442 }
4443 
4444 /**
4445  * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW
4446  * @scn: pointer to HIF structure
4447  *
4449  * Return: void
4450  */
4451 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
4452 {
4453 	void *target_va = scn->vaddr_qmi_bypass;
4454 	phys_addr_t target_pa = scn->paddr_qmi_bypass;
4455 
4456 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
4457 				FW_SHARED_MEM, target_va,
4458 				target_pa, 0);
4459 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
4460 }
4461 #endif
4462 
4463 #else
4464 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
4465 {
4466 }
4467 
4468 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
4469 {
4470 }
4471 #endif
4472 
4473 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
4474 				bool wait_for_it)
4475 {
4476 	/* todo */
4477 	return 0;
4478 }
4479 
4480 int hif_config_ce_by_id(struct hif_softc *scn, int pipe_num)
4481 {
4482 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4483 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
4484 	struct HIF_CE_pipe_info *pipe_info;
4485 	struct CE_state *ce_state = NULL;
4486 	struct CE_attr *attr;
4487 	int rv = 0;
4488 
4489 	if (pipe_num >= CE_COUNT_MAX)
4490 		return -EINVAL;
4491 
4492 	pipe_info = &hif_state->pipe_info[pipe_num];
4493 	pipe_info->pipe_num = pipe_num;
4494 	pipe_info->HIF_CE_state = hif_state;
4495 	attr = &hif_state->host_ce_config[pipe_num];
4496 	ce_state = scn->ce_id_to_state[pipe_num];
4497 
4498 	if (ce_state) {
4499 		/* Do not reinitialize the CE if it is done already */
4500 		rv = QDF_STATUS_E_BUSY;
4501 		goto err;
4502 	}
4503 
4504 	pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
4505 	ce_state = scn->ce_id_to_state[pipe_num];
4506 	if (!ce_state) {
4507 		A_TARGET_ACCESS_UNLIKELY(scn);
4508 		rv = QDF_STATUS_E_FAILURE;
4509 		goto err;
4510 	}
4511 	qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
4512 	QDF_ASSERT(pipe_info->ce_hdl);
4513 	if (!pipe_info->ce_hdl) {
4514 		rv = QDF_STATUS_E_FAILURE;
4515 		A_TARGET_ACCESS_UNLIKELY(scn);
4516 		goto err;
4517 	}
4518 
4519 	ce_state->lro_data = qdf_lro_init();
4520 
4521 	if (attr->flags & CE_ATTR_DIAG) {
4522 		/* Reserve the last CE for
4523 		 * Diagnostic Window support
4524 		 */
4525 		hif_state->ce_diag = pipe_info->ce_hdl;
4526 		goto skip;
4527 	}
4528 
4529 	if (hif_is_nss_wifi_enabled(scn) && ce_state->htt_rx_data)
4530 		goto skip;
4533 
4534 	pipe_info->buf_sz = (qdf_size_t)(attr->src_sz_max);
4535 	if (attr->dest_nentries > 0) {
4536 		atomic_set(&pipe_info->recv_bufs_needed,
4537 			   init_buffer_count(attr->dest_nentries - 1));
4538 		/* SRNG-based CE has one entry less */
4539 		if (ce_srng_based(scn))
4540 			atomic_dec(&pipe_info->recv_bufs_needed);
4541 	} else {
4542 		atomic_set(&pipe_info->recv_bufs_needed, 0);
4543 	}
4544 	ce_tasklet_init(hif_state, (1 << pipe_num));
4545 	ce_register_irq(hif_state, (1 << pipe_num));
4546 
4547 	init_tasklet_worker_by_ceid(hif_hdl, pipe_num);
4548 skip:
4549 	return 0;
4550 err:
4551 	return rv;
4552 }
4553 
4554 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
4555 static inline void hif_gen_ce_id_history_idx_mapping(struct hif_softc *scn)
4556 {
4557 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
4558 	uint8_t ce_id, hist_idx = 0;
4559 
4560 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
4561 		if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE & (1 << ce_id))
4562 			ce_hist->ce_id_hist_map[ce_id] = hist_idx++;
4563 		else
4564 			ce_hist->ce_id_hist_map[ce_id] = -1;
4565 	}
4566 }
4567 #else
4568 static inline void hif_gen_ce_id_history_idx_mapping(struct hif_softc *scn)
4569 {
4570 }
4571 #endif
4572 
4573 /**
4574  * hif_config_ce() - configure copy engines
4575  * @scn: hif context
4576  *
4577  * Prepares fw, copy engine hardware and host sw according
4578  * to the attributes selected by hif_ce_prepare_config.
4579  *
4580  * also calls athdiag_procfs_init
4581  *
4582  * return: 0 for success nonzero for failure.
4583  */
4584 int hif_config_ce(struct hif_softc *scn)
4585 {
4586 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4587 	struct HIF_CE_pipe_info *pipe_info;
4588 	int pipe_num;
4589 
4590 #ifdef ADRASTEA_SHADOW_REGISTERS
4591 	int i;
4592 #endif
4593 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
4594 
4595 	scn->notice_send = true;
4596 	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
4597 
4598 	hif_post_static_buf_to_target(scn);
4599 
4600 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
4601 
4602 	hif_config_rri_on_ddr(scn);
4603 
4604 	if (ce_srng_based(scn))
4605 		scn->bus_ops.hif_target_sleep_state_adjust =
4606 			&hif_srng_sleep_state_adjust;
4607 
4608 	/* Initialise the CE debug history sysfs interface inputs (ce_id and
4609 	 * index) and disable data storing.
4610 	 */
4611 	reset_ce_debug_history(scn);
4612 	hif_gen_ce_id_history_idx_mapping(scn);
4613 
4614 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
4615 		struct CE_attr *attr;
4616 
4617 		pipe_info = &hif_state->pipe_info[pipe_num];
4618 		attr = &hif_state->host_ce_config[pipe_num];
4619 
4620 		if (attr->flags & CE_ATTR_INIT_ON_DEMAND)
4621 			continue;
4622 
4623 		if (hif_config_ce_by_id(scn, pipe_num))
4624 			goto err;
4625 	}
4626 
4627 	if (athdiag_procfs_init(scn) != 0) {
4628 		A_TARGET_ACCESS_UNLIKELY(scn);
4629 		goto err;
4630 	}
4631 	scn->athdiag_procfs_inited = true;
4632 
4633 	hif_debug("ce_init done");
4634 	hif_debug("%s: X, ret = %d", __func__, rv);
4635 
4636 #ifdef ADRASTEA_SHADOW_REGISTERS
4637 	hif_debug("Using Shadow Registers instead of CE Registers");
4638 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
4639 		hif_debug("Shadow Register%d is mapped to address %x",
4640 			  i,
4641 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
4642 	}
4643 #endif
4644 
4645 	return rv != QDF_STATUS_SUCCESS; /* 0 on success, nonzero on failure */
4646 err:
4647 	/* Failure, so clean up */
4648 	hif_unconfig_ce(scn);
4649 	hif_info("X, ret = %d", rv);
4650 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE; /* always nonzero */
4651 }
4652 
4653 /**
4654  * hif_config_ce_pktlog() - configure copy engines
4655  * @hif_hdl: hif context
4656  *
4657  * Prepares fw, copy engine hardware and host sw according
4658  * to the attributes selected by hif_ce_prepare_config.
4659  *
4660  * also calls athdiag_procfs_init
4661  *
4662  * return: 0 for success nonzero for failure.
4663  */
4664 int hif_config_ce_pktlog(struct hif_opaque_softc *hif_hdl)
4665 {
4666 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
4667 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4668 	int pipe_num;
4669 	QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
4670 	struct HIF_CE_pipe_info *pipe_info;
4671 
4672 	if (!scn)
4673 		goto err;
4674 
4675 	if (scn->pktlog_init)
4676 		return QDF_STATUS_SUCCESS;
4677 
4678 	pipe_num =  hif_get_pktlog_ce_num(scn);
4679 	if (pipe_num < 0) {
4680 		qdf_status = QDF_STATUS_E_FAILURE;
4681 		goto err;
4682 	}
4683 
4684 	pipe_info = &hif_state->pipe_info[pipe_num];
4685 
4686 	qdf_status = hif_config_ce_by_id(scn, pipe_num);
4687 	/* CE already initialized. Do not try to reinitialize it again */
4688 	if (qdf_status == QDF_STATUS_E_BUSY)
4689 		return QDF_STATUS_SUCCESS;
4690 
4691 	qdf_status = hif_config_irq_by_ceid(scn, pipe_num);
4692 	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
4693 		goto err;
4694 
4695 	qdf_status = hif_completion_thread_startup_by_ceid(hif_state, pipe_num);
4696 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
4697 		hif_err("%s: failed to start hif thread", __func__);
4698 		goto err;
4699 	}
4700 
4701 	/* Post buffers for pktlog copy engine. */
4702 	qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
4703 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
4704 		/* cleanup is done in hif_ce_disable */
4705 		hif_err("%s: failed to post buffers", __func__);
4706 		return qdf_status;
4707 	}
4708 	scn->pktlog_init = true;
4709 	return qdf_status != QDF_STATUS_SUCCESS;
4710 
4711 err:
4712 	hif_debug("%s: X, ret = %d", __func__, qdf_status);
4713 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE; /* always nonzero */
4714 }
4715 
4716 #ifdef IPA_OFFLOAD
4717 /**
4718  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
4719  * @scn: bus context
4720  * @ce_sr: copyengine source ring base physical address
4721  * @ce_sr_ring_size: copyengine source ring size
4722  * @ce_reg_paddr: copyengine register physical address
4723  *
4724  * IPA micro controller data path offload feature enabled,
4725  * HIF should release copy engine related resource information to IPA UC
4726  * IPA UC will access hardware resource with released information
4727  *
4728  * Return: None
4729  */
4730 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
4731 			     qdf_shared_mem_t **ce_sr,
4732 			     uint32_t *ce_sr_ring_size,
4733 			     qdf_dma_addr_t *ce_reg_paddr)
4734 {
4735 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4736 	struct HIF_CE_pipe_info *pipe_info =
4737 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
4738 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
4739 
4740 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
4741 			    ce_reg_paddr);
4742 }
4743 #endif /* IPA_OFFLOAD */
4744 
4745 
4746 #ifdef ADRASTEA_SHADOW_REGISTERS
4747 
4748 /*
4749  * Current shadow register config
4750  *
4751  * -----------------------------------------------------------
4752  * Shadow Register      |     CE   |    src/dst write index
4753  * -----------------------------------------------------------
4754  *         0            |     0    |           src
4755  *         1     No Config - Doesn't point to anything
4756  *         2     No Config - Doesn't point to anything
4757  *         3            |     3    |           src
4758  *         4            |     4    |           src
4759  *         5            |     5    |           src
4760  *         6     No Config - Doesn't point to anything
4761  *         7            |     7    |           src
4762  *         8     No Config - Doesn't point to anything
4763  *         9     No Config - Doesn't point to anything
4764  *         10    No Config - Doesn't point to anything
4765  *         11    No Config - Doesn't point to anything
4766  * -----------------------------------------------------------
4767  *         12    No Config - Doesn't point to anything
4768  *         13           |     1    |           dst
4769  *         14           |     2    |           dst
4770  *         15    No Config - Doesn't point to anything
4771  *         16    No Config - Doesn't point to anything
4772  *         17    No Config - Doesn't point to anything
4773  *         18    No Config - Doesn't point to anything
4774  *         19           |     7    |           dst
4775  *         20           |     8    |           dst
4776  *         21    No Config - Doesn't point to anything
4777  *         22    No Config - Doesn't point to anything
4778  *         23    No Config - Doesn't point to anything
4779  * -----------------------------------------------------------
4780  *
4781  *
4782  * ToDo - Move shadow register config to following in the future
4783  * This helps free up a block of shadow registers towards the end.
4784  * Can be used for other purposes
4785  *
4786  * -----------------------------------------------------------
4787  * Shadow Register      |     CE   |    src/dst write index
4788  * -----------------------------------------------------------
4789  *      0            |     0    |           src
4790  *      1            |     3    |           src
4791  *      2            |     4    |           src
4792  *      3            |     5    |           src
4793  *      4            |     7    |           src
4794  * -----------------------------------------------------------
4795  *      5            |     1    |           dst
4796  *      6            |     2    |           dst
4797  *      7            |     7    |           dst
4798  *      8            |     8    |           dst
4799  * -----------------------------------------------------------
4800  *      9     No Config - Doesn't point to anything
4801  *      12    No Config - Doesn't point to anything
4802  *      13    No Config - Doesn't point to anything
4803  *      14    No Config - Doesn't point to anything
4804  *      15    No Config - Doesn't point to anything
4805  *      16    No Config - Doesn't point to anything
4806  *      17    No Config - Doesn't point to anything
4807  *      18    No Config - Doesn't point to anything
4808  *      19    No Config - Doesn't point to anything
4809  *      20    No Config - Doesn't point to anything
4810  *      21    No Config - Doesn't point to anything
4811  *      22    No Config - Doesn't point to anything
4812  *      23    No Config - Doesn't point to anything
4813  * -----------------------------------------------------------
4814  */
4815 #ifndef QCN7605_SUPPORT
4816 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4817 {
4818 	u32 addr = 0;
4819 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4820 
4821 	switch (ce) {
4822 	case 0:
4823 		addr = SHADOW_VALUE0;
4824 		break;
4825 	case 3:
4826 		addr = SHADOW_VALUE3;
4827 		break;
4828 	case 4:
4829 		addr = SHADOW_VALUE4;
4830 		break;
4831 	case 5:
4832 		addr = SHADOW_VALUE5;
4833 		break;
4834 	case 7:
4835 		addr = SHADOW_VALUE7;
4836 		break;
4837 	default:
4838 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4839 		QDF_ASSERT(0);
4840 	}
4841 	return addr;
4843 }
4844 
4845 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4846 {
4847 	u32 addr = 0;
4848 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4849 
4850 	switch (ce) {
4851 	case 1:
4852 		addr = SHADOW_VALUE13;
4853 		break;
4854 	case 2:
4855 		addr = SHADOW_VALUE14;
4856 		break;
4857 	case 5:
4858 		addr = SHADOW_VALUE17;
4859 		break;
4860 	case 7:
4861 		addr = SHADOW_VALUE19;
4862 		break;
4863 	case 8:
4864 		addr = SHADOW_VALUE20;
4865 		break;
4866 	case 9:
4867 		addr = SHADOW_VALUE21;
4868 		break;
4869 	case 10:
4870 		addr = SHADOW_VALUE22;
4871 		break;
4872 	case 11:
4873 		addr = SHADOW_VALUE23;
4874 		break;
4875 	default:
4876 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4877 		QDF_ASSERT(0);
4878 	}
4879 
4880 	return addr;
4882 }
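
/*
 * Example (hedged): COPY_ENGINE_ID() recovers the CE number from the
 * control address and the switches above map it to a shadow register,
 * e.g. CE4's source ring write index goes through SHADOW_VALUE4:
 *
 *	addr = shadow_sr_wr_ind_addr(scn, CE_BASE_ADDRESS(4));
 */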
4883 #else
4884 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4885 {
4886 	u32 addr = 0;
4887 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4888 
4889 	switch (ce) {
4890 	case 0:
4891 		addr = SHADOW_VALUE0;
4892 		break;
4893 	case 3:
4894 		addr = SHADOW_VALUE3;
4895 		break;
4896 	case 4:
4897 		addr = SHADOW_VALUE4;
4898 		break;
4899 	case 5:
4900 		addr = SHADOW_VALUE5;
4901 		break;
4902 	default:
4903 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4904 		QDF_ASSERT(0);
4905 	}
4906 	return addr;
4907 }
4908 
4909 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4910 {
4911 	u32 addr = 0;
4912 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4913 
4914 	switch (ce) {
4915 	case 1:
4916 		addr = SHADOW_VALUE13;
4917 		break;
4918 	case 2:
4919 		addr = SHADOW_VALUE14;
4920 		break;
4921 	case 3:
4922 		addr = SHADOW_VALUE15;
4923 		break;
4924 	case 5:
4925 		addr = SHADOW_VALUE17;
4926 		break;
4927 	case 7:
4928 		addr = SHADOW_VALUE19;
4929 		break;
4930 	case 8:
4931 		addr = SHADOW_VALUE20;
4932 		break;
4933 	case 9:
4934 		addr = SHADOW_VALUE21;
4935 		break;
4936 	case 10:
4937 		addr = SHADOW_VALUE22;
4938 		break;
4939 	case 11:
4940 		addr = SHADOW_VALUE23;
4941 		break;
4942 	default:
4943 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4944 		QDF_ASSERT(0);
4945 	}
4946 
4947 	return addr;
4948 }
4949 #endif
4950 #endif
4951 
4952 #if defined(FEATURE_LRO)
4953 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
4954 {
4955 	struct CE_state *ce_state;
4956 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
4957 
4958 	ce_state = scn->ce_id_to_state[ctx_id];
4959 
4960 	return ce_state->lro_data;
4961 }
4962 #endif
4963 
4964 /**
4965  * hif_map_service_to_pipe() - returns the ce ids pertaining to
4966  * this service
4967  * @hif_hdl: hif_softc pointer.
4968  * @svc_id: Service ID for which the mapping is needed.
4969  * @ul_pipe: address of the container in which ul pipe is returned.
4970  * @dl_pipe: address of the container in which dl pipe is returned.
4971  * @ul_is_polled: address of the container in which a bool
4972  *			indicating if the UL CE for this service
4973  *			is polled is returned.
4974  * @dl_is_polled: address of the container in which a bool
4975  *			indicating if the DL CE for this service
4976  *			is polled is returned.
4977  *
4978  * Return: Indicates whether the service has been found in the table.
4979  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
4980  *         There will be warning logs if either leg has not been updated
4981  *         because it missed the entry in the table (but this is not an err).
4982  */
4983 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
4984 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
4985 			int *dl_is_polled)
4986 {
4987 	int status = -EINVAL;
4988 	unsigned int i;
4989 	struct service_to_pipe element;
4990 	struct service_to_pipe *tgt_svc_map_to_use;
4991 	uint32_t sz_tgt_svc_map_to_use;
4992 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
4993 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4994 	bool dl_updated = false;
4995 	bool ul_updated = false;
4996 
4997 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
4998 				       &sz_tgt_svc_map_to_use);
4999 
5000 	*dl_is_polled = 0;  /* polling for received messages not supported */
5001 
5002 	for (i = 0; i < (sz_tgt_svc_map_to_use / sizeof(element)); i++) {
5004 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
5005 		if (element.service_id == svc_id) {
5006 			if (element.pipedir == PIPEDIR_OUT) {
5007 				*ul_pipe = element.pipenum;
5008 				*ul_is_polled =
5009 					(hif_state->host_ce_config[*ul_pipe].flags &
5010 					 CE_ATTR_DISABLE_INTR) != 0;
5011 				ul_updated = true;
5012 			} else if (element.pipedir == PIPEDIR_IN) {
5013 				*dl_pipe = element.pipenum;
5014 				dl_updated = true;
5015 			}
5016 			status = 0;
5017 		}
5018 	}
5019 	if (!ul_updated)
5020 		hif_debug("ul pipe is NOT updated for service %d", svc_id);
5021 	if (!dl_updated)
5022 		hif_debug("dl pipe is NOT updated for service %d", svc_id);
5023 
5024 	return status;
5025 }
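
/*
 * Hedged usage sketch (svc_id comes from the service/pipe tables
 * selected at configuration time):
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (!hif_map_service_to_pipe(hif_hdl, svc_id, &ul_pipe, &dl_pipe,
 *				     &ul_polled, &dl_polled))
 *		hif_debug("svc %d -> ul CE%d dl CE%d", svc_id,
 *			  ul_pipe, dl_pipe);
 */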
5026 
5027 #ifdef SHADOW_REG_DEBUG
5028 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
5029 		uint32_t CE_ctrl_addr)
5030 {
5031 	uint32_t read_from_hw, srri_from_ddr = 0;
5032 
5033 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
5034 
5035 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
5036 
5037 	if (read_from_hw != srri_from_ddr) {
5038 		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
5039 		       srri_from_ddr, read_from_hw,
5040 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
5041 		QDF_ASSERT(0);
5042 	}
5043 	return srri_from_ddr;
5044 }
5045 
5047 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
5048 		uint32_t CE_ctrl_addr)
5049 {
5050 	uint32_t read_from_hw, drri_from_ddr = 0;
5051 
5052 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
5053 
5054 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
5055 
5056 	if (read_from_hw != drri_from_ddr) {
5057 		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
5058 		       drri_from_ddr, read_from_hw,
5059 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
5060 		QDF_ASSERT(0);
5061 	}
5062 	return drri_from_ddr;
5063 }
5064 
5065 #endif
5066 
5067 /**
5068  * hif_dump_ce_registers() - dump ce registers
5069  * @scn: hif_opaque_softc pointer.
5070  *
5071  * Output the copy engine registers
5072  *
5073  * Return: 0 for success or error code
5074  */
5075 int hif_dump_ce_registers(struct hif_softc *scn)
5076 {
5077 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
5078 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
5079 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
5080 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
5081 	uint16_t i;
5082 	QDF_STATUS status;
5083 
5084 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
5085 		if (!scn->ce_id_to_state[i]) {
5086 			hif_debug("CE%d not used", i);
5087 			continue;
5088 		}
5089 
5090 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
5091 					   (uint8_t *) &ce_reg_values[0],
5092 					   ce_reg_word_size * sizeof(uint32_t));
5093 
5094 		if (status != QDF_STATUS_SUCCESS) {
5095 			hif_err("Dumping CE register failed!");
5096 			return -EACCES;
5097 		}
5098 		hif_debug("CE%d=>", i);
5099 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
5100 				   (uint8_t *) &ce_reg_values[0],
5101 				   ce_reg_word_size * sizeof(uint32_t));
5102 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
5103 				+ SR_WR_INDEX_ADDRESS),
5104 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
5105 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
5106 				+ CURRENT_SRRI_ADDRESS),
5107 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
5108 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
5109 				+ DST_WR_INDEX_ADDRESS),
5110 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
5111 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
5112 				+ CURRENT_DRRI_ADDRESS),
5113 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
5114 		qdf_print("---");
5115 	}
5116 	return 0;
5117 }
5118 qdf_export_symbol(hif_dump_ce_registers);
5119 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
5120 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
5121 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
5122 {
5123 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
5124 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
5125 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
5126 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
5127 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
5128 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
5129 	struct CE_ring_state *src_ring = ce_state->src_ring;
5130 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
5131 
5132 	if (src_ring) {
5133 		hif_info->ul_pipe.nentries = src_ring->nentries;
5134 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
5135 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
5136 		hif_info->ul_pipe.write_index = src_ring->write_index;
5137 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
5138 		hif_info->ul_pipe.base_addr_CE_space =
5139 			src_ring->base_addr_CE_space;
5140 		hif_info->ul_pipe.base_addr_owner_space =
5141 			src_ring->base_addr_owner_space;
5142 	}
5143 
5144 
5145 	if (dest_ring) {
5146 		hif_info->dl_pipe.nentries = dest_ring->nentries;
5147 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
5148 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
5149 		hif_info->dl_pipe.write_index = dest_ring->write_index;
5150 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
5151 		hif_info->dl_pipe.base_addr_CE_space =
5152 			dest_ring->base_addr_CE_space;
5153 		hif_info->dl_pipe.base_addr_owner_space =
5154 			dest_ring->base_addr_owner_space;
5155 	}
5156 
5157 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
5158 	hif_info->ctrl_addr = ce_state->ctrl_addr;
5159 
5160 	return hif_info;
5161 }
5162 qdf_export_symbol(hif_get_addl_pipe_info);
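
/*
 * Illustrative usage sketch (hypothetical caller; only
 * hif_get_addl_pipe_info() itself is real): the caller owns the
 * hif_pipe_addl_info storage and reads the ring snapshot from it
 * afterwards.
 *
 *	struct hif_pipe_addl_info info = {0};
 *
 *	hif_get_addl_pipe_info(osc, &info, pipe);
 *	hif_debug("pipe %u UL wr=%u sw=%u DL wr=%u sw=%u", pipe,
 *		  info.ul_pipe.write_index, info.ul_pipe.sw_index,
 *		  info.dl_pipe.write_index, info.dl_pipe.sw_index);
 */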
5163 
5164 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
5165 {
5166 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
5167 
5168 	scn->nss_wifi_ol_mode = mode;
5169 	return 0;
5170 }
5171 qdf_export_symbol(hif_set_nss_wifiol_mode);
5172 #endif
5173 
5174 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
5175 {
5176 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
5177 	scn->hif_attribute = hif_attrib;
5178 }
5179 
5180 
5181 /* Disable interrupts (currently only applicable to the legacy copy engine) */
5182 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
5183 {
5184 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
5185 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
5186 	uint32_t ctrl_addr = CE_state->ctrl_addr;
5187 
5188 	Q_TARGET_ACCESS_BEGIN(scn);
5189 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
5190 	Q_TARGET_ACCESS_END(scn);
5191 }
5192 qdf_export_symbol(hif_disable_interrupt);
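
/*
 * Note for callers (sketch, hedged): hif_disable_interrupt() already
 * brackets the register write with Q_TARGET_ACCESS_BEGIN/END, so a
 * caller only needs the one-liner below to quiesce copy-complete
 * interrupts on a legacy pipe, e.g. before tearing it down.
 *
 *	hif_disable_interrupt(osc, pipe_num);
 */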
5193 
5194 /**
5195  * hif_fw_event_handler() - hif fw event handler
5196  * @hif_state: pointer to hif ce state structure
5197  *
5198  * Raise the HTC firmware-event callback to process FW events.
5199  *
5200  * Return: none
5201  */
5202 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
5203 {
5204 	struct hif_msg_callbacks *msg_callbacks =
5205 		&hif_state->msg_callbacks_current;
5206 
5207 	if (!msg_callbacks->fwEventHandler)
5208 		return;
5209 
5210 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
5211 			QDF_STATUS_E_FAILURE);
5212 }
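
/*
 * Sketch of the callback contract consumed above (hedged; the
 * HTC-side handler shown is hypothetical): fwEventHandler and Context
 * are supplied by the HTC layer when it registers its message
 * callbacks with HIF, e.g.:
 *
 *	static void example_fw_event_handler(void *ctx, QDF_STATUS status);
 *
 * where status is QDF_STATUS_E_FAILURE for firmware error events.
 */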
5213 
5214 #ifndef QCA_WIFI_3_0
5215 /**
5216  * hif_fw_interrupt_handler() - FW interrupt handler
5217  * @irq: irq number
5218  * @arg: the user pointer
5219  *
5220  * Called from the PCI interrupt handler when the Target raises a
5221  * firmware-generated interrupt to the Host.
5222  *
5223  * Only registered for legacy copy engine devices.
5224  *
5225  * Return: ATH_ISR_SCHED, or ATH_ISR_NOSCHED if target access fails
5226  */
5227 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
5228 {
5229 	struct hif_softc *scn = arg;
5230 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5231 	uint32_t fw_indicator_address, fw_indicator;
5232 
5233 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
5234 		return ATH_ISR_NOSCHED;
5235 
5236 	fw_indicator_address = hif_state->fw_indicator_address;
5237 	/* For sudden unplug this will return ~0 */
5238 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
5239 
5240 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
5241 		/* ACK: clear Target-side pending event */
5242 		A_TARGET_WRITE(scn, fw_indicator_address,
5243 			       fw_indicator & ~FW_IND_EVENT_PENDING);
5244 		if (Q_TARGET_ACCESS_END(scn) < 0)
5245 			return ATH_ISR_SCHED;
5246 
5247 		if (hif_state->started) {
5248 			hif_fw_event_handler(hif_state);
5249 		} else {
5250 			/*
5251 			 * Probable Target failure before we're prepared
5252 			 * to handle it.  Generally unexpected.
5253 			 * fw_indicator is used as a bitmap with these bits:
5254 			 *     FW_IND_EVENT_PENDING    0x1
5255 			 *     FW_IND_INITIALIZED      0x2
5256 			 *     FW_IND_NEEDRECOVER      0x4
5257 			 */
5258 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
5259 				("%s: Early firmware event indicated 0x%x\n",
5260 				 __func__, fw_indicator));
5261 		}
5262 	} else {
5263 		if (Q_TARGET_ACCESS_END(scn) < 0)
5264 			return ATH_ISR_SCHED;
5265 	}
5266 
5267 	return ATH_ISR_SCHED;
5268 }
5269 #else
5270 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
5271 {
5272 	return ATH_ISR_SCHED;
5273 }
5274 #endif /* #ifndef QCA_WIFI_3_0 */
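
/*
 * Worked example of the fw_indicator handling above (illustrative):
 * a read of 0x3 decodes as FW_IND_EVENT_PENDING | FW_IND_INITIALIZED,
 * i.e. an initialized target signalling an event, while ~0 (all ones)
 * means the register read itself failed (typically a sudden unplug)
 * and is therefore excluded before the bit test.
 */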
5275 
5276 
5277 /**
5278  * hif_wlan_disable() - call the platform driver to disable wlan
5279  * @scn: HIF Context
5280  *
5281  * This function passes the con_mode to the platform driver to
5282  * disable wlan.
5283  *
5284  * Return: void
5285  */
5286 void hif_wlan_disable(struct hif_softc *scn)
5287 {
5288 	enum pld_driver_mode mode;
5289 	uint32_t con_mode = hif_get_conparam(scn);
5290 
5291 	if (scn->target_status == TARGET_STATUS_RESET)
5292 		return;
5293 
5294 	if (QDF_GLOBAL_FTM_MODE == con_mode)
5295 		mode = PLD_FTM;
5296 	else if (QDF_IS_EPPING_ENABLED(con_mode))
5297 		mode = PLD_EPPING;
5298 	else
5299 		mode = PLD_MISSION;
5300 
5301 	pld_wlan_disable(scn->qdf_dev->dev, mode);
5302 }
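
/*
 * For reference, the con_mode to PLD mode translation performed above
 * (mirrors the code; listed here only as a quick lookup):
 *
 *	QDF_GLOBAL_FTM_MODE  -> PLD_FTM
 *	epping con_modes     -> PLD_EPPING
 *	everything else      -> PLD_MISSION
 */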
5303 
5304 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
5305 {
5306 	int status;
5307 	uint8_t ul_pipe, dl_pipe;
5308 	int ul_is_polled, dl_is_polled;
5309 
5310 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
5311 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
5312 					 HTC_CTRL_RSVD_SVC,
5313 					 &ul_pipe, &dl_pipe,
5314 					 &ul_is_polled, &dl_is_polled);
5315 	if (status) {
5316 		hif_err("Failed to map pipe: %d", status);
5317 		return status;
5318 	}
5319 
5320 	*ce_id = dl_pipe;
5321 
5322 	return 0;
5323 }
5324 
5325 int hif_get_fw_diag_ce_id(struct hif_softc *scn, uint8_t *ce_id)
5326 {
5327 	int status;
5328 	uint8_t ul_pipe, dl_pipe;
5329 	int ul_is_polled, dl_is_polled;
5330 
5331 	/* DL pipe for WMI_CONTROL_DIAG_SVC should map to the FW DIAG CE_ID */
5332 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
5333 					 WMI_CONTROL_DIAG_SVC,
5334 					 &ul_pipe, &dl_pipe,
5335 					 &ul_is_polled, &dl_is_polled);
5336 	if (status) {
5337 		hif_err("Failed to map pipe: %d", status);
5338 		return status;
5339 	}
5340 
5341 	*ce_id = dl_pipe;
5342 
5343 	return 0;
5344 }
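
/*
 * Illustrative usage sketch for the two getters above (hypothetical
 * caller): both resolve a CE id from the service-to-pipe map via the
 * DL pipe of a well-known service, so callers follow the same
 * out-parameter pattern.
 *
 *	uint8_t wake_ce;
 *
 *	if (!hif_get_wake_ce_id(scn, &wake_ce))
 *		hif_debug("wake CE id %u", wake_ce);
 */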
5345 
5346 #ifdef HIF_CE_LOG_INFO
5347 /**
5348  * ce_get_index_info() - Get CE index info
5349  * @scn: HIF Context
5350  * @ce_state: CE opaque handle
5351  * @info: CE info
5352  *
5353  * Return: 0 on success, non-zero on failure
5354  */
5355 static
5356 int ce_get_index_info(struct hif_softc *scn, void *ce_state,
5357 		      struct ce_index *info)
5358 {
5359 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5360 
5361 	return hif_state->ce_services->ce_get_index_info(scn, ce_state, info);
5362 }
5363 
5364 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
5365 		     unsigned int *offset)
5366 {
5367 	struct hang_event_info info = {0};
5368 	static uint32_t tracked_ce = BIT(CE_ID_1) | BIT(CE_ID_2) |
5369 		BIT(CE_ID_3) | BIT(CE_ID_4) | BIT(CE_ID_9) | BIT(CE_ID_10);
5370 	uint8_t curr_index = 0;
5371 	uint8_t i;
5372 	uint16_t size;
5373 
5374 	info.active_tasklet_count = qdf_atomic_read(&scn->active_tasklet_cnt);
5375 	info.active_grp_tasklet_cnt =
5376 				qdf_atomic_read(&scn->active_grp_tasklet_cnt);
5377 
5378 	for (i = 0; i < scn->ce_count; i++) {
5379 		if (!(tracked_ce & BIT(i)) || !scn->ce_id_to_state[i])
5380 			continue;
5381 
5382 		if (ce_get_index_info(scn, scn->ce_id_to_state[i],
5383 				      &info.ce_info[curr_index]))
5384 			continue;
5385 
5386 		curr_index++;
5387 	}
5388 
5389 	info.ce_count = curr_index;
5390 	size = sizeof(info) -
5391 		(CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index);
5392 
5393 	if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
5394 		return;
5395 
5396 	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_CE_INFO,
5397 			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);
5398 
5399 	qdf_mem_copy(data + *offset, &info, size);
5400 	*offset += size;
5401 }
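
/*
 * Worked example of the TLV size computation above (illustrative
 * numbers; the real CE_COUNT_MAX is target-specific): hang_event_info
 * reserves CE_COUNT_MAX ce_index slots but only ce_count of them are
 * populated, so the unused tail is trimmed before the copy.  With,
 * say, CE_COUNT_MAX == 12 and ce_count == 6:
 *
 *	size = sizeof(info) - (12 - 6) * sizeof(struct ce_index);
 *
 * i.e. the serialized event carries exactly the six valid entries
 * plus the fixed header fields.
 */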
5402 
5403 #ifdef FEATURE_DIRECT_LINK
5404 QDF_STATUS
5405 hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
5406 			   uint64_t addr, uint32_t data)
5407 {
5408 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(scn);
5409 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5410 
5411 	if (hif_state->ce_services->ce_set_irq_config_by_ceid)
5412 		return hif_state->ce_services->ce_set_irq_config_by_ceid(
5413 									hif_ctx,
5414 									ce_id,
5415 									addr,
5416 									data);
5417 
5418 	return QDF_STATUS_E_NOSUPPORT;
5419 }
5420 
5421 uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
5422 						  uint64_t **dma_addr,
5423 						  uint32_t *buf_size)
5424 {
5425 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(scn);
5426 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5427 	struct ce_ops *ce_services = hif_state->ce_services;
5428 
5429 	if (ce_services->ce_get_direct_link_dest_buffers)
5430 		return ce_services->ce_get_direct_link_dest_buffers(hif_ctx,
5431 								    dma_addr,
5432 								    buf_size);
5433 
5434 	return 0;
5435 }
5436 
5437 QDF_STATUS
5438 hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
5439 				 struct hif_direct_link_ce_info *info,
5440 				 uint8_t max_ce_info_len)
5441 {
5442 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(scn);
5443 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5444 	struct ce_ops *ce_services = hif_state->ce_services;
5445 
5446 	if (ce_services->ce_get_direct_link_ring_info)
5447 		return ce_services->ce_get_direct_link_ring_info(hif_ctx,
5448 							       info,
5449 							       max_ce_info_len);
5450 
5451 	return QDF_STATUS_E_NOSUPPORT;
5452 }
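
/*
 * All three direct-link wrappers above share the same guarded
 * dispatch shape; a sketch for adding another (the ce_foo hook name
 * is hypothetical, shown only to illustrate the pattern):
 *
 *	if (hif_state->ce_services->ce_foo)
 *		return hif_state->ce_services->ce_foo(hif_ctx, ...);
 *
 *	return QDF_STATUS_E_NOSUPPORT;
 *
 * Unimplemented hooks thus degrade to QDF_STATUS_E_NOSUPPORT (or a
 * zero count) instead of dereferencing a NULL ce_ops member.
 */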
5453 #endif
5454 #endif
5455