xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision 4902c68f4d4507da5cf7607fa013a97fcb10adba)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "targcfg.h"
21 #include "qdf_lock.h"
22 #include "qdf_status.h"
23 #include "qdf_status.h"
24 #include <qdf_atomic.h>         /* qdf_atomic_read */
25 #include <targaddrs.h>
26 #include "hif_io32.h"
27 #include <hif.h>
28 #include <target_type.h>
29 #include "regtable.h"
30 #define ATH_MODULE_NAME hif
31 #include <a_debug.h>
32 #include "hif_main.h"
33 #include "ce_api.h"
34 #include "qdf_trace.h"
35 #include "pld_common.h"
36 #include "hif_debug.h"
37 #include "ce_internal.h"
38 #include "ce_reg.h"
39 #include "ce_assignment.h"
40 #include "ce_tasklet.h"
41 #include "qdf_module.h"
42 #include "qdf_ssr_driver_dump.h"
43 #include <wbuff.h>
44 
45 #define CE_POLL_TIMEOUT 10      /* ms */
46 
47 #define AGC_DUMP         1
48 #define CHANINFO_DUMP    2
49 #define BB_WATCHDOG_DUMP 3
50 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
51 #define PCIE_ACCESS_DUMP 4
52 #endif
53 #include "mp_dev.h"
54 #ifdef HIF_CE_LOG_INFO
55 #include "qdf_hang_event_notifier.h"
56 #endif
57 
58 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
59 	defined(QCA_WIFI_QCA6018) || defined(QCA_WIFI_QCA5018) || \
60 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCA5332) || \
61 	defined(QCA_WIFI_QCA9574)) && !defined(QCA_WIFI_SUPPORT_SRNG) && \
62 	!defined(QCA_WIFI_WCN6450)
63 #define QCA_WIFI_SUPPORT_SRNG
64 #endif
65 
66 #ifdef QCA_WIFI_SUPPORT_SRNG
67 #include <hal_api.h>
68 #endif
69 
70 /* Forward references */
71 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
72 
73 /*
74  * Fix EV118783, poll to check whether a BMI response comes
75  * other than waiting for the interruption which may be lost.
76  */
77 /* #define BMI_RSP_POLLING */
78 #define BMI_RSP_TO_MILLISEC  1000
79 
80 #ifdef CONFIG_BYPASS_QMI
81 #define BYPASS_QMI 1
82 #else
83 #define BYPASS_QMI 0
84 #endif
85 
86 static void hif_config_rri_on_ddr(struct hif_softc *scn);
87 
88 /**
89  * hif_target_access_log_dump() - dump access log
90  *
91  * dump access log
92  *
93  * Return: n/a
94  */
95 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
96 static void hif_target_access_log_dump(void)
97 {
98 	hif_target_dump_access_log();
99 }
100 #endif
101 
102 /*
103  * This structure contains the interrupt index for each Copy engine
104  * for various number of MSIs available in the system.
105  */
106 static struct ce_int_assignment ce_int_context[NUM_CE_CONTEXT] = {
107 	/* Default configuration */
108 	{{ CE_INTERRUPT_IDX(0),
109 	  CE_INTERRUPT_IDX(1),
110 	  CE_INTERRUPT_IDX(2),
111 	  CE_INTERRUPT_IDX(3),
112 	  CE_INTERRUPT_IDX(4),
113 	  CE_INTERRUPT_IDX(5),
114 	  CE_INTERRUPT_IDX(6),
115 	  CE_INTERRUPT_IDX(7),
116 	  CE_INTERRUPT_IDX(8),
117 	  CE_INTERRUPT_IDX(9),
118 	  CE_INTERRUPT_IDX(10),
119 	  CE_INTERRUPT_IDX(11),
120 #ifdef QCA_WIFI_QCN9224
121 	  CE_INTERRUPT_IDX(12),
122 	  CE_INTERRUPT_IDX(13),
123 	  CE_INTERRUPT_IDX(14),
124 	  CE_INTERRUPT_IDX(15),
125 #endif
126 	} },
127 	/* Interrupt assignment for 1 MSI combination */
128 	{{ CE_INTERRUPT_IDX(0),
129 	  CE_INTERRUPT_IDX(0),
130 	  CE_INTERRUPT_IDX(0),
131 	  CE_INTERRUPT_IDX(0),
132 	  CE_INTERRUPT_IDX(0),
133 	  CE_INTERRUPT_IDX(0),
134 	  CE_INTERRUPT_IDX(0),
135 	  CE_INTERRUPT_IDX(0),
136 	  CE_INTERRUPT_IDX(0),
137 	  CE_INTERRUPT_IDX(0),
138 	  CE_INTERRUPT_IDX(0),
139 	  CE_INTERRUPT_IDX(0),
140 #ifdef QCA_WIFI_QCN9224
141 	  CE_INTERRUPT_IDX(0),
142 	  CE_INTERRUPT_IDX(0),
143 	  CE_INTERRUPT_IDX(0),
144 	  CE_INTERRUPT_IDX(0),
145 #endif
146 	} },
147 	/* Interrupt assignment for 2 MSI combination */
148 	{{ CE_INTERRUPT_IDX(0),
149 	  CE_INTERRUPT_IDX(1),
150 	  CE_INTERRUPT_IDX(0),
151 	  CE_INTERRUPT_IDX(1),
152 	  CE_INTERRUPT_IDX(0),
153 	  CE_INTERRUPT_IDX(1),
154 	  CE_INTERRUPT_IDX(0),
155 	  CE_INTERRUPT_IDX(0),
156 	  CE_INTERRUPT_IDX(0),
157 	  CE_INTERRUPT_IDX(0),
158 	  CE_INTERRUPT_IDX(0),
159 	  CE_INTERRUPT_IDX(0),
160 #ifdef QCA_WIFI_QCN9224
161 	  CE_INTERRUPT_IDX(0),
162 	  CE_INTERRUPT_IDX(0),
163 	  CE_INTERRUPT_IDX(0),
164 	  CE_INTERRUPT_IDX(0),
165 #endif
166 	} },
167 	/* Interrupt assignment for 3 MSI combination */
168 	{{ CE_INTERRUPT_IDX(0),
169 	  CE_INTERRUPT_IDX(1),
170 	  CE_INTERRUPT_IDX(2),
171 	  CE_INTERRUPT_IDX(1),
172 	  CE_INTERRUPT_IDX(0),
173 	  CE_INTERRUPT_IDX(1),
174 	  CE_INTERRUPT_IDX(0),
175 	  CE_INTERRUPT_IDX(0),
176 	  CE_INTERRUPT_IDX(0),
177 	  CE_INTERRUPT_IDX(0),
178 	  CE_INTERRUPT_IDX(0),
179 	  CE_INTERRUPT_IDX(0),
180 #ifdef QCA_WIFI_QCN9224
181 	  CE_INTERRUPT_IDX(0),
182 	  CE_INTERRUPT_IDX(0),
183 	  CE_INTERRUPT_IDX(0),
184 	  CE_INTERRUPT_IDX(0),
185 #endif
186 	} },
187 	/* Interrupt assignment for 4 MSI combination */
188 	{{ CE_INTERRUPT_IDX(0),
189 	  CE_INTERRUPT_IDX(1),
190 	  CE_INTERRUPT_IDX(2),
191 	  CE_INTERRUPT_IDX(3),
192 	  CE_INTERRUPT_IDX(0),
193 	  CE_INTERRUPT_IDX(1),
194 	  CE_INTERRUPT_IDX(0),
195 	  CE_INTERRUPT_IDX(0),
196 	  CE_INTERRUPT_IDX(0),
197 	  CE_INTERRUPT_IDX(0),
198 	  CE_INTERRUPT_IDX(0),
199 	  CE_INTERRUPT_IDX(0),
200 #ifdef QCA_WIFI_QCN9224
201 	  CE_INTERRUPT_IDX(0),
202 	  CE_INTERRUPT_IDX(0),
203 	  CE_INTERRUPT_IDX(0),
204 	  CE_INTERRUPT_IDX(0),
205 #endif
206 	} },
207 	/* Interrupt assignment for 5 MSI combination */
208 	{{ CE_INTERRUPT_IDX(0),
209 	  CE_INTERRUPT_IDX(1),
210 	  CE_INTERRUPT_IDX(2),
211 	  CE_INTERRUPT_IDX(3),
212 	  CE_INTERRUPT_IDX(0),
213 	  CE_INTERRUPT_IDX(4),
214 	  CE_INTERRUPT_IDX(0),
215 	  CE_INTERRUPT_IDX(0),
216 	  CE_INTERRUPT_IDX(0),
217 	  CE_INTERRUPT_IDX(0),
218 	  CE_INTERRUPT_IDX(0),
219 	  CE_INTERRUPT_IDX(0),
220 #ifdef QCA_WIFI_QCN9224
221 	  CE_INTERRUPT_IDX(0),
222 	  CE_INTERRUPT_IDX(0),
223 	  CE_INTERRUPT_IDX(0),
224 	  CE_INTERRUPT_IDX(0),
225 #endif
226 	} },
227 	/* Interrupt assignment for 6 MSI combination */
228 	{{ CE_INTERRUPT_IDX(0),
229 	  CE_INTERRUPT_IDX(1),
230 	  CE_INTERRUPT_IDX(2),
231 	  CE_INTERRUPT_IDX(3),
232 	  CE_INTERRUPT_IDX(4),
233 	  CE_INTERRUPT_IDX(5),
234 	  CE_INTERRUPT_IDX(0),
235 	  CE_INTERRUPT_IDX(0),
236 	  CE_INTERRUPT_IDX(0),
237 	  CE_INTERRUPT_IDX(0),
238 	  CE_INTERRUPT_IDX(0),
239 	  CE_INTERRUPT_IDX(0),
240 #ifdef QCA_WIFI_QCN9224
241 	  CE_INTERRUPT_IDX(0),
242 	  CE_INTERRUPT_IDX(0),
243 	  CE_INTERRUPT_IDX(0),
244 	  CE_INTERRUPT_IDX(0),
245 #endif
246 	} },
247 	/* Interrupt assignment for 7 MSI combination */
248 	{{ CE_INTERRUPT_IDX(0),
249 	  CE_INTERRUPT_IDX(1),
250 	  CE_INTERRUPT_IDX(2),
251 	  CE_INTERRUPT_IDX(3),
252 	  CE_INTERRUPT_IDX(4),
253 	  CE_INTERRUPT_IDX(5),
254 	  CE_INTERRUPT_IDX(6),
255 	  CE_INTERRUPT_IDX(0),
256 	  CE_INTERRUPT_IDX(0),
257 	  CE_INTERRUPT_IDX(0),
258 	  CE_INTERRUPT_IDX(0),
259 	  CE_INTERRUPT_IDX(0),
260 #ifdef QCA_WIFI_QCN9224
261 	  CE_INTERRUPT_IDX(0),
262 	  CE_INTERRUPT_IDX(0),
263 	  CE_INTERRUPT_IDX(0),
264 	  CE_INTERRUPT_IDX(0),
265 #endif
266 	} },
267 	/* Interrupt assignment for 8 MSI combination */
268 	{{ CE_INTERRUPT_IDX(0),
269 	  CE_INTERRUPT_IDX(1),
270 	  CE_INTERRUPT_IDX(2),
271 	  CE_INTERRUPT_IDX(3),
272 	  CE_INTERRUPT_IDX(4),
273 	  CE_INTERRUPT_IDX(5),
274 	  CE_INTERRUPT_IDX(6),
275 	  CE_INTERRUPT_IDX(7),
276 	  CE_INTERRUPT_IDX(0),
277 	  CE_INTERRUPT_IDX(0),
278 	  CE_INTERRUPT_IDX(0),
279 	  CE_INTERRUPT_IDX(0),
280 #ifdef QCA_WIFI_QCN9224
281 	  CE_INTERRUPT_IDX(0),
282 	  CE_INTERRUPT_IDX(0),
283 	  CE_INTERRUPT_IDX(0),
284 	  CE_INTERRUPT_IDX(0),
285 #endif
286 	} },
287 	/* Interrupt assignment for 9 MSI combination */
288 	{{ CE_INTERRUPT_IDX(0),
289 	  CE_INTERRUPT_IDX(1),
290 	  CE_INTERRUPT_IDX(2),
291 	  CE_INTERRUPT_IDX(3),
292 	  CE_INTERRUPT_IDX(4),
293 	  CE_INTERRUPT_IDX(5),
294 	  CE_INTERRUPT_IDX(6),
295 	  CE_INTERRUPT_IDX(7),
296 	  CE_INTERRUPT_IDX(8),
297 	  CE_INTERRUPT_IDX(0),
298 	  CE_INTERRUPT_IDX(0),
299 	  CE_INTERRUPT_IDX(0),
300 #ifdef QCA_WIFI_QCN9224
301 	  CE_INTERRUPT_IDX(0),
302 	  CE_INTERRUPT_IDX(0),
303 	  CE_INTERRUPT_IDX(0),
304 	  CE_INTERRUPT_IDX(0),
305 #endif
306 	} },
307 	/* Interrupt assignment for 10 MSI combination */
308 	{{ CE_INTERRUPT_IDX(0),
309 	  CE_INTERRUPT_IDX(1),
310 	  CE_INTERRUPT_IDX(2),
311 	  CE_INTERRUPT_IDX(3),
312 	  CE_INTERRUPT_IDX(4),
313 	  CE_INTERRUPT_IDX(5),
314 	  CE_INTERRUPT_IDX(6),
315 	  CE_INTERRUPT_IDX(7),
316 	  CE_INTERRUPT_IDX(8),
317 	  CE_INTERRUPT_IDX(9),
318 	  CE_INTERRUPT_IDX(0),
319 	  CE_INTERRUPT_IDX(0),
320 #ifdef QCA_WIFI_QCN9224
321 	  CE_INTERRUPT_IDX(0),
322 	  CE_INTERRUPT_IDX(0),
323 	  CE_INTERRUPT_IDX(0),
324 	  CE_INTERRUPT_IDX(0),
325 #endif
326 	} },
327 	/* Interrupt assignment for 11 MSI combination */
328 	{{ CE_INTERRUPT_IDX(0),
329 	  CE_INTERRUPT_IDX(1),
330 	  CE_INTERRUPT_IDX(2),
331 	  CE_INTERRUPT_IDX(3),
332 	  CE_INTERRUPT_IDX(4),
333 	  CE_INTERRUPT_IDX(5),
334 	  CE_INTERRUPT_IDX(6),
335 	  CE_INTERRUPT_IDX(7),
336 	  CE_INTERRUPT_IDX(8),
337 	  CE_INTERRUPT_IDX(9),
338 	  CE_INTERRUPT_IDX(10),
339 	  CE_INTERRUPT_IDX(0),
340 #ifdef QCA_WIFI_QCN9224
341 	  CE_INTERRUPT_IDX(0),
342 	  CE_INTERRUPT_IDX(0),
343 	  CE_INTERRUPT_IDX(0),
344 	  CE_INTERRUPT_IDX(0),
345 #endif
346 	} },
347 	/* Interrupt assignment for 12 MSI combination */
348 	{{ CE_INTERRUPT_IDX(0),
349 	  CE_INTERRUPT_IDX(1),
350 	  CE_INTERRUPT_IDX(2),
351 	  CE_INTERRUPT_IDX(3),
352 	  CE_INTERRUPT_IDX(4),
353 	  CE_INTERRUPT_IDX(5),
354 	  CE_INTERRUPT_IDX(6),
355 	  CE_INTERRUPT_IDX(7),
356 	  CE_INTERRUPT_IDX(8),
357 	  CE_INTERRUPT_IDX(9),
358 	  CE_INTERRUPT_IDX(10),
359 	  CE_INTERRUPT_IDX(11),
360 #ifdef QCA_WIFI_QCN9224
361 	  CE_INTERRUPT_IDX(0),
362 	  CE_INTERRUPT_IDX(0),
363 	  CE_INTERRUPT_IDX(0),
364 	  CE_INTERRUPT_IDX(0),
365 #endif
366 	} },
367 #ifdef QCA_WIFI_QCN9224
368 	/* Interrupt assignment for 13 MSI combination */
369 	{{ CE_INTERRUPT_IDX(0),
370 	  CE_INTERRUPT_IDX(1),
371 	  CE_INTERRUPT_IDX(2),
372 	  CE_INTERRUPT_IDX(3),
373 	  CE_INTERRUPT_IDX(4),
374 	  CE_INTERRUPT_IDX(5),
375 	  CE_INTERRUPT_IDX(6),
376 	  CE_INTERRUPT_IDX(7),
377 	  CE_INTERRUPT_IDX(8),
378 	  CE_INTERRUPT_IDX(9),
379 	  CE_INTERRUPT_IDX(10),
380 	  CE_INTERRUPT_IDX(11),
381 	  CE_INTERRUPT_IDX(12),
382 	  CE_INTERRUPT_IDX(0),
383 	  CE_INTERRUPT_IDX(0),
384 	  CE_INTERRUPT_IDX(0),
385 	} },
386 	/* Interrupt assignment for 14 MSI combination */
387 	{{ CE_INTERRUPT_IDX(0),
388 	  CE_INTERRUPT_IDX(1),
389 	  CE_INTERRUPT_IDX(2),
390 	  CE_INTERRUPT_IDX(3),
391 	  CE_INTERRUPT_IDX(4),
392 	  CE_INTERRUPT_IDX(5),
393 	  CE_INTERRUPT_IDX(6),
394 	  CE_INTERRUPT_IDX(7),
395 	  CE_INTERRUPT_IDX(8),
396 	  CE_INTERRUPT_IDX(9),
397 	  CE_INTERRUPT_IDX(10),
398 	  CE_INTERRUPT_IDX(11),
399 	  CE_INTERRUPT_IDX(12),
400 	  CE_INTERRUPT_IDX(13),
401 	  CE_INTERRUPT_IDX(0),
402 	  CE_INTERRUPT_IDX(0),
403 	} },
404 	/* Interrupt assignment for 15 MSI combination */
405 	{{ CE_INTERRUPT_IDX(0),
406 	  CE_INTERRUPT_IDX(1),
407 	  CE_INTERRUPT_IDX(2),
408 	  CE_INTERRUPT_IDX(3),
409 	  CE_INTERRUPT_IDX(4),
410 	  CE_INTERRUPT_IDX(5),
411 	  CE_INTERRUPT_IDX(6),
412 	  CE_INTERRUPT_IDX(7),
413 	  CE_INTERRUPT_IDX(8),
414 	  CE_INTERRUPT_IDX(9),
415 	  CE_INTERRUPT_IDX(10),
416 	  CE_INTERRUPT_IDX(11),
417 	  CE_INTERRUPT_IDX(12),
418 	  CE_INTERRUPT_IDX(13),
419 	  CE_INTERRUPT_IDX(14),
420 	  CE_INTERRUPT_IDX(0),
421 	} },
422 	/* Interrupt assignment for 16 MSI combination */
423 	{{ CE_INTERRUPT_IDX(0),
424 	  CE_INTERRUPT_IDX(1),
425 	  CE_INTERRUPT_IDX(2),
426 	  CE_INTERRUPT_IDX(3),
427 	  CE_INTERRUPT_IDX(4),
428 	  CE_INTERRUPT_IDX(5),
429 	  CE_INTERRUPT_IDX(6),
430 	  CE_INTERRUPT_IDX(7),
431 	  CE_INTERRUPT_IDX(8),
432 	  CE_INTERRUPT_IDX(9),
433 	  CE_INTERRUPT_IDX(10),
434 	  CE_INTERRUPT_IDX(11),
435 	  CE_INTERRUPT_IDX(12),
436 	  CE_INTERRUPT_IDX(13),
437 	  CE_INTERRUPT_IDX(14),
438 	  CE_INTERRUPT_IDX(15),
439 	} },
440 #endif
441 };
442 
443 
444 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
445 		      uint8_t cmd_id, bool start)
446 {
447 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
448 
449 	switch (cmd_id) {
450 	case AGC_DUMP:
451 		if (start)
452 			priv_start_agc(scn);
453 		else
454 			priv_dump_agc(scn);
455 		break;
456 	case CHANINFO_DUMP:
457 		if (start)
458 			priv_start_cap_chaninfo(scn);
459 		else
460 			priv_dump_chaninfo(scn);
461 		break;
462 	case BB_WATCHDOG_DUMP:
463 		priv_dump_bbwatchdog(scn);
464 		break;
465 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
466 	case PCIE_ACCESS_DUMP:
467 		hif_target_access_log_dump();
468 		break;
469 #endif
470 	default:
471 		hif_err("Invalid htc dump command: %d", cmd_id);
472 		break;
473 	}
474 }
475 
476 static void ce_poll_timeout(void *arg)
477 {
478 	struct CE_state *CE_state = (struct CE_state *)arg;
479 
480 	if (CE_state->timer_inited) {
481 		ce_per_engine_service(CE_state->scn, CE_state->id);
482 		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
483 	}
484 }
485 
486 static unsigned int roundup_pwr2(unsigned int n)
487 {
488 	int i;
489 	unsigned int test_pwr2;
490 
491 	if (!(n & (n - 1)))
492 		return n; /* already a power of 2 */
493 
494 	test_pwr2 = 4;
495 	for (i = 0; i < 29; i++) {
496 		if (test_pwr2 > n)
497 			return test_pwr2;
498 		test_pwr2 = test_pwr2 << 1;
499 	}
500 
501 	QDF_ASSERT(0); /* n too large */
502 	return 0;
503 }
504 
505 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
506 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
507 
508 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
509 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
510 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
511 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
512 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
513 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
514 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
515 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
516 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
517 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
518 #ifdef QCA_WIFI_3_0_ADRASTEA
519 	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
520 	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
521 	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
522 #endif
523 };
524 
525 #ifdef QCN7605_SUPPORT
526 static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
527 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
528 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
529 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
530 	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
531 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
532 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
533 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
534 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
535 };
536 #endif
537 
538 #ifdef WLAN_FEATURE_EPPING
539 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
540 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
541 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
542 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
543 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
544 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
545 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
546 	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
547 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
548 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
549 };
550 #endif
551 
552 /* CE_PCI TABLE */
553 /*
554  * NOTE: the table below is out of date, though still a useful reference.
555  * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
556  * mapping of HTC services to HIF pipes.
557  */
558 /*
559  * This authoritative table defines Copy Engine configuration and the mapping
560  * of services/endpoints to CEs.  A subset of this information is passed to
561  * the Target during startup as a prerequisite to entering BMI phase.
562  * See:
563  *    target_service_to_ce_map - Target-side mapping
564  *    hif_map_service_to_pipe      - Host-side mapping
565  *    target_ce_config         - Target-side configuration
566  *    host_ce_config           - Host-side configuration
567    ============================================================================
568    Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
569  |                      |      | ctio | Size     | Frequency
570  |                      |      | n    |          |
571    ============================================================================
572    tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
573    descriptor |                      |      |      | O(100B)  | and regular
574    download   |                      |      |      |          |
575    ----------------------------------------------------------------------------
576    rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
577    indication |                      |      |      | O(10B)   | regular
578    upload     |                      |      |      |          |
579    ----------------------------------------------------------------------------
580    MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
581    upload     |                      |      |      | O(1000B) | (frequent
582    e.g. noise |                      |      |      |          | during IP1.0
583    packets    |                      |      |      |          | testing)
584    ----------------------------------------------------------------------------
585    MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
586    download   |                      |      |      | O(1000B) | (frequent
587    e.g.       |                      |      |      |          | during IP1.0
588    misdirecte |                      |      |      |          | testing)
589    d EAPOL    |                      |      |      |          |
590    packets    |                      |      |      |          |
591    ----------------------------------------------------------------------------
592    n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
593  | DATA_VO (uplink)     |      |      |          |
594    ----------------------------------------------------------------------------
595    n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
596  | DATA_VO (downlink)   |      |      |          |
597    ----------------------------------------------------------------------------
598    WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
599  |                      |      |      | O(100B)  |
600    ----------------------------------------------------------------------------
601    WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
602    messages   | (downlink)           |      |      | O(100B)  |
603  |                      |      |      |          |
604    ----------------------------------------------------------------------------
605    n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
606  | HTC_RAW_STREAMS      |      |      |          |
607  | (uplink)             |      |      |          |
608    ----------------------------------------------------------------------------
609    n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
610  | HTC_RAW_STREAMS      |      |      |          |
611  | (downlink)           |      |      |          |
612    ----------------------------------------------------------------------------
613    diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
614  |                      |      |      |          | infrequent
615    ============================================================================
616  */
617 
618 /*
619  * Map from service/endpoint to Copy Engine.
620  * This table is derived from the CE_PCI TABLE, above.
621  * It is passed to the Target at startup for use by firmware.
622  */
623 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
624 	{
625 		WMI_DATA_VO_SVC,
626 		PIPEDIR_OUT,    /* out = UL = host -> target */
627 		3,
628 	},
629 	{
630 		WMI_DATA_VO_SVC,
631 		PIPEDIR_IN,     /* in = DL = target -> host */
632 		2,
633 	},
634 	{
635 		WMI_DATA_BK_SVC,
636 		PIPEDIR_OUT,    /* out = UL = host -> target */
637 		3,
638 	},
639 	{
640 		WMI_DATA_BK_SVC,
641 		PIPEDIR_IN,     /* in = DL = target -> host */
642 		2,
643 	},
644 	{
645 		WMI_DATA_BE_SVC,
646 		PIPEDIR_OUT,    /* out = UL = host -> target */
647 		3,
648 	},
649 	{
650 		WMI_DATA_BE_SVC,
651 		PIPEDIR_IN,     /* in = DL = target -> host */
652 		2,
653 	},
654 	{
655 		WMI_DATA_VI_SVC,
656 		PIPEDIR_OUT,    /* out = UL = host -> target */
657 		3,
658 	},
659 	{
660 		WMI_DATA_VI_SVC,
661 		PIPEDIR_IN,     /* in = DL = target -> host */
662 		2,
663 	},
664 	{
665 		WMI_CONTROL_SVC,
666 		PIPEDIR_OUT,    /* out = UL = host -> target */
667 		3,
668 	},
669 	{
670 		WMI_CONTROL_SVC,
671 		PIPEDIR_IN,     /* in = DL = target -> host */
672 		2,
673 	},
674 	{
675 		HTC_CTRL_RSVD_SVC,
676 		PIPEDIR_OUT,    /* out = UL = host -> target */
677 		0,              /* could be moved to 3 (share with WMI) */
678 	},
679 	{
680 		HTC_CTRL_RSVD_SVC,
681 		PIPEDIR_IN,     /* in = DL = target -> host */
682 		2,
683 	},
684 	{
685 		HTC_RAW_STREAMS_SVC, /* not currently used */
686 		PIPEDIR_OUT,    /* out = UL = host -> target */
687 		0,
688 	},
689 	{
690 		HTC_RAW_STREAMS_SVC, /* not currently used */
691 		PIPEDIR_IN,     /* in = DL = target -> host */
692 		2,
693 	},
694 	{
695 		HTT_DATA_MSG_SVC,
696 		PIPEDIR_OUT,    /* out = UL = host -> target */
697 		4,
698 	},
699 	{
700 		HTT_DATA_MSG_SVC,
701 		PIPEDIR_IN,     /* in = DL = target -> host */
702 		1,
703 	},
704 	{
705 		WDI_IPA_TX_SVC,
706 		PIPEDIR_OUT,    /* in = DL = target -> host */
707 		5,
708 	},
709 #if defined(QCA_WIFI_3_0_ADRASTEA)
710 	{
711 		HTT_DATA2_MSG_SVC,
712 		PIPEDIR_IN,    /* in = DL = target -> host */
713 		9,
714 	},
715 	{
716 		HTT_DATA3_MSG_SVC,
717 		PIPEDIR_IN,    /* in = DL = target -> host */
718 		10,
719 	},
720 	{
721 		PACKET_LOG_SVC,
722 		PIPEDIR_IN,    /* in = DL = target -> host */
723 		11,
724 	},
725 #endif
726 	/* (Additions here) */
727 
728 	{                       /* Must be last */
729 		0,
730 		0,
731 		0,
732 	},
733 };
734 
735 /* PIPEDIR_OUT = HOST to Target */
736 /* PIPEDIR_IN  = TARGET to HOST */
737 #if (defined(QCA_WIFI_QCA8074))
738 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
739 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
740 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
741 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
742 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
743 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
744 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
745 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
746 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
747 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
748 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
749 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
750 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
751 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
752 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
753 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
754 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
755 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
756 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
757 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
758 	/* (Additions here) */
759 	{ 0, 0, 0, },
760 };
761 #else
762 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
763 };
764 #endif
765 
766 #if (defined(QCA_WIFI_QCA9574))
767 static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
768 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
769 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
770 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
771 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
772 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
773 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
774 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
775 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
776 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
777 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
778 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
779 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
780 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
781 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
782 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
783 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
784 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
785 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
786 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
787 	/* (Additions here) */
788 	{ 0, 0, 0, },
789 };
790 #else
791 static struct service_to_pipe target_service_to_ce_map_qca9574[] = {
792 };
793 #endif
794 
795 #if (defined(QCA_WIFI_QCA8074V2))
796 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
797 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
798 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
799 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
800 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
801 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
802 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
803 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
804 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
805 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
806 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
807 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
808 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
809 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
810 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
811 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
812 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
813 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
814 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
815 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
816 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
817 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
818 	/* (Additions here) */
819 	{ 0, 0, 0, },
820 };
821 #else
822 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
823 };
824 #endif
825 
826 #if (defined(QCA_WIFI_QCA6018))
827 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
828 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
829 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
830 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
831 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
832 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
833 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
834 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
835 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
836 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
837 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
838 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
839 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
840 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
841 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
842 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
843 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
844 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
845 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
846 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
847 	/* (Additions here) */
848 	{ 0, 0, 0, },
849 };
850 #else
851 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
852 };
853 #endif
854 
855 #if (defined(QCA_WIFI_QCN9000))
856 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
857 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
858 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
859 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
860 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
861 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
862 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
863 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
864 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
865 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
866 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
867 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
868 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
869 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
870 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
871 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
872 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
873 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
874 	/* (Additions here) */
875 	{ 0, 0, 0, },
876 };
877 #else
878 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
879 };
880 #endif
881 
882 #if (defined(QCA_WIFI_QCA5332) || defined(QCA_WIFI_QCN6432))
883 static struct service_to_pipe target_service_to_ce_map_qca5332[] = {
884 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
885 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
886 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
887 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
888 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
889 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
890 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
891 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
892 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
893 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
894 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
895 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
896 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
897 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
898 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
899 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
900 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
901 #ifdef WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE
902 	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 9, },
903 	{ WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 9, },
904 #else
905 	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 2, },
906 	{ WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 2, },
907 #endif
908 	/* (Additions here) */
909 	{ 0, 0, 0, },
910 };
911 #else
912 static struct service_to_pipe target_service_to_ce_map_qca5332[] = {
913 };
914 #endif
915 
916 #if (defined(QCA_WIFI_QCN9224))
917 static struct service_to_pipe target_service_to_ce_map_qcn9224[] = {
918 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
919 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
920 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
921 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
922 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
923 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
924 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
925 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
926 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
927 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
928 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
929 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
930 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
931 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
932 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
933 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
934 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
935 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
936 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
937 #ifdef WLAN_DIAG_AND_DBR_OVER_SEPARATE_CE
938 	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 14, },
939 	{ WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 14, },
940 #else
941 	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 2, },
942 	{ WMI_CONTROL_DBR_SVC, PIPEDIR_IN, 2, },
943 #endif
944 	/* (Additions here) */
945 	{ 0, 0, 0, },
946 };
947 #endif
948 
949 #if defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCN9160)
950 static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
951 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
952 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
953 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
954 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
955 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
956 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
957 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
958 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
959 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
960 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
961 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
962 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
963 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
964 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
965 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
966 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
967 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
968 	/* (Additions here) */
969 	{ 0, 0, 0, },
970 };
971 #else
972 static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
973 };
974 #endif
975 
976 /* PIPEDIR_OUT = HOST to Target */
977 /* PIPEDIR_IN  = TARGET to HOST */
978 #ifdef QCN7605_SUPPORT
979 static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
980 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
981 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
982 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
983 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
984 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
985 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
986 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
987 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
988 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
989 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
990 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
991 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
992 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
993 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
994 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
995 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
996 	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
997 #ifdef IPA_OFFLOAD
998 	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
999 #else
1000 	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
1001 #endif
1002 	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
1003 	/* (Additions here) */
1004 	{ 0, 0, 0, },
1005 };
1006 #endif
1007 
1008 #if (defined(QCA_WIFI_QCA6290))
1009 #ifdef QCA_6290_AP_MODE
1010 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
1011 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1012 	{ WMI_DATA_VO_SVC, PIPEDIR_IN , 2, },
1013 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1014 	{ WMI_DATA_BK_SVC, PIPEDIR_IN , 2, },
1015 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1016 	{ WMI_DATA_BE_SVC, PIPEDIR_IN , 2, },
1017 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1018 	{ WMI_DATA_VI_SVC, PIPEDIR_IN , 2, },
1019 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1020 	{ WMI_CONTROL_SVC, PIPEDIR_IN , 2, },
1021 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1022 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN , 2, },
1023 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1024 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN , 1, },
1025 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
1026 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
1027 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
1028 	/* (Additions here) */
1029 	{ 0, 0, 0, },
1030 };
1031 #else
1032 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
1033 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1034 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1035 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1036 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1037 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1038 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1039 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1040 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1041 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1042 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1043 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1044 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1045 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1046 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1047 	/* (Additions here) */
1048 	{ 0, 0, 0, },
1049 };
1050 #endif
1051 #else
1052 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
1053 };
1054 #endif
1055 
1056 #if (defined(QCA_WIFI_QCA6390))
1057 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
1058 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1059 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1060 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1061 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1062 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1063 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1064 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1065 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1066 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1067 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1068 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1069 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1070 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1071 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1072 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
1073 	/* (Additions here) */
1074 	{ 0, 0, 0, },
1075 };
1076 #else
1077 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
1078 };
1079 #endif
1080 
1081 static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
1082 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1083 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1084 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1085 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1086 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1087 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1088 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1089 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1090 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1091 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1092 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1093 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1094 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1095 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1096 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
1097 	/* (Additions here) */
1098 	{ 0, 0, 0, },
1099 };
1100 
1101 #if (defined(QCA_WIFI_QCA6750))
1102 static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
1103 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1104 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1105 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1106 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1107 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1108 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1109 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1110 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1111 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1112 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1113 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1114 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1115 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1116 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1117 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
1118 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
1119 	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
1120 #endif
1121 	/* (Additions here) */
1122 	{ 0, 0, 0, },
1123 };
1124 #else
1125 static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
1126 };
1127 #endif
1128 
1129 #if (defined(QCA_WIFI_KIWI))
1130 #ifdef FEATURE_DIRECT_LINK
1131 static struct service_to_pipe target_service_to_ce_map_kiwi_direct_link[] = {
1132 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1133 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1134 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1135 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1136 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1137 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1138 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1139 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1140 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1141 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1142 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 4, },
1143 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1144 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1145 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1146 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
1147 	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
1148 #endif
1149 	{ LPASS_DATA_MSG_SVC, PIPEDIR_OUT, 0, },
1150 	{ LPASS_DATA_MSG_SVC, PIPEDIR_IN, 5, },
1151 	/* (Additions here) */
1152 	{ 0, 0, 0, },
1153 };
1154 #endif
1155 
1156 static struct service_to_pipe target_service_to_ce_map_kiwi[] = {
1157 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1158 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1159 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1160 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1161 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1162 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1163 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1164 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1165 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1166 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1167 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1168 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1169 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1170 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1171 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
1172 	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
1173 #endif
1174 	/* (Additions here) */
1175 	{ 0, 0, 0, },
1176 };
1177 #else
1178 static struct service_to_pipe target_service_to_ce_map_kiwi[] = {
1179 };
1180 #endif
1181 
1182 #ifdef QCA_WIFI_WCN6450
1183 static struct service_to_pipe target_service_to_ce_map_wcn6450[] = {
1184 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1185 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1186 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1187 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1188 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1189 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1190 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1191 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1192 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1193 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1194 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1195 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1196 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1197 	{ HTT_DATA2_MSG_SVC, PIPEDIR_OUT, 5, },
1198 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1199 	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 10, },
1200 	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 11, },
1201 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
1202 	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
1203 #endif
1204 	/* (Additions here) */
1205 	{ 0, 0, 0, },
1206 };
1207 #else
1208 static struct service_to_pipe target_service_to_ce_map_wcn6450[] = {
1209 };
1210 #endif
1211 
1212 static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
1213 	{
1214 		WMI_DATA_VO_SVC,
1215 		PIPEDIR_OUT,    /* out = UL = host -> target */
1216 		3,
1217 	},
1218 	{
1219 		WMI_DATA_VO_SVC,
1220 		PIPEDIR_IN,     /* in = DL = target -> host */
1221 		2,
1222 	},
1223 	{
1224 		WMI_DATA_BK_SVC,
1225 		PIPEDIR_OUT,    /* out = UL = host -> target */
1226 		3,
1227 	},
1228 	{
1229 		WMI_DATA_BK_SVC,
1230 		PIPEDIR_IN,     /* in = DL = target -> host */
1231 		2,
1232 	},
1233 	{
1234 		WMI_DATA_BE_SVC,
1235 		PIPEDIR_OUT,    /* out = UL = host -> target */
1236 		3,
1237 	},
1238 	{
1239 		WMI_DATA_BE_SVC,
1240 		PIPEDIR_IN,     /* in = DL = target -> host */
1241 		2,
1242 	},
1243 	{
1244 		WMI_DATA_VI_SVC,
1245 		PIPEDIR_OUT,    /* out = UL = host -> target */
1246 		3,
1247 	},
1248 	{
1249 		WMI_DATA_VI_SVC,
1250 		PIPEDIR_IN,     /* in = DL = target -> host */
1251 		2,
1252 	},
1253 	{
1254 		WMI_CONTROL_SVC,
1255 		PIPEDIR_OUT,    /* out = UL = host -> target */
1256 		3,
1257 	},
1258 	{
1259 		WMI_CONTROL_SVC,
1260 		PIPEDIR_IN,     /* in = DL = target -> host */
1261 		2,
1262 	},
1263 	{
1264 		HTC_CTRL_RSVD_SVC,
1265 		PIPEDIR_OUT,    /* out = UL = host -> target */
1266 		0,              /* could be moved to 3 (share with WMI) */
1267 	},
1268 	{
1269 		HTC_CTRL_RSVD_SVC,
1270 		PIPEDIR_IN,     /* in = DL = target -> host */
1271 		1,
1272 	},
1273 	{
1274 		HTC_RAW_STREAMS_SVC, /* not currently used */
1275 		PIPEDIR_OUT,    /* out = UL = host -> target */
1276 		0,
1277 	},
1278 	{
1279 		HTC_RAW_STREAMS_SVC, /* not currently used */
1280 		PIPEDIR_IN,     /* in = DL = target -> host */
1281 		1,
1282 	},
1283 	{
1284 		HTT_DATA_MSG_SVC,
1285 		PIPEDIR_OUT,    /* out = UL = host -> target */
1286 		4,
1287 	},
1288 #ifdef WLAN_FEATURE_FASTPATH
1289 	{
1290 		HTT_DATA_MSG_SVC,
1291 		PIPEDIR_IN,     /* in = DL = target -> host */
1292 		5,
1293 	},
1294 #else /* WLAN_FEATURE_FASTPATH */
1295 	{
1296 		HTT_DATA_MSG_SVC,
1297 		PIPEDIR_IN,  /* in = DL = target -> host */
1298 		1,
1299 	},
1300 #endif /* WLAN_FEATURE_FASTPATH */
1301 
1302 	/* (Additions here) */
1303 
1304 	{                       /* Must be last */
1305 		0,
1306 		0,
1307 		0,
1308 	},
1309 };
1310 
1311 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
1312 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
1313 
1314 #ifdef WLAN_FEATURE_EPPING
1315 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
1316 	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
1317 	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
1318 	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
1319 	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
1320 	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
1321 	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
1322 	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
1323 	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
1324 	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
1325 	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
1326 	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
1327 	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
1328 	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
1329 	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
1330 	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
1331 	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
1332 	{0, 0, 0,},             /* Must be last */
1333 };
1334 
1335 void hif_select_epping_service_to_pipe_map(struct service_to_pipe
1336 					   **tgt_svc_map_to_use,
1337 					   uint32_t *sz_tgt_svc_map_to_use)
1338 {
1339 	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
1340 	*sz_tgt_svc_map_to_use =
1341 			sizeof(target_service_to_ce_map_wlan_epping);
1342 }
1343 #endif
1344 
1345 #ifdef QCN7605_SUPPORT
1346 static inline
1347 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
1348 			       uint32_t *sz_tgt_svc_map_to_use)
1349 {
1350 	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
1351 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
1352 }
1353 #else
1354 static inline
1355 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
1356 			       uint32_t *sz_tgt_svc_map_to_use)
1357 {
1358 	hif_err("QCN7605 not supported");
1359 }
1360 #endif
1361 
1362 #ifdef QCA_WIFI_QCN9224
1363 static
1364 void hif_set_ce_config_qcn9224(struct hif_softc *scn,
1365 			       struct HIF_CE_state *hif_state)
1366 {
1367 	hif_state->host_ce_config = host_ce_config_wlan_qcn9224;
1368 	hif_state->target_ce_config = target_ce_config_wlan_qcn9224;
1369 	hif_state->target_ce_config_sz =
1370 				 sizeof(target_ce_config_wlan_qcn9224);
1371 	scn->ce_count = QCN_9224_CE_COUNT;
1372 	scn->ini_cfg.disable_wake_irq = 1;
1373 }
1374 
1375 static
1376 void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
1377 			       uint32_t *sz_tgt_svc_map_to_use)
1378 {
1379 	*tgt_svc_map_to_use = target_service_to_ce_map_qcn9224;
1380 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn9224);
1381 }
1382 #else
1383 static inline
1384 void hif_set_ce_config_qcn9224(struct hif_softc *scn,
1385 			       struct HIF_CE_state *hif_state)
1386 {
1387 	hif_err("QCN9224 not supported");
1388 }
1389 
1390 static inline
1391 void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
1392 			       uint32_t *sz_tgt_svc_map_to_use)
1393 {
1394 	hif_err("QCN9224 not supported");
1395 }
1396 #endif
1397 
1398 #ifdef FEATURE_DIRECT_LINK
1399 /**
1400  * hif_select_service_to_pipe_map_kiwi() - Select service to CE map
1401  *  configuration for Kiwi
1402  * @scn: HIF context
1403  * @tgt_svc_map_to_use: returned service map
1404  * @sz_tgt_svc_map_to_use: returned length of the service map
1405  *
1406  * Return: None
1407  */
1408 static inline void
1409 hif_select_service_to_pipe_map_kiwi(struct hif_softc *scn,
1410 				    struct service_to_pipe **tgt_svc_map_to_use,
1411 				    uint32_t *sz_tgt_svc_map_to_use)
1412 {
1413 	if (pld_is_direct_link_supported(scn->qdf_dev->dev)) {
1414 		*tgt_svc_map_to_use = target_service_to_ce_map_kiwi_direct_link;
1415 		*sz_tgt_svc_map_to_use =
1416 			sizeof(target_service_to_ce_map_kiwi_direct_link);
1417 	} else {
1418 		*tgt_svc_map_to_use = target_service_to_ce_map_kiwi;
1419 		*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_kiwi);
1420 	}
1421 }
1422 #else
1423 static inline void
1424 hif_select_service_to_pipe_map_kiwi(struct hif_softc *scn,
1425 				    struct service_to_pipe **tgt_svc_map_to_use,
1426 				    uint32_t *sz_tgt_svc_map_to_use)
1427 {
1428 	*tgt_svc_map_to_use = target_service_to_ce_map_kiwi;
1429 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_kiwi);
1430 }
1431 #endif
1432 
1433 static void hif_select_service_to_pipe_map(struct hif_softc *scn,
1434 				    struct service_to_pipe **tgt_svc_map_to_use,
1435 				    uint32_t *sz_tgt_svc_map_to_use)
1436 {
1437 	uint32_t mode = hif_get_conparam(scn);
1438 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1439 	struct hif_target_info *tgt_info = &scn->target_info;
1440 
1441 	if (QDF_IS_EPPING_ENABLED(mode)) {
1442 		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
1443 						      sz_tgt_svc_map_to_use);
1444 	} else {
1445 		switch (tgt_info->target_type) {
1446 		default:
1447 			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
1448 			*sz_tgt_svc_map_to_use =
1449 				sizeof(target_service_to_ce_map_wlan);
1450 			break;
1451 		case TARGET_TYPE_QCN7605:
1452 			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
1453 						  sz_tgt_svc_map_to_use);
1454 			break;
1455 		case TARGET_TYPE_AR900B:
1456 		case TARGET_TYPE_QCA9984:
1457 		case TARGET_TYPE_QCA9888:
1458 		case TARGET_TYPE_AR9888:
1459 		case TARGET_TYPE_AR9888V2:
1460 			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
1461 			*sz_tgt_svc_map_to_use =
1462 				sizeof(target_service_to_ce_map_ar900b);
1463 			break;
1464 		case TARGET_TYPE_QCA6290:
1465 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
1466 			*sz_tgt_svc_map_to_use =
1467 				sizeof(target_service_to_ce_map_qca6290);
1468 			break;
1469 		case TARGET_TYPE_QCA6390:
1470 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
1471 			*sz_tgt_svc_map_to_use =
1472 				sizeof(target_service_to_ce_map_qca6390);
1473 			break;
1474 		case TARGET_TYPE_QCA6490:
1475 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
1476 			*sz_tgt_svc_map_to_use =
1477 				sizeof(target_service_to_ce_map_qca6490);
1478 			break;
1479 		case TARGET_TYPE_QCA6750:
1480 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6750;
1481 			*sz_tgt_svc_map_to_use =
1482 				sizeof(target_service_to_ce_map_qca6750);
1483 			break;
1484 		case TARGET_TYPE_KIWI:
1485 		case TARGET_TYPE_MANGO:
1486 		case TARGET_TYPE_PEACH:
1487 			hif_select_service_to_pipe_map_kiwi(scn,
1488 							 tgt_svc_map_to_use,
1489 							 sz_tgt_svc_map_to_use);
1490 			break;
1491 		case TARGET_TYPE_WCN6450:
1492 			*tgt_svc_map_to_use = target_service_to_ce_map_wcn6450;
1493 			*sz_tgt_svc_map_to_use =
1494 				 sizeof(target_service_to_ce_map_wcn6450);
1495 			break;
1496 		case TARGET_TYPE_QCA8074:
1497 			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
1498 			*sz_tgt_svc_map_to_use =
1499 				sizeof(target_service_to_ce_map_qca8074);
1500 			break;
1501 		case TARGET_TYPE_QCA8074V2:
1502 			*tgt_svc_map_to_use =
1503 				target_service_to_ce_map_qca8074_v2;
1504 			*sz_tgt_svc_map_to_use =
1505 				sizeof(target_service_to_ce_map_qca8074_v2);
1506 			break;
1507 		case TARGET_TYPE_QCA9574:
1508 			*tgt_svc_map_to_use =
1509 				target_service_to_ce_map_qca9574;
1510 			*sz_tgt_svc_map_to_use =
1511 				sizeof(target_service_to_ce_map_qca9574);
1512 			break;
1513 		case TARGET_TYPE_QCA6018:
1514 			*tgt_svc_map_to_use =
1515 				target_service_to_ce_map_qca6018;
1516 			*sz_tgt_svc_map_to_use =
1517 				sizeof(target_service_to_ce_map_qca6018);
1518 			break;
1519 		case TARGET_TYPE_QCN9000:
1520 			*tgt_svc_map_to_use =
1521 				target_service_to_ce_map_qcn9000;
1522 			*sz_tgt_svc_map_to_use =
1523 				sizeof(target_service_to_ce_map_qcn9000);
1524 			break;
1525 		case TARGET_TYPE_QCN9224:
1526 			hif_select_ce_map_qcn9224(tgt_svc_map_to_use,
1527 						  sz_tgt_svc_map_to_use);
1528 			break;
1529 		case TARGET_TYPE_QCA5332:
1530 		case TARGET_TYPE_QCN6432:
1531 			*tgt_svc_map_to_use = target_service_to_ce_map_qca5332;
1532 			*sz_tgt_svc_map_to_use =
1533 				sizeof(target_service_to_ce_map_qca5332);
1534 			break;
1535 		case TARGET_TYPE_QCA5018:
1536 		case TARGET_TYPE_QCN6122:
1537 		case TARGET_TYPE_QCN9160:
1538 			*tgt_svc_map_to_use =
1539 				target_service_to_ce_map_qca5018;
1540 			*sz_tgt_svc_map_to_use =
1541 				sizeof(target_service_to_ce_map_qca5018);
1542 			break;
1543 		}
1544 	}
1545 	hif_state->tgt_svc_map = *tgt_svc_map_to_use;
1546 	hif_state->sz_tgt_svc_map = *sz_tgt_svc_map_to_use /
1547 					sizeof(struct service_to_pipe);
1548 }
1549 
1550 #ifndef QCA_WIFI_WCN6450
1551 /**
1552  * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
1553  * @ce_state : pointer to the state context of the CE
1554  *
1555  * Description:
1556  *   Sets htt_rx_data attribute of the state structure if the
1557  *   CE serves one of the HTT DATA services.
1558  *
1559  * Return:
1560  *  false (attribute set to false)
1561  *  true  (attribute set to true);
1562  */
1563 static bool ce_mark_datapath(struct CE_state *ce_state)
1564 {
1565 	struct service_to_pipe *svc_map;
1566 	uint32_t map_sz, map_len;
1567 	int    i;
1568 	bool   rc = false;
1569 
1570 	if (ce_state) {
1571 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
1572 					       &map_sz);
1573 
1574 		map_len = map_sz / sizeof(struct service_to_pipe);
1575 		for (i = 0; i < map_len; i++) {
1576 			if ((svc_map[i].pipenum == ce_state->id) &&
1577 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
1578 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
1579 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
1580 				/* HTT CEs are unidirectional */
1581 				if (svc_map[i].pipedir == PIPEDIR_IN)
1582 					ce_state->htt_rx_data = true;
1583 				else
1584 					ce_state->htt_tx_data = true;
1585 				rc = true;
1586 			}
1587 		}
1588 	}
1589 	return rc;
1590 }
1591 
1592 static void ce_update_msi_batch_intr_flags(struct CE_state *ce_state)
1593 {
1594 }
1595 
1596 static inline void ce_update_wrt_idx_offset(struct hif_softc *scn,
1597 					    struct CE_state *ce_state,
1598 					    uint8_t ring_type)
1599 {
1600 }
1601 #else
1602 static bool ce_mark_datapath(struct CE_state *ce_state)
1603 {
1604 	struct service_to_pipe *svc_map;
1605 	uint32_t map_sz, map_len;
1606 	int i;
1607 
1608 	if (ce_state) {
1609 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
1610 					       &map_sz);
1611 
1612 		map_len = map_sz / sizeof(struct service_to_pipe);
1613 		for (i = 0; i < map_len; i++) {
1614 			if ((svc_map[i].pipenum == ce_state->id) &&
1615 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
1616 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
1617 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC)) &&
1618 			    (svc_map[i].pipedir == PIPEDIR_IN))
1619 				ce_state->htt_rx_data = true;
1620 			else if ((svc_map[i].pipenum == ce_state->id) &&
1621 				 (svc_map[i].service_id == HTT_DATA2_MSG_SVC) &&
1622 				 (svc_map[i].pipedir == PIPEDIR_OUT))
1623 				ce_state->htt_tx_data = true;
1624 		}
1625 	}
1626 
1627 	return (ce_state->htt_rx_data || ce_state->htt_tx_data);
1628 }
1629 
1630 static void ce_update_msi_batch_intr_flags(struct CE_state *ce_state)
1631 {
1632 	ce_state->msi_supported = true;
1633 	ce_state->batch_intr_supported = true;
1634 }
1635 
1636 static inline void ce_update_wrt_idx_offset(struct hif_softc *scn,
1637 					    struct CE_state *ce_state,
1638 					    uint8_t ring_type)
1639 {
1640 	if (ring_type == CE_RING_SRC)
1641 		ce_state->ce_wrt_idx_offset =
1642 			CE_SRC_WR_IDX_OFFSET_GET(scn, ce_state->ctrl_addr);
1643 	else if (ring_type == CE_RING_DEST)
1644 		ce_state->ce_wrt_idx_offset =
1645 			CE_DST_WR_IDX_OFFSET_GET(scn, ce_state->ctrl_addr);
1646 	else
1647 		QDF_BUG(0);
1648 }
1649 
1650 /*
1651  * hif_ce_print_ring_stats() - Print ce ring statistics
1652  *
1653  * @hif_ctx: hif context
1654  *
1655  * Returns: None
1656  */
1657 void hif_ce_print_ring_stats(struct hif_opaque_softc *hif_ctx)
1658 {
1659 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1660 	struct CE_state *ce_state;
1661 	int i;
1662 
1663 	for (i = 0; i < scn->ce_count; i++) {
1664 		ce_state = scn->ce_id_to_state[i];
1665 		if (!ce_state)
1666 			continue;
1667 
1668 		if (ce_state->src_ring) {
1669 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
1670 				  "ce%d:SW: sw_index %u write_index %u",
1671 				  ce_state->src_ring->sw_index,
1672 				  ce_state->src_ring->write_index);
1673 
1674 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
1675 				  "ce%d:HW: read_index %u write_index %u",
1676 				  CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ce_state->ctrl_addr),
1677 				  CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ce_state->ctrl_addr));
1678 		}
1679 
1680 		if (ce_state->dest_ring) {
1681 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
1682 				  "ce%d:SW: sw_index %u write_index %u",
1683 				  ce_state->dest_ring->sw_index,
1684 				  ce_state->dest_ring->write_index);
1685 
1686 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
1687 				  "ce%d:HW: read_index %u write_index %u",
1688 				  CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ce_state->ctrl_addr),
1689 				  CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ce_state->ctrl_addr));
1690 		}
1691 	}
1692 }
1693 #endif
1694 
1695 /**
1696  * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
1697  * @hif_ctx: hif opaque handle
1698  *
1699  * Description:
1700  *   Gets number of WMI EPs configured in target svc map. Since EP map
1701  *   include IN and OUT direction pipes, count only OUT pipes to get EPs
1702  *   configured for WMI service.
1703  *
1704  * Return:
1705  *  uint8_t: count for WMI eps in target svc map
1706  */
1707 uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *hif_ctx)
1708 {
1709 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1710 	struct service_to_pipe *svc_map;
1711 	uint32_t map_sz, map_len;
1712 	int    i;
1713 	uint8_t   wmi_ep_count = 0;
1714 
1715 	hif_select_service_to_pipe_map(scn, &svc_map,
1716 				       &map_sz);
1717 	map_len = map_sz / sizeof(struct service_to_pipe);
1718 
1719 	for (i = 0; i < map_len; i++) {
1720 		/* Count number of WMI EPs based on out direction */
1721 		if ((svc_map[i].pipedir == PIPEDIR_OUT) &&
1722 		    ((svc_map[i].service_id == WMI_CONTROL_SVC)  ||
1723 		    (svc_map[i].service_id == WMI_CONTROL_SVC_WMAC1) ||
1724 		    (svc_map[i].service_id == WMI_CONTROL_SVC_WMAC2))) {
1725 			wmi_ep_count++;
1726 		}
1727 	}
1728 
1729 	return wmi_ep_count;
1730 }
1731 
1732 /**
1733  * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
1734  * @ce_id: ce in question
1735  * @ring: ring state being examined
1736  * @type: "src_ring" or "dest_ring" string for identifying the ring
1737  *
1738  * Warns on non-zero index values.
1739  * Causes a kernel panic if the ring is not empty during initialization.
1740  */
1741 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
1742 					 char *type)
1743 {
1744 	if (ring->write_index != 0 || ring->sw_index != 0)
1745 		hif_err("ce %d, %s, initial sw_index = %d, initial write_index =%d",
1746 			  ce_id, type, ring->sw_index, ring->write_index);
1747 	if (ring->write_index != ring->sw_index)
1748 		QDF_BUG(0);
1749 }
1750 
1751 #ifdef IPA_OFFLOAD
1752 /**
1753  * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
1754  * @scn: softc instance
1755  * @CE_id: ce in question
1756  * @base_addr: pointer to copyengine ring base address
1757  * @ce_ring: copyengine instance
1758  * @nentries: number of entries should be allocated
1759  * @desc_size: ce desc size
1760  *
1761  * Return: QDF_STATUS_SUCCESS - for success
1762  */
1763 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1764 				     qdf_dma_addr_t *base_addr,
1765 				     struct CE_ring_state *ce_ring,
1766 				     unsigned int nentries, uint32_t desc_size)
1767 {
1768 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
1769 	    !ce_srng_based(scn)) {
1770 		if (!scn->ipa_ce_ring) {
1771 			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
1772 				scn->qdf_dev,
1773 				nentries * desc_size + CE_DESC_RING_ALIGN);
1774 			if (!scn->ipa_ce_ring) {
1775 				hif_err(
1776 				"Failed to allocate memory for IPA ce ring");
1777 				return QDF_STATUS_E_NOMEM;
1778 			}
1779 		}
1780 		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
1781 						&scn->ipa_ce_ring->mem_info);
1782 		ce_ring->base_addr_owner_space_unaligned =
1783 						scn->ipa_ce_ring->vaddr;
1784 	} else {
1785 		ce_ring->base_addr_owner_space_unaligned =
1786 			hif_mem_alloc_consistent_unaligned
1787 					(scn,
1788 					 (nentries * desc_size +
1789 					  CE_DESC_RING_ALIGN),
1790 					 base_addr,
1791 					 ce_ring->hal_ring_type,
1792 					 &ce_ring->is_ring_prealloc);
1793 
1794 		if (!ce_ring->base_addr_owner_space_unaligned) {
1795 			hif_err("Failed to allocate DMA memory for ce ring id: %u",
1796 			       CE_id);
1797 			return QDF_STATUS_E_NOMEM;
1798 		}
1799 	}
1800 	return QDF_STATUS_SUCCESS;
1801 }
1802 
1803 /**
1804  * ce_free_desc_ring() - Frees copyengine descriptor ring
1805  * @scn: softc instance
1806  * @CE_id: ce in question
1807  * @ce_ring: copyengine instance
1808  * @desc_size: ce desc size
1809  *
1810  * Return: None
1811  */
1812 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1813 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
1814 {
1815 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
1816 	    !ce_srng_based(scn)) {
1817 		if (scn->ipa_ce_ring) {
1818 			qdf_mem_shared_mem_free(scn->qdf_dev,
1819 						scn->ipa_ce_ring);
1820 			scn->ipa_ce_ring = NULL;
1821 		}
1822 		ce_ring->base_addr_owner_space_unaligned = NULL;
1823 	} else {
1824 		hif_mem_free_consistent_unaligned
1825 			(scn,
1826 			 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
1827 			 ce_ring->base_addr_owner_space_unaligned,
1828 			 ce_ring->base_addr_CE_space, 0,
1829 			 ce_ring->is_ring_prealloc);
1830 		ce_ring->base_addr_owner_space_unaligned = NULL;
1831 	}
1832 }
1833 #else
1834 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1835 				     qdf_dma_addr_t *base_addr,
1836 				     struct CE_ring_state *ce_ring,
1837 				     unsigned int nentries, uint32_t desc_size)
1838 {
1839 	ce_ring->base_addr_owner_space_unaligned =
1840 			hif_mem_alloc_consistent_unaligned
1841 					(scn,
1842 					 (nentries * desc_size +
1843 					  CE_DESC_RING_ALIGN),
1844 					 base_addr,
1845 					 ce_ring->hal_ring_type,
1846 					 &ce_ring->is_ring_prealloc);
1847 
1848 	if (!ce_ring->base_addr_owner_space_unaligned) {
1849 		hif_err("Failed to allocate DMA memory for ce ring id: %u",
1850 		       CE_id);
1851 		return QDF_STATUS_E_NOMEM;
1852 	}
1853 	return QDF_STATUS_SUCCESS;
1854 }
1855 
1856 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1857 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
1858 {
1859 	hif_mem_free_consistent_unaligned
1860 		(scn,
1861 		 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
1862 		 ce_ring->base_addr_owner_space_unaligned,
1863 		 ce_ring->base_addr_CE_space, 0,
1864 		 ce_ring->is_ring_prealloc);
1865 	ce_ring->base_addr_owner_space_unaligned = NULL;
1866 }
1867 #endif /* IPA_OFFLOAD */
1868 
1869 /*
1870  * TODO: Need to explore the possibility of having this as part of a
1871  * target context instead of a global array.
1872  */
1873 static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);
1874 
1875 void ce_service_register_module(enum ce_target_type target_type,
1876 				struct ce_ops* (*ce_attach)(void))
1877 {
1878 	if (target_type < CE_MAX_TARGET_TYPE)
1879 		ce_attach_register[target_type] = ce_attach;
1880 }
1881 
1882 qdf_export_symbol(ce_service_register_module);
1883 
1884 /**
1885  * ce_srng_based() - Does this target use srng
1886  * @scn: pointer to the state context of the CE
1887  *
1888  * Description:
1889  *   returns true if the target is SRNG based
1890  *
1891  * Return:
1892  *  false (attribute set to false)
1893  *  true  (attribute set to true);
1894  */
1895 bool ce_srng_based(struct hif_softc *scn)
1896 {
1897 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1898 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1899 
1900 	switch (tgt_info->target_type) {
1901 	case TARGET_TYPE_QCA8074:
1902 	case TARGET_TYPE_QCA8074V2:
1903 	case TARGET_TYPE_QCA6290:
1904 	case TARGET_TYPE_QCA6390:
1905 	case TARGET_TYPE_QCA6490:
1906 	case TARGET_TYPE_QCA6750:
1907 	case TARGET_TYPE_QCA6018:
1908 	case TARGET_TYPE_QCN9000:
1909 	case TARGET_TYPE_QCN6122:
1910 	case TARGET_TYPE_QCN9160:
1911 	case TARGET_TYPE_QCA5018:
1912 	case TARGET_TYPE_KIWI:
1913 	case TARGET_TYPE_MANGO:
1914 	case TARGET_TYPE_PEACH:
1915 	case TARGET_TYPE_QCN9224:
1916 	case TARGET_TYPE_QCA9574:
1917 	case TARGET_TYPE_QCA5332:
1918 	case TARGET_TYPE_QCN6432:
1919 		return true;
1920 	default:
1921 		return false;
1922 	}
1923 	return false;
1924 }
1925 qdf_export_symbol(ce_srng_based);
1926 
1927 #ifdef QCA_WIFI_SUPPORT_SRNG
1928 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1929 {
1930 	struct ce_ops *ops = NULL;
1931 
1932 	if (ce_srng_based(scn)) {
1933 		if (ce_attach_register[CE_SVC_SRNG])
1934 			ops = ce_attach_register[CE_SVC_SRNG]();
1935 	} else if (ce_attach_register[CE_SVC_LEGACY]) {
1936 		ops = ce_attach_register[CE_SVC_LEGACY]();
1937 	}
1938 
1939 	return ops;
1940 }
1941 
1942 
1943 #else	/* QCA_LITHIUM */
1944 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1945 {
1946 	if (ce_attach_register[CE_SVC_LEGACY])
1947 		return ce_attach_register[CE_SVC_LEGACY]();
1948 
1949 	return NULL;
1950 }
1951 #endif /* QCA_LITHIUM */
1952 
1953 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
1954 		struct pld_shadow_reg_v2_cfg **shadow_config,
1955 		int *num_shadow_registers_configured) {
1956 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1957 
1958 	hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
1959 			scn, shadow_config, num_shadow_registers_configured);
1960 
1961 	return;
1962 }
1963 
1964 #ifdef CONFIG_SHADOW_V3
1965 static inline void
1966 hif_prepare_hal_shadow_reg_cfg_v3(struct hif_softc *scn,
1967 				  struct pld_wlan_enable_cfg *cfg)
1968 {
1969 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1970 
1971 	if (!hif_state->ce_services->ce_prepare_shadow_register_v3_cfg)
1972 		return;
1973 
1974 	hif_state->ce_services->ce_prepare_shadow_register_v3_cfg(
1975 			scn, &cfg->shadow_reg_v3_cfg,
1976 			&cfg->num_shadow_reg_v3_cfg);
1977 }
1978 #else
1979 static inline void
1980 hif_prepare_hal_shadow_reg_cfg_v3(struct hif_softc *scn,
1981 				  struct pld_wlan_enable_cfg *cfg)
1982 {
1983 }
1984 #endif
1985 
1986 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
1987 						uint8_t ring_type)
1988 {
1989 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1990 
1991 	return hif_state->ce_services->ce_get_desc_size(ring_type);
1992 }
1993 
1994 #ifdef QCA_WIFI_SUPPORT_SRNG
1995 static inline int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
1996 {
1997 	switch (ce_ring_type) {
1998 	case CE_RING_SRC:
1999 		return CE_SRC;
2000 	case CE_RING_DEST:
2001 		return CE_DST;
2002 	case CE_RING_STATUS:
2003 		return CE_DST_STATUS;
2004 	default:
2005 		return -EINVAL;
2006 	}
2007 }
2008 #else
2009 static int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
2010 {
2011 	return 0;
2012 }
2013 #endif
2014 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
2015 		uint8_t ring_type, uint32_t nentries)
2016 {
2017 	uint32_t ce_nbytes;
2018 	char *ptr;
2019 	qdf_dma_addr_t base_addr;
2020 	struct CE_ring_state *ce_ring;
2021 	uint32_t desc_size;
2022 	struct hif_softc *scn = CE_state->scn;
2023 
2024 	ce_nbytes = sizeof(struct CE_ring_state)
2025 		+ (nentries * sizeof(void *));
2026 	ptr = qdf_mem_malloc(ce_nbytes);
2027 	if (!ptr)
2028 		return NULL;
2029 
2030 	ce_ring = (struct CE_ring_state *)ptr;
2031 	ptr += sizeof(struct CE_ring_state);
2032 	ce_ring->nentries = nentries;
2033 	ce_ring->nentries_mask = nentries - 1;
2034 
2035 	ce_ring->low_water_mark_nentries = 0;
2036 	ce_ring->high_water_mark_nentries = nentries;
2037 	ce_ring->per_transfer_context = (void **)ptr;
2038 	ce_ring->hal_ring_type = ce_ring_type_to_hal_ring_type(ring_type);
2039 
2040 	desc_size = ce_get_desc_size(scn, ring_type);
2041 
2042 	/* Legacy platforms that do not support cache
2043 	 * coherent DMA are unsupported
2044 	 */
2045 	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
2046 			       ce_ring, nentries,
2047 			       desc_size) !=
2048 	    QDF_STATUS_SUCCESS) {
2049 		hif_err("ring has no DMA mem");
2050 		qdf_mem_free(ce_ring);
2051 		return NULL;
2052 	}
2053 	ce_ring->base_addr_CE_space_unaligned = base_addr;
2054 
2055 	/* Correctly initialize memory to 0 to
2056 	 * prevent garbage data crashing system
2057 	 * when download firmware
2058 	 */
2059 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
2060 			nentries * desc_size +
2061 			CE_DESC_RING_ALIGN);
2062 
2063 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
2064 
2065 		ce_ring->base_addr_CE_space =
2066 			(ce_ring->base_addr_CE_space_unaligned +
2067 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
2068 
2069 		ce_ring->base_addr_owner_space = (void *)
2070 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
2071 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
2072 	} else {
2073 		ce_ring->base_addr_CE_space =
2074 				ce_ring->base_addr_CE_space_unaligned;
2075 		ce_ring->base_addr_owner_space =
2076 				ce_ring->base_addr_owner_space_unaligned;
2077 	}
2078 
2079 	return ce_ring;
2080 }
2081 
2082 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
2083 			uint32_t ce_id, struct CE_ring_state *ring,
2084 			struct CE_attr *attr)
2085 {
2086 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2087 
2088 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
2089 					      ring, attr);
2090 }
2091 
2092 static void ce_srng_cleanup(struct hif_softc *scn, struct CE_state *CE_state,
2093 			    uint8_t ring_type)
2094 {
2095 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2096 
2097 	if (hif_state->ce_services->ce_srng_cleanup)
2098 		hif_state->ce_services->ce_srng_cleanup(scn,
2099 					CE_state, ring_type);
2100 }
2101 
2102 int hif_ce_bus_early_suspend(struct hif_softc *scn)
2103 {
2104 	uint8_t ul_pipe, dl_pipe;
2105 	int ce_id, status, ul_is_polled, dl_is_polled;
2106 	struct CE_state *ce_state;
2107 
2108 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
2109 					 &ul_pipe, &dl_pipe,
2110 					 &ul_is_polled, &dl_is_polled);
2111 	if (status) {
2112 		hif_err("pipe_mapping failure");
2113 		return status;
2114 	}
2115 
2116 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2117 		if (ce_id == ul_pipe)
2118 			continue;
2119 		if (ce_id == dl_pipe)
2120 			continue;
2121 
2122 		ce_state = scn->ce_id_to_state[ce_id];
2123 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
2124 		if (ce_state->state == CE_RUNNING)
2125 			ce_state->state = CE_PAUSED;
2126 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
2127 	}
2128 
2129 	return status;
2130 }
2131 
2132 int hif_ce_bus_late_resume(struct hif_softc *scn)
2133 {
2134 	int ce_id;
2135 	struct CE_state *ce_state;
2136 	int write_index = 0;
2137 	bool index_updated;
2138 
2139 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2140 		ce_state = scn->ce_id_to_state[ce_id];
2141 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
2142 		if (ce_state->state == CE_PENDING) {
2143 			write_index = ce_state->src_ring->write_index;
2144 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
2145 					write_index);
2146 			ce_state->state = CE_RUNNING;
2147 			index_updated = true;
2148 		} else {
2149 			index_updated = false;
2150 		}
2151 
2152 		if (ce_state->state == CE_PAUSED)
2153 			ce_state->state = CE_RUNNING;
2154 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
2155 
2156 		if (index_updated)
2157 			hif_record_ce_desc_event(scn, ce_id,
2158 				RESUME_WRITE_INDEX_UPDATE,
2159 				NULL, NULL, write_index, 0);
2160 	}
2161 
2162 	return 0;
2163 }
2164 
2165 /**
2166  * ce_oom_recovery() - try to recover rx ce from oom condition
2167  * @context: CE_state of the CE with oom rx ring
2168  *
2169  * the executing work Will continue to be rescheduled until
2170  * at least 1 descriptor is successfully posted to the rx ring.
2171  *
2172  * return: none
2173  */
2174 static void ce_oom_recovery(void *context)
2175 {
2176 	struct CE_state *ce_state = context;
2177 	struct hif_softc *scn = ce_state->scn;
2178 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
2179 	struct HIF_CE_pipe_info *pipe_info =
2180 		&ce_softc->pipe_info[ce_state->id];
2181 
2182 	hif_post_recv_buffers_for_pipe(pipe_info);
2183 }
2184 
2185 #ifdef HIF_CE_DEBUG_DATA_BUF
2186 /**
2187  * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
2188  * the CE descriptors.
2189  * Allocate HIF_CE_HISTORY_MAX records by CE_DEBUG_MAX_DATA_BUF_SIZE
2190  * @scn: hif scn handle
2191  * @ce_id: Copy Engine Id
2192  *
2193  * Return: QDF_STATUS
2194  */
2195 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
2196 {
2197 	struct hif_ce_desc_event *event = NULL;
2198 	struct hif_ce_desc_event *hist_ev = NULL;
2199 	uint32_t index = 0;
2200 
2201 	hist_ev =
2202 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
2203 
2204 	if (!hist_ev)
2205 		return QDF_STATUS_E_NOMEM;
2206 
2207 	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
2208 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
2209 		event = &hist_ev[index];
2210 		event->data =
2211 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
2212 		if (!event->data) {
2213 			hif_err_rl("ce debug data alloc failed");
2214 			scn->hif_ce_desc_hist.data_enable[ce_id] = false;
2215 			return QDF_STATUS_E_NOMEM;
2216 		}
2217 	}
2218 	return QDF_STATUS_SUCCESS;
2219 }
2220 
2221 /**
2222  * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
2223  * the CE descriptors.
2224  * @scn: hif scn handle
2225  * @ce_id: Copy Engine Id
2226  *
2227  * Return:
2228  */
2229 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
2230 {
2231 	struct hif_ce_desc_event *event = NULL;
2232 	struct hif_ce_desc_event *hist_ev = NULL;
2233 	uint32_t index = 0;
2234 
2235 	hist_ev =
2236 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
2237 
2238 	if (!hist_ev)
2239 		return;
2240 
2241 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
2242 		event = &hist_ev[index];
2243 		if (event->data)
2244 			qdf_mem_free(event->data);
2245 		event->data = NULL;
2246 		event = NULL;
2247 	}
2248 
2249 }
2250 #endif /* HIF_CE_DEBUG_DATA_BUF */
2251 
2252 #ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
2253 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2254 
2255 /* define below variables for crashscope parse */
2256 struct hif_ce_desc_event *hif_ce_desc_history[CE_COUNT_MAX];
2257 uint32_t hif_ce_history_max = HIF_CE_HISTORY_MAX;
2258 uint32_t hif_ce_count_max = CE_COUNT_MAX;
2259 
2260 /*
2261  * for debug build, it will enable ce history for all ce, but for
2262  * perf build(if CONFIG_SLUB_DEBUG_ON is N), it only enable for
2263  * ce2(wmi event) & ce3(wmi cmd) history.
2264  */
2265 #if defined(CONFIG_SLUB_DEBUG_ON)
2266 #define CE_DESC_HISTORY_BUFF_CNT  CE_COUNT_MAX
2267 #define IS_CE_DEBUG_ONLY_FOR_CRIT_CE  0
2268 #else
2269 /* CE2, CE3, CE7 */
2270 #define CE_DESC_HISTORY_BUFF_CNT  3
2271 #define IS_CE_DEBUG_ONLY_FOR_CRIT_CE (BIT(2) | BIT(3) | BIT(7))
2272 #endif
2273 bool hif_ce_only_for_crit = IS_CE_DEBUG_ONLY_FOR_CRIT_CE;
2274 struct hif_ce_desc_event
2275 	hif_ce_desc_history_buff[CE_DESC_HISTORY_BUFF_CNT][HIF_CE_HISTORY_MAX];
2276 
2277 static void
2278 __hif_ce_desc_history_log_register(struct hif_softc *scn)
2279 {
2280 	qdf_ssr_driver_dump_register_region("hif_ce_desc_history_buff",
2281 					    hif_ce_desc_history_buff,
2282 					    sizeof(hif_ce_desc_history_buff));
2283 	qdf_ssr_driver_dump_register_region("hif_ce_desc_hist",
2284 					    &scn->hif_ce_desc_hist,
2285 					    sizeof(scn->hif_ce_desc_hist));
2286 	qdf_ssr_driver_dump_register_region("hif_ce_count_max",
2287 					    &hif_ce_count_max,
2288 					    sizeof(hif_ce_count_max));
2289 	qdf_ssr_driver_dump_register_region("hif_ce_history_max",
2290 					    &hif_ce_history_max,
2291 					    sizeof(hif_ce_history_max));
2292 	qdf_ssr_driver_dump_register_region("hif_ce_only_for_crit",
2293 					    &hif_ce_only_for_crit,
2294 					    sizeof(hif_ce_only_for_crit));
2295 }
2296 
2297 static void __hif_ce_desc_history_log_unregister(void)
2298 {
2299 	qdf_ssr_driver_dump_unregister_region("hif_ce_only_for_crit");
2300 	qdf_ssr_driver_dump_unregister_region("hif_ce_history_max");
2301 	qdf_ssr_driver_dump_unregister_region("hif_ce_count_max");
2302 	qdf_ssr_driver_dump_unregister_region("hif_ce_desc_hist");
2303 	qdf_ssr_driver_dump_unregister_region("hif_ce_desc_history_buff");
2304 }
2305 
2306 static struct hif_ce_desc_event *
2307 	hif_ce_debug_history_buf_get(struct hif_softc *scn, unsigned int ce_id)
2308 {
2309 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2310 
2311 	hif_debug("get ce debug buffer ce_id %u, only_ce2/ce3=0x%lx, idx=%u",
2312 		  ce_id, IS_CE_DEBUG_ONLY_FOR_CRIT_CE,
2313 		  ce_hist->ce_id_hist_map[ce_id]);
2314 	if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE &&
2315 	    (ce_id == CE_ID_2 || ce_id == CE_ID_3 || ce_id == CE_ID_7)) {
2316 		uint8_t idx = ce_hist->ce_id_hist_map[ce_id];
2317 
2318 		hif_ce_desc_history[ce_id] = hif_ce_desc_history_buff[idx];
2319 	} else {
2320 		hif_ce_desc_history[ce_id] =
2321 			hif_ce_desc_history_buff[ce_id];
2322 	}
2323 
2324 	return hif_ce_desc_history[ce_id];
2325 }
2326 
2327 /**
2328  * alloc_mem_ce_debug_history() - Allocate CE descriptor history
2329  * @scn: hif scn handle
2330  * @ce_id: Copy Engine Id
2331  * @src_nentries: source ce ring entries
2332  * Return: QDF_STATUS
2333  */
2334 static QDF_STATUS
2335 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
2336 			   uint32_t src_nentries)
2337 {
2338 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2339 	QDF_STATUS status = QDF_STATUS_SUCCESS;
2340 
2341 	/* For perf build, return directly for non ce2/ce3 */
2342 	if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE &&
2343 	    ce_id != CE_ID_2 &&
2344 	    ce_id != CE_ID_3 &&
2345 	    ce_id != CE_ID_7) {
2346 		ce_hist->enable[ce_id] = false;
2347 		ce_hist->data_enable[ce_id] = false;
2348 		return QDF_STATUS_SUCCESS;
2349 	}
2350 
2351 	ce_hist->hist_ev[ce_id] = hif_ce_debug_history_buf_get(scn, ce_id);
2352 	ce_hist->enable[ce_id] = true;
2353 
2354 	if (src_nentries) {
2355 		status = alloc_mem_ce_debug_hist_data(scn, ce_id);
2356 		if (status != QDF_STATUS_SUCCESS) {
2357 			ce_hist->enable[ce_id] = false;
2358 			ce_hist->hist_ev[ce_id] = NULL;
2359 			return status;
2360 		}
2361 	} else {
2362 		ce_hist->data_enable[ce_id] = false;
2363 	}
2364 
2365 	return QDF_STATUS_SUCCESS;
2366 }
2367 
2368 /**
2369  * free_mem_ce_debug_history() - Free CE descriptor history
2370  * @scn: hif scn handle
2371  * @ce_id: Copy Engine Id
2372  *
2373  * Return: None
2374  */
2375 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
2376 {
2377 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2378 
2379 	if (!ce_hist->enable[ce_id])
2380 		return;
2381 
2382 	ce_hist->enable[ce_id] = false;
2383 	if (ce_hist->data_enable[ce_id]) {
2384 		ce_hist->data_enable[ce_id] = false;
2385 		free_mem_ce_debug_hist_data(scn, ce_id);
2386 	}
2387 	ce_hist->hist_ev[ce_id] = NULL;
2388 }
2389 #else
2390 
2391 static void
2392 __hif_ce_desc_history_log_register(struct hif_softc *scn)
2393 {
2394 }
2395 
2396 static void __hif_ce_desc_history_log_unregister(void) { }
2397 
2398 static inline QDF_STATUS
2399 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
2400 			   uint32_t src_nentries)
2401 {
2402 	return QDF_STATUS_SUCCESS;
2403 }
2404 
2405 static inline void
2406 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
2407 #endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
2408 #else
2409 #if defined(HIF_CE_DEBUG_DATA_BUF)
2410 
2411 static void
2412 __hif_ce_desc_history_log_register(struct hif_softc *scn)
2413 {
2414 }
2415 
2416 static void __hif_ce_desc_history_log_unregister(void) { }
2417 
2418 static QDF_STATUS
2419 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
2420 			   uint32_t src_nentries)
2421 {
2422 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
2423 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
2424 
2425 	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
2426 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
2427 		return QDF_STATUS_E_NOMEM;
2428 	} else {
2429 		scn->hif_ce_desc_hist.enable[CE_id] = 1;
2430 		return QDF_STATUS_SUCCESS;
2431 	}
2432 }
2433 
2434 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
2435 {
2436 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2437 	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];
2438 
2439 	if (!hist_ev)
2440 		return;
2441 
2442 	if (ce_hist->data_enable[CE_id]) {
2443 		ce_hist->data_enable[CE_id] = false;
2444 		free_mem_ce_debug_hist_data(scn, CE_id);
2445 	}
2446 
2447 	ce_hist->enable[CE_id] = false;
2448 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
2449 	ce_hist->hist_ev[CE_id] = NULL;
2450 }
2451 
2452 #else
2453 
2454 static void
2455 __hif_ce_desc_history_log_register(struct hif_softc *scn)
2456 {
2457 }
2458 
2459 static void __hif_ce_desc_history_log_unregister(void) { }
2460 
2461 static inline QDF_STATUS
2462 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
2463 			   uint32_t src_nentries)
2464 {
2465 	return QDF_STATUS_SUCCESS;
2466 }
2467 
2468 static inline void
2469 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
2470 #endif /* HIF_CE_DEBUG_DATA_BUF */
2471 #endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */
2472 
2473 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2474 /**
2475  * reset_ce_debug_history() - reset the index and ce id used for dumping the
2476  * CE records on the console using sysfs.
2477  * @scn: hif scn handle
2478  *
2479  * Return:
2480  */
2481 static inline void reset_ce_debug_history(struct hif_softc *scn)
2482 {
2483 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2484 	/* Initialise the CE debug history sysfs interface inputs ce_id and
2485 	 * index. Disable data storing
2486 	 */
2487 	ce_hist->hist_index = 0;
2488 	ce_hist->hist_id = 0;
2489 }
2490 #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
2491 static inline void reset_ce_debug_history(struct hif_softc *scn) { }
2492 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
2493 
2494 void ce_enable_polling(void *cestate)
2495 {
2496 	struct CE_state *CE_state = (struct CE_state *)cestate;
2497 
2498 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
2499 		CE_state->timer_inited = true;
2500 }
2501 
2502 void ce_disable_polling(void *cestate)
2503 {
2504 	struct CE_state *CE_state = (struct CE_state *)cestate;
2505 
2506 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
2507 		CE_state->timer_inited = false;
2508 }
2509 
2510 /*
2511  * Initialize a Copy Engine based on caller-supplied attributes.
2512  * This may be called once to initialize both source and destination
2513  * rings or it may be called twice for separate source and destination
2514  * initialization. It may be that only one side or the other is
2515  * initialized by software/firmware.
2516  *
2517  * This should be called during the initialization sequence before
2518  * interrupts are enabled, so we don't have to worry about thread safety.
2519  */
2520 struct CE_handle *ce_init(struct hif_softc *scn,
2521 			  unsigned int CE_id, struct CE_attr *attr)
2522 {
2523 	struct CE_state *CE_state;
2524 	uint32_t ctrl_addr;
2525 	unsigned int nentries;
2526 	bool malloc_CE_state = false;
2527 	bool malloc_src_ring = false;
2528 	int status;
2529 	QDF_STATUS mem_status = QDF_STATUS_SUCCESS;
2530 
2531 	QDF_ASSERT(CE_id < scn->ce_count);
2532 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
2533 	CE_state = scn->ce_id_to_state[CE_id];
2534 
2535 	if (!CE_state) {
2536 		CE_state =
2537 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
2538 		if (!CE_state)
2539 			return NULL;
2540 
2541 		malloc_CE_state = true;
2542 		qdf_spinlock_create(&CE_state->ce_index_lock);
2543 #ifdef CE_TASKLET_SCHEDULE_ON_FULL
2544 		qdf_spinlock_create(&CE_state->ce_interrupt_lock);
2545 #endif
2546 
2547 		CE_state->id = CE_id;
2548 		CE_state->ctrl_addr = ctrl_addr;
2549 		CE_state->state = CE_RUNNING;
2550 		CE_state->attr_flags = attr->flags;
2551 	}
2552 	CE_state->scn = scn;
2553 	CE_state->service = ce_engine_service_reg;
2554 
2555 	qdf_atomic_init(&CE_state->rx_pending);
2556 	if (!attr) {
2557 		/* Already initialized; caller wants the handle */
2558 		return (struct CE_handle *)CE_state;
2559 	}
2560 
2561 	if (CE_state->src_sz_max)
2562 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
2563 	else
2564 		CE_state->src_sz_max = attr->src_sz_max;
2565 
2566 	ce_init_ce_desc_event_log(scn, CE_id,
2567 				  attr->src_nentries + attr->dest_nentries);
2568 
2569 	/* source ring setup */
2570 	nentries = attr->src_nentries;
2571 	if (nentries) {
2572 		struct CE_ring_state *src_ring;
2573 
2574 		nentries = roundup_pwr2(nentries);
2575 		if (CE_state->src_ring) {
2576 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
2577 		} else {
2578 			src_ring = CE_state->src_ring =
2579 				ce_alloc_ring_state(CE_state,
2580 						CE_RING_SRC,
2581 						nentries);
2582 			if (!src_ring) {
2583 				/* cannot allocate src ring. If the
2584 				 * CE_state is allocated locally free
2585 				 * CE_State and return error.
2586 				 */
2587 				hif_err("src ring has no mem");
2588 				if (malloc_CE_state) {
2589 					/* allocated CE_state locally */
2590 					qdf_mem_free(CE_state);
2591 					malloc_CE_state = false;
2592 				}
2593 				return NULL;
2594 			}
2595 			/* we can allocate src ring. Mark that the src ring is
2596 			 * allocated locally
2597 			 */
2598 			malloc_src_ring = true;
2599 
2600 			/*
2601 			 * Also allocate a shadow src ring in
2602 			 * regular mem to use for faster access.
2603 			 */
2604 			src_ring->shadow_base_unaligned =
2605 				qdf_mem_malloc(nentries *
2606 					       sizeof(struct CE_src_desc) +
2607 					       CE_DESC_RING_ALIGN);
2608 			if (!src_ring->shadow_base_unaligned)
2609 				goto error_no_dma_mem;
2610 
2611 			src_ring->shadow_base = (struct CE_src_desc *)
2612 				(((size_t) src_ring->shadow_base_unaligned +
2613 				CE_DESC_RING_ALIGN - 1) &
2614 				 ~(CE_DESC_RING_ALIGN - 1));
2615 
2616 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
2617 					       src_ring, attr);
2618 			if (status < 0)
2619 				goto error_target_access;
2620 			ce_ring_test_initial_indexes(CE_id, src_ring,
2621 						     "src_ring");
2622 			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
2623 				qdf_timer_init(scn->qdf_dev,
2624 					       &CE_state->poll_timer,
2625 					       ce_poll_timeout,
2626 					       CE_state,
2627 					       QDF_TIMER_TYPE_WAKE_APPS);
2628 				ce_enable_polling(CE_state);
2629 				qdf_timer_mod(&CE_state->poll_timer,
2630 					      CE_POLL_TIMEOUT);
2631 			}
2632 		}
2633 	}
2634 
2635 	/* destination ring setup */
2636 	nentries = attr->dest_nentries;
2637 	if (nentries) {
2638 		struct CE_ring_state *dest_ring;
2639 
2640 		nentries = roundup_pwr2(nentries);
2641 		if (CE_state->dest_ring) {
2642 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
2643 		} else {
2644 			dest_ring = CE_state->dest_ring =
2645 				ce_alloc_ring_state(CE_state,
2646 						CE_RING_DEST,
2647 						nentries);
2648 			if (!dest_ring) {
2649 				/* cannot allocate dst ring. If the CE_state
2650 				 * or src ring is allocated locally free
2651 				 * CE_State and src ring and return error.
2652 				 */
2653 				hif_err("dest ring has no mem");
2654 				goto error_no_dma_mem;
2655 			}
2656 
2657 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
2658 				      dest_ring, attr);
2659 			if (status < 0)
2660 				goto error_target_access;
2661 
2662 			ce_ring_test_initial_indexes(CE_id, dest_ring,
2663 						     "dest_ring");
2664 
2665 			/* For srng based target, init status ring here */
2666 			if (ce_srng_based(CE_state->scn)) {
2667 				CE_state->status_ring =
2668 					ce_alloc_ring_state(CE_state,
2669 							CE_RING_STATUS,
2670 							nentries);
2671 				if (!CE_state->status_ring) {
2672 					/*Allocation failed. Cleanup*/
2673 					qdf_mem_free(CE_state->dest_ring);
2674 					if (malloc_src_ring) {
2675 						qdf_mem_free
2676 							(CE_state->src_ring);
2677 						CE_state->src_ring = NULL;
2678 						malloc_src_ring = false;
2679 					}
2680 					if (malloc_CE_state) {
2681 						/* allocated CE_state locally */
2682 						scn->ce_id_to_state[CE_id] =
2683 							NULL;
2684 						qdf_mem_free(CE_state);
2685 						malloc_CE_state = false;
2686 					}
2687 
2688 					return NULL;
2689 				}
2690 
2691 				status = ce_ring_setup(scn, CE_RING_STATUS,
2692 					       CE_id, CE_state->status_ring,
2693 					       attr);
2694 				if (status < 0)
2695 					goto error_target_access;
2696 
2697 			}
2698 
2699 			/* epping */
2700 			/* poll timer */
2701 			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
2702 				qdf_timer_init(scn->qdf_dev,
2703 						&CE_state->poll_timer,
2704 						ce_poll_timeout,
2705 						CE_state,
2706 						QDF_TIMER_TYPE_WAKE_APPS);
2707 				ce_enable_polling(CE_state);
2708 				qdf_timer_mod(&CE_state->poll_timer,
2709 						      CE_POLL_TIMEOUT);
2710 			}
2711 		}
2712 	}
2713 
2714 	if (!ce_srng_based(scn)) {
2715 		/* Enable CE error interrupts */
2716 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2717 			goto error_target_access;
2718 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
2719 		if (Q_TARGET_ACCESS_END(scn) < 0)
2720 			goto error_target_access;
2721 	}
2722 
2723 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
2724 			ce_oom_recovery, CE_state);
2725 
2726 	/* update the htt_data attribute */
2727 	ce_mark_datapath(CE_state);
2728 	scn->ce_id_to_state[CE_id] = CE_state;
2729 
2730 	mem_status = alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries);
2731 	if (mem_status != QDF_STATUS_SUCCESS)
2732 		goto error_target_access;
2733 
2734 	ce_update_msi_batch_intr_flags(CE_state);
2735 	ce_update_wrt_idx_offset(scn, CE_state,
2736 				 attr->src_nentries ?
2737 				 CE_RING_SRC : CE_RING_DEST);
2738 
2739 	return (struct CE_handle *)CE_state;
2740 
2741 error_target_access:
2742 error_no_dma_mem:
2743 	ce_fini((struct CE_handle *)CE_state);
2744 	return NULL;
2745 }
2746 
2747 void hif_ce_desc_history_log_register(struct hif_softc *scn)
2748 {
2749 	__hif_ce_desc_history_log_register(scn);
2750 }
2751 
2752 /**
2753  * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
2754  * @hif_ctx: HIF Context
2755  *
2756  * API to check if polling is enabled on all CEs. Returns true when polling
2757  * is enabled on all CEs.
2758  *
2759  * Return: bool
2760  */
2761 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
2762 {
2763 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2764 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2765 	struct CE_attr *attr;
2766 	int id;
2767 
2768 	for (id = 0; id < scn->ce_count; id++) {
2769 		attr = &hif_state->host_ce_config[id];
2770 		if (attr && (attr->dest_nentries) &&
2771 		    !(attr->flags & CE_ATTR_ENABLE_POLL))
2772 			return false;
2773 	}
2774 	return true;
2775 }
2776 qdf_export_symbol(hif_is_polled_mode_enabled);
2777 
2778 static int hif_get_pktlog_ce_num(struct hif_softc *scn)
2779 {
2780 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2781 	int id;
2782 
2783 	for (id = 0; id < hif_state->sz_tgt_svc_map; id++) {
2784 		if (hif_state->tgt_svc_map[id].service_id ==  PACKET_LOG_SVC)
2785 			return hif_state->tgt_svc_map[id].pipenum;
2786 	}
2787 	return -EINVAL;
2788 }
2789 
2790 #ifdef WLAN_FEATURE_FASTPATH
2791 /**
2792  * hif_enable_fastpath() - Update that we have enabled fastpath mode
2793  * @hif_ctx: HIF context
2794  *
2795  * For use in data path
2796  *
2797  * Return: void
2798  */
2799 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
2800 {
2801 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2802 
2803 	if (ce_srng_based(scn)) {
2804 		hif_warn("srng rings do not support fastpath");
2805 		return;
2806 	}
2807 	hif_debug("Enabling fastpath mode");
2808 	scn->fastpath_mode_on = true;
2809 }
2810 
2811 /**
2812  * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled
2813  * @hif_ctx: HIF Context
2814  *
2815  * For use in data path to skip HTC
2816  *
2817  * Return: bool
2818  */
2819 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
2820 {
2821 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2822 
2823 	return scn->fastpath_mode_on;
2824 }
2825 
2826 /**
2827  * hif_get_ce_handle - API to get CE handle for FastPath mode
2828  * @hif_ctx: HIF Context
2829  * @id: CopyEngine Id
2830  *
2831  * API to return CE handle for fastpath mode
2832  *
2833  * Return: void
2834  */
2835 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
2836 {
2837 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2838 
2839 	return scn->ce_id_to_state[id];
2840 }
2841 qdf_export_symbol(hif_get_ce_handle);
2842 
2843 /**
2844  * ce_h2t_tx_ce_cleanup() - Place holder function for H2T CE cleanup.
2845  * No processing is required inside this function.
2846  * @ce_hdl: Cope engine handle
2847  * Using an assert, this function makes sure that,
2848  * the TX CE has been processed completely.
2849  *
2850  * This is called while dismantling CE structures. No other thread
2851  * should be using these structures while dismantling is occurring
2852  * therefore no locking is needed.
2853  *
2854  * Return: none
2855  */
2856 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
2857 {
2858 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
2859 	struct CE_ring_state *src_ring = ce_state->src_ring;
2860 	struct hif_softc *sc = ce_state->scn;
2861 	uint32_t sw_index, write_index;
2862 
2863 	if (hif_is_nss_wifi_enabled(sc))
2864 		return;
2865 
2866 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
2867 		hif_debug("Fastpath mode ON, Cleaning up HTT Tx CE");
2868 		sw_index = src_ring->sw_index;
2869 		write_index = src_ring->sw_index;
2870 
2871 		/* At this point Tx CE should be clean */
2872 		qdf_assert_always(sw_index == write_index);
2873 	}
2874 }
2875 
2876 /**
2877  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
2878  * @ce_hdl: Handle to CE
2879  *
2880  * These buffers are never allocated on the fly, but
2881  * are allocated only once during HIF start and freed
2882  * only once during HIF stop.
2883  * NOTE:
2884  * The assumption here is there is no in-flight DMA in progress
2885  * currently, so that buffers can be freed up safely.
2886  *
2887  * Return: NONE
2888  */
2889 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
2890 {
2891 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
2892 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
2893 	qdf_nbuf_t nbuf;
2894 	int i;
2895 
2896 	if (ce_state->scn->fastpath_mode_on == false)
2897 		return;
2898 
2899 	if (!ce_state->htt_rx_data)
2900 		return;
2901 
2902 	/*
2903 	 * when fastpath_mode is on and for datapath CEs. Unlike other CE's,
2904 	 * this CE is completely full: does not leave one blank space, to
2905 	 * distinguish between empty queue & full queue. So free all the
2906 	 * entries.
2907 	 */
2908 	for (i = 0; i < dst_ring->nentries; i++) {
2909 		nbuf = dst_ring->per_transfer_context[i];
2910 
2911 		/*
2912 		 * The reasons for doing this check are:
2913 		 * 1) Protect against calling cleanup before allocating buffers
2914 		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
2915 		 *    could have a partially filled ring, because of a memory
2916 		 *    allocation failure in the middle of allocating ring.
2917 		 *    This check accounts for that case, checking
2918 		 *    fastpath_mode_on flag or started flag would not have
2919 		 *    covered that case. This is not in performance path,
2920 		 *    so OK to do this.
2921 		 */
2922 		if (nbuf) {
2923 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
2924 					      QDF_DMA_FROM_DEVICE);
2925 			qdf_nbuf_free(nbuf);
2926 		}
2927 	}
2928 }
2929 
2930 /**
2931  * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
2932  * @scn: HIF handle
2933  *
2934  * Datapath Rx CEs are special case, where we reuse all the message buffers.
2935  * Hence we have to post all the entries in the pipe, even, in the beginning
2936  * unlike for other CE pipes where one less than dest_nentries are filled in
2937  * the beginning.
2938  *
2939  * Return: None
2940  */
2941 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
2942 {
2943 	int pipe_num;
2944 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2945 
2946 	if (scn->fastpath_mode_on == false)
2947 		return;
2948 
2949 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2950 		struct HIF_CE_pipe_info *pipe_info =
2951 			&hif_state->pipe_info[pipe_num];
2952 		struct CE_state *ce_state =
2953 			scn->ce_id_to_state[pipe_info->pipe_num];
2954 
2955 		if (ce_state->htt_rx_data)
2956 			atomic_inc(&pipe_info->recv_bufs_needed);
2957 	}
2958 }
2959 #else
2960 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
2961 {
2962 }
2963 
2964 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
2965 {
2966 	return false;
2967 }
2968 #endif /* WLAN_FEATURE_FASTPATH */
2969 
2970 void ce_fini(struct CE_handle *copyeng)
2971 {
2972 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2973 	unsigned int CE_id = CE_state->id;
2974 	struct hif_softc *scn = CE_state->scn;
2975 	uint32_t desc_size;
2976 
2977 	bool inited = CE_state->timer_inited;
2978 	CE_state->state = CE_UNUSED;
2979 	scn->ce_id_to_state[CE_id] = NULL;
2980 	/* Set the flag to false first to stop processing in ce_poll_timeout */
2981 	ce_disable_polling(CE_state);
2982 
2983 	qdf_lro_deinit(CE_state->lro_data);
2984 
2985 	if (CE_state->src_ring) {
2986 		/* Cleanup the datapath Tx ring */
2987 		ce_h2t_tx_ce_cleanup(copyeng);
2988 
2989 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
2990 		if (CE_state->src_ring->shadow_base_unaligned)
2991 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
2992 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
2993 			ce_free_desc_ring(scn, CE_state->id,
2994 					  CE_state->src_ring,
2995 					  desc_size);
2996 		ce_srng_cleanup(scn, CE_state, CE_RING_SRC);
2997 		qdf_mem_free(CE_state->src_ring);
2998 	}
2999 	if (CE_state->dest_ring) {
3000 		/* Cleanup the datapath Rx ring */
3001 		ce_t2h_msg_ce_cleanup(copyeng);
3002 
3003 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
3004 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
3005 			ce_free_desc_ring(scn, CE_state->id,
3006 					  CE_state->dest_ring,
3007 					  desc_size);
3008 		ce_srng_cleanup(scn, CE_state, CE_RING_DEST);
3009 		qdf_mem_free(CE_state->dest_ring);
3010 
3011 		/* epping */
3012 		if (inited) {
3013 			qdf_timer_free(&CE_state->poll_timer);
3014 		}
3015 	}
3016 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
3017 		/* Cleanup the datapath Tx ring */
3018 		ce_h2t_tx_ce_cleanup(copyeng);
3019 
3020 		if (CE_state->status_ring->shadow_base_unaligned)
3021 			qdf_mem_free(
3022 				CE_state->status_ring->shadow_base_unaligned);
3023 
3024 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
3025 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
3026 			ce_free_desc_ring(scn, CE_state->id,
3027 					  CE_state->status_ring,
3028 					  desc_size);
3029 		ce_srng_cleanup(scn, CE_state, CE_RING_STATUS);
3030 		qdf_mem_free(CE_state->status_ring);
3031 	}
3032 
3033 	free_mem_ce_debug_history(scn, CE_id);
3034 	reset_ce_debug_history(scn);
3035 	ce_deinit_ce_desc_event_log(scn, CE_id);
3036 
3037 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
3038 #ifdef CE_TASKLET_SCHEDULE_ON_FULL
3039 	qdf_spinlock_destroy(&CE_state->ce_interrupt_lock);
3040 #endif
3041 	qdf_mem_free(CE_state);
3042 }
3043 
3044 void hif_ce_desc_history_log_unregister(void)
3045 {
3046 	__hif_ce_desc_history_log_unregister();
3047 }
3048 
3049 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
3050 {
3051 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
3052 
3053 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
3054 		  sizeof(hif_state->msg_callbacks_pending));
3055 	qdf_mem_zero(&hif_state->msg_callbacks_current,
3056 		  sizeof(hif_state->msg_callbacks_current));
3057 }
3058 
3059 /* Send the first nbytes bytes of the buffer */
3060 QDF_STATUS
3061 hif_send_head(struct hif_opaque_softc *hif_ctx,
3062 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
3063 	      qdf_nbuf_t nbuf, unsigned int data_attr)
3064 {
3065 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3066 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
3067 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3068 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3069 	int bytes = nbytes, nfrags = 0;
3070 	struct ce_sendlist sendlist;
3071 	int i = 0;
3072 	QDF_STATUS status;
3073 	unsigned int mux_id = 0;
3074 
3075 	if (nbytes > qdf_nbuf_len(nbuf)) {
3076 		hif_err("nbytes: %d nbuf_len: %d", nbytes,
3077 		       (uint32_t)qdf_nbuf_len(nbuf));
3078 		QDF_ASSERT(0);
3079 	}
3080 
3081 	transfer_id =
3082 		(mux_id & MUX_ID_MASK) |
3083 		(transfer_id & TRANSACTION_ID_MASK);
3084 	data_attr &= DESC_DATA_FLAG_MASK;
3085 	/*
3086 	 * The common case involves sending multiple fragments within a
3087 	 * single download (the tx descriptor and the tx frame header).
3088 	 * So, optimize for the case of multiple fragments by not even
3089 	 * checking whether it's necessary to use a sendlist.
3090 	 * The overhead of using a sendlist for a single buffer download
3091 	 * is not a big deal, since it happens rarely (for WMI messages).
3092 	 */
3093 	ce_sendlist_init(&sendlist);
3094 	do {
3095 		qdf_dma_addr_t frag_paddr;
3096 		int frag_bytes;
3097 
3098 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
3099 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
3100 		/*
3101 		 * Clear the packet offset for all but the first CE desc.
3102 		 */
3103 		if (i++ > 0)
3104 			data_attr &= ~CE_DESC_PKT_OFFSET_BIT_M;
3105 
3106 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
3107 				    frag_bytes >
3108 				    bytes ? bytes : frag_bytes,
3109 				    qdf_nbuf_get_frag_is_wordstream
3110 				    (nbuf,
3111 				    nfrags) ? 0 :
3112 				    CE_SEND_FLAG_SWAP_DISABLE,
3113 				    data_attr);
3114 		if (status != QDF_STATUS_SUCCESS) {
3115 			hif_err("frag_num: %d larger than limit (status=%d)",
3116 			       nfrags, status);
3117 			return status;
3118 		}
3119 		bytes -= frag_bytes;
3120 		nfrags++;
3121 	} while (bytes > 0);
3122 
3123 	/* Make sure we have resources to handle this request */
3124 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
3125 	if (pipe_info->num_sends_allowed < nfrags) {
3126 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
3127 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
3128 		return QDF_STATUS_E_RESOURCES;
3129 	}
3130 	pipe_info->num_sends_allowed -= nfrags;
3131 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
3132 
3133 	if (qdf_unlikely(!ce_hdl)) {
3134 		hif_err("CE handle is null");
3135 		return A_ERROR;
3136 	}
3137 
3138 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
3139 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
3140 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
3141 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
3142 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
3143 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
3144 
3145 	return status;
3146 }
3147 
3148 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
3149 								int force)
3150 {
3151 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3152 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
3153 
3154 	if (!force) {
3155 		int resources;
3156 		/*
3157 		 * Decide whether to actually poll for completions, or just
3158 		 * wait for a later chance. If there seem to be plenty of
3159 		 * resources left, then just wait, since checking involves
3160 		 * reading a CE register, which is a relatively expensive
3161 		 * operation.
3162 		 */
3163 		resources = hif_get_free_queue_number(hif_ctx, pipe);
3164 		/*
3165 		 * If at least 50% of the total resources are still available,
3166 		 * don't bother checking again yet.
3167 		 */
3168 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
3169 									 1))
3170 			return;
3171 	}
3172 #ifdef ATH_11AC_TXCOMPACT
3173 	ce_per_engine_servicereap(scn, pipe);
3174 #else
3175 	ce_per_engine_service(scn, pipe);
3176 #endif
3177 }
3178 
3179 #ifdef CUSTOM_CB_SCHEDULER_SUPPORT
3180 QDF_STATUS
3181 hif_register_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
3182 			  void (*custom_cb)(void *), void *custom_cb_context)
3183 {
3184 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3185 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3186 	struct HIF_CE_pipe_info *pipe_info;
3187 
3188 	if (pipe >= CE_COUNT_MAX)
3189 		return QDF_STATUS_E_INVAL;
3190 
3191 	pipe_info = &hif_state->pipe_info[pipe];
3192 	ce_register_custom_cb(pipe_info->ce_hdl, custom_cb, custom_cb_context);
3193 
3194 	return QDF_STATUS_SUCCESS;
3195 }
3196 
3197 QDF_STATUS
3198 hif_unregister_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
3199 {
3200 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3201 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3202 	struct HIF_CE_pipe_info *pipe_info;
3203 
3204 	if (pipe >= CE_COUNT_MAX)
3205 		return QDF_STATUS_E_INVAL;
3206 
3207 	pipe_info = &hif_state->pipe_info[pipe];
3208 	ce_unregister_custom_cb(pipe_info->ce_hdl);
3209 
3210 	return QDF_STATUS_SUCCESS;
3211 }
3212 
3213 QDF_STATUS
3214 hif_enable_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
3215 {
3216 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3217 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3218 	struct HIF_CE_pipe_info *pipe_info;
3219 
3220 	if (pipe >= CE_COUNT_MAX)
3221 		return QDF_STATUS_E_INVAL;
3222 
3223 	pipe_info = &hif_state->pipe_info[pipe];
3224 	ce_enable_custom_cb(pipe_info->ce_hdl);
3225 	ce_dispatch_interrupt(pipe, &hif_state->tasklets[pipe]);
3226 
3227 	return QDF_STATUS_SUCCESS;
3228 }
3229 
3230 QDF_STATUS
3231 hif_disable_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
3232 {
3233 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3234 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3235 	struct HIF_CE_pipe_info *pipe_info;
3236 
3237 	if (pipe >= CE_COUNT_MAX)
3238 		return QDF_STATUS_E_INVAL;
3239 
3240 	pipe_info = &hif_state->pipe_info[pipe];
3241 	ce_disable_custom_cb(pipe_info->ce_hdl);
3242 
3243 	return QDF_STATUS_SUCCESS;
3244 }
3245 #endif /* CUSTOM_CB_SCHEDULER_SUPPORT */
3246 
3247 #if defined(CE_TASKLET_SCHEDULE_ON_FULL) && defined(CE_TASKLET_DEBUG_ENABLE)
3248 #define CE_RING_FULL_THRESHOLD_TIME 3000000
3249 #define CE_RING_FULL_THRESHOLD 1024
3250 /* This function is called from htc_send path. If there is no resourse to send
3251  * packet via HTC, then check if interrupts are not processed from that
3252  * CE for last 3 seconds. If so, schedule a tasklet to reap available entries.
3253  * Also if Queue has reached 1024 entries within 3 seconds, then also schedule
3254  * tasklet.
3255  */
3256 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
3257 {
3258 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
3259 	int64_t diff_time = qdf_get_log_timestamp_usecs() -
3260 			hif_state->stats.tasklet_sched_entry_ts[pipe];
3261 
3262 	hif_state->stats.ce_ring_full_count[pipe]++;
3263 
3264 	if (diff_time >= CE_RING_FULL_THRESHOLD_TIME ||
3265 	    hif_state->stats.ce_ring_full_count[pipe] >=
3266 	    CE_RING_FULL_THRESHOLD) {
3267 		hif_state->stats.ce_ring_full_count[pipe] = 0;
3268 		hif_state->stats.ce_manual_tasklet_schedule_count[pipe]++;
3269 		hif_state->stats.ce_last_manual_tasklet_schedule_ts[pipe] =
3270 			qdf_get_log_timestamp_usecs();
3271 		ce_dispatch_interrupt(pipe, &hif_state->tasklets[pipe]);
3272 	}
3273 }
3274 #else
3275 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
3276 {
3277 }
3278 #endif
3279 
3280 uint16_t
3281 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
3282 {
3283 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
3284 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3285 	uint16_t rv;
3286 
3287 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
3288 	rv = pipe_info->num_sends_allowed;
3289 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
3290 	return rv;
3291 }
3292 
3293 /* Called by lower (CE) layer when a send to Target completes. */
3294 static void
3295 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
3296 		     void *transfer_context, qdf_dma_addr_t CE_data,
3297 		     unsigned int nbytes, unsigned int transfer_id,
3298 		     unsigned int sw_index, unsigned int hw_index,
3299 		     unsigned int toeplitz_hash_result)
3300 {
3301 	struct HIF_CE_pipe_info *pipe_info =
3302 		(struct HIF_CE_pipe_info *)ce_context;
3303 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
3304 	struct hif_msg_callbacks *msg_callbacks =
3305 		&pipe_info->pipe_callbacks;
3306 
3307 	do {
3308 		/*
3309 		 * The upper layer callback will be triggered
3310 		 * when last fragment is complteted.
3311 		 */
3312 		if (transfer_context != CE_SENDLIST_ITEM_CTXT)
3313 			msg_callbacks->txCompletionHandler(
3314 				msg_callbacks->Context,
3315 				transfer_context, transfer_id,
3316 				toeplitz_hash_result);
3317 
3318 		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
3319 		pipe_info->num_sends_allowed++;
3320 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
3321 	} while (ce_completed_send_next(copyeng,
3322 			&ce_context, &transfer_context,
3323 			&CE_data, &nbytes, &transfer_id,
3324 			&sw_idx, &hw_idx,
3325 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
3326 }
3327 
3328 #ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE
3329 
3330 #define HIF_CE_RX_NBUF_WMI_POOL_SIZE 32
3331 
3332 static qdf_nbuf_t hif_ce_rx_nbuf_alloc(struct hif_softc *scn, uint8_t ce_id)
3333 {
3334 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3335 	struct HIF_CE_pipe_info *pipe_info = &hif_state->pipe_info[ce_id];
3336 	qdf_nbuf_t nbuf;
3337 
3338 	nbuf = wbuff_buff_get(scn->wbuff_handle, ce_id, 0, __func__,
3339 			      __LINE__);
3340 	if (!nbuf)
3341 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, pipe_info->buf_sz,
3342 				      0, 4, false);
3343 
3344 	if (!nbuf)
3345 		return NULL;
3346 
3347 	return nbuf;
3348 }
3349 
3350 static void hif_ce_rx_nbuf_free(qdf_nbuf_t nbuf)
3351 {
3352 	nbuf = wbuff_buff_put(nbuf);
3353 	if (nbuf)
3354 		qdf_nbuf_free(nbuf);
3355 }
3356 
3357 static int
3358 hif_calc_wbuff_pool_size(struct hif_softc *scn, struct CE_state *ce_state)
3359 {
3360 	int ul_is_polled, dl_is_polled;
3361 	bool is_wmi_svc, wmi_diag_svc;
3362 	uint8_t ul_pipe, dl_pipe;
3363 	int pool_size;
3364 	int status;
3365 	int ce_id;
3366 
3367 	if (!ce_state)
3368 		return 0;
3369 
3370 	ce_id = ce_state->id;
3371 
3372 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
3373 					 &ul_pipe, &dl_pipe,
3374 					 &ul_is_polled, &dl_is_polled);
3375 	is_wmi_svc = !status && (dl_pipe == ce_id);
3376 
3377 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
3378 					 WMI_CONTROL_DIAG_SVC,
3379 					 &ul_pipe, &dl_pipe,
3380 					 &ul_is_polled, &dl_is_polled);
3381 	wmi_diag_svc = !status;
3382 
3383 	if (is_wmi_svc && !wmi_diag_svc)
3384 		pool_size = ce_state->dest_ring->nentries +
3385 			HIF_CE_RX_NBUF_WMI_POOL_SIZE;
3386 	else if (is_wmi_svc && wmi_diag_svc)
3387 		pool_size = ce_state->dest_ring->nentries +
3388 			HIF_CE_RX_NBUF_WMI_POOL_SIZE / 2;
3389 	else if (!is_wmi_svc && wmi_diag_svc && ce_id == dl_pipe)
3390 		pool_size = ce_state->dest_ring->nentries +
3391 			HIF_CE_RX_NBUF_WMI_POOL_SIZE / 2;
3392 	else
3393 		pool_size = ce_state->dest_ring->nentries;
3394 
3395 	return pool_size;
3396 }
3397 
3398 static void hif_ce_rx_wbuff_register(struct hif_softc *scn)
3399 {
3400 	struct wbuff_alloc_request wbuff_alloc[CE_COUNT_MAX] = {0};
3401 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3402 	struct HIF_CE_pipe_info *pipe_info;
3403 	struct CE_state *ce_state;
3404 	int ce_id;
3405 
3406 	for (ce_id = 0; ce_id <  scn->ce_count; ce_id++) {
3407 		pipe_info = &hif_state->pipe_info[ce_id];
3408 		ce_state = scn->ce_id_to_state[ce_id];
3409 
3410 		if (!pipe_info->buf_sz)
3411 			continue;
3412 
3413 		/* Only RX CEs need WBUFF registration. recv_bufs_needed
3414 		 * contains valid count for RX CEs during init time.
3415 		 */
3416 		if (!atomic_read(&pipe_info->recv_bufs_needed))
3417 			continue;
3418 
3419 		if (ce_is_fastpath_enabled(scn) &&
3420 		    ce_state->htt_rx_data)
3421 			continue;
3422 
3423 		wbuff_alloc[ce_id].pool_id = ce_id;
3424 		wbuff_alloc[ce_id].buffer_size = pipe_info->buf_sz;
3425 		wbuff_alloc[ce_id].pool_size =
3426 				hif_calc_wbuff_pool_size(scn, ce_state);
3427 	}
3428 
3429 	scn->wbuff_handle =
3430 		wbuff_module_register(wbuff_alloc, CE_COUNT_MAX, 0, 4,
3431 				      WBUFF_MODULE_CE_RX);
3432 }
3433 
3434 static void hif_ce_rx_wbuff_deregister(struct hif_softc *scn)
3435 {
3436 	wbuff_module_deregister(scn->wbuff_handle);
3437 	scn->wbuff_handle = NULL;
3438 }
3439 #else
3440 static inline qdf_nbuf_t
3441 hif_ce_rx_nbuf_alloc(struct hif_softc *scn, uint8_t ce_id)
3442 {
3443 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3444 	struct HIF_CE_pipe_info *pipe_info = &hif_state->pipe_info[ce_id];
3445 
3446 	return qdf_nbuf_alloc(scn->qdf_dev, pipe_info->buf_sz, 0, 4, false);
3447 }
3448 
3449 static inline void hif_ce_rx_nbuf_free(qdf_nbuf_t nbuf)
3450 {
3451 	return qdf_nbuf_free(nbuf);
3452 }
3453 
3454 static inline void hif_ce_rx_wbuff_register(struct hif_softc *scn)
3455 {
3456 }
3457 
3458 static inline void hif_ce_rx_wbuff_deregister(struct hif_softc *scn)
3459 {
3460 }
3461 #endif /* WLAN_FEATURE_CE_RX_BUFFER_REUSE */
3462 
3463 /**
3464  * hif_ce_do_recv(): send message from copy engine to upper layers
3465  * @msg_callbacks: structure containing callback and callback context
3466  * @netbuf: skb containing message
3467  * @nbytes: number of bytes in the message
3468  * @pipe_info: used for the pipe_number info
3469  *
3470  * Checks the packet length, configures the length in the netbuff,
3471  * and calls the upper layer callback.
3472  *
3473  * return: None
3474  */
3475 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
3476 		qdf_nbuf_t netbuf, int nbytes,
3477 		struct HIF_CE_pipe_info *pipe_info) {
3478 	if (nbytes <= pipe_info->buf_sz) {
3479 		qdf_nbuf_set_pktlen(netbuf, nbytes);
3480 		msg_callbacks->
3481 			rxCompletionHandler(msg_callbacks->Context,
3482 					netbuf, pipe_info->pipe_num);
3483 	} else {
3484 		hif_err("Invalid Rx msg buf: %pK nbytes: %d", netbuf, nbytes);
3485 		hif_ce_rx_nbuf_free(netbuf);
3486 	}
3487 }
3488 
3489 /* Called by lower (CE) layer when data is received from the Target. */
3490 static void
3491 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
3492 		     void *transfer_context, qdf_dma_addr_t CE_data,
3493 		     unsigned int nbytes, unsigned int transfer_id,
3494 		     unsigned int flags)
3495 {
3496 	struct HIF_CE_pipe_info *pipe_info =
3497 		(struct HIF_CE_pipe_info *)ce_context;
3498 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
3499 	struct CE_state *ce_state = (struct CE_state *) copyeng;
3500 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3501 	struct hif_msg_callbacks *msg_callbacks = &pipe_info->pipe_callbacks;
3502 
3503 	do {
3504 		hif_rtpm_record_ce_last_busy_evt(scn, ce_state->id);
3505 		hif_rtpm_mark_last_busy(HIF_RTPM_ID_CE);
3506 		qdf_nbuf_unmap_single(scn->qdf_dev,
3507 				      (qdf_nbuf_t) transfer_context,
3508 				      QDF_DMA_FROM_DEVICE);
3509 
3510 		atomic_inc(&pipe_info->recv_bufs_needed);
3511 		hif_post_recv_buffers_for_pipe(pipe_info);
3512 		if (scn->target_status == TARGET_STATUS_RESET)
3513 			hif_ce_rx_nbuf_free(transfer_context);
3514 		else
3515 			hif_ce_do_recv(msg_callbacks, transfer_context,
3516 				nbytes, pipe_info);
3517 
3518 		/* Set up force_break flag if num of receices reaches
3519 		 * MAX_NUM_OF_RECEIVES
3520 		 */
3521 		ce_state->receive_count++;
3522 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
3523 			ce_state->force_break = 1;
3524 			break;
3525 		}
3526 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
3527 					&CE_data, &nbytes, &transfer_id,
3528 					&flags) == QDF_STATUS_SUCCESS);
3529 
3530 }
3531 
3532 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
3533 
3534 void
3535 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
3536 	      struct hif_msg_callbacks *callbacks)
3537 {
3538 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
3539 
3540 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
3541 	spin_lock_init(&pcie_access_log_lock);
3542 #endif
3543 	/* Save callbacks for later installation */
3544 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
3545 		 sizeof(hif_state->msg_callbacks_pending));
3546 
3547 }
3548 
3549 static int hif_completion_thread_startup_by_ceid(struct HIF_CE_state *hif_state,
3550 						 int pipe_num)
3551 {
3552 	struct CE_attr attr;
3553 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3554 	struct hif_msg_callbacks *hif_msg_callbacks =
3555 		&hif_state->msg_callbacks_current;
3556 	struct HIF_CE_pipe_info *pipe_info;
3557 	struct CE_state *ce_state;
3558 
3559 	if (pipe_num >= CE_COUNT_MAX)
3560 		return -EINVAL;
3561 
3562 	pipe_info = &hif_state->pipe_info[pipe_num];
3563 	ce_state = scn->ce_id_to_state[pipe_num];
3564 
3565 	if (!hif_msg_callbacks ||
3566 	    !hif_msg_callbacks->rxCompletionHandler ||
3567 	    !hif_msg_callbacks->txCompletionHandler) {
3568 		hif_err("no completion handler registered");
3569 		return -EFAULT;
3570 	}
3571 
3572 	attr = hif_state->host_ce_config[pipe_num];
3573 	if (attr.src_nentries) {
3574 		/* pipe used to send to target */
3575 		hif_debug("pipe_num:%d pipe_info:0x%pK\n",
3576 			  pipe_num, pipe_info);
3577 		ce_send_cb_register(pipe_info->ce_hdl,
3578 				    hif_pci_ce_send_done, pipe_info,
3579 				    attr.flags & CE_ATTR_DISABLE_INTR);
3580 		pipe_info->num_sends_allowed = attr.src_nentries - 1;
3581 	}
3582 	if (attr.dest_nentries) {
3583 		hif_debug("pipe_num:%d pipe_info:0x%pK\n",
3584 			  pipe_num, pipe_info);
3585 		/* pipe used to receive from target */
3586 		ce_recv_cb_register(pipe_info->ce_hdl,
3587 				    hif_pci_ce_recv_data, pipe_info,
3588 				    attr.flags & CE_ATTR_DISABLE_INTR);
3589 	}
3590 
3591 	if (attr.src_nentries)
3592 		qdf_spinlock_create(&pipe_info->completion_freeq_lock);
3593 
3594 	if (!(ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND))
3595 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
3596 			     sizeof(pipe_info->pipe_callbacks));
3597 
3598 	return 0;
3599 }
3600 
3601 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
3602 {
3603 	struct CE_handle *ce_diag = hif_state->ce_diag;
3604 	int pipe_num, ret;
3605 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3606 
3607 	/* daemonize("hif_compl_thread"); */
3608 
3609 	if (scn->ce_count == 0) {
3610 		hif_err("ce_count is 0");
3611 		return -EINVAL;
3612 	}
3613 
3614 
3615 	A_TARGET_ACCESS_LIKELY(scn);
3616 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3617 		struct HIF_CE_pipe_info *pipe_info;
3618 
3619 		pipe_info = &hif_state->pipe_info[pipe_num];
3620 		if (pipe_info->ce_hdl == ce_diag)
3621 			continue;       /* Handle Diagnostic CE specially */
3622 
3623 		ret = hif_completion_thread_startup_by_ceid(hif_state,
3624 							    pipe_num);
3625 		if (ret < 0)
3626 			return ret;
3627 
3628 	}
3629 
3630 	A_TARGET_ACCESS_UNLIKELY(scn);
3631 	return 0;
3632 }
3633 
3634 /*
3635  * Install pending msg callbacks.
3636  *
3637  * TBDXXX: This hack is needed because upper layers install msg callbacks
3638  * for use with HTC before BMI is done; yet this HIF implementation
3639  * needs to continue to use BMI msg callbacks. Really, upper layers
3640  * should not register HTC callbacks until AFTER BMI phase.
3641  */
3642 static void hif_msg_callbacks_install(struct hif_softc *scn)
3643 {
3644 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3645 
3646 	qdf_mem_copy(&hif_state->msg_callbacks_current,
3647 		 &hif_state->msg_callbacks_pending,
3648 		 sizeof(hif_state->msg_callbacks_pending));
3649 }
3650 
3651 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
3652 							uint8_t *DLPipe)
3653 {
3654 	int ul_is_polled, dl_is_polled;
3655 
3656 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
3657 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
3658 }
3659 
3660 /**
3661  * hif_dump_pipe_debug_count() - Log error count
3662  * @scn: hif_softc pointer.
3663  *
3664  * Output the pipe error counts of each pipe to log file
3665  *
3666  * Return: N/A
3667  */
3668 void hif_dump_pipe_debug_count(struct hif_softc *scn)
3669 {
3670 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3671 	int pipe_num;
3672 
3673 	if (!hif_state) {
3674 		hif_err("hif_state is NULL");
3675 		return;
3676 	}
3677 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3678 		struct HIF_CE_pipe_info *pipe_info;
3679 
3680 	pipe_info = &hif_state->pipe_info[pipe_num];
3681 
3682 	if (pipe_info->nbuf_alloc_err_count > 0 ||
3683 			pipe_info->nbuf_dma_err_count > 0 ||
3684 			pipe_info->nbuf_ce_enqueue_err_count)
3685 		hif_err(
3686 			"pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
3687 			pipe_info->pipe_num,
3688 			atomic_read(&pipe_info->recv_bufs_needed),
3689 			pipe_info->nbuf_alloc_err_count,
3690 			pipe_info->nbuf_dma_err_count,
3691 			pipe_info->nbuf_ce_enqueue_err_count);
3692 	}
3693 }
3694 
3695 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
3696 					  void *nbuf, uint32_t *error_cnt,
3697 					  enum hif_ce_event_type failure_type,
3698 					  const char *failure_type_string)
3699 {
3700 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
3701 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
3702 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
3703 	int ce_id = CE_state->id;
3704 	uint32_t error_cnt_tmp;
3705 
3706 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3707 	error_cnt_tmp = ++(*error_cnt);
3708 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3709 	hif_debug("pipe_num: %d, needed: %d, err_cnt: %u, fail_type: %s",
3710 		  pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
3711 		  failure_type_string);
3712 	hif_record_ce_desc_event(scn, ce_id, failure_type,
3713 				 NULL, nbuf, bufs_needed_tmp, 0);
3714 	/* if we fail to allocate the last buffer for an rx pipe,
3715 	 *	there is no trigger to refill the ce and we will
3716 	 *	eventually crash
3717 	 */
3718 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1 ||
3719 	    (ce_srng_based(scn) &&
3720 	     bufs_needed_tmp == CE_state->dest_ring->nentries - 2))
3721 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
3722 
3723 }
3724 
3725 
3726 
3727 
3728 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
3729 {
3730 	struct CE_handle *ce_hdl;
3731 	qdf_size_t buf_sz;
3732 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
3733 	QDF_STATUS status;
3734 	uint32_t bufs_posted = 0;
3735 	unsigned int ce_id;
3736 
3737 	buf_sz = pipe_info->buf_sz;
3738 	if (buf_sz == 0) {
3739 		/* Unused Copy Engine */
3740 		return QDF_STATUS_SUCCESS;
3741 	}
3742 
3743 	ce_hdl = pipe_info->ce_hdl;
3744 	if (!ce_hdl) {
3745 		hif_err("ce_hdl is NULL");
3746 		return QDF_STATUS_E_INVAL;
3747 	}
3748 
3749 	ce_id = ((struct CE_state *)ce_hdl)->id;
3750 
3751 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3752 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
3753 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
3754 		qdf_nbuf_t nbuf;
3755 
3756 		atomic_dec(&pipe_info->recv_bufs_needed);
3757 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3758 
3759 		hif_record_ce_desc_event(scn, ce_id,
3760 					 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL,
3761 					 0, 0);
3762 		nbuf = hif_ce_rx_nbuf_alloc(scn, ce_id);
3763 		if (!nbuf) {
3764 			hif_post_recv_buffers_failure(pipe_info, nbuf,
3765 					&pipe_info->nbuf_alloc_err_count,
3766 					 HIF_RX_NBUF_ALLOC_FAILURE,
3767 					"HIF_RX_NBUF_ALLOC_FAILURE");
3768 			return QDF_STATUS_E_NOMEM;
3769 		}
3770 
3771 		hif_record_ce_desc_event(scn, ce_id,
3772 					 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf,
3773 					 0, 0);
3774 		/*
3775 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
3776 		 * CE_data = dma_map_single(dev, data, buf_sz, );
3777 		 * DMA_FROM_DEVICE);
3778 		 */
3779 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
3780 					    QDF_DMA_FROM_DEVICE);
3781 
3782 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
3783 			hif_post_recv_buffers_failure(pipe_info, nbuf,
3784 					&pipe_info->nbuf_dma_err_count,
3785 					 HIF_RX_NBUF_MAP_FAILURE,
3786 					"HIF_RX_NBUF_MAP_FAILURE");
3787 			hif_ce_rx_nbuf_free(nbuf);
3788 			return status;
3789 		}
3790 
3791 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
3792 		hif_record_ce_desc_event(scn, ce_id,
3793 					 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf,
3794 					 0, 0);
3795 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
3796 					       buf_sz, DMA_FROM_DEVICE);
3797 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
3798 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
3799 			hif_post_recv_buffers_failure(pipe_info, nbuf,
3800 					&pipe_info->nbuf_ce_enqueue_err_count,
3801 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
3802 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
3803 
3804 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
3805 						QDF_DMA_FROM_DEVICE);
3806 			hif_ce_rx_nbuf_free(nbuf);
3807 			return status;
3808 		}
3809 
3810 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3811 		bufs_posted++;
3812 	}
3813 	pipe_info->nbuf_alloc_err_count =
3814 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
3815 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
3816 	pipe_info->nbuf_dma_err_count =
3817 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
3818 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
3819 	pipe_info->nbuf_ce_enqueue_err_count =
3820 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
3821 	pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
3822 
3823 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3824 
3825 	return QDF_STATUS_SUCCESS;
3826 }
3827 
3828 #ifdef FEATURE_DIRECT_LINK
3829 static QDF_STATUS
3830 hif_alloc_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
3831 					  int pipe_num)
3832 {
3833 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
3834 	struct service_to_pipe *tgt_svc_cfg;
3835 	struct HIF_CE_pipe_info *pipe_info;
3836 	int32_t recv_bufs_needed;
3837 	qdf_dma_addr_t dma_addr;
3838 	uint16_t num_elem_per_page;
3839 	uint16_t i;
3840 	bool is_found = false;
3841 
3842 	tgt_svc_cfg = hif_ce_state->tgt_svc_map;
3843 
3844 	for (i = 0; i < hif_ce_state->sz_tgt_svc_map; i++) {
3845 		if (tgt_svc_cfg[i].service_id != LPASS_DATA_MSG_SVC ||
3846 		    tgt_svc_cfg[i].pipedir != PIPEDIR_IN ||
3847 		    tgt_svc_cfg[i].pipenum != pipe_num)
3848 			continue;
3849 
3850 		pipe_info = &hif_ce_state->pipe_info[pipe_num];
3851 		recv_bufs_needed = atomic_read(&pipe_info->recv_bufs_needed);
3852 
3853 		if (!pipe_info->buf_sz || !recv_bufs_needed)
3854 			continue;
3855 
3856 		is_found = true;
3857 		break;
3858 	}
3859 
3860 	if (!is_found)
3861 		return QDF_STATUS_E_NOSUPPORT;
3862 
3863 	scn->dl_recv_pipe_num = pipe_num;
3864 
3865 	hif_prealloc_get_multi_pages(scn, QDF_DP_RX_DIRECT_LINK_CE_BUF_TYPE,
3866 				     pipe_info->buf_sz, recv_bufs_needed,
3867 				     &scn->dl_recv_pages, false);
3868 	if (!scn->dl_recv_pages.num_pages)
3869 		return QDF_STATUS_E_NOMEM;
3870 
3871 	num_elem_per_page = scn->dl_recv_pages.num_element_per_page;
3872 	for (i = 0; i < recv_bufs_needed; i++) {
3873 		dma_addr = scn->dl_recv_pages.dma_pages[i / num_elem_per_page].page_p_addr;
3874 		dma_addr += (i % num_elem_per_page) * pipe_info->buf_sz;
3875 		ce_recv_buf_enqueue(pipe_info->ce_hdl, NULL, dma_addr);
3876 	}
3877 
3878 	return QDF_STATUS_SUCCESS;
3879 }
3880 
3881 static QDF_STATUS
3882 hif_free_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
3883 					 int pipe_num)
3884 {
3885 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
3886 
3887 	if (pipe_num != scn->dl_recv_pipe_num)
3888 		return QDF_STATUS_E_NOSUPPORT;
3889 
3890 	hif_prealloc_put_multi_pages(scn, QDF_DP_RX_DIRECT_LINK_CE_BUF_TYPE,
3891 				     &scn->dl_recv_pages, false);
3892 
3893 	return QDF_STATUS_SUCCESS;
3894 }
3895 #else
3896 static inline QDF_STATUS
3897 hif_alloc_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
3898 					  int pipe_num)
3899 {
3900 	return QDF_STATUS_E_NOSUPPORT;
3901 }
3902 
3903 static inline QDF_STATUS
3904 hif_free_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
3905 					 int pipe_num)
3906 {
3907 	return QDF_STATUS_E_NOSUPPORT;
3908 }
3909 #endif
3910 
3911 /*
3912  * Try to post all desired receive buffers for all pipes.
3913  * Returns 0 for non fastpath rx copy engine as
3914  * oom_allocation_work will be scheduled to recover any
3915  * failures, non-zero if unable to completely replenish
3916  * receive buffers for fastpath rx Copy engine.
3917  */
3918 static QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
3919 {
3920 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3921 	int pipe_num;
3922 	struct CE_state *ce_state = NULL;
3923 	QDF_STATUS qdf_status;
3924 
3925 	A_TARGET_ACCESS_LIKELY(scn);
3926 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3927 		struct HIF_CE_pipe_info *pipe_info;
3928 
3929 		if (pipe_num >= CE_COUNT_MAX) {
3930 			A_TARGET_ACCESS_UNLIKELY(scn);
3931 			return QDF_STATUS_E_INVAL;
3932 		}
3933 
3934 		ce_state = scn->ce_id_to_state[pipe_num];
3935 		pipe_info = &hif_state->pipe_info[pipe_num];
3936 
3937 		if (!ce_state)
3938 			continue;
3939 
3940 		/* Do not init dynamic CEs, during initial load */
3941 		if (ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND)
3942 			continue;
3943 
3944 		if (hif_is_nss_wifi_enabled(scn) &&
3945 		    ce_state && (ce_state->htt_rx_data))
3946 			continue;
3947 
3948 		qdf_status =
3949 			hif_alloc_pages_for_direct_link_recv_pipe(hif_state,
3950 								  pipe_num);
3951 		if (QDF_IS_STATUS_SUCCESS(qdf_status))
3952 			continue;
3953 
3954 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
3955 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
3956 			ce_state->htt_rx_data &&
3957 			scn->fastpath_mode_on) {
3958 			A_TARGET_ACCESS_UNLIKELY(scn);
3959 			return qdf_status;
3960 		}
3961 	}
3962 
3963 	A_TARGET_ACCESS_UNLIKELY(scn);
3964 
3965 	return QDF_STATUS_SUCCESS;
3966 }
3967 
3968 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
3969 {
3970 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3971 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3972 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
3973 
3974 	hif_update_fastpath_recv_bufs_cnt(scn);
3975 
3976 	hif_msg_callbacks_install(scn);
3977 
3978 	if (hif_completion_thread_startup(hif_state))
3979 		return QDF_STATUS_E_FAILURE;
3980 
3981 	hif_ce_rx_wbuff_register(scn);
3982 
3983 	/* enable buffer cleanup */
3984 	hif_state->started = true;
3985 
3986 	/* Post buffers once to start things off. */
3987 	qdf_status = hif_post_recv_buffers(scn);
3988 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
3989 		/* cleanup is done in hif_ce_disable */
3990 		hif_err("Failed to post buffers");
3991 		return qdf_status;
3992 	}
3993 
3994 	return qdf_status;
3995 }
3996 
3997 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
3998 {
3999 	struct hif_softc *scn;
4000 	struct CE_handle *ce_hdl;
4001 	uint32_t buf_sz;
4002 	struct HIF_CE_state *hif_state;
4003 	qdf_nbuf_t netbuf;
4004 	qdf_dma_addr_t CE_data;
4005 	void *per_CE_context;
4006 	QDF_STATUS status;
4007 
4008 	buf_sz = pipe_info->buf_sz;
4009 	/* Unused Copy Engine */
4010 	if (buf_sz == 0)
4011 		return;
4012 
4013 
4014 	hif_state = pipe_info->HIF_CE_state;
4015 	if (!hif_state->started)
4016 		return;
4017 
4018 	scn = HIF_GET_SOFTC(hif_state);
4019 	ce_hdl = pipe_info->ce_hdl;
4020 
4021 	if (!scn->qdf_dev)
4022 		return;
4023 
4024 	status = hif_free_pages_for_direct_link_recv_pipe(hif_state,
4025 							  pipe_info->pipe_num);
4026 	if (QDF_IS_STATUS_SUCCESS(status))
4027 		return;
4028 
4029 	while (ce_revoke_recv_next
4030 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
4031 			&CE_data) == QDF_STATUS_SUCCESS) {
4032 		if (netbuf) {
4033 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
4034 					      QDF_DMA_FROM_DEVICE);
4035 			hif_ce_rx_nbuf_free(netbuf);
4036 		}
4037 	}
4038 }
4039 
4040 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
4041 {
4042 	struct CE_handle *ce_hdl;
4043 	struct HIF_CE_state *hif_state;
4044 	struct hif_softc *scn;
4045 	qdf_nbuf_t netbuf;
4046 	void *per_CE_context;
4047 	qdf_dma_addr_t CE_data;
4048 	unsigned int nbytes;
4049 	unsigned int id;
4050 	uint32_t buf_sz;
4051 	uint32_t toeplitz_hash_result;
4052 
4053 	buf_sz = pipe_info->buf_sz;
4054 	if (buf_sz == 0) {
4055 		/* Unused Copy Engine */
4056 		return;
4057 	}
4058 
4059 	hif_state = pipe_info->HIF_CE_state;
4060 	if (!hif_state->started) {
4061 		return;
4062 	}
4063 
4064 	scn = HIF_GET_SOFTC(hif_state);
4065 
4066 	ce_hdl = pipe_info->ce_hdl;
4067 
4068 	while (ce_cancel_send_next
4069 		       (ce_hdl, &per_CE_context,
4070 		       (void **)&netbuf, &CE_data, &nbytes,
4071 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
4072 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
4073 			/*
4074 			 * Packets enqueued by htt_h2t_ver_req_msg() and
4075 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
4076 			 * freed in htt_htc_misc_pkt_pool_free() in
4077 			 * wlantl_close(), so do not free them here again
4078 			 * by checking whether it's the endpoint
4079 			 * which they are queued in.
4080 			 */
4081 			if (id == scn->htc_htt_tx_endpoint)
4082 				return;
4083 			/* Indicate the completion to higher
4084 			 * layer to free the buffer
4085 			 */
4086 			if (pipe_info->pipe_callbacks.txCompletionHandler)
4087 				pipe_info->pipe_callbacks.
4088 				    txCompletionHandler(pipe_info->
4089 					    pipe_callbacks.Context,
4090 					    netbuf, id, toeplitz_hash_result);
4091 		}
4092 	}
4093 }
4094 
4095 /*
4096  * Cleanup residual buffers for device shutdown:
4097  *    buffers that were enqueued for receive
4098  *    buffers that were to be sent
4099  * Note: Buffers that had completed but which were
4100  * not yet processed are on a completion queue. They
4101  * are handled when the completion thread shuts down.
4102  */
4103 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
4104 {
4105 	int pipe_num;
4106 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
4107 	struct CE_state *ce_state;
4108 
4109 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
4110 		struct HIF_CE_pipe_info *pipe_info;
4111 
4112 		ce_state = scn->ce_id_to_state[pipe_num];
4113 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
4114 				((ce_state->htt_tx_data) ||
4115 				 (ce_state->htt_rx_data))) {
4116 			continue;
4117 		}
4118 
4119 		pipe_info = &hif_state->pipe_info[pipe_num];
4120 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
4121 		hif_send_buffer_cleanup_on_pipe(pipe_info);
4122 	}
4123 }
4124 
4125 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
4126 {
4127 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4128 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4129 
4130 	hif_buffer_cleanup(hif_state);
4131 }
4132 
4133 static void hif_destroy_oom_work(struct hif_softc *scn)
4134 {
4135 	struct CE_state *ce_state;
4136 	int ce_id;
4137 
4138 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
4139 		ce_state = scn->ce_id_to_state[ce_id];
4140 		if (ce_state)
4141 			qdf_destroy_work(scn->qdf_dev,
4142 					 &ce_state->oom_allocation_work);
4143 	}
4144 }
4145 
4146 void hif_ce_stop(struct hif_softc *scn)
4147 {
4148 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4149 	int pipe_num;
4150 
4151 	/*
4152 	 * before cleaning up any memory, ensure irq &
4153 	 * bottom half contexts will not be re-entered
4154 	 */
4155 	hif_disable_isr(&scn->osc);
4156 	hif_destroy_oom_work(scn);
4157 	scn->hif_init_done = false;
4158 
4159 	/*
4160 	 * At this point, asynchronous threads are stopped,
4161 	 * The Target should not DMA nor interrupt, Host code may
4162 	 * not initiate anything more.  So we just need to clean
4163 	 * up Host-side state.
4164 	 */
4165 
4166 	if (scn->athdiag_procfs_inited) {
4167 		athdiag_procfs_remove();
4168 		scn->athdiag_procfs_inited = false;
4169 	}
4170 
4171 	hif_buffer_cleanup(hif_state);
4172 	hif_ce_rx_wbuff_deregister(scn);
4173 
4174 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
4175 		struct HIF_CE_pipe_info *pipe_info;
4176 		struct CE_attr attr;
4177 		struct CE_handle *ce_diag = hif_state->ce_diag;
4178 
4179 		pipe_info = &hif_state->pipe_info[pipe_num];
4180 		if (pipe_info->ce_hdl) {
4181 			if (pipe_info->ce_hdl != ce_diag &&
4182 			    hif_state->started) {
4183 				attr = hif_state->host_ce_config[pipe_num];
4184 				if (attr.src_nentries)
4185 					qdf_spinlock_destroy(&pipe_info->
4186 							completion_freeq_lock);
4187 			}
4188 			ce_fini(pipe_info->ce_hdl);
4189 			pipe_info->ce_hdl = NULL;
4190 			pipe_info->buf_sz = 0;
4191 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
4192 		}
4193 	}
4194 
4195 	if (hif_state->sleep_timer_init) {
4196 		qdf_timer_stop(&hif_state->sleep_timer);
4197 		qdf_timer_free(&hif_state->sleep_timer);
4198 		hif_state->sleep_timer_init = false;
4199 	}
4200 
4201 	hif_state->started = false;
4202 }
4203 
4204 #ifdef CONFIG_SHADOW_V3
4205 void hif_preare_shadow_register_cfg_v3(struct hif_softc *scn)
4206 {
4207 	int shadow_cfg_idx = scn->num_shadow_registers_configured;
4208 	int i;
4209 
4210 	/* shadow reg config for CE SRC registers */
4211 	for (i = 0; i < scn->ce_count; i++) {
4212 		scn->shadow_regs[shadow_cfg_idx].addr =
4213 				CE_BASE_ADDRESS(i) + SR_WR_INDEX_ADDRESS;
4214 		shadow_cfg_idx++;
4215 	}
4216 
4217 	/* shadow reg config for CE DST registers */
4218 	for (i = 0; i < scn->ce_count; i++) {
4219 		scn->shadow_regs[shadow_cfg_idx].addr =
4220 				CE_BASE_ADDRESS(i) + DST_WR_INDEX_ADDRESS;
4221 		shadow_cfg_idx++;
4222 	}
4223 
4224 	scn->num_shadow_registers_configured = shadow_cfg_idx;
4225 }
4226 
4227 void hif_get_shadow_reg_config_v3(struct hif_softc *scn,
4228 				  struct pld_shadow_reg_v3_cfg **shadow_config,
4229 				  int *num_shadow_registers_configured)
4230 {
4231 	*shadow_config = scn->shadow_regs;
4232 	*num_shadow_registers_configured =
4233 				scn->num_shadow_registers_configured;
4234 }
4235 #endif
4236 
4237 static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
4238 				   struct shadow_reg_cfg
4239 				   **target_shadow_reg_cfg_ret,
4240 				   uint32_t *shadow_cfg_sz_ret)
4241 {
4242 	if (target_shadow_reg_cfg_ret)
4243 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
4244 	if (shadow_cfg_sz_ret)
4245 		*shadow_cfg_sz_ret = shadow_cfg_sz;
4246 }
4247 
4248 /**
4249  * hif_get_target_ce_config() - get copy engine configuration
4250  * @scn: HIF context
4251  * @target_ce_config_ret: basic copy engine configuration
4252  * @target_ce_config_sz_ret: size of the basic configuration in bytes
4253  * @target_service_to_ce_map_ret: service mapping for the copy engines
4254  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
4255  * @target_shadow_reg_cfg_ret: shadow register configuration
4256  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
4257  *
4258  * providing accessor to these values outside of this file.
4259  * currently these are stored in static pointers to const sections.
4260  * there are multiple configurations that are selected from at compile time.
4261  * Runtime selection would need to consider mode, target type and bus type.
4262  *
4263  * Return: return by parameter.
4264  */
4265 void hif_get_target_ce_config(struct hif_softc *scn,
4266 		struct CE_pipe_config **target_ce_config_ret,
4267 		uint32_t *target_ce_config_sz_ret,
4268 		struct service_to_pipe **target_service_to_ce_map_ret,
4269 		uint32_t *target_service_to_ce_map_sz_ret,
4270 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
4271 		uint32_t *shadow_cfg_sz_ret)
4272 {
4273 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4274 
4275 	*target_ce_config_ret = hif_state->target_ce_config;
4276 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
4277 
4278 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
4279 				       target_service_to_ce_map_sz_ret);
4280 	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
4281 			       shadow_cfg_sz_ret);
4282 }
4283 
4284 #ifdef CONFIG_SHADOW_V3
4285 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
4286 {
4287 	int i;
4288 
4289 	hif_info("v3: num_config %d", cfg->num_shadow_reg_v3_cfg);
4290 	for (i = 0; i < cfg->num_shadow_reg_v3_cfg; i++)
4291 		hif_info("i %d, val %x", i, cfg->shadow_reg_v3_cfg[i].addr);
4292 }
4293 
4294 #elif defined(CONFIG_SHADOW_V2)
4295 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
4296 {
4297 	int i;
4298 
4299 	hif_info("v2: num_config %d", cfg->num_shadow_reg_v2_cfg);
4300 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++)
4301 		hif_info("i %d, val %x", i, cfg->shadow_reg_v2_cfg[i].addr);
4302 }
4303 
4304 #else
4305 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
4306 {
4307 	hif_info("CONFIG_SHADOW V2/V3 not defined");
4308 }
4309 #endif
4310 
4311 #ifdef ADRASTEA_RRI_ON_DDR
4312 /**
4313  * hif_get_src_ring_read_index(): Called to get the SRRI
4314  *
4315  * @scn: hif_softc pointer
4316  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
4317  *
4318  * This function returns the SRRI to the caller. For CEs that
4319  * dont have interrupts enabled, we look at the DDR based SRRI
4320  *
4321  * Return: SRRI
4322  */
4323 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
4324 		uint32_t CE_ctrl_addr)
4325 {
4326 	struct CE_attr attr;
4327 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4328 
4329 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
4330 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
4331 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
4332 	} else {
4333 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
4334 			return A_TARGET_READ(scn,
4335 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
4336 		else
4337 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
4338 					CE_ctrl_addr);
4339 	}
4340 }
4341 
4342 /**
4343  * hif_get_dst_ring_read_index(): Called to get the DRRI
4344  *
4345  * @scn: hif_softc pointer
4346  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
4347  *
4348  * This function returns the DRRI to the caller. For CEs that
4349  * dont have interrupts enabled, we look at the DDR based DRRI
4350  *
4351  * Return: DRRI
4352  */
4353 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
4354 		uint32_t CE_ctrl_addr)
4355 {
4356 	struct CE_attr attr;
4357 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4358 
4359 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
4360 
4361 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
4362 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
4363 	} else {
4364 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
4365 			return A_TARGET_READ(scn,
4366 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
4367 		else
4368 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
4369 					CE_ctrl_addr);
4370 	}
4371 }
4372 
4373 /**
4374  * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
4375  * @scn: hif_softc pointer
4376  *
4377  * Return: qdf status
4378  */
4379 static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
4380 {
4381 	qdf_dma_addr_t paddr_rri_on_ddr = 0;
4382 
4383 	scn->vaddr_rri_on_ddr =
4384 		(void *)qdf_mem_alloc_consistent(scn->qdf_dev,
4385 		scn->qdf_dev->dev, RRI_ON_DDR_MEM_SIZE,
4386 		&paddr_rri_on_ddr);
4387 
4388 	if (!scn->vaddr_rri_on_ddr) {
4389 		hif_err("dmaable page alloc fail");
4390 		return QDF_STATUS_E_NOMEM;
4391 	}
4392 
4393 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
4394 
4395 	qdf_mem_zero(scn->vaddr_rri_on_ddr, RRI_ON_DDR_MEM_SIZE);
4396 
4397 	return QDF_STATUS_SUCCESS;
4398 }
4399 #endif
4400 
4401 #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
4402 /**
4403  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
4404  *
4405  * @scn: hif_softc pointer
4406  *
4407  * This function allocates non cached memory on ddr and sends
4408  * the physical address of this memory to the CE hardware. The
4409  * hardware updates the RRI on this particular location.
4410  *
4411  * Return: None
4412  */
4413 #ifdef QCA_WIFI_WCN6450
4414 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
4415 {
4416 	unsigned int i;
4417 	uint32_t high_paddr, low_paddr;
4418 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4419 	struct CE_attr *attr;
4420 
4421 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
4422 		return;
4423 
4424 	low_paddr  = RRI_ON_DDR_PADDR_LOW(scn->paddr_rri_on_ddr);
4425 	high_paddr = RRI_ON_DDR_PADDR_HIGH(scn->paddr_rri_on_ddr);
4426 
4427 	hif_debug("using srri and drri from DDR");
4428 
4429 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
4430 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
4431 
4432 	for (i = 0; i < CE_COUNT; i++) {
4433 		attr = &hif_state->host_ce_config[i];
4434 		if (attr->src_nentries || attr->dest_nentries)
4435 			CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
4436 	}
4437 }
4438 #else
4439 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
4440 {
4441 	unsigned int i;
4442 	uint32_t high_paddr, low_paddr;
4443 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4444 	struct CE_pipe_config *ce_config;
4445 
4446 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
4447 		return;
4448 
4449 	low_paddr  = RRI_ON_DDR_PADDR_LOW(scn->paddr_rri_on_ddr);
4450 	high_paddr = RRI_ON_DDR_PADDR_HIGH(scn->paddr_rri_on_ddr);
4451 
4452 	hif_debug("using srri and drri from DDR");
4453 
4454 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
4455 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
4456 
4457 	for (i = 0; i < CE_COUNT; i++) {
4458 		ce_config = &hif_state->target_ce_config[i];
4459 		/*
4460 		 * For DST channel program both IDX_UPD_EN and
4461 		 * DMAX length(behalf of F.W) at once to avoid
4462 		 * race with F.W register update.
4463 		 */
4464 		if (ce_config->pipedir == PIPEDIR_IN && ce_config->nbytes_max)
4465 			CE_IDX_UPD_EN_DMAX_LEN_SET(scn, CE_BASE_ADDRESS(i),
4466 						   ce_config->nbytes_max);
4467 		else
4468 			CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
4469 	}
4470 }
4471 #endif
4472 
4473 #else
4474 /**
4475  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
4476  *
4477  * @scn: hif_softc pointer
4478  *
4479  * This is a dummy implementation for platforms that don't
4480  * support this functionality.
4481  *
4482  * Return: None
4483  */
4484 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
4485 {
4486 }
4487 #endif
4488 
4489 /**
4490  * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
4491  *                                    QMI command
4492  * @scn: hif context
4493  * @cfg: wlan enable config
4494  *
4495  * In case of Genoa, rri_over_ddr memory configuration is passed
4496  * to firmware through QMI configure command.
4497  */
4498 #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
4499 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
4500 					   struct pld_wlan_enable_cfg *cfg)
4501 {
4502 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
4503 		return;
4504 
4505 	cfg->rri_over_ddr_cfg_valid = true;
4506 	cfg->rri_over_ddr_cfg.base_addr_low =
4507 		 BITS0_TO_31(scn->paddr_rri_on_ddr);
4508 	cfg->rri_over_ddr_cfg.base_addr_high =
4509 		 BITS32_TO_35(scn->paddr_rri_on_ddr);
4510 }
4511 #else
4512 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
4513 					   struct pld_wlan_enable_cfg *cfg)
4514 {
4515 }
4516 #endif
4517 
4518 /**
4519  * hif_wlan_enable(): call the platform driver to enable wlan
4520  * @scn: HIF Context
4521  *
4522  * This function passes the con_mode and CE configuration to
4523  * platform driver to enable wlan.
4524  *
4525  * Return: linux error code
4526  */
4527 int hif_wlan_enable(struct hif_softc *scn)
4528 {
4529 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
4530 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
4531 	struct pld_wlan_enable_cfg cfg = { 0 };
4532 	enum pld_driver_mode mode;
4533 	uint32_t con_mode = hif_get_conparam(scn);
4534 
4535 	hif_get_target_ce_config(scn,
4536 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
4537 			&cfg.num_ce_tgt_cfg,
4538 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
4539 			&cfg.num_ce_svc_pipe_cfg,
4540 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
4541 			&cfg.num_shadow_reg_cfg);
4542 
4543 	/* translate from structure size to array size */
4544 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
4545 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
4546 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
4547 
4548 	switch (tgt_info->target_type) {
4549 	case TARGET_TYPE_KIWI:
4550 	case TARGET_TYPE_MANGO:
4551 	case TARGET_TYPE_PEACH:
4552 	case TARGET_TYPE_WCN6450:
4553 		hif_prepare_hal_shadow_reg_cfg_v3(scn, &cfg);
4554 		break;
4555 	default:
4556 		hif_prepare_hal_shadow_register_cfg(scn,
4557 						    &cfg.shadow_reg_v2_cfg,
4558 						    &cfg.num_shadow_reg_v2_cfg);
4559 		break;
4560 	}
4561 
4562 	hif_print_hal_shadow_register_cfg(&cfg);
4563 
4564 	hif_update_rri_over_ddr_config(scn, &cfg);
4565 
4566 	if (QDF_GLOBAL_FTM_MODE == con_mode)
4567 		mode = PLD_FTM;
4568 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
4569 		mode = PLD_COLDBOOT_CALIBRATION;
4570 	else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode)
4571 		mode = PLD_FTM_COLDBOOT_CALIBRATION;
4572 	else if (QDF_IS_EPPING_ENABLED(con_mode))
4573 		mode = PLD_EPPING;
4574 	else
4575 		mode = PLD_MISSION;
4576 
4577 	if (BYPASS_QMI)
4578 		return 0;
4579 	else
4580 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode);
4581 }
4582 
4583 #ifdef WLAN_FEATURE_EPPING
4584 
4585 #define CE_EPPING_USES_IRQ true
4586 
4587 void hif_ce_prepare_epping_config(struct hif_softc *scn,
4588 				  struct HIF_CE_state *hif_state)
4589 {
4590 	if (CE_EPPING_USES_IRQ)
4591 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
4592 	else
4593 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
4594 	hif_state->target_ce_config = target_ce_config_wlan_epping;
4595 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
4596 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
4597 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
4598 	scn->ce_count = EPPING_HOST_CE_COUNT;
4599 }
4600 #endif
4601 
4602 #ifdef QCN7605_SUPPORT
4603 static inline
4604 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
4605 			       struct HIF_CE_state *hif_state)
4606 {
4607 	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
4608 	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
4609 	hif_state->target_ce_config_sz =
4610 				 sizeof(target_ce_config_wlan_qcn7605);
4611 	target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
4612 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
4613 	scn->ce_count = QCN7605_CE_COUNT;
4614 }
4615 #else
4616 static inline
4617 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
4618 			       struct HIF_CE_state *hif_state)
4619 {
4620 	hif_err("QCN7605 not supported");
4621 }
4622 #endif
4623 
4624 #ifdef CE_SVC_CMN_INIT
4625 #ifdef QCA_WIFI_SUPPORT_SRNG
4626 static inline void hif_ce_service_init(void)
4627 {
4628 	ce_service_srng_init();
4629 }
4630 #else
4631 static inline void hif_ce_service_init(void)
4632 {
4633 	ce_service_legacy_init();
4634 }
4635 #endif
4636 #else
4637 static inline void hif_ce_service_init(void)
4638 {
4639 }
4640 #endif
4641 
4642 #ifdef FEATURE_DIRECT_LINK
4643 /**
4644  * hif_ce_select_config_kiwi() - Select the host and target CE
4645  *  configuration for Kiwi
4646  * @hif_state: HIF CE context
4647  *
4648  * Return: None
4649  */
4650 static inline
4651 void hif_ce_select_config_kiwi(struct HIF_CE_state *hif_state)
4652 {
4653 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif_state);
4654 
4655 	if (pld_is_direct_link_supported(hif_ctx->qdf_dev->dev)) {
4656 		hif_state->host_ce_config =
4657 				host_ce_config_wlan_kiwi_direct_link;
4658 		hif_state->target_ce_config =
4659 				target_ce_config_wlan_kiwi_direct_link;
4660 		hif_state->target_ce_config_sz =
4661 				sizeof(target_ce_config_wlan_kiwi_direct_link);
4662 	} else {
4663 		hif_state->host_ce_config = host_ce_config_wlan_kiwi;
4664 		hif_state->target_ce_config = target_ce_config_wlan_kiwi;
4665 		hif_state->target_ce_config_sz =
4666 				sizeof(target_ce_config_wlan_kiwi);
4667 	}
4668 }
4669 #else
4670 static inline
4671 void hif_ce_select_config_kiwi(struct HIF_CE_state *hif_state)
4672 {
4673 	hif_state->host_ce_config = host_ce_config_wlan_kiwi;
4674 	hif_state->target_ce_config = target_ce_config_wlan_kiwi;
4675 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_kiwi);
4676 }
4677 #endif
4678 
4679 /**
4680  * hif_ce_prepare_config() - load the correct static tables.
4681  * @scn: hif context
4682  *
4683  * Epping uses different static attribute tables than mission mode.
4684  */
4685 void hif_ce_prepare_config(struct hif_softc *scn)
4686 {
4687 	uint32_t mode = hif_get_conparam(scn);
4688 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
4689 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
4690 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4691 	int ret;
4692 	int msi_data_count = 0;
4693 	int msi_data_start = 0;
4694 	int msi_irq_start = 0;
4695 
4696 	hif_ce_service_init();
4697 	hif_state->ce_services = ce_services_attach(scn);
4698 
4699 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
4700 					  &msi_data_count, &msi_data_start,
4701 					  &msi_irq_start);
4702 
4703 	scn->ce_count = HOST_CE_COUNT;
4704 	scn->int_assignment = &ce_int_context[msi_data_count];
4705 	scn->free_irq_done = false;
4706 	/* if epping is enabled we need to use the epping configuration. */
4707 	if (QDF_IS_EPPING_ENABLED(mode)) {
4708 		hif_ce_prepare_epping_config(scn, hif_state);
4709 		return;
4710 	}
4711 
4712 	switch (tgt_info->target_type) {
4713 	default:
4714 		hif_state->host_ce_config = host_ce_config_wlan;
4715 		hif_state->target_ce_config = target_ce_config_wlan;
4716 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
4717 		break;
4718 	case TARGET_TYPE_QCN7605:
4719 		hif_set_ce_config_qcn7605(scn, hif_state);
4720 		break;
4721 	case TARGET_TYPE_AR900B:
4722 	case TARGET_TYPE_QCA9984:
4723 	case TARGET_TYPE_QCA9888:
4724 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
4725 			hif_state->host_ce_config =
4726 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
4727 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
4728 			hif_state->host_ce_config =
4729 				host_lowdesc_ce_cfg_wlan_ar900b;
4730 		} else {
4731 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
4732 		}
4733 
4734 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
4735 		hif_state->target_ce_config_sz =
4736 				sizeof(target_ce_config_wlan_ar900b);
4737 
4738 		break;
4739 
4740 	case TARGET_TYPE_AR9888:
4741 	case TARGET_TYPE_AR9888V2:
4742 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
4743 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
4744 		} else {
4745 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
4746 		}
4747 
4748 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
4749 		hif_state->target_ce_config_sz =
4750 					sizeof(target_ce_config_wlan_ar9888);
4751 
4752 		break;
4753 
4754 	case TARGET_TYPE_QCA8074:
4755 	case TARGET_TYPE_QCA8074V2:
4756 	case TARGET_TYPE_QCA6018:
4757 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
4758 			hif_state->host_ce_config =
4759 					host_ce_config_wlan_qca8074_pci;
4760 			hif_state->target_ce_config =
4761 				target_ce_config_wlan_qca8074_pci;
4762 			hif_state->target_ce_config_sz =
4763 				sizeof(target_ce_config_wlan_qca8074_pci);
4764 		} else {
4765 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
4766 			hif_state->target_ce_config =
4767 					target_ce_config_wlan_qca8074;
4768 			hif_state->target_ce_config_sz =
4769 				sizeof(target_ce_config_wlan_qca8074);
4770 		}
4771 		break;
4772 	case TARGET_TYPE_QCA6290:
4773 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
4774 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
4775 		hif_state->target_ce_config_sz =
4776 					sizeof(target_ce_config_wlan_qca6290);
4777 
4778 		scn->ce_count = QCA_6290_CE_COUNT;
4779 		break;
4780 	case TARGET_TYPE_QCN9000:
4781 		hif_state->host_ce_config = host_ce_config_wlan_qcn9000;
4782 		hif_state->target_ce_config = target_ce_config_wlan_qcn9000;
4783 		hif_state->target_ce_config_sz =
4784 					sizeof(target_ce_config_wlan_qcn9000);
4785 		scn->ce_count = QCN_9000_CE_COUNT;
4786 		scn->ini_cfg.disable_wake_irq = 1;
4787 		break;
4788 	case TARGET_TYPE_QCN9224:
4789 		hif_set_ce_config_qcn9224(scn, hif_state);
4790 		break;
4791 	case TARGET_TYPE_QCA5332:
4792 		hif_state->host_ce_config = host_ce_config_wlan_qca5332;
4793 		hif_state->target_ce_config = target_ce_config_wlan_qca5332;
4794 		hif_state->target_ce_config_sz =
4795 					 sizeof(target_ce_config_wlan_qca5332);
4796 		scn->ce_count = QCA_5332_CE_COUNT;
4797 		break;
4798 	case TARGET_TYPE_QCN6122:
4799 		hif_state->host_ce_config = host_ce_config_wlan_qcn6122;
4800 		hif_state->target_ce_config = target_ce_config_wlan_qcn6122;
4801 		hif_state->target_ce_config_sz =
4802 					sizeof(target_ce_config_wlan_qcn6122);
4803 		scn->ce_count = QCN_6122_CE_COUNT;
4804 		scn->ini_cfg.disable_wake_irq = 1;
4805 		break;
4806 	case TARGET_TYPE_QCN9160:
4807 		hif_state->host_ce_config = host_ce_config_wlan_qcn9160;
4808 		hif_state->target_ce_config = target_ce_config_wlan_qcn9160;
4809 		hif_state->target_ce_config_sz =
4810 					sizeof(target_ce_config_wlan_qcn9160);
4811 		scn->ce_count = QCN_9160_CE_COUNT;
4812 		scn->ini_cfg.disable_wake_irq = 1;
4813 		break;
4814 	case TARGET_TYPE_QCN6432:
4815 		hif_state->host_ce_config = host_ce_config_wlan_qcn6432;
4816 		hif_state->target_ce_config = target_ce_config_wlan_qcn6432;
4817 		hif_state->target_ce_config_sz =
4818 					sizeof(target_ce_config_wlan_qcn6432);
4819 		scn->ce_count = QCN_6432_CE_COUNT;
4820 		scn->ini_cfg.disable_wake_irq = 1;
4821 		break;
4822 	case TARGET_TYPE_QCA5018:
4823 		hif_state->host_ce_config = host_ce_config_wlan_qca5018;
4824 		hif_state->target_ce_config = target_ce_config_wlan_qca5018;
4825 		hif_state->target_ce_config_sz =
4826 					sizeof(target_ce_config_wlan_qca5018);
4827 		scn->ce_count = QCA_5018_CE_COUNT;
4828 		break;
4829 	case TARGET_TYPE_QCA9574:
4830 		hif_state->host_ce_config = host_ce_config_wlan_qca9574;
4831 		hif_state->target_ce_config = target_ce_config_wlan_qca9574;
4832 		hif_state->target_ce_config_sz =
4833 					sizeof(target_ce_config_wlan_qca9574);
4834 		break;
4835 	case TARGET_TYPE_QCA6390:
4836 		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
4837 		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
4838 		hif_state->target_ce_config_sz =
4839 					sizeof(target_ce_config_wlan_qca6390);
4840 
4841 		scn->ce_count = QCA_6390_CE_COUNT;
4842 		break;
4843 	case TARGET_TYPE_QCA6490:
4844 		hif_state->host_ce_config = host_ce_config_wlan_qca6490;
4845 		hif_state->target_ce_config = target_ce_config_wlan_qca6490;
4846 		hif_state->target_ce_config_sz =
4847 					sizeof(target_ce_config_wlan_qca6490);
4848 
4849 		scn->ce_count = QCA_6490_CE_COUNT;
4850 		break;
4851 	case TARGET_TYPE_QCA6750:
4852 		hif_state->host_ce_config = host_ce_config_wlan_qca6750;
4853 		hif_state->target_ce_config = target_ce_config_wlan_qca6750;
4854 		hif_state->target_ce_config_sz =
4855 					sizeof(target_ce_config_wlan_qca6750);
4856 
4857 		scn->ce_count = QCA_6750_CE_COUNT;
4858 		break;
4859 	case TARGET_TYPE_KIWI:
4860 	case TARGET_TYPE_MANGO:
4861 	case TARGET_TYPE_PEACH:
4862 		hif_ce_select_config_kiwi(hif_state);
4863 		scn->ce_count = KIWI_CE_COUNT;
4864 		break;
4865 	case TARGET_TYPE_ADRASTEA:
4866 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
4867 			hif_state->host_ce_config =
4868 				host_lowdesc_ce_config_wlan_adrastea_nopktlog;
4869 			hif_state->target_ce_config =
4870 			       target_lowdesc_ce_config_wlan_adrastea_nopktlog;
4871 			hif_state->target_ce_config_sz =
4872 			sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog);
4873 		} else {
4874 			hif_state->host_ce_config =
4875 				host_ce_config_wlan_adrastea;
4876 			hif_state->target_ce_config =
4877 					target_ce_config_wlan_adrastea;
4878 			hif_state->target_ce_config_sz =
4879 					sizeof(target_ce_config_wlan_adrastea);
4880 		}
4881 		break;
4882 	case TARGET_TYPE_WCN6450:
4883 		hif_state->host_ce_config = host_ce_config_wlan_wcn6450;
4884 		hif_state->target_ce_config = target_ce_config_wlan_wcn6450;
4885 		hif_state->target_ce_config_sz =
4886 				sizeof(target_ce_config_wlan_wcn6450);
4887 		break;
4888 	}
4889 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
4890 }
4891 
4892 /**
4893  * hif_ce_open() - do ce specific allocations
4894  * @hif_sc: pointer to hif context
4895  *
4896  * return: 0 for success or QDF_STATUS_E_NOMEM
4897  */
4898 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
4899 {
4900 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
4901 
4902 	qdf_spinlock_create(&hif_state->irq_reg_lock);
4903 	qdf_spinlock_create(&hif_state->keep_awake_lock);
4904 	return QDF_STATUS_SUCCESS;
4905 }
4906 
4907 /**
4908  * hif_ce_close() - do ce specific free
4909  * @hif_sc: pointer to hif context
4910  */
4911 void hif_ce_close(struct hif_softc *hif_sc)
4912 {
4913 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
4914 
4915 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
4916 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
4917 }
4918 
4919 /**
4920  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
4921  * @hif_sc: hif context
4922  *
4923  * uses state variables to support cleaning up when hif_config_ce fails.
4924  */
4925 void hif_unconfig_ce(struct hif_softc *hif_sc)
4926 {
4927 	int pipe_num;
4928 	struct HIF_CE_pipe_info *pipe_info;
4929 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
4930 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
4931 
4932 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
4933 		pipe_info = &hif_state->pipe_info[pipe_num];
4934 		if (pipe_info->ce_hdl) {
4935 			ce_unregister_irq(hif_state, (1 << pipe_num));
4936 		}
4937 	}
4938 	deinit_tasklet_workers(hif_hdl);
4939 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
4940 		pipe_info = &hif_state->pipe_info[pipe_num];
4941 		if (pipe_info->ce_hdl) {
4942 			ce_fini(pipe_info->ce_hdl);
4943 			pipe_info->ce_hdl = NULL;
4944 			pipe_info->buf_sz = 0;
4945 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
4946 		}
4947 	}
4948 	if (hif_sc->athdiag_procfs_inited) {
4949 		athdiag_procfs_remove();
4950 		hif_sc->athdiag_procfs_inited = false;
4951 	}
4952 }
4953 
4954 #ifdef CONFIG_BYPASS_QMI
4955 #ifdef QCN7605_SUPPORT
4956 /**
4957  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
4958  * @scn: pointer to HIF structure
4959  *
4960  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
4961  *
4962  * Return: void
4963  */
4964 static void hif_post_static_buf_to_target(struct hif_softc *scn)
4965 {
4966 	phys_addr_t target_pa;
4967 	struct ce_info *ce_info_ptr;
4968 	uint32_t msi_data_start;
4969 	uint32_t msi_data_count;
4970 	uint32_t msi_irq_start;
4971 	uint32_t i = 0;
4972 	int ret;
4973 
4974 	scn->vaddr_qmi_bypass =
4975 			(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
4976 							     scn->qdf_dev->dev,
4977 							     FW_SHARED_MEM,
4978 							     &target_pa);
4979 	if (!scn->vaddr_qmi_bypass) {
4980 		hif_err("Memory allocation failed could not post target buf");
4981 		return;
4982 	}
4983 
4984 	scn->paddr_qmi_bypass = target_pa;
4985 
4986 	ce_info_ptr = (struct ce_info *)scn->vaddr_qmi_bypass;
4987 
4988 	if (scn->vaddr_rri_on_ddr) {
4989 		ce_info_ptr->rri_over_ddr_low_paddr  =
4990 			 BITS0_TO_31(scn->paddr_rri_on_ddr);
4991 		ce_info_ptr->rri_over_ddr_high_paddr =
4992 			 BITS32_TO_35(scn->paddr_rri_on_ddr);
4993 	}
4994 
4995 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
4996 					  &msi_data_count, &msi_data_start,
4997 					  &msi_irq_start);
4998 	if (ret) {
4999 		hif_err("Failed to get CE msi config");
5000 		return;
5001 	}
5002 
5003 	for (i = 0; i < CE_COUNT_MAX; i++) {
5004 		ce_info_ptr->cfg[i].ce_id = i;
5005 		ce_info_ptr->cfg[i].msi_vector =
5006 			 (i % msi_data_count) + msi_irq_start;
5007 	}
5008 
5009 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
5010 	hif_info("target va %pK target pa %pa", scn->vaddr_qmi_bypass,
5011 		 &target_pa);
5012 }
5013 
5014 /**
5015  * hif_cleanup_static_buf_to_target() -  clean up static buffer to WLAN FW
5016  * @scn: pointer to HIF structure
5017  *
5018  *
5019  * Return: void
5020  */
5021 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
5022 {
5023 	void *target_va = scn->vaddr_qmi_bypass;
5024 	phys_addr_t target_pa = scn->paddr_qmi_bypass;
5025 
5026 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
5027 				FW_SHARED_MEM, target_va,
5028 				target_pa, 0);
5029 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
5030 }
5031 #else
5032 /**
5033  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
5034  * @scn: pointer to HIF structure
5035  *
5036  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
5037  *
5038  * Return: void
5039  */
5040 static void hif_post_static_buf_to_target(struct hif_softc *scn)
5041 {
5042 	qdf_dma_addr_t target_pa;
5043 
5044 	scn->vaddr_qmi_bypass =
5045 			(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
5046 							     scn->qdf_dev->dev,
5047 							     FW_SHARED_MEM,
5048 							     &target_pa);
5049 	if (!scn->vaddr_qmi_bypass) {
5050 		hif_err("Memory allocation failed could not post target buf");
5051 		return;
5052 	}
5053 
5054 	scn->paddr_qmi_bypass = target_pa;
5055 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
5056 }
5057 
5058 /**
5059  * hif_cleanup_static_buf_to_target() -  clean up static buffer to WLAN FW
5060  * @scn: pointer to HIF structure
5061  *
5062  *
5063  * Return: void
5064  */
5065 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
5066 {
5067 	void *target_va = scn->vaddr_qmi_bypass;
5068 	phys_addr_t target_pa = scn->paddr_qmi_bypass;
5069 
5070 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
5071 				FW_SHARED_MEM, target_va,
5072 				target_pa, 0);
5073 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
5074 }
5075 #endif
5076 
5077 #else
5078 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
5079 {
5080 }
5081 
5082 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
5083 {
5084 }
5085 #endif
5086 
5087 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
5088 				bool wait_for_it)
5089 {
5090 	/* todo */
5091 	return 0;
5092 }
5093 
5094 int hif_config_ce_by_id(struct hif_softc *scn, int pipe_num)
5095 {
5096 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5097 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
5098 	struct HIF_CE_pipe_info *pipe_info;
5099 	struct CE_state *ce_state = NULL;
5100 	struct CE_attr *attr;
5101 	int rv = 0;
5102 
5103 	if (pipe_num >= CE_COUNT_MAX)
5104 		return -EINVAL;
5105 
5106 	pipe_info = &hif_state->pipe_info[pipe_num];
5107 	pipe_info->pipe_num = pipe_num;
5108 	pipe_info->HIF_CE_state = hif_state;
5109 	attr = &hif_state->host_ce_config[pipe_num];
5110 	ce_state = scn->ce_id_to_state[pipe_num];
5111 
5112 	if (ce_state) {
5113 		/* Do not reinitialize the CE if its done already */
5114 		rv = QDF_STATUS_E_BUSY;
5115 		goto err;
5116 	}
5117 
5118 	pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
5119 	ce_state = scn->ce_id_to_state[pipe_num];
5120 	if (!ce_state) {
5121 		A_TARGET_ACCESS_UNLIKELY(scn);
5122 		rv = QDF_STATUS_E_FAILURE;
5123 		goto err;
5124 	}
5125 	qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
5126 	QDF_ASSERT(pipe_info->ce_hdl);
5127 	if (!pipe_info->ce_hdl) {
5128 		rv = QDF_STATUS_E_FAILURE;
5129 		A_TARGET_ACCESS_UNLIKELY(scn);
5130 		goto err;
5131 	}
5132 
5133 	ce_state->lro_data = qdf_lro_init();
5134 
5135 	if (attr->flags & CE_ATTR_DIAG) {
5136 		/* Reserve the ultimate CE for
5137 		 * Diagnostic Window support
5138 		 */
5139 		hif_state->ce_diag = pipe_info->ce_hdl;
5140 		goto skip;
5141 	}
5142 
5143 	if (hif_is_nss_wifi_enabled(scn) && ce_state &&
5144 	    (ce_state->htt_rx_data)) {
5145 		goto skip;
5146 	}
5147 
5148 	pipe_info->buf_sz = (qdf_size_t)(attr->src_sz_max);
5149 	if (attr->dest_nentries > 0) {
5150 		atomic_set(&pipe_info->recv_bufs_needed,
5151 			   init_buffer_count(attr->dest_nentries - 1));
5152 		/*SRNG based CE has one entry less */
5153 		if (ce_srng_based(scn))
5154 			atomic_dec(&pipe_info->recv_bufs_needed);
5155 	} else {
5156 		atomic_set(&pipe_info->recv_bufs_needed, 0);
5157 	}
5158 	ce_tasklet_init(hif_state, (1 << pipe_num));
5159 	ce_register_irq(hif_state, (1 << pipe_num));
5160 
5161 	init_tasklet_worker_by_ceid(hif_hdl, pipe_num);
5162 skip:
5163 	return 0;
5164 err:
5165 	return rv;
5166 }
5167 
5168 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
5169 static inline void hif_gen_ce_id_history_idx_mapping(struct hif_softc *scn)
5170 {
5171 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
5172 	uint8_t ce_id, hist_idx = 0;
5173 
5174 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
5175 		if (IS_CE_DEBUG_ONLY_FOR_CRIT_CE & (1 << ce_id))
5176 			ce_hist->ce_id_hist_map[ce_id] = hist_idx++;
5177 		else
5178 			ce_hist->ce_id_hist_map[ce_id] = -1;
5179 	}
5180 }
5181 #else
5182 static inline void hif_gen_ce_id_history_idx_mapping(struct hif_softc *scn)
5183 {
5184 }
5185 #endif
5186 
5187 /**
5188  * hif_config_ce() - configure copy engines
5189  * @scn: hif context
5190  *
5191  * Prepares fw, copy engine hardware and host sw according
5192  * to the attributes selected by hif_ce_prepare_config.
5193  *
5194  * also calls athdiag_procfs_init
5195  *
5196  * return: 0 for success nonzero for failure.
5197  */
5198 int hif_config_ce(struct hif_softc *scn)
5199 {
5200 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5201 	struct HIF_CE_pipe_info *pipe_info;
5202 	int pipe_num;
5203 
5204 #ifdef ADRASTEA_SHADOW_REGISTERS
5205 	int i;
5206 #endif
5207 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
5208 
5209 	scn->notice_send = true;
5210 	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
5211 
5212 	hif_post_static_buf_to_target(scn);
5213 
5214 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
5215 
5216 	hif_config_rri_on_ddr(scn);
5217 
5218 	if (ce_srng_based(scn))
5219 		scn->bus_ops.hif_target_sleep_state_adjust =
5220 			&hif_srng_sleep_state_adjust;
5221 
5222 	/* Initialise the CE debug history sysfs interface inputs ce_id and
5223 	 * index. Disable data storing
5224 	 */
5225 	reset_ce_debug_history(scn);
5226 	hif_gen_ce_id_history_idx_mapping(scn);
5227 
5228 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
5229 		struct CE_attr *attr;
5230 
5231 		pipe_info = &hif_state->pipe_info[pipe_num];
5232 		attr = &hif_state->host_ce_config[pipe_num];
5233 
5234 		if (attr->flags & CE_ATTR_INIT_ON_DEMAND)
5235 			continue;
5236 
5237 		if (hif_config_ce_by_id(scn, pipe_num))
5238 			goto err;
5239 	}
5240 
5241 	if (athdiag_procfs_init(scn) != 0) {
5242 		A_TARGET_ACCESS_UNLIKELY(scn);
5243 		goto err;
5244 	}
5245 	scn->athdiag_procfs_inited = true;
5246 
5247 	hif_debug("ce_init done");
5248 	hif_debug("X, ret = %d", rv);
5249 
5250 #ifdef ADRASTEA_SHADOW_REGISTERS
5251 	hif_debug("Using Shadow Registers instead of CE Registers");
5252 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
5253 		hif_debug("Shadow Register%d is mapped to address %x",
5254 			  i,
5255 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
5256 	}
5257 #endif
5258 
5259 	return rv != QDF_STATUS_SUCCESS;
5260 err:
5261 	/* Failure, so clean up */
5262 	hif_unconfig_ce(scn);
5263 	hif_info("X, ret = %d", rv);
5264 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
5265 }
5266 
5267 /**
5268  * hif_config_ce_pktlog() - configure copy engines
5269  * @hif_hdl: hif context
5270  *
5271  * Prepares fw, copy engine hardware and host sw according
5272  * to the attributes selected by hif_ce_prepare_config.
5273  *
5274  * also calls athdiag_procfs_init
5275  *
5276  * return: 0 for success nonzero for failure.
5277  */
5278 int hif_config_ce_pktlog(struct hif_opaque_softc *hif_hdl)
5279 {
5280 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
5281 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5282 	int pipe_num;
5283 	QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
5284 	struct HIF_CE_pipe_info *pipe_info;
5285 
5286 	if (!scn)
5287 		goto err;
5288 
5289 	if (scn->pktlog_init)
5290 		return QDF_STATUS_SUCCESS;
5291 
5292 	pipe_num =  hif_get_pktlog_ce_num(scn);
5293 	if (pipe_num < 0) {
5294 		qdf_status = QDF_STATUS_E_FAILURE;
5295 		goto err;
5296 	}
5297 
5298 	pipe_info = &hif_state->pipe_info[pipe_num];
5299 
5300 	qdf_status = hif_config_ce_by_id(scn, pipe_num);
5301 	/* CE Already initialized. Do not try to reinitialized again */
5302 	if (qdf_status == QDF_STATUS_E_BUSY)
5303 		return QDF_STATUS_SUCCESS;
5304 
5305 	qdf_status = hif_config_irq_by_ceid(scn, pipe_num);
5306 	if (qdf_status < 0)
5307 		goto err;
5308 
5309 	qdf_status = hif_completion_thread_startup_by_ceid(hif_state, pipe_num);
5310 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
5311 		hif_err("Failed to start hif thread");
5312 		goto err;
5313 	}
5314 
5315 	/* Post buffers for pktlog copy engine. */
5316 	qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
5317 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
5318 		/* cleanup is done in hif_ce_disable */
5319 		hif_err("Failed to post buffers");
5320 		return qdf_status;
5321 	}
5322 	scn->pktlog_init = true;
5323 	return qdf_status != QDF_STATUS_SUCCESS;
5324 
5325 err:
5326 	hif_debug("X, ret = %d", qdf_status);
5327 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
5328 }
5329 
5330 #ifdef IPA_OFFLOAD
5331 /**
5332  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
5333  * @scn: bus context
5334  * @ce_sr: copyengine source ring base physical address
5335  * @ce_sr_ring_size: copyengine source ring size
5336  * @ce_reg_paddr: copyengine register physical address
5337  *
5338  * IPA micro controller data path offload feature enabled,
5339  * HIF should release copy engine related resource information to IPA UC
5340  * IPA UC will access hardware resource with released information
5341  *
5342  * Return: None
5343  */
5344 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
5345 			     qdf_shared_mem_t **ce_sr,
5346 			     uint32_t *ce_sr_ring_size,
5347 			     qdf_dma_addr_t *ce_reg_paddr)
5348 {
5349 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5350 	struct HIF_CE_pipe_info *pipe_info =
5351 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
5352 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
5353 
5354 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
5355 			    ce_reg_paddr);
5356 }
5357 #endif /* IPA_OFFLOAD */
5358 
5359 
5360 #ifdef ADRASTEA_SHADOW_REGISTERS
5361 
5362 /*
5363  * Current shadow register config
5364  *
5365  * -----------------------------------------------------------
5366  * Shadow Register      |     CE   |    src/dst write index
5367  * -----------------------------------------------------------
5368  *         0            |     0    |           src
5369  *         1     No Config - Doesn't point to anything
5370  *         2     No Config - Doesn't point to anything
5371  *         3            |     3    |           src
5372  *         4            |     4    |           src
5373  *         5            |     5    |           src
5374  *         6     No Config - Doesn't point to anything
5375  *         7            |     7    |           src
5376  *         8     No Config - Doesn't point to anything
5377  *         9     No Config - Doesn't point to anything
5378  *         10    No Config - Doesn't point to anything
5379  *         11    No Config - Doesn't point to anything
5380  * -----------------------------------------------------------
5381  *         12    No Config - Doesn't point to anything
5382  *         13           |     1    |           dst
5383  *         14           |     2    |           dst
5384  *         15    No Config - Doesn't point to anything
5385  *         16    No Config - Doesn't point to anything
5386  *         17    No Config - Doesn't point to anything
5387  *         18    No Config - Doesn't point to anything
5388  *         19           |     7    |           dst
5389  *         20           |     8    |           dst
5390  *         21    No Config - Doesn't point to anything
5391  *         22    No Config - Doesn't point to anything
5392  *         23    No Config - Doesn't point to anything
5393  * -----------------------------------------------------------
5394  *
5395  *
5396  * ToDo - Move shadow register config to following in the future
5397  * This helps free up a block of shadow registers towards the end.
5398  * Can be used for other purposes
5399  *
5400  * -----------------------------------------------------------
5401  * Shadow Register      |     CE   |    src/dst write index
5402  * -----------------------------------------------------------
5403  *      0            |     0    |           src
5404  *      1            |     3    |           src
5405  *      2            |     4    |           src
5406  *      3            |     5    |           src
5407  *      4            |     7    |           src
5408  * -----------------------------------------------------------
5409  *      5            |     1    |           dst
5410  *      6            |     2    |           dst
5411  *      7            |     7    |           dst
5412  *      8            |     8    |           dst
5413  * -----------------------------------------------------------
5414  *      9     No Config - Doesn't point to anything
5415  *      12    No Config - Doesn't point to anything
5416  *      13    No Config - Doesn't point to anything
5417  *      14    No Config - Doesn't point to anything
5418  *      15    No Config - Doesn't point to anything
5419  *      16    No Config - Doesn't point to anything
5420  *      17    No Config - Doesn't point to anything
5421  *      18    No Config - Doesn't point to anything
5422  *      19    No Config - Doesn't point to anything
5423  *      20    No Config - Doesn't point to anything
5424  *      21    No Config - Doesn't point to anything
5425  *      22    No Config - Doesn't point to anything
5426  *      23    No Config - Doesn't point to anything
5427  * -----------------------------------------------------------
5428 */
5429 #ifndef QCN7605_SUPPORT
5430 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
5431 {
5432 	u32 addr = 0;
5433 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
5434 
5435 	switch (ce) {
5436 	case 0:
5437 		addr = SHADOW_VALUE0;
5438 		break;
5439 	case 3:
5440 		addr = SHADOW_VALUE3;
5441 		break;
5442 	case 4:
5443 		addr = SHADOW_VALUE4;
5444 		break;
5445 	case 5:
5446 		addr = SHADOW_VALUE5;
5447 		break;
5448 	case 7:
5449 		addr = SHADOW_VALUE7;
5450 		break;
5451 	default:
5452 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
5453 		QDF_ASSERT(0);
5454 	}
5455 	return addr;
5456 
5457 }
5458 
5459 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
5460 {
5461 	u32 addr = 0;
5462 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
5463 
5464 	switch (ce) {
5465 	case 1:
5466 		addr = SHADOW_VALUE13;
5467 		break;
5468 	case 2:
5469 		addr = SHADOW_VALUE14;
5470 		break;
5471 	case 5:
5472 		addr = SHADOW_VALUE17;
5473 		break;
5474 	case 7:
5475 		addr = SHADOW_VALUE19;
5476 		break;
5477 	case 8:
5478 		addr = SHADOW_VALUE20;
5479 		break;
5480 	case 9:
5481 		addr = SHADOW_VALUE21;
5482 		break;
5483 	case 10:
5484 		addr = SHADOW_VALUE22;
5485 		break;
5486 	case 11:
5487 		addr = SHADOW_VALUE23;
5488 		break;
5489 	default:
5490 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
5491 		QDF_ASSERT(0);
5492 	}
5493 
5494 	return addr;
5495 
5496 }
5497 #else
5498 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
5499 {
5500 	u32 addr = 0;
5501 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
5502 
5503 	switch (ce) {
5504 	case 0:
5505 		addr = SHADOW_VALUE0;
5506 		break;
5507 	case 3:
5508 		addr = SHADOW_VALUE3;
5509 		break;
5510 	case 4:
5511 		addr = SHADOW_VALUE4;
5512 		break;
5513 	case 5:
5514 		addr = SHADOW_VALUE5;
5515 		break;
5516 	default:
5517 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
5518 		QDF_ASSERT(0);
5519 	}
5520 	return addr;
5521 }
5522 
5523 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
5524 {
5525 	u32 addr = 0;
5526 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
5527 
5528 	switch (ce) {
5529 	case 1:
5530 		addr = SHADOW_VALUE13;
5531 		break;
5532 	case 2:
5533 		addr = SHADOW_VALUE14;
5534 		break;
5535 	case 3:
5536 		addr = SHADOW_VALUE15;
5537 		break;
5538 	case 5:
5539 		addr = SHADOW_VALUE17;
5540 		break;
5541 	case 7:
5542 		addr = SHADOW_VALUE19;
5543 		break;
5544 	case 8:
5545 		addr = SHADOW_VALUE20;
5546 		break;
5547 	case 9:
5548 		addr = SHADOW_VALUE21;
5549 		break;
5550 	case 10:
5551 		addr = SHADOW_VALUE22;
5552 		break;
5553 	case 11:
5554 		addr = SHADOW_VALUE23;
5555 		break;
5556 	default:
5557 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
5558 		QDF_ASSERT(0);
5559 	}
5560 
5561 	return addr;
5562 }
5563 #endif
5564 #endif
5565 
5566 #if defined(FEATURE_LRO)
5567 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
5568 {
5569 	struct CE_state *ce_state;
5570 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
5571 
5572 	ce_state = scn->ce_id_to_state[ctx_id];
5573 
5574 	return ce_state->lro_data;
5575 }
5576 #endif
5577 
5578 /**
5579  * hif_map_service_to_pipe() - returns  the ce ids pertaining to
5580  * this service
5581  * @hif_hdl: hif_softc pointer.
5582  * @svc_id: Service ID for which the mapping is needed.
5583  * @ul_pipe: address of the container in which ul pipe is returned.
5584  * @dl_pipe: address of the container in which dl pipe is returned.
5585  * @ul_is_polled: address of the container in which a bool
5586  *			indicating if the UL CE for this service
5587  *			is polled is returned.
5588  * @dl_is_polled: address of the container in which a bool
5589  *			indicating if the DL CE for this service
5590  *			is polled is returned.
5591  *
5592  * Return: Indicates whether the service has been found in the table.
5593  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
5594  *         There will be warning logs if either leg has not been updated
5595  *         because it missed the entry in the table (but this is not an err).
5596  */
5597 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
5598 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
5599 			int *dl_is_polled)
5600 {
5601 	int status = -EINVAL;
5602 	unsigned int i;
5603 	struct service_to_pipe element;
5604 	struct service_to_pipe *tgt_svc_map_to_use;
5605 	uint32_t sz_tgt_svc_map_to_use;
5606 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
5607 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5608 	bool dl_updated = false;
5609 	bool ul_updated = false;
5610 
5611 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
5612 				       &sz_tgt_svc_map_to_use);
5613 
5614 	*dl_is_polled = 0;  /* polling for received messages not supported */
5615 
5616 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
5617 
5618 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
5619 		if (element.service_id == svc_id) {
5620 			if (element.pipedir == PIPEDIR_OUT) {
5621 				*ul_pipe = element.pipenum;
5622 				*ul_is_polled =
5623 					(hif_state->host_ce_config[*ul_pipe].flags &
5624 					 CE_ATTR_DISABLE_INTR) != 0;
5625 				ul_updated = true;
5626 			} else if (element.pipedir == PIPEDIR_IN) {
5627 				*dl_pipe = element.pipenum;
5628 				dl_updated = true;
5629 			}
5630 			status = 0;
5631 		}
5632 	}
5633 	if (ul_updated == false)
5634 		hif_debug("ul pipe is NOT updated for service %d", svc_id);
5635 	if (dl_updated == false)
5636 		hif_debug("dl pipe is NOT updated for service %d", svc_id);
5637 
5638 	return status;
5639 }
5640 
5641 #ifdef SHADOW_REG_DEBUG
5642 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
5643 		uint32_t CE_ctrl_addr)
5644 {
5645 	uint32_t read_from_hw, srri_from_ddr = 0;
5646 
5647 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
5648 
5649 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
5650 
5651 	if (read_from_hw != srri_from_ddr) {
5652 		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
5653 		       srri_from_ddr, read_from_hw,
5654 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
5655 		QDF_ASSERT(0);
5656 	}
5657 	return srri_from_ddr;
5658 }
5659 
5660 
5661 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
5662 		uint32_t CE_ctrl_addr)
5663 {
5664 	uint32_t read_from_hw, drri_from_ddr = 0;
5665 
5666 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
5667 
5668 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
5669 
5670 	if (read_from_hw != drri_from_ddr) {
5671 		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
5672 		       drri_from_ddr, read_from_hw,
5673 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
5674 		QDF_ASSERT(0);
5675 	}
5676 	return drri_from_ddr;
5677 }
5678 
5679 #endif
5680 
5681 /**
5682  * hif_dump_ce_registers() - dump ce registers
5683  * @scn: hif_opaque_softc pointer.
5684  *
5685  * Output the copy engine registers
5686  *
5687  * Return: 0 for success or error code
5688  */
5689 int hif_dump_ce_registers(struct hif_softc *scn)
5690 {
5691 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
5692 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
5693 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
5694 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
5695 	uint16_t i;
5696 	QDF_STATUS status;
5697 
5698 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
5699 		if (!scn->ce_id_to_state[i]) {
5700 			hif_debug("CE%d not used", i);
5701 			continue;
5702 		}
5703 
5704 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
5705 					   (uint8_t *) &ce_reg_values[0],
5706 					   ce_reg_word_size * sizeof(uint32_t));
5707 
5708 		if (status != QDF_STATUS_SUCCESS) {
5709 			hif_err("Dumping CE register failed!");
5710 			return -EACCES;
5711 		}
5712 		hif_debug("CE%d=>", i);
5713 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
5714 				   (uint8_t *) &ce_reg_values[0],
5715 				   ce_reg_word_size * sizeof(uint32_t));
5716 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
5717 				+ SR_WR_INDEX_ADDRESS),
5718 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
5719 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
5720 				+ CURRENT_SRRI_ADDRESS),
5721 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
5722 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
5723 				+ DST_WR_INDEX_ADDRESS),
5724 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
5725 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
5726 				+ CURRENT_DRRI_ADDRESS),
5727 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
5728 		qdf_print("---");
5729 	}
5730 	return 0;
5731 }
5732 qdf_export_symbol(hif_dump_ce_registers);
5733 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
5734 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
5735 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
5736 {
5737 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
5738 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
5739 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
5740 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
5741 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
5742 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
5743 	struct CE_ring_state *src_ring = ce_state->src_ring;
5744 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
5745 
5746 	if (src_ring) {
5747 		hif_info->ul_pipe.nentries = src_ring->nentries;
5748 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
5749 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
5750 		hif_info->ul_pipe.write_index = src_ring->write_index;
5751 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
5752 		hif_info->ul_pipe.base_addr_CE_space =
5753 			src_ring->base_addr_CE_space;
5754 		hif_info->ul_pipe.base_addr_owner_space =
5755 			src_ring->base_addr_owner_space;
5756 	}
5757 
5758 
5759 	if (dest_ring) {
5760 		hif_info->dl_pipe.nentries = dest_ring->nentries;
5761 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
5762 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
5763 		hif_info->dl_pipe.write_index = dest_ring->write_index;
5764 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
5765 		hif_info->dl_pipe.base_addr_CE_space =
5766 			dest_ring->base_addr_CE_space;
5767 		hif_info->dl_pipe.base_addr_owner_space =
5768 			dest_ring->base_addr_owner_space;
5769 	}
5770 
5771 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
5772 	hif_info->ctrl_addr = ce_state->ctrl_addr;
5773 
5774 	return hif_info;
5775 }
5776 qdf_export_symbol(hif_get_addl_pipe_info);
5777 
5778 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
5779 {
5780 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
5781 
5782 	scn->nss_wifi_ol_mode = mode;
5783 	return 0;
5784 }
5785 qdf_export_symbol(hif_set_nss_wifiol_mode);
5786 #endif
5787 
5788 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
5789 {
5790 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
5791 	scn->hif_attribute = hif_attrib;
5792 }
5793 
5794 
5795 /* disable interrupts (only applicable for legacy copy engine currently */
5796 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
5797 {
5798 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
5799 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
5800 	uint32_t ctrl_addr = CE_state->ctrl_addr;
5801 
5802 	Q_TARGET_ACCESS_BEGIN(scn);
5803 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
5804 	Q_TARGET_ACCESS_END(scn);
5805 }
5806 qdf_export_symbol(hif_disable_interrupt);
5807 
5808 /**
5809  * hif_fw_event_handler() - hif fw event handler
5810  * @hif_state: pointer to hif ce state structure
5811  *
5812  * Process fw events and raise HTC callback to process fw events.
5813  *
5814  * Return: none
5815  */
5816 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
5817 {
5818 	struct hif_msg_callbacks *msg_callbacks =
5819 		&hif_state->msg_callbacks_current;
5820 
5821 	if (!msg_callbacks->fwEventHandler)
5822 		return;
5823 
5824 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
5825 			QDF_STATUS_E_FAILURE);
5826 }
5827 
5828 #ifndef QCA_WIFI_3_0
5829 /**
5830  * hif_fw_interrupt_handler() - FW interrupt handler
5831  * @irq: irq number
5832  * @arg: the user pointer
5833  *
5834  * Called from the PCI interrupt handler when a
5835  * firmware-generated interrupt to the Host.
5836  *
5837  * only registered for legacy ce devices
5838  *
5839  * Return: status of handled irq
5840  */
5841 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
5842 {
5843 	struct hif_softc *scn = arg;
5844 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5845 	uint32_t fw_indicator_address, fw_indicator;
5846 
5847 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
5848 		return ATH_ISR_NOSCHED;
5849 
5850 	fw_indicator_address = hif_state->fw_indicator_address;
5851 	/* For sudden unplug this will return ~0 */
5852 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
5853 
5854 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
5855 		/* ACK: clear Target-side pending event */
5856 		A_TARGET_WRITE(scn, fw_indicator_address,
5857 			       fw_indicator & ~FW_IND_EVENT_PENDING);
5858 		if (Q_TARGET_ACCESS_END(scn) < 0)
5859 			return ATH_ISR_SCHED;
5860 
5861 		if (hif_state->started) {
5862 			hif_fw_event_handler(hif_state);
5863 		} else {
5864 			/*
5865 			 * Probable Target failure before we're prepared
5866 			 * to handle it.  Generally unexpected.
5867 			 * fw_indicator used as bitmap, and defined as below:
5868 			 *     FW_IND_EVENT_PENDING    0x1
5869 			 *     FW_IND_INITIALIZED      0x2
5870 			 *     FW_IND_NEEDRECOVER      0x4
5871 			 */
5872 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
5873 				("%s: Early firmware event indicated 0x%x\n",
5874 				 __func__, fw_indicator));
5875 		}
5876 	} else {
5877 		if (Q_TARGET_ACCESS_END(scn) < 0)
5878 			return ATH_ISR_SCHED;
5879 	}
5880 
5881 	return ATH_ISR_SCHED;
5882 }
5883 #else
5884 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
5885 {
5886 	return ATH_ISR_SCHED;
5887 }
5888 #endif /* #ifdef QCA_WIFI_3_0 */
5889 
5890 
5891 /**
5892  * hif_wlan_disable(): call the platform driver to disable wlan
5893  * @scn: HIF Context
5894  *
5895  * This function passes the con_mode to platform driver to disable
5896  * wlan.
5897  *
5898  * Return: void
5899  */
5900 void hif_wlan_disable(struct hif_softc *scn)
5901 {
5902 	enum pld_driver_mode mode;
5903 	uint32_t con_mode = hif_get_conparam(scn);
5904 
5905 	if (scn->target_status == TARGET_STATUS_RESET)
5906 		return;
5907 
5908 	if (QDF_GLOBAL_FTM_MODE == con_mode)
5909 		mode = PLD_FTM;
5910 	else if (QDF_IS_EPPING_ENABLED(con_mode))
5911 		mode = PLD_EPPING;
5912 	else
5913 		mode = PLD_MISSION;
5914 
5915 	pld_wlan_disable(scn->qdf_dev->dev, mode);
5916 }
5917 
5918 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
5919 {
5920 	int status;
5921 	uint8_t ul_pipe, dl_pipe;
5922 	int ul_is_polled, dl_is_polled;
5923 
5924 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
5925 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
5926 					 HTC_CTRL_RSVD_SVC,
5927 					 &ul_pipe, &dl_pipe,
5928 					 &ul_is_polled, &dl_is_polled);
5929 	if (status) {
5930 		hif_err("Failed to map pipe: %d", status);
5931 		return status;
5932 	}
5933 
5934 	*ce_id = dl_pipe;
5935 
5936 	return 0;
5937 }
5938 
5939 int hif_get_fw_diag_ce_id(struct hif_softc *scn, uint8_t *ce_id)
5940 {
5941 	int status;
5942 	uint8_t ul_pipe, dl_pipe;
5943 	int ul_is_polled, dl_is_polled;
5944 
5945 	/* DL pipe for WMI_CONTROL_DIAG_SVC should map to the FW DIAG CE_ID */
5946 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
5947 					 WMI_CONTROL_DIAG_SVC,
5948 					 &ul_pipe, &dl_pipe,
5949 					 &ul_is_polled, &dl_is_polled);
5950 	if (status) {
5951 		hif_err("Failed to map pipe: %d", status);
5952 		return status;
5953 	}
5954 
5955 	*ce_id = dl_pipe;
5956 
5957 	return 0;
5958 }
5959 
5960 #ifdef HIF_CE_LOG_INFO
5961 /**
5962  * ce_get_index_info(): Get CE index info
5963  * @scn: HIF Context
5964  * @ce_state: CE opaque handle
5965  * @info: CE info
5966  *
5967  * Return: 0 for success and non zero for failure
5968  */
5969 static
5970 int ce_get_index_info(struct hif_softc *scn, void *ce_state,
5971 		      struct ce_index *info)
5972 {
5973 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
5974 
5975 	return hif_state->ce_services->ce_get_index_info(scn, ce_state, info);
5976 }
5977 
5978 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
5979 		     unsigned int *offset)
5980 {
5981 	struct hang_event_info info = {0};
5982 	static uint32_t tracked_ce = BIT(CE_ID_1) | BIT(CE_ID_2) |
5983 		BIT(CE_ID_3) | BIT(CE_ID_4) | BIT(CE_ID_9) | BIT(CE_ID_10);
5984 	uint8_t curr_index = 0;
5985 	uint8_t i;
5986 	uint16_t size;
5987 
5988 	info.active_tasklet_count = qdf_atomic_read(&scn->active_tasklet_cnt);
5989 	info.active_grp_tasklet_cnt =
5990 				qdf_atomic_read(&scn->active_grp_tasklet_cnt);
5991 
5992 	for (i = 0; i < scn->ce_count; i++) {
5993 		if (!(tracked_ce & BIT(i)) || !scn->ce_id_to_state[i])
5994 			continue;
5995 
5996 		if (ce_get_index_info(scn, scn->ce_id_to_state[i],
5997 				      &info.ce_info[curr_index]))
5998 			continue;
5999 
6000 		curr_index++;
6001 	}
6002 
6003 	info.ce_count = curr_index;
6004 	size = sizeof(info) -
6005 		(CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index);
6006 
6007 	if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
6008 		return;
6009 
6010 	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_CE_INFO,
6011 			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);
6012 
6013 	qdf_mem_copy(data + *offset, &info, size);
6014 	*offset = *offset + size;
6015 }
6016 #endif
6017 
6018 #ifdef FEATURE_DIRECT_LINK
6019 QDF_STATUS
6020 hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
6021 			   uint64_t addr, uint32_t data)
6022 {
6023 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(scn);
6024 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
6025 
6026 	if (hif_state->ce_services->ce_set_irq_config_by_ceid)
6027 		return hif_state->ce_services->ce_set_irq_config_by_ceid(
6028 									hif_ctx,
6029 									ce_id,
6030 									addr,
6031 									data);
6032 
6033 	return QDF_STATUS_E_NOSUPPORT;
6034 }
6035 
6036 uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
6037 						  uint64_t **dma_addr,
6038 						  uint32_t *buf_size)
6039 {
6040 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(scn);
6041 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
6042 	struct ce_ops *ce_services = hif_state->ce_services;
6043 
6044 	if (ce_services->ce_get_direct_link_dest_buffers)
6045 		return ce_services->ce_get_direct_link_dest_buffers(hif_ctx,
6046 								    dma_addr,
6047 								    buf_size);
6048 
6049 	return 0;
6050 }
6051 
6052 QDF_STATUS
6053 hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
6054 				 struct hif_direct_link_ce_info *info,
6055 				 uint8_t max_ce_info_len)
6056 {
6057 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(scn);
6058 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
6059 	struct ce_ops *ce_services = hif_state->ce_services;
6060 
6061 	if (ce_services->ce_get_direct_link_ring_info)
6062 		return ce_services->ce_get_direct_link_ring_info(hif_ctx,
6063 							       info,
6064 							       max_ce_info_len);
6065 
6066 	return QDF_STATUS_E_NOSUPPORT;
6067 }
6068 #endif
6069