xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision a64d8a0dbea74a9757ced8bd24c04bf658d196c7)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "ce_api.h"
33 #include "qdf_trace.h"
34 #include "pld_common.h"
35 #include "hif_debug.h"
36 #include "ce_internal.h"
37 #include "ce_reg.h"
38 #include "ce_assignment.h"
39 #include "ce_tasklet.h"
40 #include "qdf_module.h"
41 
42 #define CE_POLL_TIMEOUT 10      /* ms */
43 
44 #define AGC_DUMP         1
45 #define CHANINFO_DUMP    2
46 #define BB_WATCHDOG_DUMP 3
47 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
48 #define PCIE_ACCESS_DUMP 4
49 #endif
50 #include "mp_dev.h"
51 #ifdef HIF_CE_LOG_INFO
52 #include "qdf_hang_event_notifier.h"
53 #endif
54 
55 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
56 	defined(QCA_WIFI_QCA6018) || defined(QCA_WIFI_QCA5018) || \
57 	defined(QCA_WIFI_WCN7850)) && !defined(QCA_WIFI_SUPPORT_SRNG)
58 #define QCA_WIFI_SUPPORT_SRNG
59 #endif
60 
61 #ifdef QCA_WIFI_SUPPORT_SRNG
62 #include <hal_api.h>
63 #endif
64 
65 /* Forward references */
66 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
67 
68 /*
69  * Fix for EV118783: poll to check whether a BMI response has arrived
70  * rather than waiting for an interrupt that may be lost.
71  */
72 /* #define BMI_RSP_POLLING */
73 #define BMI_RSP_TO_MILLISEC  1000
74 
75 #ifdef CONFIG_BYPASS_QMI
76 #define BYPASS_QMI 1
77 #else
78 #define BYPASS_QMI 0
79 #endif
80 
81 #ifdef ENABLE_10_4_FW_HDR
82 #if (ENABLE_10_4_FW_HDR == 1)
83 #define WDI_IPA_SERVICE_GROUP 5
84 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
85 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
86 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
87 #endif /* ENABLE_10_4_FW_HDR == 1 */
88 #endif /* ENABLE_10_4_FW_HDR */
89 
90 static void hif_config_rri_on_ddr(struct hif_softc *scn);
91 
92 /**
93  * hif_target_access_log_dump() - dump access log
94  *
95  * Dump the target register access log.
96  *
97  * Return: n/a
98  */
99 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
100 static void hif_target_access_log_dump(void)
101 {
102 	hif_target_dump_access_log();
103 }
104 #endif
105 
106 /*
107  * This structure contains the interrupt index for each Copy engine
108  * for the various numbers of MSIs available in the system.
109  */
110 static struct ce_int_assignment ce_int_context[NUM_CE_CONTEXT] = {
111 	/* Default configuration */
112 	{{ CE_INTERRUPT_IDX(0),
113 	  CE_INTERRUPT_IDX(1),
114 	  CE_INTERRUPT_IDX(2),
115 	  CE_INTERRUPT_IDX(3),
116 	  CE_INTERRUPT_IDX(4),
117 	  CE_INTERRUPT_IDX(5),
118 	  CE_INTERRUPT_IDX(6),
119 	  CE_INTERRUPT_IDX(7),
120 	  CE_INTERRUPT_IDX(8),
121 	  CE_INTERRUPT_IDX(9),
122 	  CE_INTERRUPT_IDX(10),
123 	  CE_INTERRUPT_IDX(11),
124 #ifdef QCA_WIFI_QCN9224
125 	  CE_INTERRUPT_IDX(12),
126 	  CE_INTERRUPT_IDX(13),
127 	  CE_INTERRUPT_IDX(14),
128 	  CE_INTERRUPT_IDX(15),
129 #endif
130 	} },
131 	/* Interrupt assignment for 1 MSI combination */
132 	{{ CE_INTERRUPT_IDX(0),
133 	  CE_INTERRUPT_IDX(0),
134 	  CE_INTERRUPT_IDX(0),
135 	  CE_INTERRUPT_IDX(0),
136 	  CE_INTERRUPT_IDX(0),
137 	  CE_INTERRUPT_IDX(0),
138 	  CE_INTERRUPT_IDX(0),
139 	  CE_INTERRUPT_IDX(0),
140 	  CE_INTERRUPT_IDX(0),
141 	  CE_INTERRUPT_IDX(0),
142 	  CE_INTERRUPT_IDX(0),
143 	  CE_INTERRUPT_IDX(0),
144 #ifdef QCA_WIFI_QCN9224
145 	  CE_INTERRUPT_IDX(0),
146 	  CE_INTERRUPT_IDX(0),
147 	  CE_INTERRUPT_IDX(0),
148 	  CE_INTERRUPT_IDX(0),
149 #endif
150 	} },
151 	/* Interrupt assignment for 2 MSI combination */
152 	{{ CE_INTERRUPT_IDX(0),
153 	  CE_INTERRUPT_IDX(1),
154 	  CE_INTERRUPT_IDX(0),
155 	  CE_INTERRUPT_IDX(1),
156 	  CE_INTERRUPT_IDX(0),
157 	  CE_INTERRUPT_IDX(1),
158 	  CE_INTERRUPT_IDX(0),
159 	  CE_INTERRUPT_IDX(0),
160 	  CE_INTERRUPT_IDX(0),
161 	  CE_INTERRUPT_IDX(0),
162 	  CE_INTERRUPT_IDX(0),
163 	  CE_INTERRUPT_IDX(0),
164 #ifdef QCA_WIFI_QCN9224
165 	  CE_INTERRUPT_IDX(0),
166 	  CE_INTERRUPT_IDX(0),
167 	  CE_INTERRUPT_IDX(0),
168 	  CE_INTERRUPT_IDX(0),
169 #endif
170 	} },
171 	/* Interrupt assignment for 3 MSI combination */
172 	{{ CE_INTERRUPT_IDX(0),
173 	  CE_INTERRUPT_IDX(1),
174 	  CE_INTERRUPT_IDX(2),
175 	  CE_INTERRUPT_IDX(1),
176 	  CE_INTERRUPT_IDX(0),
177 	  CE_INTERRUPT_IDX(1),
178 	  CE_INTERRUPT_IDX(0),
179 	  CE_INTERRUPT_IDX(0),
180 	  CE_INTERRUPT_IDX(0),
181 	  CE_INTERRUPT_IDX(0),
182 	  CE_INTERRUPT_IDX(0),
183 	  CE_INTERRUPT_IDX(0),
184 #ifdef QCA_WIFI_QCN9224
185 	  CE_INTERRUPT_IDX(0),
186 	  CE_INTERRUPT_IDX(0),
187 	  CE_INTERRUPT_IDX(0),
188 	  CE_INTERRUPT_IDX(0),
189 #endif
190 	} },
191 	/* Interrupt assignment for 4 MSI combination */
192 	{{ CE_INTERRUPT_IDX(0),
193 	  CE_INTERRUPT_IDX(1),
194 	  CE_INTERRUPT_IDX(2),
195 	  CE_INTERRUPT_IDX(3),
196 	  CE_INTERRUPT_IDX(0),
197 	  CE_INTERRUPT_IDX(1),
198 	  CE_INTERRUPT_IDX(0),
199 	  CE_INTERRUPT_IDX(0),
200 	  CE_INTERRUPT_IDX(0),
201 	  CE_INTERRUPT_IDX(0),
202 	  CE_INTERRUPT_IDX(0),
203 	  CE_INTERRUPT_IDX(0),
204 #ifdef QCA_WIFI_QCN9224
205 	  CE_INTERRUPT_IDX(0),
206 	  CE_INTERRUPT_IDX(0),
207 	  CE_INTERRUPT_IDX(0),
208 	  CE_INTERRUPT_IDX(0),
209 #endif
210 	} },
211 	/* Interrupt assignment for 5 MSI combination */
212 	{{ CE_INTERRUPT_IDX(0),
213 	  CE_INTERRUPT_IDX(1),
214 	  CE_INTERRUPT_IDX(2),
215 	  CE_INTERRUPT_IDX(3),
216 	  CE_INTERRUPT_IDX(0),
217 	  CE_INTERRUPT_IDX(4),
218 	  CE_INTERRUPT_IDX(0),
219 	  CE_INTERRUPT_IDX(0),
220 	  CE_INTERRUPT_IDX(0),
221 	  CE_INTERRUPT_IDX(0),
222 	  CE_INTERRUPT_IDX(0),
223 	  CE_INTERRUPT_IDX(0),
224 #ifdef QCA_WIFI_QCN9224
225 	  CE_INTERRUPT_IDX(0),
226 	  CE_INTERRUPT_IDX(0),
227 	  CE_INTERRUPT_IDX(0),
228 	  CE_INTERRUPT_IDX(0),
229 #endif
230 	} },
231 	/* Interrupt assignment for 6 MSI combination */
232 	{{ CE_INTERRUPT_IDX(0),
233 	  CE_INTERRUPT_IDX(1),
234 	  CE_INTERRUPT_IDX(2),
235 	  CE_INTERRUPT_IDX(3),
236 	  CE_INTERRUPT_IDX(4),
237 	  CE_INTERRUPT_IDX(5),
238 	  CE_INTERRUPT_IDX(0),
239 	  CE_INTERRUPT_IDX(0),
240 	  CE_INTERRUPT_IDX(0),
241 	  CE_INTERRUPT_IDX(0),
242 	  CE_INTERRUPT_IDX(0),
243 	  CE_INTERRUPT_IDX(0),
244 #ifdef QCA_WIFI_QCN9224
245 	  CE_INTERRUPT_IDX(0),
246 	  CE_INTERRUPT_IDX(0),
247 	  CE_INTERRUPT_IDX(0),
248 	  CE_INTERRUPT_IDX(0),
249 #endif
250 	} },
251 	/* Interrupt assignment for 7 MSI combination */
252 	{{ CE_INTERRUPT_IDX(0),
253 	  CE_INTERRUPT_IDX(1),
254 	  CE_INTERRUPT_IDX(2),
255 	  CE_INTERRUPT_IDX(3),
256 	  CE_INTERRUPT_IDX(4),
257 	  CE_INTERRUPT_IDX(5),
258 	  CE_INTERRUPT_IDX(6),
259 	  CE_INTERRUPT_IDX(0),
260 	  CE_INTERRUPT_IDX(0),
261 	  CE_INTERRUPT_IDX(0),
262 	  CE_INTERRUPT_IDX(0),
263 	  CE_INTERRUPT_IDX(0),
264 #ifdef QCA_WIFI_QCN9224
265 	  CE_INTERRUPT_IDX(0),
266 	  CE_INTERRUPT_IDX(0),
267 	  CE_INTERRUPT_IDX(0),
268 	  CE_INTERRUPT_IDX(0),
269 #endif
270 	} },
271 	/* Interrupt assignment for 8 MSI combination */
272 	{{ CE_INTERRUPT_IDX(0),
273 	  CE_INTERRUPT_IDX(1),
274 	  CE_INTERRUPT_IDX(2),
275 	  CE_INTERRUPT_IDX(3),
276 	  CE_INTERRUPT_IDX(4),
277 	  CE_INTERRUPT_IDX(5),
278 	  CE_INTERRUPT_IDX(6),
279 	  CE_INTERRUPT_IDX(7),
280 	  CE_INTERRUPT_IDX(0),
281 	  CE_INTERRUPT_IDX(0),
282 	  CE_INTERRUPT_IDX(0),
283 	  CE_INTERRUPT_IDX(0),
284 #ifdef QCA_WIFI_QCN9224
285 	  CE_INTERRUPT_IDX(0),
286 	  CE_INTERRUPT_IDX(0),
287 	  CE_INTERRUPT_IDX(0),
288 	  CE_INTERRUPT_IDX(0),
289 #endif
290 	} },
291 	/* Interrupt assignment for 9 MSI combination */
292 	{{ CE_INTERRUPT_IDX(0),
293 	  CE_INTERRUPT_IDX(1),
294 	  CE_INTERRUPT_IDX(2),
295 	  CE_INTERRUPT_IDX(3),
296 	  CE_INTERRUPT_IDX(4),
297 	  CE_INTERRUPT_IDX(5),
298 	  CE_INTERRUPT_IDX(6),
299 	  CE_INTERRUPT_IDX(7),
300 	  CE_INTERRUPT_IDX(8),
301 	  CE_INTERRUPT_IDX(0),
302 	  CE_INTERRUPT_IDX(0),
303 	  CE_INTERRUPT_IDX(0),
304 #ifdef QCA_WIFI_QCN9224
305 	  CE_INTERRUPT_IDX(0),
306 	  CE_INTERRUPT_IDX(0),
307 	  CE_INTERRUPT_IDX(0),
308 	  CE_INTERRUPT_IDX(0),
309 #endif
310 	} },
311 	/* Interrupt assignment for 10 MSI combination */
312 	{{ CE_INTERRUPT_IDX(0),
313 	  CE_INTERRUPT_IDX(1),
314 	  CE_INTERRUPT_IDX(2),
315 	  CE_INTERRUPT_IDX(3),
316 	  CE_INTERRUPT_IDX(4),
317 	  CE_INTERRUPT_IDX(5),
318 	  CE_INTERRUPT_IDX(6),
319 	  CE_INTERRUPT_IDX(7),
320 	  CE_INTERRUPT_IDX(8),
321 	  CE_INTERRUPT_IDX(9),
322 	  CE_INTERRUPT_IDX(0),
323 	  CE_INTERRUPT_IDX(0),
324 #ifdef QCA_WIFI_QCN9224
325 	  CE_INTERRUPT_IDX(0),
326 	  CE_INTERRUPT_IDX(0),
327 	  CE_INTERRUPT_IDX(0),
328 	  CE_INTERRUPT_IDX(0),
329 #endif
330 	} },
331 	/* Interrupt assignment for 11 MSI combination */
332 	{{ CE_INTERRUPT_IDX(0),
333 	  CE_INTERRUPT_IDX(1),
334 	  CE_INTERRUPT_IDX(2),
335 	  CE_INTERRUPT_IDX(3),
336 	  CE_INTERRUPT_IDX(4),
337 	  CE_INTERRUPT_IDX(5),
338 	  CE_INTERRUPT_IDX(6),
339 	  CE_INTERRUPT_IDX(7),
340 	  CE_INTERRUPT_IDX(8),
341 	  CE_INTERRUPT_IDX(9),
342 	  CE_INTERRUPT_IDX(10),
343 	  CE_INTERRUPT_IDX(0),
344 #ifdef QCA_WIFI_QCN9224
345 	  CE_INTERRUPT_IDX(0),
346 	  CE_INTERRUPT_IDX(0),
347 	  CE_INTERRUPT_IDX(0),
348 	  CE_INTERRUPT_IDX(0),
349 #endif
350 	} },
351 	/* Interrupt assignment for 12 MSI combination */
352 	{{ CE_INTERRUPT_IDX(0),
353 	  CE_INTERRUPT_IDX(1),
354 	  CE_INTERRUPT_IDX(2),
355 	  CE_INTERRUPT_IDX(3),
356 	  CE_INTERRUPT_IDX(4),
357 	  CE_INTERRUPT_IDX(5),
358 	  CE_INTERRUPT_IDX(6),
359 	  CE_INTERRUPT_IDX(7),
360 	  CE_INTERRUPT_IDX(8),
361 	  CE_INTERRUPT_IDX(9),
362 	  CE_INTERRUPT_IDX(10),
363 	  CE_INTERRUPT_IDX(11),
364 #ifdef QCA_WIFI_QCN9224
365 	  CE_INTERRUPT_IDX(0),
366 	  CE_INTERRUPT_IDX(0),
367 	  CE_INTERRUPT_IDX(0),
368 	  CE_INTERRUPT_IDX(0),
369 #endif
370 	} },
371 #ifdef QCA_WIFI_QCN9224
372 	/* Interrupt assignment for 13 MSI combination */
373 	{{ CE_INTERRUPT_IDX(0),
374 	  CE_INTERRUPT_IDX(1),
375 	  CE_INTERRUPT_IDX(2),
376 	  CE_INTERRUPT_IDX(3),
377 	  CE_INTERRUPT_IDX(4),
378 	  CE_INTERRUPT_IDX(5),
379 	  CE_INTERRUPT_IDX(6),
380 	  CE_INTERRUPT_IDX(7),
381 	  CE_INTERRUPT_IDX(8),
382 	  CE_INTERRUPT_IDX(9),
383 	  CE_INTERRUPT_IDX(10),
384 	  CE_INTERRUPT_IDX(11),
385 	  CE_INTERRUPT_IDX(12),
386 	  CE_INTERRUPT_IDX(0),
387 	  CE_INTERRUPT_IDX(0),
388 	  CE_INTERRUPT_IDX(0),
389 	} },
390 	/* Interrupt assignment for 14 MSI combination */
391 	{{ CE_INTERRUPT_IDX(0),
392 	  CE_INTERRUPT_IDX(1),
393 	  CE_INTERRUPT_IDX(2),
394 	  CE_INTERRUPT_IDX(3),
395 	  CE_INTERRUPT_IDX(4),
396 	  CE_INTERRUPT_IDX(5),
397 	  CE_INTERRUPT_IDX(6),
398 	  CE_INTERRUPT_IDX(7),
399 	  CE_INTERRUPT_IDX(8),
400 	  CE_INTERRUPT_IDX(9),
401 	  CE_INTERRUPT_IDX(10),
402 	  CE_INTERRUPT_IDX(11),
403 	  CE_INTERRUPT_IDX(12),
404 	  CE_INTERRUPT_IDX(13),
405 	  CE_INTERRUPT_IDX(0),
406 	  CE_INTERRUPT_IDX(0),
407 	} },
408 	/* Interrupt assignment for 15 MSI combination */
409 	{{ CE_INTERRUPT_IDX(0),
410 	  CE_INTERRUPT_IDX(1),
411 	  CE_INTERRUPT_IDX(2),
412 	  CE_INTERRUPT_IDX(3),
413 	  CE_INTERRUPT_IDX(4),
414 	  CE_INTERRUPT_IDX(5),
415 	  CE_INTERRUPT_IDX(6),
416 	  CE_INTERRUPT_IDX(7),
417 	  CE_INTERRUPT_IDX(8),
418 	  CE_INTERRUPT_IDX(9),
419 	  CE_INTERRUPT_IDX(10),
420 	  CE_INTERRUPT_IDX(11),
421 	  CE_INTERRUPT_IDX(12),
422 	  CE_INTERRUPT_IDX(13),
423 	  CE_INTERRUPT_IDX(14),
424 	  CE_INTERRUPT_IDX(0),
425 	} },
426 	/* Interrupt assignment for 16 MSI combination */
427 	{{ CE_INTERRUPT_IDX(0),
428 	  CE_INTERRUPT_IDX(1),
429 	  CE_INTERRUPT_IDX(2),
430 	  CE_INTERRUPT_IDX(3),
431 	  CE_INTERRUPT_IDX(4),
432 	  CE_INTERRUPT_IDX(5),
433 	  CE_INTERRUPT_IDX(6),
434 	  CE_INTERRUPT_IDX(7),
435 	  CE_INTERRUPT_IDX(8),
436 	  CE_INTERRUPT_IDX(9),
437 	  CE_INTERRUPT_IDX(10),
438 	  CE_INTERRUPT_IDX(11),
439 	  CE_INTERRUPT_IDX(12),
440 	  CE_INTERRUPT_IDX(13),
441 	  CE_INTERRUPT_IDX(14),
442 	  CE_INTERRUPT_IDX(15),
443 	} },
444 #endif
445 };
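
/*
 * Indexing sketch (illustrative only, not part of the driver; msi_idx is
 * an assumed field name for the per-CE vector array in
 * struct ce_int_assignment): the MSI setup code selects a row by the
 * number of MSIs granted and a column by the CE id, i.e.
 * ce_int_context[msi_count].msi_idx[ce_id].
 *
 *	With 3 MSIs granted, CE 2 is serviced by vector
 *	ce_int_context[3].msi_idx[2] == CE_INTERRUPT_IDX(2), while CE 4
 *	falls back to the shared vector CE_INTERRUPT_IDX(0).
 */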
446 
448 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
449 		      uint8_t cmd_id, bool start)
450 {
451 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
452 
453 	switch (cmd_id) {
454 	case AGC_DUMP:
455 		if (start)
456 			priv_start_agc(scn);
457 		else
458 			priv_dump_agc(scn);
459 		break;
460 	case CHANINFO_DUMP:
461 		if (start)
462 			priv_start_cap_chaninfo(scn);
463 		else
464 			priv_dump_chaninfo(scn);
465 		break;
466 	case BB_WATCHDOG_DUMP:
467 		priv_dump_bbwatchdog(scn);
468 		break;
469 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
470 	case PCIE_ACCESS_DUMP:
471 		hif_target_access_log_dump();
472 		break;
473 #endif
474 	default:
475 		hif_err("Invalid htc dump command: %d", cmd_id);
476 		break;
477 	}
478 }
479 
480 static void ce_poll_timeout(void *arg)
481 {
482 	struct CE_state *CE_state = (struct CE_state *)arg;
483 
484 	if (CE_state->timer_inited) {
485 		ce_per_engine_service(CE_state->scn, CE_state->id);
486 		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
487 	}
488 }
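
/*
 * Arming sketch (illustrative): the poll timer re-arms itself every
 * CE_POLL_TIMEOUT (10 ms) for as long as timer_inited stays set.  A
 * typical init/arm sequence, assuming the usual QDF timer API, is:
 *
 *	qdf_timer_init(scn->qdf_dev, &CE_state->poll_timer,
 *		       ce_poll_timeout, CE_state, QDF_TIMER_TYPE_WAKE_APPS);
 *	CE_state->timer_inited = true;
 *	qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
 */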
489 
490 static unsigned int roundup_pwr2(unsigned int n)
491 {
492 	int i;
493 	unsigned int test_pwr2;
494 
495 	if (!(n & (n - 1)))
496 		return n; /* already a power of 2 */
497 
498 	test_pwr2 = 4;
499 	for (i = 0; i < 29; i++) {
500 		if (test_pwr2 > n)
501 			return test_pwr2;
502 		test_pwr2 = test_pwr2 << 1;
503 	}
504 
505 	QDF_ASSERT(0); /* n too large */
506 	return 0;
507 }
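
/*
 * Worked examples for roundup_pwr2() (illustrative):
 *	roundup_pwr2(1024) == 1024	already a power of 2
 *	roundup_pwr2(1000) == 1024
 *	roundup_pwr2(5)    == 8
 * An n larger than 2^30 that is not already a power of 2 trips the
 * QDF_ASSERT and returns 0.
 */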
508 
509 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
510 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
511 
512 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
513 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
514 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
515 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
516 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
517 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
518 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
519 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
520 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
521 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
522 #ifdef QCA_WIFI_3_0_ADRASTEA
523 	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
524 	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
525 	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
526 #endif
527 };
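
/*
 * Layout note (illustrative): each entry above pairs a CE id with the
 * offset of the write index register to be shadowed.  For example,
 * { 3, ADRASTEA_SRC_WR_INDEX_OFFSET } shadows the source ring write
 * index (offset 0x3C) of CE 3.
 */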
528 
529 #ifdef QCN7605_SUPPORT
530 static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
531 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
532 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
533 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
534 	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
535 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
536 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
537 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
538 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
539 };
540 #endif
541 
542 #ifdef WLAN_FEATURE_EPPING
543 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
544 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
545 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
546 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
547 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
548 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
549 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
550 	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
551 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
552 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
553 };
554 #endif
555 
556 /* CE_PCI TABLE */
557 /*
558  * NOTE: the table below is out of date, though still a useful reference.
559  * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
560  * mapping of HTC services to HIF pipes.
561  */
562 /*
563  * This authoritative table defines Copy Engine configuration and the mapping
564  * of services/endpoints to CEs.  A subset of this information is passed to
565  * the Target during startup as a prerequisite to entering BMI phase.
566  * See:
567  *    target_service_to_ce_map - Target-side mapping
568  *    hif_map_service_to_pipe      - Host-side mapping
569  *    target_ce_config         - Target-side configuration
570  *    host_ce_config           - Host-side configuration
571    ============================================================================
572    Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
573               |                      |      | ctio | Size     | Frequency
574               |                      |      | n    |          |
575    ============================================================================
576    tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
577    descriptor |                      |      |      | O(100B)  | and regular
578    download   |                      |      |      |          |
579    ----------------------------------------------------------------------------
580    rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
581    indication |                      |      |      | O(10B)   | regular
582    upload     |                      |      |      |          |
583    ----------------------------------------------------------------------------
584    MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
585    upload     |                      |      |      | O(1000B) | (frequent
586    e.g. noise |                      |      |      |          | during IP1.0
587    packets    |                      |      |      |          | testing)
588    ----------------------------------------------------------------------------
589    MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
590    download   |                      |      |      | O(1000B) | (frequent
591    e.g.       |                      |      |      |          | during IP1.0
592    misdirecte |                      |      |      |          | testing)
593    d EAPOL    |                      |      |      |          |
594    packets    |                      |      |      |          |
595    ----------------------------------------------------------------------------
596    n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
597               | DATA_VO (uplink)     |      |      |          |
598    ----------------------------------------------------------------------------
599    n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
600               | DATA_VO (downlink)   |      |      |          |
601    ----------------------------------------------------------------------------
602    WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
603               |                      |      |      | O(100B)  |
604    ----------------------------------------------------------------------------
605    WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
606    messages   | (downlink)           |      |      | O(100B)  |
607               |                      |      |      |          |
608    ----------------------------------------------------------------------------
609    n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
610               | HTC_RAW_STREAMS      |      |      |          |
611               | (uplink)             |      |      |          |
612    ----------------------------------------------------------------------------
613    n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
614               | HTC_RAW_STREAMS      |      |      |          |
615               | (downlink)           |      |      |          |
616    ----------------------------------------------------------------------------
617    diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
618               |                      |      |      |          | infrequent
619    ============================================================================
620  */
621 
622 /*
623  * Map from service/endpoint to Copy Engine.
624  * This table is derived from the CE_PCI TABLE, above.
625  * It is passed to the Target at startup for use by firmware.
626  */
627 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
628 	{
629 		WMI_DATA_VO_SVC,
630 		PIPEDIR_OUT,    /* out = UL = host -> target */
631 		3,
632 	},
633 	{
634 		WMI_DATA_VO_SVC,
635 		PIPEDIR_IN,     /* in = DL = target -> host */
636 		2,
637 	},
638 	{
639 		WMI_DATA_BK_SVC,
640 		PIPEDIR_OUT,    /* out = UL = host -> target */
641 		3,
642 	},
643 	{
644 		WMI_DATA_BK_SVC,
645 		PIPEDIR_IN,     /* in = DL = target -> host */
646 		2,
647 	},
648 	{
649 		WMI_DATA_BE_SVC,
650 		PIPEDIR_OUT,    /* out = UL = host -> target */
651 		3,
652 	},
653 	{
654 		WMI_DATA_BE_SVC,
655 		PIPEDIR_IN,     /* in = DL = target -> host */
656 		2,
657 	},
658 	{
659 		WMI_DATA_VI_SVC,
660 		PIPEDIR_OUT,    /* out = UL = host -> target */
661 		3,
662 	},
663 	{
664 		WMI_DATA_VI_SVC,
665 		PIPEDIR_IN,     /* in = DL = target -> host */
666 		2,
667 	},
668 	{
669 		WMI_CONTROL_SVC,
670 		PIPEDIR_OUT,    /* out = UL = host -> target */
671 		3,
672 	},
673 	{
674 		WMI_CONTROL_SVC,
675 		PIPEDIR_IN,     /* in = DL = target -> host */
676 		2,
677 	},
678 	{
679 		HTC_CTRL_RSVD_SVC,
680 		PIPEDIR_OUT,    /* out = UL = host -> target */
681 		0,              /* could be moved to 3 (share with WMI) */
682 	},
683 	{
684 		HTC_CTRL_RSVD_SVC,
685 		PIPEDIR_IN,     /* in = DL = target -> host */
686 		2,
687 	},
688 	{
689 		HTC_RAW_STREAMS_SVC, /* not currently used */
690 		PIPEDIR_OUT,    /* out = UL = host -> target */
691 		0,
692 	},
693 	{
694 		HTC_RAW_STREAMS_SVC, /* not currently used */
695 		PIPEDIR_IN,     /* in = DL = target -> host */
696 		2,
697 	},
698 	{
699 		HTT_DATA_MSG_SVC,
700 		PIPEDIR_OUT,    /* out = UL = host -> target */
701 		4,
702 	},
703 	{
704 		HTT_DATA_MSG_SVC,
705 		PIPEDIR_IN,     /* in = DL = target -> host */
706 		1,
707 	},
708 	{
709 		WDI_IPA_TX_SVC,
710 		PIPEDIR_OUT,    /* out = UL = host -> target */
711 		5,
712 	},
713 #if defined(QCA_WIFI_3_0_ADRASTEA)
714 	{
715 		HTT_DATA2_MSG_SVC,
716 		PIPEDIR_IN,    /* in = DL = target -> host */
717 		9,
718 	},
719 	{
720 		HTT_DATA3_MSG_SVC,
721 		PIPEDIR_IN,    /* in = DL = target -> host */
722 		10,
723 	},
724 	{
725 		PACKET_LOG_SVC,
726 		PIPEDIR_IN,    /* in = DL = target -> host */
727 		11,
728 	},
729 #endif
730 	/* (Additions here) */
731 
732 	{                       /* Must be last */
733 		0,
734 		0,
735 		0,
736 	},
737 };
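
/*
 * Lookup sketch (illustrative, not part of the driver; hif_hdl stands in
 * for the caller's hif_opaque_softc handle): callers resolve a service
 * to its UL/DL pipes through hif_map_service_to_pipe(), which walks the
 * map above.  With the default wlan map, WMI_CONTROL_SVC resolves to
 * UL pipe 3 and DL pipe 2:
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_is_polled, dl_is_polled;
 *
 *	hif_map_service_to_pipe(hif_hdl, WMI_CONTROL_SVC,
 *				&ul_pipe, &dl_pipe,
 *				&ul_is_polled, &dl_is_polled);
 */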
738 
739 /* PIPEDIR_OUT = HOST to Target */
740 /* PIPEDIR_IN  = TARGET to HOST */
741 #if (defined(QCA_WIFI_QCA8074))
742 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
743 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
744 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
745 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
746 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
747 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
748 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
749 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
750 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
751 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
752 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
753 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
754 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
755 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
756 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
757 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
758 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
759 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
760 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
761 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
762 	/* (Additions here) */
763 	{ 0, 0, 0, },
764 };
765 #else
766 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
767 };
768 #endif
769 
770 #if (defined(QCA_WIFI_QCA8074V2))
771 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
772 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
773 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
774 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
775 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
776 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
777 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
778 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
779 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
780 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
781 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
782 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
783 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
784 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
785 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
786 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
787 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
788 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
789 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
790 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
791 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
792 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
793 	/* (Additions here) */
794 	{ 0, 0, 0, },
795 };
796 #else
797 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
798 };
799 #endif
800 
801 #if (defined(QCA_WIFI_QCA6018))
802 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
803 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
804 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
805 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
806 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
807 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
808 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
809 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
810 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
811 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
812 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
813 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
814 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
815 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
816 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
817 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
818 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
819 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
820 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
821 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
822 	/* (Additions here) */
823 	{ 0, 0, 0, },
824 };
825 #else
826 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
827 };
828 #endif
829 
830 #if (defined(QCA_WIFI_QCN9000))
831 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
832 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
833 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
834 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
835 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
836 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
837 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
838 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
839 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
840 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
841 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
842 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
843 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
844 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
845 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
846 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
847 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
848 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
849 	/* (Additions here) */
850 	{ 0, 0, 0, },
851 };
852 #else
853 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
854 };
855 #endif
856 
857 #if (defined(QCA_WIFI_QCN9224))
858 static struct service_to_pipe target_service_to_ce_map_qcn9224[] = {
859 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
860 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
861 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
862 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
863 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
864 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
865 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
866 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
867 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
868 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
869 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
870 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
871 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
872 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
873 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
874 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
875 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7, },
876 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2, },
877 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
878 	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 14, },
879 	/* (Additions here) */
880 	{ 0, 0, 0, },
881 };
882 #endif
883 
884 #if (defined(QCA_WIFI_QCA5018))
885 static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
886 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
887 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
888 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
889 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
890 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
891 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
892 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
893 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
894 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
895 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
896 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
897 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
898 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
899 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
900 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
901 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
902 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
903 	/* (Additions here) */
904 	{ 0, 0, 0, },
905 };
906 #else
907 static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
908 };
909 #endif
910 
911 /* PIPEDIR_OUT = HOST to Target */
912 /* PIPEDIR_IN  = TARGET to HOST */
913 #ifdef QCN7605_SUPPORT
914 static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
915 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
916 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
917 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
918 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
919 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
920 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
921 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
922 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
923 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
924 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
925 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
926 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
927 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
928 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
929 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
930 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
931 	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
932 #ifdef IPA_OFFLOAD
933 	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
934 #else
935 	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
936 #endif
937 	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
938 	/* (Additions here) */
939 	{ 0, 0, 0, },
940 };
941 #endif
942 
943 #if (defined(QCA_WIFI_QCA6290))
944 #ifdef QCA_6290_AP_MODE
945 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
946 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
947 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
948 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
949 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
950 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
951 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
952 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
953 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
954 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
955 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
956 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
957 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
958 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
959 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
960 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
961 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
962 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
963 	/* (Additions here) */
964 	{ 0, 0, 0, },
965 };
966 #else
967 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
968 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
969 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
970 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
971 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
972 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
973 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
974 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
975 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
976 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
977 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
978 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
979 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
980 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
981 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
982 	/* (Additions here) */
983 	{ 0, 0, 0, },
984 };
985 #endif
986 #else
987 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
988 };
989 #endif
990 
991 #if (defined(QCA_WIFI_QCA6390))
992 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
993 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
994 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
995 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
996 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
997 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
998 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
999 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1000 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1001 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1002 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1003 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1004 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1005 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1006 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1007 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
1008 	/* (Additions here) */
1009 	{ 0, 0, 0, },
1010 };
1011 #else
1012 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
1013 };
1014 #endif
1015 
1016 static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
1017 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1018 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1019 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1020 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1021 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1022 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1023 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1024 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1025 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1026 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1027 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1028 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1029 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1030 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1031 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
1032 	/* (Additions here) */
1033 	{ 0, 0, 0, },
1034 };
1035 
1036 #if (defined(QCA_WIFI_QCA6750))
1037 static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
1038 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1039 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1040 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1041 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1042 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1043 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1044 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1045 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1046 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1047 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1048 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1049 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1050 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1051 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1052 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
1053 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
1054 	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
1055 #endif
1056 	/* (Additions here) */
1057 	{ 0, 0, 0, },
1058 };
1059 #else
1060 static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
1061 };
1062 #endif
1063 
1064 #if (defined(QCA_WIFI_WCN7850))
1065 static struct service_to_pipe target_service_to_ce_map_wcn7850[] = {
1066 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
1067 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
1068 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
1069 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
1070 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
1071 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
1072 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
1073 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
1074 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
1075 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
1076 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
1077 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
1078 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
1079 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
1080 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
1081 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
1082 #endif
1083 	/* (Additions here) */
1084 	{ 0, 0, 0, },
1085 };
1086 #else
1087 static struct service_to_pipe target_service_to_ce_map_wcn7850[] = {
1088 };
1089 #endif
1090 
1091 static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
1092 	{
1093 		WMI_DATA_VO_SVC,
1094 		PIPEDIR_OUT,    /* out = UL = host -> target */
1095 		3,
1096 	},
1097 	{
1098 		WMI_DATA_VO_SVC,
1099 		PIPEDIR_IN,     /* in = DL = target -> host */
1100 		2,
1101 	},
1102 	{
1103 		WMI_DATA_BK_SVC,
1104 		PIPEDIR_OUT,    /* out = UL = host -> target */
1105 		3,
1106 	},
1107 	{
1108 		WMI_DATA_BK_SVC,
1109 		PIPEDIR_IN,     /* in = DL = target -> host */
1110 		2,
1111 	},
1112 	{
1113 		WMI_DATA_BE_SVC,
1114 		PIPEDIR_OUT,    /* out = UL = host -> target */
1115 		3,
1116 	},
1117 	{
1118 		WMI_DATA_BE_SVC,
1119 		PIPEDIR_IN,     /* in = DL = target -> host */
1120 		2,
1121 	},
1122 	{
1123 		WMI_DATA_VI_SVC,
1124 		PIPEDIR_OUT,    /* out = UL = host -> target */
1125 		3,
1126 	},
1127 	{
1128 		WMI_DATA_VI_SVC,
1129 		PIPEDIR_IN,     /* in = DL = target -> host */
1130 		2,
1131 	},
1132 	{
1133 		WMI_CONTROL_SVC,
1134 		PIPEDIR_OUT,    /* out = UL = host -> target */
1135 		3,
1136 	},
1137 	{
1138 		WMI_CONTROL_SVC,
1139 		PIPEDIR_IN,     /* in = DL = target -> host */
1140 		2,
1141 	},
1142 	{
1143 		HTC_CTRL_RSVD_SVC,
1144 		PIPEDIR_OUT,    /* out = UL = host -> target */
1145 		0,              /* could be moved to 3 (share with WMI) */
1146 	},
1147 	{
1148 		HTC_CTRL_RSVD_SVC,
1149 		PIPEDIR_IN,     /* in = DL = target -> host */
1150 		1,
1151 	},
1152 	{
1153 		HTC_RAW_STREAMS_SVC, /* not currently used */
1154 		PIPEDIR_OUT,    /* out = UL = host -> target */
1155 		0,
1156 	},
1157 	{
1158 		HTC_RAW_STREAMS_SVC, /* not currently used */
1159 		PIPEDIR_IN,     /* in = DL = target -> host */
1160 		1,
1161 	},
1162 	{
1163 		HTT_DATA_MSG_SVC,
1164 		PIPEDIR_OUT,    /* out = UL = host -> target */
1165 		4,
1166 	},
1167 #ifdef WLAN_FEATURE_FASTPATH
1168 	{
1169 		HTT_DATA_MSG_SVC,
1170 		PIPEDIR_IN,     /* in = DL = target -> host */
1171 		5,
1172 	},
1173 #else /* WLAN_FEATURE_FASTPATH */
1174 	{
1175 		HTT_DATA_MSG_SVC,
1176 		PIPEDIR_IN,  /* in = DL = target -> host */
1177 		1,
1178 	},
1179 #endif /* WLAN_FEATURE_FASTPATH */
1180 
1181 	/* (Additions here) */
1182 
1183 	{                       /* Must be last */
1184 		0,
1185 		0,
1186 		0,
1187 	},
1188 };
1189 
1190 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
1191 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
1192 
1193 #ifdef WLAN_FEATURE_EPPING
1194 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
1195 	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
1196 	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
1197 	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
1198 	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
1199 	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
1200 	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
1201 	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
1202 	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
1203 	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
1204 	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
1205 	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
1206 	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
1207 	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
1208 	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
1209 	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
1210 	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
1211 	{0, 0, 0,},             /* Must be last */
1212 };
1213 
1214 void hif_select_epping_service_to_pipe_map(struct service_to_pipe
1215 					   **tgt_svc_map_to_use,
1216 					   uint32_t *sz_tgt_svc_map_to_use)
1217 {
1218 	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
1219 	*sz_tgt_svc_map_to_use =
1220 			sizeof(target_service_to_ce_map_wlan_epping);
1221 }
1222 #endif
1223 
1224 #ifdef QCN7605_SUPPORT
1225 static inline
1226 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
1227 			       uint32_t *sz_tgt_svc_map_to_use)
1228 {
1229 	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
1230 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
1231 }
1232 #else
1233 static inline
1234 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
1235 			       uint32_t *sz_tgt_svc_map_to_use)
1236 {
1237 	hif_err("QCN7605 not supported");
1238 }
1239 #endif
1240 
1241 #ifdef QCA_WIFI_QCN9224
1242 static
1243 void hif_set_ce_config_qcn9224(struct hif_softc *scn,
1244 			       struct HIF_CE_state *hif_state)
1245 {
1246 	hif_state->host_ce_config = host_ce_config_wlan_qcn9224;
1247 	hif_state->target_ce_config = target_ce_config_wlan_qcn9224;
1248 	hif_state->target_ce_config_sz =
1249 				 sizeof(target_ce_config_wlan_qcn9224);
1250 	scn->ce_count = QCN_9224_CE_COUNT;
1251 	scn->disable_wake_irq = 1;
1252 }
1253 
1254 static
1255 void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
1256 			       uint32_t *sz_tgt_svc_map_to_use)
1257 {
1258 	*tgt_svc_map_to_use = target_service_to_ce_map_qcn9224;
1259 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn9224);
1260 }
1261 #else
1262 static inline
1263 void hif_set_ce_config_qcn9224(struct hif_softc *scn,
1264 			       struct HIF_CE_state *hif_state)
1265 {
1266 	hif_err("QCN9224 not supported");
1267 }
1268 
1269 static inline
1270 void hif_select_ce_map_qcn9224(struct service_to_pipe **tgt_svc_map_to_use,
1271 			       uint32_t *sz_tgt_svc_map_to_use)
1272 {
1273 	hif_err("QCN9224 not supported");
1274 }
1275 #endif
1276 
1277 static void hif_select_service_to_pipe_map(struct hif_softc *scn,
1278 				    struct service_to_pipe **tgt_svc_map_to_use,
1279 				    uint32_t *sz_tgt_svc_map_to_use)
1280 {
1281 	uint32_t mode = hif_get_conparam(scn);
1282 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1283 	struct hif_target_info *tgt_info = &scn->target_info;
1284 
1285 	if (QDF_IS_EPPING_ENABLED(mode)) {
1286 		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
1287 						      sz_tgt_svc_map_to_use);
1288 	} else {
1289 		switch (tgt_info->target_type) {
1290 		default:
1291 			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
1292 			*sz_tgt_svc_map_to_use =
1293 				sizeof(target_service_to_ce_map_wlan);
1294 			break;
1295 		case TARGET_TYPE_QCN7605:
1296 			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
1297 						  sz_tgt_svc_map_to_use);
1298 			break;
1299 		case TARGET_TYPE_AR900B:
1300 		case TARGET_TYPE_QCA9984:
1301 		case TARGET_TYPE_IPQ4019:
1302 		case TARGET_TYPE_QCA9888:
1303 		case TARGET_TYPE_AR9888:
1304 		case TARGET_TYPE_AR9888V2:
1305 			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
1306 			*sz_tgt_svc_map_to_use =
1307 				sizeof(target_service_to_ce_map_ar900b);
1308 			break;
1309 		case TARGET_TYPE_QCA6290:
1310 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
1311 			*sz_tgt_svc_map_to_use =
1312 				sizeof(target_service_to_ce_map_qca6290);
1313 			break;
1314 		case TARGET_TYPE_QCA6390:
1315 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
1316 			*sz_tgt_svc_map_to_use =
1317 				sizeof(target_service_to_ce_map_qca6390);
1318 			break;
1319 		case TARGET_TYPE_QCA6490:
1320 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
1321 			*sz_tgt_svc_map_to_use =
1322 				sizeof(target_service_to_ce_map_qca6490);
1323 			break;
1324 		case TARGET_TYPE_QCA6750:
1325 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6750;
1326 			*sz_tgt_svc_map_to_use =
1327 				sizeof(target_service_to_ce_map_qca6750);
1328 			break;
1329 		case TARGET_TYPE_WCN7850:
1330 			*tgt_svc_map_to_use = target_service_to_ce_map_wcn7850;
1331 			*sz_tgt_svc_map_to_use =
1332 				sizeof(target_service_to_ce_map_wcn7850);
1333 			break;
1334 		case TARGET_TYPE_QCA8074:
1335 			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
1336 			*sz_tgt_svc_map_to_use =
1337 				sizeof(target_service_to_ce_map_qca8074);
1338 			break;
1339 		case TARGET_TYPE_QCA8074V2:
1340 			*tgt_svc_map_to_use =
1341 				target_service_to_ce_map_qca8074_v2;
1342 			*sz_tgt_svc_map_to_use =
1343 				sizeof(target_service_to_ce_map_qca8074_v2);
1344 			break;
1345 		case TARGET_TYPE_QCA6018:
1346 			*tgt_svc_map_to_use =
1347 				target_service_to_ce_map_qca6018;
1348 			*sz_tgt_svc_map_to_use =
1349 				sizeof(target_service_to_ce_map_qca6018);
1350 			break;
1351 		case TARGET_TYPE_QCN9000:
1352 			*tgt_svc_map_to_use =
1353 				target_service_to_ce_map_qcn9000;
1354 			*sz_tgt_svc_map_to_use =
1355 				sizeof(target_service_to_ce_map_qcn9000);
1356 			break;
1357 		case TARGET_TYPE_QCN9224:
1358 			hif_select_ce_map_qcn9224(tgt_svc_map_to_use,
1359 						  sz_tgt_svc_map_to_use);
1360 			break;
1361 		case TARGET_TYPE_QCA5018:
1362 		case TARGET_TYPE_QCN6122:
1363 			*tgt_svc_map_to_use =
1364 				target_service_to_ce_map_qca5018;
1365 			*sz_tgt_svc_map_to_use =
1366 				sizeof(target_service_to_ce_map_qca5018);
1367 			break;
1368 		}
1369 	}
1370 	hif_state->tgt_svc_map = *tgt_svc_map_to_use;
1371 	hif_state->sz_tgt_svc_map = *sz_tgt_svc_map_to_use /
1372 					sizeof(struct service_to_pipe);
1373 }
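
/*
 * Unit convention (worked example): *sz_tgt_svc_map_to_use is returned
 * in bytes, while hif_state->sz_tgt_svc_map stores the number of
 * entries.  A 20-entry map therefore reports sizeof(map) ==
 * 20 * sizeof(struct service_to_pipe) bytes and sz_tgt_svc_map == 20.
 */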
1374 
1375 /**
1376  * ce_mark_datapath() - mark a CE that serves the HTT datapath
1377  * @ce_state: pointer to the state context of the CE
1378  *
1379  * Description:
1380  *   Sets the htt_rx_data or htt_tx_data attribute of the state
1381  *   structure if the CE serves one of the HTT DATA services.
1382  *
1383  * Return:
1384  *  true if the CE serves an HTT DATA service
1385  *  false otherwise
1386  */
1387 static bool ce_mark_datapath(struct CE_state *ce_state)
1388 {
1389 	struct service_to_pipe *svc_map;
1390 	uint32_t map_sz, map_len;
1391 	int    i;
1392 	bool   rc = false;
1393 
1394 	if (ce_state) {
1395 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
1396 					       &map_sz);
1397 
1398 		map_len = map_sz / sizeof(struct service_to_pipe);
1399 		for (i = 0; i < map_len; i++) {
1400 			if ((svc_map[i].pipenum == ce_state->id) &&
1401 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
1402 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
1403 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
1404 				/* HTT CEs are unidirectional */
1405 				if (svc_map[i].pipedir == PIPEDIR_IN)
1406 					ce_state->htt_rx_data = true;
1407 				else
1408 					ce_state->htt_tx_data = true;
1409 				rc = true;
1410 			}
1411 		}
1412 	}
1413 	return rc;
1414 }
1415 
1416 /**
1417  * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
1418  * @ce_id: ce in question
1419  * @ring: ring state being examined
1420  * @type: "src_ring" or "dest_ring" string for identifying the ring
1421  *
1422  * Warns on non-zero index values.
1423  * Causes a kernel panic if the ring is not empty during initialization.
1424  */
1425 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
1426 					 char *type)
1427 {
1428 	if (ring->write_index != 0 || ring->sw_index != 0)
1429 		hif_err("ce %d, %s, initial sw_index = %d, initial write_index = %d",
1430 			  ce_id, type, ring->sw_index, ring->write_index);
1431 	if (ring->write_index != ring->sw_index)
1432 		QDF_BUG(0);
1433 }
1434 
1435 #ifdef IPA_OFFLOAD
1436 /**
1437  * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
1438  * @scn: softc instance
1439  * @CE_id: CE in question
1440  * @base_addr: pointer to copyengine ring base address
1441  * @ce_ring: copyengine instance
1442  * @nentries: number of entries to be allocated
1443  * @desc_size: ce desc size
1444  *
1445  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
1446  */
1447 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1448 				     qdf_dma_addr_t *base_addr,
1449 				     struct CE_ring_state *ce_ring,
1450 				     unsigned int nentries, uint32_t desc_size)
1451 {
1452 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
1453 	    !ce_srng_based(scn)) {
1454 		if (!scn->ipa_ce_ring) {
1455 			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
1456 				scn->qdf_dev,
1457 				nentries * desc_size + CE_DESC_RING_ALIGN);
1458 			if (!scn->ipa_ce_ring) {
1459 				hif_err(
1460 				"Failed to allocate memory for IPA ce ring");
1461 				return QDF_STATUS_E_NOMEM;
1462 			}
1463 		}
1464 		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
1465 						&scn->ipa_ce_ring->mem_info);
1466 		ce_ring->base_addr_owner_space_unaligned =
1467 						scn->ipa_ce_ring->vaddr;
1468 	} else {
1469 		ce_ring->base_addr_owner_space_unaligned =
1470 			hif_mem_alloc_consistent_unaligned
1471 					(scn,
1472 					 (nentries * desc_size +
1473 					  CE_DESC_RING_ALIGN),
1474 					 base_addr,
1475 					 ce_ring->hal_ring_type,
1476 					 &ce_ring->is_ring_prealloc);
1477 
1478 		if (!ce_ring->base_addr_owner_space_unaligned) {
1479 			hif_err("Failed to allocate DMA memory for ce ring id: %u",
1480 			       CE_id);
1481 			return QDF_STATUS_E_NOMEM;
1482 		}
1483 	}
1484 	return QDF_STATUS_SUCCESS;
1485 }
1486 
1487 /**
1488  * ce_free_desc_ring() - Frees copyengine descriptor ring
1489  * @scn: softc instance
1490  * @CE_id: CE in question
1491  * @ce_ring: copyengine instance
1492  * @desc_size: ce desc size
1493  *
1494  * Return: None
1495  */
1496 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1497 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
1498 {
1499 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
1500 	    !ce_srng_based(scn)) {
1501 		if (scn->ipa_ce_ring) {
1502 			qdf_mem_shared_mem_free(scn->qdf_dev,
1503 						scn->ipa_ce_ring);
1504 			scn->ipa_ce_ring = NULL;
1505 		}
1506 		ce_ring->base_addr_owner_space_unaligned = NULL;
1507 	} else {
1508 		hif_mem_free_consistent_unaligned
1509 			(scn,
1510 			 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
1511 			 ce_ring->base_addr_owner_space_unaligned,
1512 			 ce_ring->base_addr_CE_space, 0,
1513 			 ce_ring->is_ring_prealloc);
1514 		ce_ring->base_addr_owner_space_unaligned = NULL;
1515 	}
1516 }
1517 #else
1518 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1519 				     qdf_dma_addr_t *base_addr,
1520 				     struct CE_ring_state *ce_ring,
1521 				     unsigned int nentries, uint32_t desc_size)
1522 {
1523 	ce_ring->base_addr_owner_space_unaligned =
1524 			hif_mem_alloc_consistent_unaligned
1525 					(scn,
1526 					 (nentries * desc_size +
1527 					  CE_DESC_RING_ALIGN),
1528 					 base_addr,
1529 					 ce_ring->hal_ring_type,
1530 					 &ce_ring->is_ring_prealloc);
1531 
1532 	if (!ce_ring->base_addr_owner_space_unaligned) {
1533 		hif_err("Failed to allocate DMA memory for ce ring id: %u",
1534 		       CE_id);
1535 		return QDF_STATUS_E_NOMEM;
1536 	}
1537 	return QDF_STATUS_SUCCESS;
1538 }
1539 
1540 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1541 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
1542 {
1543 	hif_mem_free_consistent_unaligned
1544 		(scn,
1545 		 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
1546 		 ce_ring->base_addr_owner_space_unaligned,
1547 		 ce_ring->base_addr_CE_space, 0,
1548 		 ce_ring->is_ring_prealloc);
1549 	ce_ring->base_addr_owner_space_unaligned = NULL;
1550 }
1551 #endif /* IPA_OFFLOAD */
1552 
1553 /*
1554  * TODO: Need to explore the possibility of having this as part of a
1555  * target context instead of a global array.
1556  */
1557 static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);
1558 
1559 void ce_service_register_module(enum ce_target_type target_type,
1560 				struct ce_ops* (*ce_attach)(void))
1561 {
1562 	if (target_type < CE_MAX_TARGET_TYPE)
1563 		ce_attach_register[target_type] = ce_attach;
1564 }
1565 
1566 qdf_export_symbol(ce_service_register_module);
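
/*
 * Registration sketch (illustrative; ce_attach_srng and
 * ce_service_srng_ops are hypothetical names): a CE service
 * implementation registers its attach callback at module init so that
 * ce_services_attach() below can find it.
 *
 *	static struct ce_ops *ce_attach_srng(void)
 *	{
 *		return &ce_service_srng_ops;
 *	}
 *
 *	ce_service_register_module(CE_SVC_SRNG, ce_attach_srng);
 */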
1567 
1568 /**
1569  * ce_srng_based() - does this target use SRNG
1570  * @scn: pointer to the hif context
1571  *
1572  * Description:
1573  *   Returns true if the target is SRNG based.
1574  *
1575  * Return:
1576  *  true if the target uses SRNG copy engines
1577  *  false otherwise
1578  */
1579 bool ce_srng_based(struct hif_softc *scn)
1580 {
1581 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1582 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1583 
1584 	switch (tgt_info->target_type) {
1585 	case TARGET_TYPE_QCA8074:
1586 	case TARGET_TYPE_QCA8074V2:
1587 	case TARGET_TYPE_QCA6290:
1588 	case TARGET_TYPE_QCA6390:
1589 	case TARGET_TYPE_QCA6490:
1590 	case TARGET_TYPE_QCA6750:
1591 	case TARGET_TYPE_QCA6018:
1592 	case TARGET_TYPE_QCN9000:
1593 	case TARGET_TYPE_QCN6122:
1594 	case TARGET_TYPE_QCA5018:
1595 	case TARGET_TYPE_WCN7850:
1596 	case TARGET_TYPE_QCN9224:
1597 		return true;
1598 	default:
1599 		return false;
1600 	}
1601 	return false;
1602 }
1603 qdf_export_symbol(ce_srng_based);
1604 
1605 #ifdef QCA_WIFI_SUPPORT_SRNG
1606 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1607 {
1608 	struct ce_ops *ops = NULL;
1609 
1610 	if (ce_srng_based(scn)) {
1611 		if (ce_attach_register[CE_SVC_SRNG])
1612 			ops = ce_attach_register[CE_SVC_SRNG]();
1613 	} else if (ce_attach_register[CE_SVC_LEGACY]) {
1614 		ops = ce_attach_register[CE_SVC_LEGACY]();
1615 	}
1616 
1617 	return ops;
1618 }
1619 
1621 #else	/* QCA_WIFI_SUPPORT_SRNG */
1622 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1623 {
1624 	if (ce_attach_register[CE_SVC_LEGACY])
1625 		return ce_attach_register[CE_SVC_LEGACY]();
1626 
1627 	return NULL;
1628 }
1629 #endif /* QCA_WIFI_SUPPORT_SRNG */
1630 
1631 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
1632 		struct pld_shadow_reg_v2_cfg **shadow_config,
1633 		int *num_shadow_registers_configured)
1634 {
1635 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1636 
1637 	hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
1638 			scn, shadow_config, num_shadow_registers_configured);
1639 }
1641 
1642 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
1643 						uint8_t ring_type)
1644 {
1645 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1646 
1647 	return hif_state->ce_services->ce_get_desc_size(ring_type);
1648 }
1649 
1650 #ifdef QCA_WIFI_SUPPORT_SRNG
1651 static inline int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
1652 {
1653 	switch (ce_ring_type) {
1654 	case CE_RING_SRC:
1655 		return CE_SRC;
1656 	case CE_RING_DEST:
1657 		return CE_DST;
1658 	case CE_RING_STATUS:
1659 		return CE_DST_STATUS;
1660 	default:
1661 		return -EINVAL;
1662 	}
1663 }
1664 #else
1665 static int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
1666 {
1667 	return 0;
1668 }
1669 #endif
1670 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
1671 		uint8_t ring_type, uint32_t nentries)
1672 {
1673 	uint32_t ce_nbytes;
1674 	char *ptr;
1675 	qdf_dma_addr_t base_addr;
1676 	struct CE_ring_state *ce_ring;
1677 	uint32_t desc_size;
1678 	struct hif_softc *scn = CE_state->scn;
1679 
1680 	ce_nbytes = sizeof(struct CE_ring_state)
1681 		+ (nentries * sizeof(void *));
1682 	ptr = qdf_mem_malloc(ce_nbytes);
1683 	if (!ptr)
1684 		return NULL;
1685 
1686 	ce_ring = (struct CE_ring_state *)ptr;
1687 	ptr += sizeof(struct CE_ring_state);
1688 	ce_ring->nentries = nentries;
1689 	ce_ring->nentries_mask = nentries - 1;
1690 
1691 	ce_ring->low_water_mark_nentries = 0;
1692 	ce_ring->high_water_mark_nentries = nentries;
1693 	ce_ring->per_transfer_context = (void **)ptr;
1694 	ce_ring->hal_ring_type = ce_ring_type_to_hal_ring_type(ring_type);
1695 
1696 	desc_size = ce_get_desc_size(scn, ring_type);
1697 
1698 	/* Legacy platforms that do not support cache
1699 	 * coherent DMA are unsupported
1700 	 */
1701 	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
1702 			       ce_ring, nentries,
1703 			       desc_size) !=
1704 	    QDF_STATUS_SUCCESS) {
1705 		hif_err("ring has no DMA mem");
1706 		qdf_mem_free(ce_ring);
1707 		return NULL;
1708 	}
1709 	ce_ring->base_addr_CE_space_unaligned = base_addr;
1710 
1711 	/* Initialize memory to 0 to prevent
1712 	 * garbage data from crashing the system
1713 	 * during firmware download
1714 	 */
1715 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
1716 			nentries * desc_size +
1717 			CE_DESC_RING_ALIGN);
1718 
1719 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
1720 
1721 		ce_ring->base_addr_CE_space =
1722 			(ce_ring->base_addr_CE_space_unaligned +
1723 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
1724 
1725 		ce_ring->base_addr_owner_space = (void *)
1726 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
1727 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
1728 	} else {
1729 		ce_ring->base_addr_CE_space =
1730 				ce_ring->base_addr_CE_space_unaligned;
1731 		ce_ring->base_addr_owner_space =
1732 				ce_ring->base_addr_owner_space_unaligned;
1733 	}
1734 
1735 	return ce_ring;
1736 }
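
/*
 * Editor's note, a worked example of the alignment logic above: with
 * CE_DESC_RING_ALIGN a power of two, an unaligned base is rounded up
 * with the usual mask trick,
 *
 *	aligned = (base + align - 1) & ~(align - 1);
 *
 * e.g. for align 8, base 0x1004 becomes 0x1008 and base 0x1008 is
 * left unchanged.
 */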
1737 
1738 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
1739 			uint32_t ce_id, struct CE_ring_state *ring,
1740 			struct CE_attr *attr)
1741 {
1742 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1743 
1744 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
1745 					      ring, attr);
1746 }
1747 
1748 int hif_ce_bus_early_suspend(struct hif_softc *scn)
1749 {
1750 	uint8_t ul_pipe, dl_pipe;
1751 	int ce_id, status, ul_is_polled, dl_is_polled;
1752 	struct CE_state *ce_state;
1753 
1754 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
1755 					 &ul_pipe, &dl_pipe,
1756 					 &ul_is_polled, &dl_is_polled);
1757 	if (status) {
1758 		hif_err("pipe_mapping failure");
1759 		return status;
1760 	}
1761 
1762 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1763 		if (ce_id == ul_pipe)
1764 			continue;
1765 		if (ce_id == dl_pipe)
1766 			continue;
1767 
1768 		ce_state = scn->ce_id_to_state[ce_id];
1769 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1770 		if (ce_state->state == CE_RUNNING)
1771 			ce_state->state = CE_PAUSED;
1772 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1773 	}
1774 
1775 	return status;
1776 }
1777 
1778 int hif_ce_bus_late_resume(struct hif_softc *scn)
1779 {
1780 	int ce_id;
1781 	struct CE_state *ce_state;
1782 	int write_index = 0;
1783 	bool index_updated;
1784 
1785 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1786 		ce_state = scn->ce_id_to_state[ce_id];
1787 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1788 		if (ce_state->state == CE_PENDING) {
1789 			write_index = ce_state->src_ring->write_index;
1790 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
1791 					write_index);
1792 			ce_state->state = CE_RUNNING;
1793 			index_updated = true;
1794 		} else {
1795 			index_updated = false;
1796 		}
1797 
1798 		if (ce_state->state == CE_PAUSED)
1799 			ce_state->state = CE_RUNNING;
1800 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1801 
1802 		if (index_updated)
1803 			hif_record_ce_desc_event(scn, ce_id,
1804 				RESUME_WRITE_INDEX_UPDATE,
1805 				NULL, NULL, write_index, 0);
1806 	}
1807 
1808 	return 0;
1809 }
1810 
1811 /**
1812  * ce_oom_recovery() - try to recover rx ce from oom condition
1813  * @context: CE_state of the CE with oom rx ring
1814  *
1815  * The executing work will continue to be rescheduled until
1816  * at least 1 descriptor is successfully posted to the rx ring.
1817  *
1818  * Return: none
1819  */
1820 static void ce_oom_recovery(void *context)
1821 {
1822 	struct CE_state *ce_state = context;
1823 	struct hif_softc *scn = ce_state->scn;
1824 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
1825 	struct HIF_CE_pipe_info *pipe_info =
1826 		&ce_softc->pipe_info[ce_state->id];
1827 
1828 	hif_post_recv_buffers_for_pipe(pipe_info);
1829 }
1830 
1831 #ifdef HIF_CE_DEBUG_DATA_BUF
1832 /**
1833  * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
1834  * the CE descriptors.
1835  * Allocate HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE bytes each
1836  * @scn: hif scn handle
1837  * @ce_id: Copy Engine Id
1838  *
1839  * Return: QDF_STATUS
1840  */
1841 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1842 {
1843 	struct hif_ce_desc_event *event = NULL;
1844 	struct hif_ce_desc_event *hist_ev = NULL;
1845 	uint32_t index = 0;
1846 
1847 	hist_ev =
1848 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1849 
1850 	if (!hist_ev)
1851 		return QDF_STATUS_E_NOMEM;
1852 
1853 	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
1854 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1855 		event = &hist_ev[index];
1856 		event->data =
1857 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
1858 		if (!event->data) {
1859 			hif_err_rl("ce debug data alloc failed");
1860 			return QDF_STATUS_E_NOMEM;
1861 		}
1862 	}
1863 	return QDF_STATUS_SUCCESS;
1864 }
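
/*
 * Usage sketch (editor's illustration): a caller would pair a failed
 * allocation with the free routine defined next, e.g.
 *
 *	if (alloc_mem_ce_debug_hist_data(scn, ce_id) != QDF_STATUS_SUCCESS)
 *		free_mem_ce_debug_hist_data(scn, ce_id);
 *
 * which also releases any event->data buffers allocated before the
 * failing index.
 */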
1865 
1866 /**
1867  * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
1868  * the CE descriptors.
1869  * @scn: hif scn handle
1870  * @ce_id: Copy Engine Id
1871  *
1872  * Return: None
1873  */
1874 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1875 {
1876 	struct hif_ce_desc_event *event = NULL;
1877 	struct hif_ce_desc_event *hist_ev = NULL;
1878 	uint32_t index = 0;
1879 
1880 	hist_ev =
1881 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1882 
1883 	if (!hist_ev)
1884 		return;
1885 
1886 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1887 		event = &hist_ev[index];
1888 		if (event->data)
1889 			qdf_mem_free(event->data);
1890 		event->data = NULL;
1891 	}
1892 }
1895 #endif /* HIF_CE_DEBUG_DATA_BUF */
1896 
1897 #ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
1898 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1899 struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];
1900 
1901 /**
1902  * alloc_mem_ce_debug_history() - Allocate CE descriptor history
1903  * @scn: hif scn handle
1904  * @ce_id: Copy Engine Id
1905  * @src_nentries: source ce ring entries
1906  * Return: QDF_STATUS
1907  */
1908 static QDF_STATUS
1909 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
1910 			   uint32_t src_nentries)
1911 {
1912 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1913 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1914 	ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
1915 	ce_hist->enable[ce_id] = 1;
1916 
1917 	if (src_nentries) {
1918 		status = alloc_mem_ce_debug_hist_data(scn, ce_id);
1919 		if (status != QDF_STATUS_SUCCESS)
1920 			return status;
1921 	} else {
1922 		ce_hist->data_enable[ce_id] = false;
1923 	}
1924 
1925 	return QDF_STATUS_SUCCESS;
1926 }
1927 
1928 /**
1929  * free_mem_ce_debug_history() - Free CE descriptor history
1930  * @scn: hif scn handle
1931  * @ce_id: Copy Engine Id
1932  *
1933  * Return: None
1934  */
1935 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
1936 {
1937 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1938 
1939 	ce_hist->enable[ce_id] = 0;
1940 	if (ce_hist->data_enable[ce_id]) {
1941 		ce_hist->data_enable[ce_id] = false;
1942 		free_mem_ce_debug_hist_data(scn, ce_id);
1943 	}
1944 	ce_hist->hist_ev[ce_id] = NULL;
1945 }
1946 #else
1947 static inline QDF_STATUS
1948 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1949 			   uint32_t src_nentries)
1950 {
1951 	return QDF_STATUS_SUCCESS;
1952 }
1953 
1954 static inline void
1955 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
1956 #endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
1957 #else
1958 #if defined(HIF_CE_DEBUG_DATA_BUF)
1959 
1960 static QDF_STATUS
1961 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1962 			   uint32_t src_nentries)
1963 {
1964 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
1965 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
1966 
1967 	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
1968 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
1969 		return QDF_STATUS_E_NOMEM;
1970 	}
1971 
1972 	scn->hif_ce_desc_hist.enable[CE_id] = 1;
1973 	return QDF_STATUS_SUCCESS;
1974 }
1975 
1976 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
1977 {
1978 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1979 	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];
1980 
1981 	if (!hist_ev)
1982 		return;
1983 
1984 	if (ce_hist->data_enable[CE_id]) {
1985 		ce_hist->data_enable[CE_id] = false;
1986 		free_mem_ce_debug_hist_data(scn, CE_id);
1987 	}
1988 
1989 	ce_hist->enable[CE_id] = 0;
1990 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
1991 	ce_hist->hist_ev[CE_id] = NULL;
1992 }
1993 
1994 #else
1995 
1996 static inline QDF_STATUS
1997 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1998 			   uint32_t src_nentries)
1999 {
2000 	return QDF_STATUS_SUCCESS;
2001 }
2002 
2003 static inline void
2004 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
2005 #endif /* HIF_CE_DEBUG_DATA_BUF */
2006 #endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */
2007 
2008 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2009 /**
2010  * reset_ce_debug_history() - reset the index and ce id used for dumping the
2011  * CE records on the console using sysfs.
2012  * @scn: hif scn handle
2013  *
2014  * Return: None
2015  */
2016 static inline void reset_ce_debug_history(struct hif_softc *scn)
2017 {
2018 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
2019 	/* Reset the CE debug history sysfs interface inputs (ce_id and
2020 	 * index) that are used when dumping CE records.
2021 	 */
2022 	ce_hist->hist_index = 0;
2023 	ce_hist->hist_id = 0;
2024 }
2025 #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
2026 static inline void reset_ce_debug_history(struct hif_softc *scn) { }
2027 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
2028 
2029 void ce_enable_polling(void *cestate)
2030 {
2031 	struct CE_state *CE_state = (struct CE_state *)cestate;
2032 
2033 	if (CE_state && (CE_state->attr_flags & CE_ATTR_ENABLE_POLL))
2034 		CE_state->timer_inited = true;
2035 }
2036 
2037 void ce_disable_polling(void *cestate)
2038 {
2039 	struct CE_state *CE_state = (struct CE_state *)cestate;
2040 
2041 	if (CE_state && (CE_state->attr_flags & CE_ATTR_ENABLE_POLL))
2042 		CE_state->timer_inited = false;
2043 }
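
/*
 * Usage sketch (editor's note): ce_init() below pairs these helpers
 * with the CE poll timer, roughly
 *
 *	qdf_timer_init(scn->qdf_dev, &CE_state->poll_timer,
 *		       ce_poll_timeout, CE_state, QDF_TIMER_TYPE_WAKE_APPS);
 *	ce_enable_polling(CE_state);
 *
 * while ce_fini() calls ce_disable_polling() first, so a late firing
 * ce_poll_timeout() sees timer_inited == false and stops processing.
 */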
2044 
2045 /*
2046  * Initialize a Copy Engine based on caller-supplied attributes.
2047  * This may be called once to initialize both source and destination
2048  * rings or it may be called twice for separate source and destination
2049  * initialization. It may be that only one side or the other is
2050  * initialized by software/firmware.
2051  *
2052  * This should be called during the initialization sequence before
2053  * interrupts are enabled, so we don't have to worry about thread safety.
2054  */
2055 struct CE_handle *ce_init(struct hif_softc *scn,
2056 			  unsigned int CE_id, struct CE_attr *attr)
2057 {
2058 	struct CE_state *CE_state;
2059 	uint32_t ctrl_addr;
2060 	unsigned int nentries;
2061 	bool malloc_CE_state = false;
2062 	bool malloc_src_ring = false;
2063 	int status;
2064 	QDF_STATUS mem_status = QDF_STATUS_SUCCESS;
2065 
2066 	QDF_ASSERT(CE_id < scn->ce_count);
2067 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
2068 	CE_state = scn->ce_id_to_state[CE_id];
2069 
2070 	/* NULL attr: CE already initialized; caller wants the handle.
2071 	 * Check this before attr is dereferenced below.
2072 	 */
2073 	if (!attr)
2074 		return (struct CE_handle *)CE_state;
2075 
2076 	if (!CE_state) {
2077 		CE_state =
2078 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
2079 		if (!CE_state)
2080 			return NULL;
2081 
2082 		malloc_CE_state = true;
2083 		qdf_spinlock_create(&CE_state->ce_index_lock);
2084 		CE_state->id = CE_id;
2085 		CE_state->ctrl_addr = ctrl_addr;
2086 		CE_state->state = CE_RUNNING;
2087 		CE_state->attr_flags = attr->flags;
2088 	}
2089 	CE_state->scn = scn;
2090 	CE_state->service = ce_engine_service_reg;
2091 	qdf_atomic_init(&CE_state->rx_pending);
2092 
2093 	if (CE_state->src_sz_max)
2094 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
2095 	else
2096 		CE_state->src_sz_max = attr->src_sz_max;
2097 
2098 	ce_init_ce_desc_event_log(scn, CE_id,
2099 				  attr->src_nentries + attr->dest_nentries);
2100 
2101 	/* source ring setup */
2102 	nentries = attr->src_nentries;
2103 	if (nentries) {
2104 		struct CE_ring_state *src_ring;
2105 
2106 		nentries = roundup_pwr2(nentries);
2107 		if (CE_state->src_ring) {
2108 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
2109 		} else {
2110 			src_ring = CE_state->src_ring =
2111 				ce_alloc_ring_state(CE_state,
2112 						CE_RING_SRC,
2113 						nentries);
2114 			if (!src_ring) {
2115 			/* cannot allocate src ring. If the
2116 			 * CE_state is allocated locally, free
2117 			 * CE_state and return error.
2118 			 */
2119 				hif_err("src ring has no mem");
2120 				if (malloc_CE_state) {
2121 					/* allocated CE_state locally */
2122 					qdf_mem_free(CE_state);
2123 					malloc_CE_state = false;
2124 				}
2125 				return NULL;
2126 			}
2127 			/* we can allocate src ring. Mark that the src ring is
2128 			 * allocated locally
2129 			 */
2130 			malloc_src_ring = true;
2131 
2132 			/*
2133 			 * Also allocate a shadow src ring in
2134 			 * regular mem to use for faster access.
2135 			 */
2136 			src_ring->shadow_base_unaligned =
2137 				qdf_mem_malloc(nentries *
2138 					       sizeof(struct CE_src_desc) +
2139 					       CE_DESC_RING_ALIGN);
2140 			if (!src_ring->shadow_base_unaligned)
2141 				goto error_no_dma_mem;
2142 
2143 			src_ring->shadow_base = (struct CE_src_desc *)
2144 				(((size_t) src_ring->shadow_base_unaligned +
2145 				CE_DESC_RING_ALIGN - 1) &
2146 				 ~(CE_DESC_RING_ALIGN - 1));
2147 
2148 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
2149 					       src_ring, attr);
2150 			if (status < 0)
2151 				goto error_target_access;
2152 
2153 			ce_ring_test_initial_indexes(CE_id, src_ring,
2154 						     "src_ring");
2155 		}
2156 	}
2157 
2158 	/* destination ring setup */
2159 	nentries = attr->dest_nentries;
2160 	if (nentries) {
2161 		struct CE_ring_state *dest_ring;
2162 
2163 		nentries = roundup_pwr2(nentries);
2164 		if (CE_state->dest_ring) {
2165 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
2166 		} else {
2167 			dest_ring = CE_state->dest_ring =
2168 				ce_alloc_ring_state(CE_state,
2169 						CE_RING_DEST,
2170 						nentries);
2171 			if (!dest_ring) {
2172 			/* cannot allocate dst ring. If the CE_state
2173 			 * or src ring is allocated locally, free
2174 			 * CE_state and src ring and return error.
2175 			 */
2176 				hif_err("dest ring has no mem");
2177 				goto error_no_dma_mem;
2178 			}
2179 
2180 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
2181 				      dest_ring, attr);
2182 			if (status < 0)
2183 				goto error_target_access;
2184 
2185 			ce_ring_test_initial_indexes(CE_id, dest_ring,
2186 						     "dest_ring");
2187 
2188 			/* For srng based target, init status ring here */
2189 			if (ce_srng_based(CE_state->scn)) {
2190 				CE_state->status_ring =
2191 					ce_alloc_ring_state(CE_state,
2192 							CE_RING_STATUS,
2193 							nentries);
2194 				if (!CE_state->status_ring) {
2195 				if (!CE_state->status_ring) {
2196 					/* Allocation failed; clean up */
2197 					qdf_mem_free(CE_state->dest_ring);
2198 					CE_state->dest_ring = NULL;
2198 						qdf_mem_free
2199 							(CE_state->src_ring);
2200 						CE_state->src_ring = NULL;
2201 						malloc_src_ring = false;
2202 					}
2203 					if (malloc_CE_state) {
2204 						/* allocated CE_state locally */
2205 						scn->ce_id_to_state[CE_id] =
2206 							NULL;
2207 						qdf_mem_free(CE_state);
2208 						malloc_CE_state = false;
2209 					}
2210 
2211 					return NULL;
2212 				}
2213 
2214 				status = ce_ring_setup(scn, CE_RING_STATUS,
2215 					       CE_id, CE_state->status_ring,
2216 					       attr);
2217 				if (status < 0)
2218 					goto error_target_access;
2219 
2220 			}
2221 
2222 			/* epping */
2223 			/* poll timer */
2224 			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
2225 				qdf_timer_init(scn->qdf_dev,
2226 						&CE_state->poll_timer,
2227 						ce_poll_timeout,
2228 						CE_state,
2229 						QDF_TIMER_TYPE_WAKE_APPS);
2230 				ce_enable_polling(CE_state);
2231 				qdf_timer_mod(&CE_state->poll_timer,
2232 						      CE_POLL_TIMEOUT);
2233 			}
2234 		}
2235 	}
2236 
2237 	if (!ce_srng_based(scn)) {
2238 		/* Enable CE error interrupts */
2239 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2240 			goto error_target_access;
2241 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
2242 		if (Q_TARGET_ACCESS_END(scn) < 0)
2243 			goto error_target_access;
2244 	}
2245 
2246 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
2247 			ce_oom_recovery, CE_state);
2248 
2249 	/* update the htt_data attribute */
2250 	ce_mark_datapath(CE_state);
2251 	scn->ce_id_to_state[CE_id] = CE_state;
2252 
2253 	mem_status = alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries);
2254 	if (mem_status != QDF_STATUS_SUCCESS)
2255 		goto error_target_access;
2256 
2257 	return (struct CE_handle *)CE_state;
2258 
2259 error_target_access:
2260 error_no_dma_mem:
2261 	ce_fini((struct CE_handle *)CE_state);
2262 	return NULL;
2263 }
2264 
2265 /**
2266  * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
2267  * @hif_ctx: HIF Context
2268  *
2269  * API to check if polling is enabled on all CEs. Returns true when polling
2270  * is enabled on all CEs.
2271  *
2272  * Return: bool
2273  */
2274 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
2275 {
2276 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2277 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2278 	struct CE_attr *attr;
2279 	int id;
2280 
2281 	for (id = 0; id < scn->ce_count; id++) {
2282 		attr = &hif_state->host_ce_config[id];
2283 		if (attr && (attr->dest_nentries) &&
2284 		    !(attr->flags & CE_ATTR_ENABLE_POLL))
2285 			return false;
2286 	}
2287 	return true;
2288 }
2289 qdf_export_symbol(hif_is_polled_mode_enabled);
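
/*
 * Caller sketch (editor's illustration, assuming a valid hif_ctx):
 *
 *	if (hif_is_polled_mode_enabled(hif_ctx))
 *		hif_debug("all RX CEs rely on the CE poll timer");
 *
 * Note that the loop above only considers CEs with a dest ring
 * (dest_nentries != 0), so TX-only pipes never affect the result.
 */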
2290 
2291 static int hif_get_pktlog_ce_num(struct hif_softc *scn)
2292 {
2293 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2294 	int id;
2295 
2296 	for (id = 0; id < hif_state->sz_tgt_svc_map; id++) {
2297 		if (hif_state->tgt_svc_map[id].service_id ==  PACKET_LOG_SVC)
2298 			return hif_state->tgt_svc_map[id].pipenum;
2299 	}
2300 	return -EINVAL;
2301 }
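
/*
 * Usage sketch (editor's illustration): a negative return means no
 * pipe is mapped to PACKET_LOG_SVC on this target, e.g.
 *
 *	int pktlog_ce = hif_get_pktlog_ce_num(scn);
 *
 *	if (pktlog_ce < 0)
 *		return;
 *
 * where the early return covers targets whose tgt_svc_map carries no
 * PACKET_LOG_SVC entry.
 */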
2302 
2303 #ifdef WLAN_FEATURE_FASTPATH
2304 /**
2305  * hif_enable_fastpath() - flag that fastpath mode is enabled
2306  * @hif_ctx: HIF context
2307  *
2308  * For use in data path
2309  *
2310  * Return: void
2311  */
2312 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
2313 {
2314 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2315 
2316 	if (ce_srng_based(scn)) {
2317 		hif_warn("srng rings do not support fastpath");
2318 		return;
2319 	}
2320 	hif_debug("Enabling fastpath mode");
2321 	scn->fastpath_mode_on = true;
2322 }
2323 
2324 /**
2325  * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
2326  * @hif_ctx: HIF Context
2327  *
2328  * For use in data path to skip HTC
2329  *
2330  * Return: bool
2331  */
2332 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
2333 {
2334 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2335 
2336 	return scn->fastpath_mode_on;
2337 }
2338 
2339 /**
2340  * hif_get_ce_handle - API to get CE handle for FastPath mode
2341  * @hif_ctx: HIF Context
2342  * @id: CopyEngine Id
2343  *
2344  * API to return CE handle for fastpath mode
2345  *
2346  * Return: opaque CE handle for the given copy engine id
2347  */
2348 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
2349 {
2350 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2351 
2352 	return scn->ce_id_to_state[id];
2353 }
2354 qdf_export_symbol(hif_get_ce_handle);
2355 
2356 /**
2357  * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup.
2358  * No processing is required inside this function.
2359  * @ce_hdl: Copy engine handle
2360  * Using an assert, this function makes sure that
2361  * the TX CE has been processed completely.
2362  *
2363  * This is called while dismantling CE structures. No other thread
2364  * should be using these structures while dismantling is occurring,
2365  * therefore no locking is needed.
2366  *
2367  * Return: none
2368  */
2369 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
2370 {
2371 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
2372 	struct CE_ring_state *src_ring = ce_state->src_ring;
2373 	struct hif_softc *sc = ce_state->scn;
2374 	uint32_t sw_index, write_index;
2375 
2376 	if (hif_is_nss_wifi_enabled(sc))
2377 		return;
2378 
2379 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
2380 		hif_debug("Fastpath mode ON, Cleaning up HTT Tx CE");
2381 		sw_index = src_ring->sw_index;
2382 		write_index = src_ring->write_index;
2383 
2384 		/* At this point Tx CE should be clean */
2385 		qdf_assert_always(sw_index == write_index);
2386 	}
2387 }
2388 
2389 /**
2390  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
2391  * @ce_hdl: Handle to CE
2392  *
2393  * These buffers are never allocated on the fly, but
2394  * are allocated only once during HIF start and freed
2395  * only once during HIF stop.
2396  * NOTE:
2397  * The assumption here is there is no in-flight DMA in progress
2398  * currently, so that buffers can be freed up safely.
2399  *
2400  * Return: NONE
2401  */
2402 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
2403 {
2404 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
2405 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
2406 	qdf_nbuf_t nbuf;
2407 	int i;
2408 
2409 	if (!ce_state->scn->fastpath_mode_on)
2410 		return;
2411 
2412 	if (!ce_state->htt_rx_data)
2413 		return;
2414 
2415 	/*
2416 	 * This applies when fastpath_mode is on, for the datapath CEs.
2417 	 * Unlike other CEs, this ring is kept completely full and does
2418 	 * not leave one blank entry to distinguish an empty queue from
2419 	 * a full one. So free all the entries.
2420 	 */
2421 	for (i = 0; i < dst_ring->nentries; i++) {
2422 		nbuf = dst_ring->per_transfer_context[i];
2423 
2424 		/*
2425 		 * The reasons for doing this check are:
2426 		 * 1) Protect against calling cleanup before allocating buffers
2427 		 * 2) In a corner case, fastpath_mode_on may be set, but we
2428 		 *    could have a partially filled ring, because of a memory
2429 		 *    allocation failure in the middle of allocating ring.
2430 		 *    This check accounts for that case, checking
2431 		 *    fastpath_mode_on flag or started flag would not have
2432 		 *    covered that case. This is not in performance path,
2433 		 *    so OK to do this.
2434 		 */
2435 		if (nbuf) {
2436 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
2437 					      QDF_DMA_FROM_DEVICE);
2438 			qdf_nbuf_free(nbuf);
2439 		}
2440 	}
2441 }
2442 
2443 /**
2444  * hif_update_fastpath_recv_bufs_cnt() - increment the Rx buf count of each fastpath CE
2445  * @scn: HIF handle
2446  *
2447  * Datapath Rx CEs are a special case, where we reuse all the message buffers.
2448  * Hence we have to post all the entries in the pipe from the beginning,
2449  * unlike other CE pipes where one less than dest_nentries are filled at
2450  * the start.
2451  *
2452  * Return: None
2453  */
2454 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
2455 {
2456 	int pipe_num;
2457 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2458 
2459 	if (!scn->fastpath_mode_on)
2460 		return;
2461 
2462 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2463 		struct HIF_CE_pipe_info *pipe_info =
2464 			&hif_state->pipe_info[pipe_num];
2465 		struct CE_state *ce_state =
2466 			scn->ce_id_to_state[pipe_info->pipe_num];
2467 
2468 		if (ce_state->htt_rx_data)
2469 			atomic_inc(&pipe_info->recv_bufs_needed);
2470 	}
2471 }
2472 #else
2473 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
2474 {
2475 }
2476 
2477 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
2478 {
2479 	return false;
2480 }
2481 #endif /* WLAN_FEATURE_FASTPATH */
2482 
2483 void ce_fini(struct CE_handle *copyeng)
2484 {
2485 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2486 	unsigned int CE_id = CE_state->id;
2487 	struct hif_softc *scn = CE_state->scn;
2488 	uint32_t desc_size;
2489 
2490 	bool inited = CE_state->timer_inited;
2491 	CE_state->state = CE_UNUSED;
2492 	scn->ce_id_to_state[CE_id] = NULL;
2493 	/* Set the flag to false first to stop processing in ce_poll_timeout */
2494 	ce_disable_polling(CE_state);
2495 
2496 	qdf_lro_deinit(CE_state->lro_data);
2497 
2498 	if (CE_state->src_ring) {
2499 		/* Cleanup the datapath Tx ring */
2500 		ce_h2t_tx_ce_cleanup(copyeng);
2501 
2502 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
2503 		if (CE_state->src_ring->shadow_base_unaligned)
2504 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
2505 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
2506 			ce_free_desc_ring(scn, CE_state->id,
2507 					  CE_state->src_ring,
2508 					  desc_size);
2509 		qdf_mem_free(CE_state->src_ring);
2510 	}
2511 	if (CE_state->dest_ring) {
2512 		/* Cleanup the datapath Rx ring */
2513 		ce_t2h_msg_ce_cleanup(copyeng);
2514 
2515 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
2516 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
2517 			ce_free_desc_ring(scn, CE_state->id,
2518 					  CE_state->dest_ring,
2519 					  desc_size);
2520 		qdf_mem_free(CE_state->dest_ring);
2521 
2522 		/* epping */
2523 		if (inited)
2524 			qdf_timer_free(&CE_state->poll_timer);
2526 	}
2527 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
2528 		/* Cleanup the status ring */
2529 		ce_h2t_tx_ce_cleanup(copyeng);
2530 
2531 		if (CE_state->status_ring->shadow_base_unaligned)
2532 			qdf_mem_free(
2533 				CE_state->status_ring->shadow_base_unaligned);
2534 
2535 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
2536 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
2537 			ce_free_desc_ring(scn, CE_state->id,
2538 					  CE_state->status_ring,
2539 					  desc_size);
2540 		qdf_mem_free(CE_state->status_ring);
2541 	}
2542 
2543 	free_mem_ce_debug_history(scn, CE_id);
2544 	reset_ce_debug_history(scn);
2545 	ce_deinit_ce_desc_event_log(scn, CE_id);
2546 
2547 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
2548 	qdf_mem_free(CE_state);
2549 }
2550 
2551 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
2552 {
2553 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2554 
2555 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
2556 		  sizeof(hif_state->msg_callbacks_pending));
2557 	qdf_mem_zero(&hif_state->msg_callbacks_current,
2558 		  sizeof(hif_state->msg_callbacks_current));
2559 }
2560 
2561 /* Send the first nbytes bytes of the buffer */
2562 QDF_STATUS
2563 hif_send_head(struct hif_opaque_softc *hif_ctx,
2564 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
2565 	      qdf_nbuf_t nbuf, unsigned int data_attr)
2566 {
2567 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2568 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2569 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2570 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2571 	int bytes = nbytes, nfrags = 0;
2572 	struct ce_sendlist sendlist;
2573 	int i = 0;
2574 	QDF_STATUS status;
2575 	unsigned int mux_id = 0;
2576 
2577 	if (nbytes > qdf_nbuf_len(nbuf)) {
2578 		hif_err("nbytes: %d nbuf_len: %d", nbytes,
2579 		       (uint32_t)qdf_nbuf_len(nbuf));
2580 		QDF_ASSERT(0);
2581 	}
2582 
2583 	transfer_id =
2584 		(mux_id & MUX_ID_MASK) |
2585 		(transfer_id & TRANSACTION_ID_MASK);
2586 	data_attr &= DESC_DATA_FLAG_MASK;
2587 	/*
2588 	 * The common case involves sending multiple fragments within a
2589 	 * single download (the tx descriptor and the tx frame header).
2590 	 * So, optimize for the case of multiple fragments by not even
2591 	 * checking whether it's necessary to use a sendlist.
2592 	 * The overhead of using a sendlist for a single buffer download
2593 	 * is not a big deal, since it happens rarely (for WMI messages).
2594 	 */
2595 	ce_sendlist_init(&sendlist);
2596 	do {
2597 		qdf_dma_addr_t frag_paddr;
2598 		int frag_bytes;
2599 
2600 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
2601 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
2602 		/*
2603 		 * Clear the packet offset for all but the first CE desc.
2604 		 */
2605 		if (i++ > 0)
2606 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
2607 
2608 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
2609 				    frag_bytes > bytes ? bytes : frag_bytes,
2610 				    qdf_nbuf_get_frag_is_wordstream(nbuf,
2611 				    nfrags) ? 0 : CE_SEND_FLAG_SWAP_DISABLE,
2612 				    data_attr);
2616 		if (status != QDF_STATUS_SUCCESS) {
2617 			hif_err("frag_num: %d larger than limit (status=%d)",
2618 			       nfrags, status);
2619 			return status;
2620 		}
2621 		bytes -= frag_bytes;
2622 		nfrags++;
2623 	} while (bytes > 0);
2624 
2625 	/* Make sure we have resources to handle this request */
2626 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2627 	if (pipe_info->num_sends_allowed < nfrags) {
2628 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2629 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
2630 		return QDF_STATUS_E_RESOURCES;
2631 	}
2632 	pipe_info->num_sends_allowed -= nfrags;
2633 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2634 
2635 	if (qdf_unlikely(!ce_hdl)) {
2636 		hif_err("CE handle is null");
2637 		return A_ERROR;
2638 	}
2639 
2640 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
2641 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
2642 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
2643 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
2644 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
2645 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
2646 
2647 	return status;
2648 }
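
/*
 * Caller sketch (editor's illustration, assuming an already-mapped
 * nbuf and a valid UL pipe id): a single-fragment download reduces to
 *
 *	status = hif_send_head(hif_ctx, ul_pipe, transfer_id,
 *			       qdf_nbuf_len(nbuf), nbuf, 0);
 *
 * in which case the sendlist built above carries exactly one fragment.
 */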
2649 
2650 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
2651 								int force)
2652 {
2653 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2654 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2655 
2656 	if (!force) {
2657 		int resources;
2658 		/*
2659 		 * Decide whether to actually poll for completions, or just
2660 		 * wait for a later chance. If there seem to be plenty of
2661 		 * resources left, then just wait, since checking involves
2662 		 * reading a CE register, which is a relatively expensive
2663 		 * operation.
2664 		 */
2665 		resources = hif_get_free_queue_number(hif_ctx, pipe);
2666 		/*
2667 		 * If at least 50% of the total resources are still available,
2668 		 * don't bother checking again yet.
2669 		 */
2670 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
2671 									 1))
2672 			return;
2673 	}
2674 #ifdef ATH_11AC_TXCOMPACT
2675 	ce_per_engine_servicereap(scn, pipe);
2676 #else
2677 	ce_per_engine_service(scn, pipe);
2678 #endif
2679 }
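
/*
 * Worked example (editor's note) for the 50% heuristic above: with
 * src_nentries = 32, resources > (32 >> 1) skips the poll while more
 * than 16 send slots remain free; the CE is only serviced once free
 * slots drop to 16 or fewer, avoiding the relatively expensive CE
 * register read while resources are plentiful.
 */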
2680 
2681 uint16_t
2682 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2683 {
2684 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2685 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2686 	uint16_t rv;
2687 
2688 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2689 	rv = pipe_info->num_sends_allowed;
2690 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2691 	return rv;
2692 }
2693 
2694 /* Called by lower (CE) layer when a send to Target completes. */
2695 static void
2696 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
2697 		     void *transfer_context, qdf_dma_addr_t CE_data,
2698 		     unsigned int nbytes, unsigned int transfer_id,
2699 		     unsigned int sw_index, unsigned int hw_index,
2700 		     unsigned int toeplitz_hash_result)
2701 {
2702 	struct HIF_CE_pipe_info *pipe_info =
2703 		(struct HIF_CE_pipe_info *)ce_context;
2704 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
2705 	struct hif_msg_callbacks *msg_callbacks =
2706 		&pipe_info->pipe_callbacks;
2707 
2708 	do {
2709 		/*
2710 		 * The upper layer callback will be triggered
2711 		 * when the last fragment is completed.
2712 		 */
2713 		if (transfer_context != CE_SENDLIST_ITEM_CTXT)
2714 			msg_callbacks->txCompletionHandler(
2715 				msg_callbacks->Context,
2716 				transfer_context, transfer_id,
2717 				toeplitz_hash_result);
2718 
2719 		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2720 		pipe_info->num_sends_allowed++;
2721 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2722 	} while (ce_completed_send_next(copyeng,
2723 			&ce_context, &transfer_context,
2724 			&CE_data, &nbytes, &transfer_id,
2725 			&sw_idx, &hw_idx,
2726 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
2727 }
2728 
2729 /**
2730  * hif_ce_do_recv(): send message from copy engine to upper layers
2731  * @msg_callbacks: structure containing callback and callback context
2732  * @netbuff: skb containing message
2733  * @nbytes: number of bytes in the message
2734  * @pipe_info: used for the pipe_number info
2735  *
2736  * Checks the packet length, configures the length in the netbuff,
2737  * and calls the upper layer callback.
2738  *
2739  * return: None
2740  */
2741 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
2742 		qdf_nbuf_t netbuf, int nbytes,
2743 		struct HIF_CE_pipe_info *pipe_info) {
2744 	if (nbytes <= pipe_info->buf_sz) {
2745 		qdf_nbuf_set_pktlen(netbuf, nbytes);
2746 		msg_callbacks->
2747 			rxCompletionHandler(msg_callbacks->Context,
2748 					netbuf, pipe_info->pipe_num);
2749 	} else {
2750 		hif_err("Invalid Rx msg buf: %pK nbytes: %d", netbuf, nbytes);
2751 		qdf_nbuf_free(netbuf);
2752 	}
2753 }
2754 
2755 /* Called by lower (CE) layer when data is received from the Target. */
2756 static void
2757 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
2758 		     void *transfer_context, qdf_dma_addr_t CE_data,
2759 		     unsigned int nbytes, unsigned int transfer_id,
2760 		     unsigned int flags)
2761 {
2762 	struct HIF_CE_pipe_info *pipe_info =
2763 		(struct HIF_CE_pipe_info *)ce_context;
2764 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
2765 	struct CE_state *ce_state = (struct CE_state *) copyeng;
2766 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2767 	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
2768 	struct hif_msg_callbacks *msg_callbacks =
2769 		 &pipe_info->pipe_callbacks;
2770 
2771 	do {
2772 		hif_pm_runtime_mark_last_busy(hif_ctx);
2773 		qdf_nbuf_unmap_single(scn->qdf_dev,
2774 				      (qdf_nbuf_t) transfer_context,
2775 				      QDF_DMA_FROM_DEVICE);
2776 
2777 		atomic_inc(&pipe_info->recv_bufs_needed);
2778 		hif_post_recv_buffers_for_pipe(pipe_info);
2779 		if (scn->target_status == TARGET_STATUS_RESET)
2780 			qdf_nbuf_free(transfer_context);
2781 		else
2782 			hif_ce_do_recv(msg_callbacks, transfer_context,
2783 				nbytes, pipe_info);
2784 
2785 		/* Set up force_break flag if num of receives reaches
2786 		 * MAX_NUM_OF_RECEIVES
2787 		 */
2788 		ce_state->receive_count++;
2789 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
2790 			ce_state->force_break = 1;
2791 			break;
2792 		}
2793 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
2794 					&CE_data, &nbytes, &transfer_id,
2795 					&flags) == QDF_STATUS_SUCCESS);
2796 
2797 }
2798 
2799 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
2800 
2801 void
2802 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
2803 	      struct hif_msg_callbacks *callbacks)
2804 {
2805 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2806 
2807 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2808 	spin_lock_init(&pcie_access_log_lock);
2809 #endif
2810 	/* Save callbacks for later installation */
2811 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
2812 		 sizeof(hif_state->msg_callbacks_pending));
2813 
2814 }
2815 
2816 static int hif_completion_thread_startup_by_ceid(struct HIF_CE_state *hif_state,
2817 						 int pipe_num)
2818 {
2819 	struct CE_attr attr;
2820 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2821 	struct hif_msg_callbacks *hif_msg_callbacks =
2822 		&hif_state->msg_callbacks_current;
2823 	struct HIF_CE_pipe_info *pipe_info;
2824 	struct CE_state *ce_state;
2825 
2826 	if (pipe_num >= CE_COUNT_MAX)
2827 		return -EINVAL;
2828 
2829 	pipe_info = &hif_state->pipe_info[pipe_num];
2830 	ce_state = scn->ce_id_to_state[pipe_num];
2831 
2832 	if (!hif_msg_callbacks ||
2833 	    !hif_msg_callbacks->rxCompletionHandler ||
2834 	    !hif_msg_callbacks->txCompletionHandler) {
2835 		hif_err("%s: no completion handler registered", __func__);
2836 		return -EFAULT;
2837 	}
2838 
2839 	attr = hif_state->host_ce_config[pipe_num];
2840 	if (attr.src_nentries) {
2841 		/* pipe used to send to target */
2842 		hif_debug("%s: pipe_num:%d pipe_info:0x%pK\n",
2843 			  __func__, pipe_num, pipe_info);
2844 		ce_send_cb_register(pipe_info->ce_hdl,
2845 				    hif_pci_ce_send_done, pipe_info,
2846 				    attr.flags & CE_ATTR_DISABLE_INTR);
2847 		pipe_info->num_sends_allowed = attr.src_nentries - 1;
2848 	}
2849 	if (attr.dest_nentries) {
2850 		hif_debug("%s: pipe_num:%d pipe_info:0x%pK\n",
2851 			  __func__, pipe_num, pipe_info);
2852 		/* pipe used to receive from target */
2853 		ce_recv_cb_register(pipe_info->ce_hdl,
2854 				    hif_pci_ce_recv_data, pipe_info,
2855 				    attr.flags & CE_ATTR_DISABLE_INTR);
2856 	}
2857 
2858 	if (attr.src_nentries)
2859 		qdf_spinlock_create(&pipe_info->completion_freeq_lock);
2860 
2861 	if (!(ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND))
2862 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
2863 			     sizeof(pipe_info->pipe_callbacks));
2864 
2865 	return 0;
2866 }
2867 
2868 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
2869 {
2870 	struct CE_handle *ce_diag = hif_state->ce_diag;
2871 	int pipe_num, ret;
2872 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2873 
2874 	/* daemonize("hif_compl_thread"); */
2875 
2876 	if (scn->ce_count == 0) {
2877 		hif_err("ce_count is 0");
2878 		return -EINVAL;
2879 	}
2880 
2882 	A_TARGET_ACCESS_LIKELY(scn);
2883 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2884 		struct HIF_CE_pipe_info *pipe_info;
2885 
2886 		pipe_info = &hif_state->pipe_info[pipe_num];
2887 		if (pipe_info->ce_hdl == ce_diag)
2888 			continue;       /* Handle Diagnostic CE specially */
2889 
2890 		ret = hif_completion_thread_startup_by_ceid(hif_state,
2891 							    pipe_num);
2892 		if (ret < 0)
2893 			return ret;
2894 
2895 	}
2896 
2897 	A_TARGET_ACCESS_UNLIKELY(scn);
2898 	return 0;
2899 }
2900 
2901 /*
2902  * Install pending msg callbacks.
2903  *
2904  * TBDXXX: This hack is needed because upper layers install msg callbacks
2905  * for use with HTC before BMI is done; yet this HIF implementation
2906  * needs to continue to use BMI msg callbacks. Really, upper layers
2907  * should not register HTC callbacks until AFTER BMI phase.
2908  */
2909 static void hif_msg_callbacks_install(struct hif_softc *scn)
2910 {
2911 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2912 
2913 	qdf_mem_copy(&hif_state->msg_callbacks_current,
2914 		 &hif_state->msg_callbacks_pending,
2915 		 sizeof(hif_state->msg_callbacks_pending));
2916 }
2917 
2918 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2919 							uint8_t *DLPipe)
2920 {
2921 	int ul_is_polled, dl_is_polled;
2922 
2923 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
2924 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2925 }
2926 
2927 /**
2928  * hif_dump_pipe_debug_count() - Log error count
2929  * @scn: hif_softc pointer.
2930  *
2931  * Output the pipe error counts of each pipe to log file
2932  *
2933  * Return: N/A
2934  */
2935 void hif_dump_pipe_debug_count(struct hif_softc *scn)
2936 {
2937 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2938 	int pipe_num;
2939 
2940 	if (!hif_state) {
2941 		hif_err("hif_state is NULL");
2942 		return;
2943 	}
2944 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2945 		struct HIF_CE_pipe_info *pipe_info;
2946 
2947 		pipe_info = &hif_state->pipe_info[pipe_num];
2948 
2949 		if (pipe_info->nbuf_alloc_err_count > 0 ||
2950 		    pipe_info->nbuf_dma_err_count > 0 ||
2951 		    pipe_info->nbuf_ce_enqueue_err_count)
2952 			hif_err(
2953 				"pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
2954 				pipe_info->pipe_num,
2955 				atomic_read(&pipe_info->recv_bufs_needed),
2956 				pipe_info->nbuf_alloc_err_count,
2957 				pipe_info->nbuf_dma_err_count,
2958 				pipe_info->nbuf_ce_enqueue_err_count);
2959 	}
2960 }
2961 
2962 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2963 					  void *nbuf, uint32_t *error_cnt,
2964 					  enum hif_ce_event_type failure_type,
2965 					  const char *failure_type_string)
2966 {
2967 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2968 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2969 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2970 	int ce_id = CE_state->id;
2971 	uint32_t error_cnt_tmp;
2972 
2973 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2974 	error_cnt_tmp = ++(*error_cnt);
2975 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2976 	hif_debug("pipe_num: %d, needed: %d, err_cnt: %u, fail_type: %s",
2977 		  pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2978 		  failure_type_string);
2979 	hif_record_ce_desc_event(scn, ce_id, failure_type,
2980 				 NULL, nbuf, bufs_needed_tmp, 0);
2981 	/* If we fail to allocate the last buffer for an rx pipe,
2982 	 * there is no trigger to refill the CE and we will
2983 	 * eventually crash.
2984 	 */
2985 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1 ||
2986 	    (ce_srng_based(scn) &&
2987 	     bufs_needed_tmp == CE_state->dest_ring->nentries - 2))
2988 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
2989 }
2990 
2995 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
2996 {
2997 	struct CE_handle *ce_hdl;
2998 	qdf_size_t buf_sz;
2999 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
3000 	QDF_STATUS status;
3001 	uint32_t bufs_posted = 0;
3002 	unsigned int ce_id;
3003 
3004 	buf_sz = pipe_info->buf_sz;
3005 	if (buf_sz == 0) {
3006 		/* Unused Copy Engine */
3007 		return QDF_STATUS_SUCCESS;
3008 	}
3009 
3010 	ce_hdl = pipe_info->ce_hdl;
3011 	ce_id = ((struct CE_state *)ce_hdl)->id;
3012 
3013 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3014 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
3015 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
3016 		qdf_nbuf_t nbuf;
3017 
3018 		atomic_dec(&pipe_info->recv_bufs_needed);
3019 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3020 
3021 		hif_record_ce_desc_event(scn, ce_id,
3022 					 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL,
3023 					 0, 0);
3024 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
3025 		if (!nbuf) {
3026 			hif_post_recv_buffers_failure(pipe_info, nbuf,
3027 					&pipe_info->nbuf_alloc_err_count,
3028 					 HIF_RX_NBUF_ALLOC_FAILURE,
3029 					"HIF_RX_NBUF_ALLOC_FAILURE");
3030 			return QDF_STATUS_E_NOMEM;
3031 		}
3032 
3033 		hif_record_ce_desc_event(scn, ce_id,
3034 					 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf,
3035 					 0, 0);
3036 		/*
3037 		 * Equivalent to:
3038 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
3039 		 * CE_data = dma_map_single(dev, data, buf_sz, DMA_FROM_DEVICE);
3040 		 */
3041 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
3042 					    QDF_DMA_FROM_DEVICE);
3043 
3044 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
3045 			hif_post_recv_buffers_failure(pipe_info, nbuf,
3046 					&pipe_info->nbuf_dma_err_count,
3047 					 HIF_RX_NBUF_MAP_FAILURE,
3048 					"HIF_RX_NBUF_MAP_FAILURE");
3049 			qdf_nbuf_free(nbuf);
3050 			return status;
3051 		}
3052 
3053 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
3054 		hif_record_ce_desc_event(scn, ce_id,
3055 					 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf,
3056 					 0, 0);
3057 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
3058 					       buf_sz, DMA_FROM_DEVICE);
3059 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
3060 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
3061 			hif_post_recv_buffers_failure(pipe_info, nbuf,
3062 					&pipe_info->nbuf_ce_enqueue_err_count,
3063 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
3064 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
3065 
3066 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
3067 						QDF_DMA_FROM_DEVICE);
3068 			qdf_nbuf_free(nbuf);
3069 			return status;
3070 		}
3071 
3072 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
3073 		bufs_posted++;
3074 	}
3075 	pipe_info->nbuf_alloc_err_count =
3076 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
3077 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
3078 	pipe_info->nbuf_dma_err_count =
3079 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
3080 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
3081 	pipe_info->nbuf_ce_enqueue_err_count =
3082 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
3083 		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
3084 
3085 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
3086 
3087 	return QDF_STATUS_SUCCESS;
3088 }
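
/*
 * Worked example (editor's note) for the error-counter decay above:
 * the saturating subtraction clamps at zero instead of wrapping the
 * unsigned counter, e.g. nbuf_alloc_err_count = 3 with bufs_posted = 5
 * resets to 0, while a count of 8 with 5 posted decays to 3.
 */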
3089 
3090 /*
3091  * Try to post all desired receive buffers for all pipes.
3092  * Returns QDF_STATUS_SUCCESS for a non-fastpath rx copy engine, as
3093  * oom_allocation_work will be scheduled to recover any failures;
3094  * returns an error status if unable to completely replenish
3095  * receive buffers for a fastpath rx copy engine.
3096  */
3097 static QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
3098 {
3099 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3100 	int pipe_num;
3101 	struct CE_state *ce_state = NULL;
3102 	QDF_STATUS qdf_status;
3103 
3104 	A_TARGET_ACCESS_LIKELY(scn);
3105 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3106 		struct HIF_CE_pipe_info *pipe_info;
3107 
3108 		ce_state = scn->ce_id_to_state[pipe_num];
3109 		pipe_info = &hif_state->pipe_info[pipe_num];
3110 
3111 		if (!ce_state)
3112 			continue;
3113 
3114 		/* Do not init dynamic CEs, during initial load */
3115 		if (ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND)
3116 			continue;
3117 
3118 		if (hif_is_nss_wifi_enabled(scn) && ce_state->htt_rx_data)
3119 			continue;
3121 
3122 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
3123 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) &&
3124 		    ce_state->htt_rx_data && scn->fastpath_mode_on) {
3126 			A_TARGET_ACCESS_UNLIKELY(scn);
3127 			return qdf_status;
3128 		}
3129 	}
3130 
3131 	A_TARGET_ACCESS_UNLIKELY(scn);
3132 
3133 	return QDF_STATUS_SUCCESS;
3134 }
3135 
3136 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
3137 {
3138 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3139 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3140 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
3141 
3142 	hif_update_fastpath_recv_bufs_cnt(scn);
3143 
3144 	hif_msg_callbacks_install(scn);
3145 
3146 	if (hif_completion_thread_startup(hif_state))
3147 		return QDF_STATUS_E_FAILURE;
3148 
3149 	/* enable buffer cleanup */
3150 	hif_state->started = true;
3151 
3152 	/* Post buffers once to start things off. */
3153 	qdf_status = hif_post_recv_buffers(scn);
3154 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
3155 		/* cleanup is done in hif_ce_disable */
3156 		hif_err("Failed to post buffers");
3157 		return qdf_status;
3158 	}
3159 
3160 	return qdf_status;
3161 }
3162 
3163 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
3164 {
3165 	struct hif_softc *scn;
3166 	struct CE_handle *ce_hdl;
3167 	uint32_t buf_sz;
3168 	struct HIF_CE_state *hif_state;
3169 	qdf_nbuf_t netbuf;
3170 	qdf_dma_addr_t CE_data;
3171 	void *per_CE_context;
3172 
3173 	buf_sz = pipe_info->buf_sz;
3174 	/* Unused Copy Engine */
3175 	if (buf_sz == 0)
3176 		return;
3177 
3179 	hif_state = pipe_info->HIF_CE_state;
3180 	if (!hif_state->started)
3181 		return;
3182 
3183 	scn = HIF_GET_SOFTC(hif_state);
3184 	ce_hdl = pipe_info->ce_hdl;
3185 
3186 	if (!scn->qdf_dev)
3187 		return;
3188 	while (ce_revoke_recv_next
3189 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
3190 			&CE_data) == QDF_STATUS_SUCCESS) {
3191 		if (netbuf) {
3192 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
3193 					      QDF_DMA_FROM_DEVICE);
3194 			qdf_nbuf_free(netbuf);
3195 		}
3196 	}
3197 }
3198 
3199 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
3200 {
3201 	struct CE_handle *ce_hdl;
3202 	struct HIF_CE_state *hif_state;
3203 	struct hif_softc *scn;
3204 	qdf_nbuf_t netbuf;
3205 	void *per_CE_context;
3206 	qdf_dma_addr_t CE_data;
3207 	unsigned int nbytes;
3208 	unsigned int id;
3209 	uint32_t buf_sz;
3210 	uint32_t toeplitz_hash_result;
3211 
3212 	buf_sz = pipe_info->buf_sz;
3213 	if (buf_sz == 0) {
3214 		/* Unused Copy Engine */
3215 		return;
3216 	}
3217 
3218 	hif_state = pipe_info->HIF_CE_state;
3219 	if (!hif_state->started)
3220 		return;
3222 
3223 	scn = HIF_GET_SOFTC(hif_state);
3224 
3225 	ce_hdl = pipe_info->ce_hdl;
3226 
3227 	while (ce_cancel_send_next
3228 		       (ce_hdl, &per_CE_context,
3229 		       (void **)&netbuf, &CE_data, &nbytes,
3230 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
3231 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
3232 			/*
3233 			 * Packets enqueued by htt_h2t_ver_req_msg() and
3234 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
3235 			 * freed in htt_htc_misc_pkt_pool_free() in
3236 			 * wlantl_close(), so do not free them here again
3237 			 * by checking whether it's the endpoint
3238 			 * which they are queued in.
3239 			 */
3240 			if (id == scn->htc_htt_tx_endpoint)
3241 				return;
3242 			/* Indicate the completion to higher
3243 			 * layer to free the buffer
3244 			 */
3245 			if (pipe_info->pipe_callbacks.txCompletionHandler)
3246 				pipe_info->pipe_callbacks.
3247 				    txCompletionHandler(pipe_info->
3248 					    pipe_callbacks.Context,
3249 					    netbuf, id, toeplitz_hash_result);
3250 		}
3251 	}
3252 }
3253 
3254 /*
3255  * Cleanup residual buffers for device shutdown:
3256  *    buffers that were enqueued for receive
3257  *    buffers that were to be sent
3258  * Note: Buffers that had completed but which were
3259  * not yet processed are on a completion queue. They
3260  * are handled when the completion thread shuts down.
3261  */
3262 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
3263 {
3264 	int pipe_num;
3265 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
3266 	struct CE_state *ce_state;
3267 
3268 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3269 		struct HIF_CE_pipe_info *pipe_info;
3270 
3271 		ce_state = scn->ce_id_to_state[pipe_num];
3272 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3273 				((ce_state->htt_tx_data) ||
3274 				 (ce_state->htt_rx_data))) {
3275 			continue;
3276 		}
3277 
3278 		pipe_info = &hif_state->pipe_info[pipe_num];
3279 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
3280 		hif_send_buffer_cleanup_on_pipe(pipe_info);
3281 	}
3282 }
3283 
3284 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
3285 {
3286 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3287 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3288 
3289 	hif_buffer_cleanup(hif_state);
3290 }
3291 
3292 static void hif_destroy_oom_work(struct hif_softc *scn)
3293 {
3294 	struct CE_state *ce_state;
3295 	int ce_id;
3296 
3297 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3298 		ce_state = scn->ce_id_to_state[ce_id];
3299 		if (ce_state)
3300 			qdf_destroy_work(scn->qdf_dev,
3301 					 &ce_state->oom_allocation_work);
3302 	}
3303 }
3304 
3305 void hif_ce_stop(struct hif_softc *scn)
3306 {
3307 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3308 	int pipe_num;
3309 
3310 	/*
3311 	 * before cleaning up any memory, ensure irq &
3312 	 * bottom half contexts will not be re-entered
3313 	 */
3314 	hif_disable_isr(&scn->osc);
3315 	hif_destroy_oom_work(scn);
3316 	scn->hif_init_done = false;
3317 
3318 	/*
3319 	 * At this point, asynchronous threads are stopped,
3320 	 * The Target should not DMA nor interrupt, Host code may
3321 	 * not initiate anything more.  So we just need to clean
3322 	 * up Host-side state.
3323 	 */
3324 
3325 	if (scn->athdiag_procfs_inited) {
3326 		athdiag_procfs_remove();
3327 		scn->athdiag_procfs_inited = false;
3328 	}
3329 
3330 	hif_buffer_cleanup(hif_state);
3331 
3332 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3333 		struct HIF_CE_pipe_info *pipe_info;
3334 		struct CE_attr attr;
3335 		struct CE_handle *ce_diag = hif_state->ce_diag;
3336 
3337 		pipe_info = &hif_state->pipe_info[pipe_num];
3338 		if (pipe_info->ce_hdl) {
3339 			if (pipe_info->ce_hdl != ce_diag &&
3340 			    hif_state->started) {
3341 				attr = hif_state->host_ce_config[pipe_num];
3342 				if (attr.src_nentries)
3343 					qdf_spinlock_destroy(&pipe_info->
3344 							completion_freeq_lock);
3345 			}
3346 			ce_fini(pipe_info->ce_hdl);
3347 			pipe_info->ce_hdl = NULL;
3348 			pipe_info->buf_sz = 0;
3349 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
3350 		}
3351 	}
3352 
3353 	if (hif_state->sleep_timer_init) {
3354 		qdf_timer_stop(&hif_state->sleep_timer);
3355 		qdf_timer_free(&hif_state->sleep_timer);
3356 		hif_state->sleep_timer_init = false;
3357 	}
3358 
3359 	hif_state->started = false;
3360 }
3361 
3362 static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
3363 				   struct shadow_reg_cfg
3364 				   **target_shadow_reg_cfg_ret,
3365 				   uint32_t *shadow_cfg_sz_ret)
3366 {
3367 	if (target_shadow_reg_cfg_ret)
3368 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
3369 	if (shadow_cfg_sz_ret)
3370 		*shadow_cfg_sz_ret = shadow_cfg_sz;
3371 }
3372 
3373 /**
3374  * hif_get_target_ce_config() - get copy engine configuration
3375  * @scn: HIF context
3376  * @target_ce_config_ret: basic copy engine configuration
3376  * @target_ce_config_sz_ret: size of the basic configuration in bytes
3377  * @target_service_to_ce_map_ret: service mapping for the copy engines
3378  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
3379  * @target_shadow_reg_cfg_ret: shadow register configuration
3380  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
3381  *
3382  * Provides accessors to these values outside of this file.
3383  * Currently these are stored in static pointers to const sections.
3384  * there are multiple configurations that are selected from at compile time.
3385  * Runtime selection would need to consider mode, target type and bus type.
3386  *
3387  * Return: return by parameter.
3388  */
3389 void hif_get_target_ce_config(struct hif_softc *scn,
3390 		struct CE_pipe_config **target_ce_config_ret,
3391 		uint32_t *target_ce_config_sz_ret,
3392 		struct service_to_pipe **target_service_to_ce_map_ret,
3393 		uint32_t *target_service_to_ce_map_sz_ret,
3394 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
3395 		uint32_t *shadow_cfg_sz_ret)
3396 {
3397 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3398 
3399 	*target_ce_config_ret = hif_state->target_ce_config;
3400 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
3401 
3402 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
3403 				       target_service_to_ce_map_sz_ret);
3404 	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
3405 			       shadow_cfg_sz_ret);
3406 }
3407 
3408 #ifdef CONFIG_SHADOW_V2
3409 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
3410 {
3411 	int i;
3412 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3413 		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
3414 
3415 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
3416 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
3417 		     "%s: i %d, val %x", __func__, i,
3418 		     cfg->shadow_reg_v2_cfg[i].addr);
3419 	}
3420 }
3421 
3422 #else
3423 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
3424 {
3425 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3426 		  "%s: CONFIG_SHADOW_V2 not defined", __func__);
3427 }
3428 #endif
3429 
3430 #ifdef ADRASTEA_RRI_ON_DDR
3431 /**
3432  * hif_get_src_ring_read_index(): Called to get the SRRI
3433  *
3434  * @scn: hif_softc pointer
3435  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3436  *
3437  * This function returns the SRRI to the caller. For CEs that
3438  * don't have interrupts enabled, we look at the DDR based SRRI
3439  *
3440  * Return: SRRI
3441  */
3442 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
3443 		uint32_t CE_ctrl_addr)
3444 {
3445 	struct CE_attr attr;
3446 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3447 
3448 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3449 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3450 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3451 	} else {
3452 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3453 			return A_TARGET_READ(scn,
3454 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
3455 		else
3456 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
3457 					CE_ctrl_addr);
3458 	}
3459 }
3460 
3461 /**
3462  * hif_get_dst_ring_read_index(): Called to get the DRRI
3463  *
3464  * @scn: hif_softc pointer
3465  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3466  *
3467  * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR-based DRRI
3469  *
3470  * Return: DRRI
3471  */
3472 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
3473 		uint32_t CE_ctrl_addr)
3474 {
3475 	struct CE_attr attr;
3476 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3477 
3478 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3479 
3480 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3481 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3482 	} else {
3483 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3484 			return A_TARGET_READ(scn,
3485 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
3486 		else
3487 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
3488 					CE_ctrl_addr);
3489 	}
3490 }
3491 
3492 /**
3493  * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
3494  * @scn: hif_softc pointer
3495  *
3496  * Return: qdf status
3497  */
3498 static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
3499 {
3500 	qdf_dma_addr_t paddr_rri_on_ddr = 0;
3501 
3502 	scn->vaddr_rri_on_ddr =
3503 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3504 		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
3505 		&paddr_rri_on_ddr);
3506 
3507 	if (!scn->vaddr_rri_on_ddr) {
3508 		hif_err("dmaable page alloc fail");
3509 		return QDF_STATUS_E_NOMEM;
3510 	}
3511 
3512 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
3513 
3514 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
3515 
3516 	return QDF_STATUS_SUCCESS;
3517 }
3518 #endif
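
/*
 * Layout sketch for the RRI-on-DDR block (an assumption based on the
 * per-CE allocation above: one uint32_t slot per copy engine). The
 * SRRI/DRRI "FROM_DDR" accessors presumably unpack the source and
 * destination read indices from that per-CE word, e.g.:
 *
 *	word = scn->vaddr_rri_on_ddr[COPY_ENGINE_ID(ctrl_addr)];
 *	srri = word & 0xffff;
 *	drri = (word >> 16) & 0xffff;
 */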
3519 
3520 #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
3521 /**
3522  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3523  *
3524  * @scn: hif_softc pointer
3525  *
 * This function allocates non-cached memory on DDR and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI at this particular location.
3529  *
3530  * Return: None
3531  */
3532 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3533 {
3534 	unsigned int i;
3535 	uint32_t high_paddr, low_paddr;
3536 
3537 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3538 		return;
3539 
3540 	low_paddr  = BITS0_TO_31(scn->paddr_rri_on_ddr);
3541 	high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);
3542 
3543 	hif_debug("using srri and drri from DDR");
3544 
3545 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
3546 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
3547 
3548 	for (i = 0; i < CE_COUNT; i++)
3549 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
3550 }
3551 #else
3552 /**
3553  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3554  *
3555  * @scn: hif_softc pointer
3556  *
3557  * This is a dummy implementation for platforms that don't
3558  * support this functionality.
3559  *
3560  * Return: None
3561  */
3562 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3563 {
3564 }
3565 #endif
3566 
3567 /**
3568  * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
3569  *                                    QMI command
3570  * @scn: hif context
3571  * @cfg: wlan enable config
3572  *
 * In the case of Genoa (QCN7605), the rri_over_ddr memory
 * configuration is passed to the firmware through the QMI
 * configure command.
3575  */
3576 #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
3577 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3578 					   struct pld_wlan_enable_cfg *cfg)
3579 {
3580 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3581 		return;
3582 
3583 	cfg->rri_over_ddr_cfg_valid = true;
3584 	cfg->rri_over_ddr_cfg.base_addr_low =
3585 		 BITS0_TO_31(scn->paddr_rri_on_ddr);
3586 	cfg->rri_over_ddr_cfg.base_addr_high =
3587 		 BITS32_TO_35(scn->paddr_rri_on_ddr);
3588 }
3589 #else
3590 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3591 					   struct pld_wlan_enable_cfg *cfg)
3592 {
3593 }
3594 #endif
3595 
3596 /**
3597  * hif_wlan_enable(): call the platform driver to enable wlan
3598  * @scn: HIF Context
3599  *
3600  * This function passes the con_mode and CE configuration to
3601  * platform driver to enable wlan.
3602  *
 * Return: 0 on success; otherwise a Linux error code
3604  */
3605 int hif_wlan_enable(struct hif_softc *scn)
3606 {
3607 	struct pld_wlan_enable_cfg cfg;
3608 	enum pld_driver_mode mode;
3609 	uint32_t con_mode = hif_get_conparam(scn);
3610 
3611 	hif_get_target_ce_config(scn,
3612 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
3613 			&cfg.num_ce_tgt_cfg,
3614 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
3615 			&cfg.num_ce_svc_pipe_cfg,
3616 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
3617 			&cfg.num_shadow_reg_cfg);
3618 
3619 	/* translate from structure size to array size */
3620 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
3621 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
3622 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
3623 
3624 	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
3625 			      &cfg.num_shadow_reg_v2_cfg);
3626 
3627 	hif_print_hal_shadow_register_cfg(&cfg);
3628 
3629 	hif_update_rri_over_ddr_config(scn, &cfg);
3630 
3631 	if (QDF_GLOBAL_FTM_MODE == con_mode)
3632 		mode = PLD_FTM;
3633 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
3634 		mode = PLD_COLDBOOT_CALIBRATION;
3635 	else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode)
3636 		mode = PLD_FTM_COLDBOOT_CALIBRATION;
3637 	else if (QDF_IS_EPPING_ENABLED(con_mode))
3638 		mode = PLD_EPPING;
3639 	else
3640 		mode = PLD_MISSION;
3641 
3642 	if (BYPASS_QMI)
3643 		return 0;
3644 	else
3645 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode);
3646 }
3647 
3648 #ifdef WLAN_FEATURE_EPPING
3649 
3650 #define CE_EPPING_USES_IRQ true
3651 
3652 void hif_ce_prepare_epping_config(struct hif_softc *scn,
3653 				  struct HIF_CE_state *hif_state)
3654 {
3655 	if (CE_EPPING_USES_IRQ)
3656 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
3657 	else
3658 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
3659 	hif_state->target_ce_config = target_ce_config_wlan_epping;
3660 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
3661 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
3662 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
3663 	scn->ce_count = EPPING_HOST_CE_COUNT;
3664 }
3665 #endif
3666 
3667 #ifdef QCN7605_SUPPORT
3668 static inline
3669 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3670 			       struct HIF_CE_state *hif_state)
3671 {
3672 	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
3673 	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
3674 	hif_state->target_ce_config_sz =
3675 				 sizeof(target_ce_config_wlan_qcn7605);
3676 	target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
3677 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
3678 	scn->ce_count = QCN7605_CE_COUNT;
3679 }
3680 #else
3681 static inline
3682 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3683 			       struct HIF_CE_state *hif_state)
3684 {
3685 	hif_err("QCN7605 not supported");
3686 }
3687 #endif
3688 
3689 #ifdef CE_SVC_CMN_INIT
3690 #ifdef QCA_WIFI_SUPPORT_SRNG
3691 static inline void hif_ce_service_init(void)
3692 {
3693 	ce_service_srng_init();
3694 }
3695 #else
3696 static inline void hif_ce_service_init(void)
3697 {
3698 	ce_service_legacy_init();
3699 }
3700 #endif
3701 #else
3702 static inline void hif_ce_service_init(void)
3703 {
3704 }
3705 #endif
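
/*
 * hif_ce_service_init() resolves entirely at compile time: SRNG-capable
 * builds (QCA_WIFI_SUPPORT_SRNG) register the srng CE service, other
 * CE_SVC_CMN_INIT builds register the legacy service, and builds
 * without CE_SVC_CMN_INIT compile it to a no-op.
 */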
3706 
3707 
3708 /**
3709  * hif_ce_prepare_config() - load the correct static tables.
3710  * @scn: hif context
3711  *
3712  * Epping uses different static attribute tables than mission mode.
3713  */
3714 void hif_ce_prepare_config(struct hif_softc *scn)
3715 {
3716 	uint32_t mode = hif_get_conparam(scn);
3717 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3718 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
3719 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3720 	int ret;
3721 	int msi_data_count = 0;
3722 	int msi_data_start = 0;
3723 	int msi_irq_start = 0;
3724 
3725 	hif_ce_service_init();
3726 	hif_state->ce_services = ce_services_attach(scn);
3727 
3728 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3729 					  &msi_data_count, &msi_data_start,
3730 					  &msi_irq_start);
3731 
3732 	scn->ce_count = HOST_CE_COUNT;
3733 	scn->int_assignment = &ce_int_context[msi_data_count];
3734 	/* if epping is enabled we need to use the epping configuration. */
3735 	if (QDF_IS_EPPING_ENABLED(mode)) {
3736 		hif_ce_prepare_epping_config(scn, hif_state);
3737 		return;
3738 	}
3739 
3740 	switch (tgt_info->target_type) {
3741 	default:
3742 		hif_state->host_ce_config = host_ce_config_wlan;
3743 		hif_state->target_ce_config = target_ce_config_wlan;
3744 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
3745 		break;
3746 	case TARGET_TYPE_QCN7605:
3747 		hif_set_ce_config_qcn7605(scn, hif_state);
3748 		break;
3749 	case TARGET_TYPE_AR900B:
3750 	case TARGET_TYPE_QCA9984:
3751 	case TARGET_TYPE_IPQ4019:
3752 	case TARGET_TYPE_QCA9888:
3753 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3754 			hif_state->host_ce_config =
3755 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
3756 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3757 			hif_state->host_ce_config =
3758 				host_lowdesc_ce_cfg_wlan_ar900b;
3759 		} else {
3760 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
3761 		}
3762 
3763 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
3764 		hif_state->target_ce_config_sz =
3765 				sizeof(target_ce_config_wlan_ar900b);
3766 
3767 		break;
3768 
3769 	case TARGET_TYPE_AR9888:
3770 	case TARGET_TYPE_AR9888V2:
3771 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3772 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
3773 		} else {
3774 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
3775 		}
3776 
3777 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
3778 		hif_state->target_ce_config_sz =
3779 					sizeof(target_ce_config_wlan_ar9888);
3780 
3781 		break;
3782 
3783 	case TARGET_TYPE_QCA8074:
3784 	case TARGET_TYPE_QCA8074V2:
3785 	case TARGET_TYPE_QCA6018:
3786 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
3787 			hif_state->host_ce_config =
3788 					host_ce_config_wlan_qca8074_pci;
3789 			hif_state->target_ce_config =
3790 				target_ce_config_wlan_qca8074_pci;
3791 			hif_state->target_ce_config_sz =
3792 				sizeof(target_ce_config_wlan_qca8074_pci);
3793 		} else {
3794 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
3795 			hif_state->target_ce_config =
3796 					target_ce_config_wlan_qca8074;
3797 			hif_state->target_ce_config_sz =
3798 				sizeof(target_ce_config_wlan_qca8074);
3799 		}
3800 		break;
3801 	case TARGET_TYPE_QCA6290:
3802 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
3803 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
3804 		hif_state->target_ce_config_sz =
3805 					sizeof(target_ce_config_wlan_qca6290);
3806 
3807 		scn->ce_count = QCA_6290_CE_COUNT;
3808 		break;
3809 	case TARGET_TYPE_QCN9000:
3810 		hif_state->host_ce_config = host_ce_config_wlan_qcn9000;
3811 		hif_state->target_ce_config = target_ce_config_wlan_qcn9000;
3812 		hif_state->target_ce_config_sz =
3813 					sizeof(target_ce_config_wlan_qcn9000);
3814 		scn->ce_count = QCN_9000_CE_COUNT;
3815 		scn->disable_wake_irq = 1;
3816 		break;
3817 	case TARGET_TYPE_QCN9224:
3818 		hif_set_ce_config_qcn9224(scn, hif_state);
3819 		break;
3820 	case TARGET_TYPE_QCN6122:
3821 		hif_state->host_ce_config = host_ce_config_wlan_qcn6122;
3822 		hif_state->target_ce_config = target_ce_config_wlan_qcn6122;
3823 		hif_state->target_ce_config_sz =
3824 					sizeof(target_ce_config_wlan_qcn6122);
3825 		scn->ce_count = QCN_6122_CE_COUNT;
3826 		scn->disable_wake_irq = 1;
3827 		break;
3828 	case TARGET_TYPE_QCA5018:
3829 		hif_state->host_ce_config = host_ce_config_wlan_qca5018;
3830 		hif_state->target_ce_config = target_ce_config_wlan_qca5018;
3831 		hif_state->target_ce_config_sz =
3832 					sizeof(target_ce_config_wlan_qca5018);
3833 		scn->ce_count = QCA_5018_CE_COUNT;
3834 		break;
3835 	case TARGET_TYPE_QCA6390:
3836 		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
3837 		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
3838 		hif_state->target_ce_config_sz =
3839 					sizeof(target_ce_config_wlan_qca6390);
3840 
3841 		scn->ce_count = QCA_6390_CE_COUNT;
3842 		break;
3843 	case TARGET_TYPE_QCA6490:
3844 		hif_state->host_ce_config = host_ce_config_wlan_qca6490;
3845 		hif_state->target_ce_config = target_ce_config_wlan_qca6490;
3846 		hif_state->target_ce_config_sz =
3847 					sizeof(target_ce_config_wlan_qca6490);
3848 
3849 		scn->ce_count = QCA_6490_CE_COUNT;
3850 		break;
3851 	case TARGET_TYPE_QCA6750:
3852 		hif_state->host_ce_config = host_ce_config_wlan_qca6750;
3853 		hif_state->target_ce_config = target_ce_config_wlan_qca6750;
3854 		hif_state->target_ce_config_sz =
3855 					sizeof(target_ce_config_wlan_qca6750);
3856 
3857 		scn->ce_count = QCA_6750_CE_COUNT;
3858 		break;
3859 	case TARGET_TYPE_WCN7850:
3860 		hif_state->host_ce_config = host_ce_config_wlan_wcn7850;
3861 		hif_state->target_ce_config = target_ce_config_wlan_wcn7850;
3862 		hif_state->target_ce_config_sz =
3863 					sizeof(target_ce_config_wlan_wcn7850);
3864 		scn->ce_count = WCN_7850_CE_COUNT;
3865 		break;
3866 	case TARGET_TYPE_ADRASTEA:
3867 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3868 			hif_state->host_ce_config =
3869 				host_lowdesc_ce_config_wlan_adrastea_nopktlog;
3870 			hif_state->target_ce_config =
3871 			       target_lowdesc_ce_config_wlan_adrastea_nopktlog;
3872 			hif_state->target_ce_config_sz =
3873 			sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog);
3874 		} else {
3875 			hif_state->host_ce_config =
3876 				host_ce_config_wlan_adrastea;
3877 			hif_state->target_ce_config =
3878 					target_ce_config_wlan_adrastea;
3879 			hif_state->target_ce_config_sz =
3880 					sizeof(target_ce_config_wlan_adrastea);
3881 		}
3882 		break;
3883 
3884 	}
3885 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
3886 }
3887 
3888 /**
3889  * hif_ce_open() - do ce specific allocations
3890  * @hif_sc: pointer to hif context
3891  *
 * Return: QDF_STATUS_SUCCESS
3893  */
3894 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
3895 {
3896 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3897 
3898 	qdf_spinlock_create(&hif_state->irq_reg_lock);
3899 	qdf_spinlock_create(&hif_state->keep_awake_lock);
3900 	return QDF_STATUS_SUCCESS;
3901 }
3902 
3903 /**
3904  * hif_ce_close() - do ce specific free
3905  * @hif_sc: pointer to hif context
3906  */
3907 void hif_ce_close(struct hif_softc *hif_sc)
3908 {
3909 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3910 
3911 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
3912 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
3913 }
3914 
3915 /**
3916  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
3917  * @hif_sc: hif context
3918  *
 * Uses state variables to support cleaning up when hif_config_ce() fails.
3920  */
3921 void hif_unconfig_ce(struct hif_softc *hif_sc)
3922 {
3923 	int pipe_num;
3924 	struct HIF_CE_pipe_info *pipe_info;
3925 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3926 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
3927 
3928 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3929 		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl)
			ce_unregister_irq(hif_state, (1 << pipe_num));
3933 	}
3934 	deinit_tasklet_workers(hif_hdl);
3935 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3936 		pipe_info = &hif_state->pipe_info[pipe_num];
3937 		if (pipe_info->ce_hdl) {
3938 			ce_fini(pipe_info->ce_hdl);
3939 			pipe_info->ce_hdl = NULL;
3940 			pipe_info->buf_sz = 0;
3941 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
3942 		}
3943 	}
3944 	if (hif_sc->athdiag_procfs_inited) {
3945 		athdiag_procfs_remove();
3946 		hif_sc->athdiag_procfs_inited = false;
3947 	}
3948 }
3949 
3950 #ifdef CONFIG_BYPASS_QMI
3951 #ifdef QCN7605_SUPPORT
3952 /**
3953  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3954  * @scn: pointer to HIF structure
3955  *
3956  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3957  *
3958  * Return: void
3959  */
3960 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3961 {
3962 	phys_addr_t target_pa;
3963 	struct ce_info *ce_info_ptr;
3964 	uint32_t msi_data_start;
3965 	uint32_t msi_data_count;
3966 	uint32_t msi_irq_start;
3967 	uint32_t i = 0;
3968 	int ret;
3969 
3970 	scn->vaddr_qmi_bypass =
3971 			(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3972 							     scn->qdf_dev->dev,
3973 							     FW_SHARED_MEM,
3974 							     &target_pa);
3975 	if (!scn->vaddr_qmi_bypass) {
3976 		hif_err("Memory allocation failed could not post target buf");
3977 		return;
3978 	}
3979 
3980 	scn->paddr_qmi_bypass = target_pa;
3981 
3982 	ce_info_ptr = (struct ce_info *)scn->vaddr_qmi_bypass;
3983 
3984 	if (scn->vaddr_rri_on_ddr) {
3985 		ce_info_ptr->rri_over_ddr_low_paddr  =
3986 			 BITS0_TO_31(scn->paddr_rri_on_ddr);
3987 		ce_info_ptr->rri_over_ddr_high_paddr =
3988 			 BITS32_TO_35(scn->paddr_rri_on_ddr);
3989 	}
3990 
3991 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3992 					  &msi_data_count, &msi_data_start,
3993 					  &msi_irq_start);
3994 	if (ret) {
3995 		hif_err("Failed to get CE msi config");
3996 		return;
3997 	}
3998 
3999 	for (i = 0; i < CE_COUNT_MAX; i++) {
4000 		ce_info_ptr->cfg[i].ce_id = i;
4001 		ce_info_ptr->cfg[i].msi_vector =
4002 			 (i % msi_data_count) + msi_irq_start;
4003 	}
4004 
4005 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
4006 	hif_info("target va %pK target pa %pa", scn->vaddr_qmi_bypass,
4007 		 &target_pa);
4008 }
4009 
4010 /**
4011  * hif_cleanup_static_buf_to_target() -  clean up static buffer to WLAN FW
4012  * @scn: pointer to HIF structure
4013  *
4014  *
4015  * Return: void
4016  */
4017 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
4018 {
4019 	void *target_va = scn->vaddr_qmi_bypass;
4020 	phys_addr_t target_pa = scn->paddr_qmi_bypass;
4021 
4022 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
4023 				FW_SHARED_MEM, target_va,
4024 				target_pa, 0);
4025 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
4026 }
4027 #else
4028 /**
4029  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
4030  * @scn: pointer to HIF structure
4031  *
4032  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
4033  *
4034  * Return: void
4035  */
4036 static void hif_post_static_buf_to_target(struct hif_softc *scn)
4037 {
4038 	qdf_dma_addr_t target_pa;
4039 
4040 	scn->vaddr_qmi_bypass =
4041 			(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
4042 							     scn->qdf_dev->dev,
4043 							     FW_SHARED_MEM,
4044 							     &target_pa);
4045 	if (!scn->vaddr_qmi_bypass) {
4046 		hif_err("Memory allocation failed could not post target buf");
4047 		return;
4048 	}
4049 
4050 	scn->paddr_qmi_bypass = target_pa;
4051 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
4052 }
4053 
4054 /**
4055  * hif_cleanup_static_buf_to_target() -  clean up static buffer to WLAN FW
4056  * @scn: pointer to HIF structure
4057  *
4058  *
4059  * Return: void
4060  */
4061 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
4062 {
4063 	void *target_va = scn->vaddr_qmi_bypass;
4064 	phys_addr_t target_pa = scn->paddr_qmi_bypass;
4065 
4066 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
4067 				FW_SHARED_MEM, target_va,
4068 				target_pa, 0);
	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
4070 }
4071 #endif
4072 
4073 #else
4074 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
4075 {
4076 }
4077 
4078 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
4079 {
4080 }
4081 #endif
4082 
4083 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
4084 				bool wait_for_it)
4085 {
4086 	/* todo */
4087 	return 0;
4088 }
4089 
4090 int hif_config_ce_by_id(struct hif_softc *scn, int pipe_num)
4091 {
4092 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4093 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
4094 	struct HIF_CE_pipe_info *pipe_info;
4095 	struct CE_state *ce_state = NULL;
4096 	struct CE_attr *attr;
4097 	int rv = 0;
4098 
4099 	if (pipe_num >= CE_COUNT_MAX)
4100 		return -EINVAL;
4101 
4102 	pipe_info = &hif_state->pipe_info[pipe_num];
4103 	pipe_info->pipe_num = pipe_num;
4104 	pipe_info->HIF_CE_state = hif_state;
4105 	attr = &hif_state->host_ce_config[pipe_num];
4106 	ce_state = scn->ce_id_to_state[pipe_num];
4107 
4108 	if (ce_state) {
		/* Do not reinitialize the CE if it is done already */
4110 		rv = QDF_STATUS_E_BUSY;
4111 		goto err;
4112 	}
4113 
4114 	pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
4115 	ce_state = scn->ce_id_to_state[pipe_num];
4116 	if (!ce_state) {
4117 		A_TARGET_ACCESS_UNLIKELY(scn);
4118 		rv = QDF_STATUS_E_FAILURE;
4119 		goto err;
4120 	}
4121 	qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
4122 	QDF_ASSERT(pipe_info->ce_hdl);
4123 	if (!pipe_info->ce_hdl) {
4124 		rv = QDF_STATUS_E_FAILURE;
4125 		A_TARGET_ACCESS_UNLIKELY(scn);
4126 		goto err;
4127 	}
4128 
4129 	ce_state->lro_data = qdf_lro_init();
4130 
4131 	if (attr->flags & CE_ATTR_DIAG) {
		/* Reserve the last CE for
		 * Diagnostic Window support
4134 		 */
4135 		hif_state->ce_diag = pipe_info->ce_hdl;
4136 		goto skip;
4137 	}
4138 
4139 	if (hif_is_nss_wifi_enabled(scn) && ce_state &&
4140 	    (ce_state->htt_rx_data)) {
4141 		goto skip;
4142 	}
4143 
4144 	pipe_info->buf_sz = (qdf_size_t)(attr->src_sz_max);
4145 	if (attr->dest_nentries > 0) {
4146 		atomic_set(&pipe_info->recv_bufs_needed,
4147 			   init_buffer_count(attr->dest_nentries - 1));
		/* SRNG-based CE has one entry less */
4149 		if (ce_srng_based(scn))
4150 			atomic_dec(&pipe_info->recv_bufs_needed);
4151 	} else {
4152 		atomic_set(&pipe_info->recv_bufs_needed, 0);
4153 	}
4154 	ce_tasklet_init(hif_state, (1 << pipe_num));
4155 	ce_register_irq(hif_state, (1 << pipe_num));
4156 
4157 	init_tasklet_worker_by_ceid(hif_hdl, pipe_num);
4158 skip:
4159 	return 0;
4160 err:
4161 	return rv;
4162 }
4163 
4164 /**
4165  * hif_config_ce() - configure copy engines
4166  * @scn: hif context
4167  *
4168  * Prepares fw, copy engine hardware and host sw according
4169  * to the attributes selected by hif_ce_prepare_config.
4170  *
4171  * also calls athdiag_procfs_init
4172  *
 * Return: 0 for success, nonzero for failure.
4174  */
4175 int hif_config_ce(struct hif_softc *scn)
4176 {
4177 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4178 	struct HIF_CE_pipe_info *pipe_info;
4179 	int pipe_num;
4180 
4181 #ifdef ADRASTEA_SHADOW_REGISTERS
4182 	int i;
4183 #endif
4184 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
4185 
4186 	scn->notice_send = true;
4187 	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
4188 
4189 	hif_post_static_buf_to_target(scn);
4190 
4191 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
4192 
4193 	hif_config_rri_on_ddr(scn);
4194 
4195 	if (ce_srng_based(scn))
4196 		scn->bus_ops.hif_target_sleep_state_adjust =
4197 			&hif_srng_sleep_state_adjust;
4198 
	/* Initialise the CE debug history sysfs interface inputs, ce_id
	 * and index, and disable data storing
4201 	 */
4202 	reset_ce_debug_history(scn);
4203 
4204 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
4205 		struct CE_attr *attr;
4206 
4207 		pipe_info = &hif_state->pipe_info[pipe_num];
4208 		attr = &hif_state->host_ce_config[pipe_num];
4209 
4210 		if (attr->flags & CE_ATTR_INIT_ON_DEMAND)
4211 			continue;
4212 
4213 		if (hif_config_ce_by_id(scn, pipe_num))
4214 			goto err;
4215 	}
4216 
4217 	if (athdiag_procfs_init(scn) != 0) {
4218 		A_TARGET_ACCESS_UNLIKELY(scn);
4219 		goto err;
4220 	}
4221 	scn->athdiag_procfs_inited = true;
4222 
4223 	hif_debug("ce_init done");
4224 	hif_debug("%s: X, ret = %d", __func__, rv);
4225 
4226 #ifdef ADRASTEA_SHADOW_REGISTERS
4227 	hif_debug("Using Shadow Registers instead of CE Registers");
4228 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
4229 		hif_debug("Shadow Register%d is mapped to address %x",
4230 			  i,
4231 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
4232 	}
4233 #endif
4234 
4235 	return rv != QDF_STATUS_SUCCESS;
4236 err:
4237 	/* Failure, so clean up */
4238 	hif_unconfig_ce(scn);
4239 	hif_info("X, ret = %d", rv);
	/* constant-true expression: report nonzero (failure) to the caller */
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
4241 }
4242 
4243 /**
 * hif_config_ce_pktlog() - configure the pktlog copy engine
 * @hif_hdl: hif opaque handle
 *
 * Configures the copy engine reserved for pktlog, sets up its
 * interrupt, starts the completion thread for it and posts
 * receive buffers on the pipe.
 *
 * Return: 0 for success, nonzero for failure.
4253  */
4254 int hif_config_ce_pktlog(struct hif_opaque_softc *hif_hdl)
4255 {
4256 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
4257 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4258 	int pipe_num;
4259 	QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
4260 	struct HIF_CE_pipe_info *pipe_info;
4261 
4262 	if (!scn)
4263 		goto err;
4264 
4265 	if (scn->pktlog_init)
4266 		return QDF_STATUS_SUCCESS;
4267 
	pipe_num = hif_get_pktlog_ce_num(scn);
4269 	if (pipe_num < 0) {
4270 		qdf_status = QDF_STATUS_E_FAILURE;
4271 		goto err;
4272 	}
4273 
4274 	pipe_info = &hif_state->pipe_info[pipe_num];
4275 
4276 	qdf_status = hif_config_ce_by_id(scn, pipe_num);
4277 	/* CE Already initialized. Do not try to reinitialized again */
4278 	if (qdf_status == QDF_STATUS_E_BUSY)
4279 		return QDF_STATUS_SUCCESS;
4280 
4281 	qdf_status = hif_config_irq_by_ceid(scn, pipe_num);
	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
4283 		goto err;
4284 
4285 	qdf_status = hif_completion_thread_startup_by_ceid(hif_state, pipe_num);
4286 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
4287 		hif_err("%s:failed to start hif thread", __func__);
4288 		goto err;
4289 	}
4290 
4291 	/* Post buffers for pktlog copy engine. */
4292 	qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
4293 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
4294 		/* cleanup is done in hif_ce_disable */
4295 		hif_err("%s:failed to post buffers", __func__);
4296 		return qdf_status;
4297 	}
4298 	scn->pktlog_init = true;
4299 	return qdf_status != QDF_STATUS_SUCCESS;
4300 
4301 err:
4302 	hif_debug("%s: X, ret = %d", __func__, qdf_status);
	/* constant-true expression: report nonzero (failure) to the caller */
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
4304 }
4305 
4306 #ifdef IPA_OFFLOAD
4307 /**
4308  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
4309  * @scn: bus context
 * @ce_sr: copyengine source ring shared memory info
4311  * @ce_sr_ring_size: copyengine source ring size
4312  * @ce_reg_paddr: copyengine register physical address
4313  *
 * When the IPA micro controller data path offload feature is
 * enabled, HIF should release copy engine resource information to
 * the IPA uC, which will then access the hardware using the
 * released information.
4317  *
4318  * Return: None
4319  */
4320 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
4321 			     qdf_shared_mem_t **ce_sr,
4322 			     uint32_t *ce_sr_ring_size,
4323 			     qdf_dma_addr_t *ce_reg_paddr)
4324 {
4325 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4326 	struct HIF_CE_pipe_info *pipe_info =
4327 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
4328 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
4329 
4330 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
4331 			    ce_reg_paddr);
4332 }
4333 #endif /* IPA_OFFLOAD */
4334 
4335 
4336 #ifdef ADRASTEA_SHADOW_REGISTERS
4337 
4338 /*
4339  * Current shadow register config
4340  *
4341  * -----------------------------------------------------------
4342  * Shadow Register      |     CE   |    src/dst write index
4343  * -----------------------------------------------------------
4344  *         0            |     0    |           src
4345  *         1     No Config - Doesn't point to anything
4346  *         2     No Config - Doesn't point to anything
4347  *         3            |     3    |           src
4348  *         4            |     4    |           src
4349  *         5            |     5    |           src
4350  *         6     No Config - Doesn't point to anything
4351  *         7            |     7    |           src
4352  *         8     No Config - Doesn't point to anything
4353  *         9     No Config - Doesn't point to anything
4354  *         10    No Config - Doesn't point to anything
4355  *         11    No Config - Doesn't point to anything
4356  * -----------------------------------------------------------
4357  *         12    No Config - Doesn't point to anything
4358  *         13           |     1    |           dst
4359  *         14           |     2    |           dst
4360  *         15    No Config - Doesn't point to anything
4361  *         16    No Config - Doesn't point to anything
4362  *         17    No Config - Doesn't point to anything
4363  *         18    No Config - Doesn't point to anything
4364  *         19           |     7    |           dst
4365  *         20           |     8    |           dst
4366  *         21    No Config - Doesn't point to anything
4367  *         22    No Config - Doesn't point to anything
4368  *         23    No Config - Doesn't point to anything
4369  * -----------------------------------------------------------
4370  *
4371  *
4372  * ToDo - Move shadow register config to following in the future
4373  * This helps free up a block of shadow registers towards the end.
4374  * Can be used for other purposes
4375  *
4376  * -----------------------------------------------------------
4377  * Shadow Register      |     CE   |    src/dst write index
4378  * -----------------------------------------------------------
4379  *      0            |     0    |           src
4380  *      1            |     3    |           src
4381  *      2            |     4    |           src
4382  *      3            |     5    |           src
4383  *      4            |     7    |           src
4384  * -----------------------------------------------------------
4385  *      5            |     1    |           dst
4386  *      6            |     2    |           dst
4387  *      7            |     7    |           dst
4388  *      8            |     8    |           dst
4389  * -----------------------------------------------------------
4390  *      9     No Config - Doesn't point to anything
4391  *      12    No Config - Doesn't point to anything
4392  *      13    No Config - Doesn't point to anything
4393  *      14    No Config - Doesn't point to anything
4394  *      15    No Config - Doesn't point to anything
4395  *      16    No Config - Doesn't point to anything
4396  *      17    No Config - Doesn't point to anything
4397  *      18    No Config - Doesn't point to anything
4398  *      19    No Config - Doesn't point to anything
4399  *      20    No Config - Doesn't point to anything
4400  *      21    No Config - Doesn't point to anything
4401  *      22    No Config - Doesn't point to anything
4402  *      23    No Config - Doesn't point to anything
4403  * -----------------------------------------------------------
4404 */
4405 #ifndef QCN7605_SUPPORT
4406 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4407 {
4408 	u32 addr = 0;
4409 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4410 
4411 	switch (ce) {
4412 	case 0:
4413 		addr = SHADOW_VALUE0;
4414 		break;
4415 	case 3:
4416 		addr = SHADOW_VALUE3;
4417 		break;
4418 	case 4:
4419 		addr = SHADOW_VALUE4;
4420 		break;
4421 	case 5:
4422 		addr = SHADOW_VALUE5;
4423 		break;
4424 	case 7:
4425 		addr = SHADOW_VALUE7;
4426 		break;
4427 	default:
4428 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4429 		QDF_ASSERT(0);
4430 	}
4431 	return addr;
4432 
4433 }
4434 
4435 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4436 {
4437 	u32 addr = 0;
4438 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4439 
4440 	switch (ce) {
4441 	case 1:
4442 		addr = SHADOW_VALUE13;
4443 		break;
4444 	case 2:
4445 		addr = SHADOW_VALUE14;
4446 		break;
4447 	case 5:
4448 		addr = SHADOW_VALUE17;
4449 		break;
4450 	case 7:
4451 		addr = SHADOW_VALUE19;
4452 		break;
4453 	case 8:
4454 		addr = SHADOW_VALUE20;
4455 		break;
4456 	case 9:
4457 		addr = SHADOW_VALUE21;
4458 		break;
4459 	case 10:
4460 		addr = SHADOW_VALUE22;
4461 		break;
4462 	case 11:
4463 		addr = SHADOW_VALUE23;
4464 		break;
4465 	default:
4466 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4467 		QDF_ASSERT(0);
4468 	}
4469 
4470 	return addr;
4471 
4472 }
4473 #else
4474 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4475 {
4476 	u32 addr = 0;
4477 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4478 
4479 	switch (ce) {
4480 	case 0:
4481 		addr = SHADOW_VALUE0;
4482 		break;
4483 	case 3:
4484 		addr = SHADOW_VALUE3;
4485 		break;
4486 	case 4:
4487 		addr = SHADOW_VALUE4;
4488 		break;
4489 	case 5:
4490 		addr = SHADOW_VALUE5;
4491 		break;
4492 	default:
4493 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4494 		QDF_ASSERT(0);
4495 	}
4496 	return addr;
4497 }
4498 
4499 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4500 {
4501 	u32 addr = 0;
4502 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4503 
4504 	switch (ce) {
4505 	case 1:
4506 		addr = SHADOW_VALUE13;
4507 		break;
4508 	case 2:
4509 		addr = SHADOW_VALUE14;
4510 		break;
4511 	case 3:
4512 		addr = SHADOW_VALUE15;
4513 		break;
4514 	case 5:
4515 		addr = SHADOW_VALUE17;
4516 		break;
4517 	case 7:
4518 		addr = SHADOW_VALUE19;
4519 		break;
4520 	case 8:
4521 		addr = SHADOW_VALUE20;
4522 		break;
4523 	case 9:
4524 		addr = SHADOW_VALUE21;
4525 		break;
4526 	case 10:
4527 		addr = SHADOW_VALUE22;
4528 		break;
4529 	case 11:
4530 		addr = SHADOW_VALUE23;
4531 		break;
4532 	default:
4533 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4534 		QDF_ASSERT(0);
4535 	}
4536 
4537 	return addr;
4538 }
4539 #endif
4540 #endif
4541 
4542 #if defined(FEATURE_LRO)
4543 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
4544 {
4545 	struct CE_state *ce_state;
4546 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
4547 
4548 	ce_state = scn->ce_id_to_state[ctx_id];
4549 
4550 	return ce_state->lro_data;
4551 }
4552 #endif
4553 
4554 /**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: hif opaque handle.
4558  * @svc_id: Service ID for which the mapping is needed.
4559  * @ul_pipe: address of the container in which ul pipe is returned.
4560  * @dl_pipe: address of the container in which dl pipe is returned.
4561  * @ul_is_polled: address of the container in which a bool
4562  *			indicating if the UL CE for this service
4563  *			is polled is returned.
4564  * @dl_is_polled: address of the container in which a bool
4565  *			indicating if the DL CE for this service
4566  *			is polled is returned.
4567  *
4568  * Return: Indicates whether the service has been found in the table.
4569  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
4570  *         There will be warning logs if either leg has not been updated
4571  *         because it missed the entry in the table (but this is not an err).
4572  */
4573 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
4574 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
4575 			int *dl_is_polled)
4576 {
4577 	int status = -EINVAL;
4578 	unsigned int i;
4579 	struct service_to_pipe element;
4580 	struct service_to_pipe *tgt_svc_map_to_use;
4581 	uint32_t sz_tgt_svc_map_to_use;
4582 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
4583 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4584 	bool dl_updated = false;
4585 	bool ul_updated = false;
4586 
4587 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
4588 				       &sz_tgt_svc_map_to_use);
4589 
4590 	*dl_is_polled = 0;  /* polling for received messages not supported */
4591 
4592 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
4593 
4594 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
4595 		if (element.service_id == svc_id) {
4596 			if (element.pipedir == PIPEDIR_OUT) {
4597 				*ul_pipe = element.pipenum;
4598 				*ul_is_polled =
4599 					(hif_state->host_ce_config[*ul_pipe].flags &
4600 					 CE_ATTR_DISABLE_INTR) != 0;
4601 				ul_updated = true;
4602 			} else if (element.pipedir == PIPEDIR_IN) {
4603 				*dl_pipe = element.pipenum;
4604 				dl_updated = true;
4605 			}
4606 			status = 0;
4607 		}
4608 	}
	if (!ul_updated)
		hif_debug("ul pipe is NOT updated for service %d", svc_id);
	if (!dl_updated)
		hif_debug("dl pipe is NOT updated for service %d", svc_id);
4613 
4614 	return status;
4615 }
4616 
4617 #ifdef SHADOW_REG_DEBUG
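/*
 * Debug cross-check: with SHADOW_REG_DEBUG enabled, the getters below
 * read the ring index both from the CE register and from the DDR copy
 * maintained by the RRI-on-DDR mechanism, and assert if they disagree.
 */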
4618 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
4619 		uint32_t CE_ctrl_addr)
4620 {
4621 	uint32_t read_from_hw, srri_from_ddr = 0;
4622 
4623 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
4624 
4625 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
4626 
4627 	if (read_from_hw != srri_from_ddr) {
4628 		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
4629 		       srri_from_ddr, read_from_hw,
4630 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
4631 		QDF_ASSERT(0);
4632 	}
4633 	return srri_from_ddr;
4634 }
4635 
4636 
4637 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
4638 		uint32_t CE_ctrl_addr)
4639 {
4640 	uint32_t read_from_hw, drri_from_ddr = 0;
4641 
4642 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
4643 
4644 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
4645 
4646 	if (read_from_hw != drri_from_ddr) {
4647 		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
4648 		       drri_from_ddr, read_from_hw,
4649 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
4650 		QDF_ASSERT(0);
4651 	}
4652 	return drri_from_ddr;
4653 }
4654 
4655 #endif
4656 
4657 /**
4658  * hif_dump_ce_registers() - dump ce registers
4659  * @scn: hif_opaque_softc pointer.
4660  *
4661  * Output the copy engine registers
4662  *
4663  * Return: 0 for success or error code
4664  */
4665 int hif_dump_ce_registers(struct hif_softc *scn)
4666 {
4667 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
4668 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
4669 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
4670 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
4671 	uint16_t i;
4672 	QDF_STATUS status;
4673 
4674 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
4675 		if (!scn->ce_id_to_state[i]) {
4676 			hif_debug("CE%d not used", i);
4677 			continue;
4678 		}
4679 
4680 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
4681 					   (uint8_t *) &ce_reg_values[0],
4682 					   ce_reg_word_size * sizeof(uint32_t));
4683 
4684 		if (status != QDF_STATUS_SUCCESS) {
4685 			hif_err("Dumping CE register failed!");
4686 			return -EACCES;
4687 		}
4688 		hif_debug("CE%d=>", i);
4689 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
4690 				   (uint8_t *) &ce_reg_values[0],
4691 				   ce_reg_word_size * sizeof(uint32_t));
4692 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
4693 				+ SR_WR_INDEX_ADDRESS),
4694 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
4695 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
4696 				+ CURRENT_SRRI_ADDRESS),
4697 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
4698 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
4699 				+ DST_WR_INDEX_ADDRESS),
4700 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
4701 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
4702 				+ CURRENT_DRRI_ADDRESS),
4703 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
4704 		qdf_print("---");
4705 	}
4706 	return 0;
4707 }
4708 qdf_export_symbol(hif_dump_ce_registers);
4709 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
4710 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
4711 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
4712 {
4713 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4714 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
4715 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
4716 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
4717 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
4718 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
4719 	struct CE_ring_state *src_ring = ce_state->src_ring;
4720 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
4721 
4722 	if (src_ring) {
4723 		hif_info->ul_pipe.nentries = src_ring->nentries;
4724 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
4725 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
4726 		hif_info->ul_pipe.write_index = src_ring->write_index;
4727 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
4728 		hif_info->ul_pipe.base_addr_CE_space =
4729 			src_ring->base_addr_CE_space;
4730 		hif_info->ul_pipe.base_addr_owner_space =
4731 			src_ring->base_addr_owner_space;
4732 	}
4733 
4734 
4735 	if (dest_ring) {
4736 		hif_info->dl_pipe.nentries = dest_ring->nentries;
4737 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
4738 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
4739 		hif_info->dl_pipe.write_index = dest_ring->write_index;
4740 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
4741 		hif_info->dl_pipe.base_addr_CE_space =
4742 			dest_ring->base_addr_CE_space;
4743 		hif_info->dl_pipe.base_addr_owner_space =
4744 			dest_ring->base_addr_owner_space;
4745 	}
4746 
4747 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
4748 	hif_info->ctrl_addr = ce_state->ctrl_addr;
4749 
4750 	return hif_info;
4751 }
4752 qdf_export_symbol(hif_get_addl_pipe_info);
4753 
4754 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
4755 {
4756 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4757 
4758 	scn->nss_wifi_ol_mode = mode;
4759 	return 0;
4760 }
4761 qdf_export_symbol(hif_set_nss_wifiol_mode);
4762 #endif
4763 
4764 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
4765 {
	struct hif_softc *scn = HIF_GET_SOFTC(osc);

	scn->hif_attribute = hif_attrib;
4768 }
4769 
4770 
/* disable interrupts (only applicable to the legacy copy engine currently) */
4772 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
4773 {
4774 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4775 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
4776 	uint32_t ctrl_addr = CE_state->ctrl_addr;
4777 
4778 	Q_TARGET_ACCESS_BEGIN(scn);
4779 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
4780 	Q_TARGET_ACCESS_END(scn);
4781 }
4782 qdf_export_symbol(hif_disable_interrupt);
4783 
4784 /**
4785  * hif_fw_event_handler() - hif fw event handler
4786  * @hif_state: pointer to hif ce state structure
4787  *
4788  * Process fw events and raise HTC callback to process fw events.
4789  *
4790  * Return: none
4791  */
4792 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
4793 {
4794 	struct hif_msg_callbacks *msg_callbacks =
4795 		&hif_state->msg_callbacks_current;
4796 
4797 	if (!msg_callbacks->fwEventHandler)
4798 		return;
4799 
4800 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
4801 			QDF_STATUS_E_FAILURE);
4802 }
4803 
4804 #ifndef QCA_WIFI_3_0
4805 /**
4806  * hif_fw_interrupt_handler() - FW interrupt handler
4807  * @irq: irq number
4808  * @arg: the user pointer
4809  *
 * Called from the PCI interrupt handler when the Target raises a
 * firmware-generated interrupt to the Host.
 *
 * Only registered for legacy CE devices.
4814  *
4815  * Return: status of handled irq
4816  */
4817 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
4818 {
4819 	struct hif_softc *scn = arg;
4820 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4821 	uint32_t fw_indicator_address, fw_indicator;
4822 
4823 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
4824 		return ATH_ISR_NOSCHED;
4825 
4826 	fw_indicator_address = hif_state->fw_indicator_address;
4827 	/* For sudden unplug this will return ~0 */
4828 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
4829 
4830 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
4831 		/* ACK: clear Target-side pending event */
4832 		A_TARGET_WRITE(scn, fw_indicator_address,
4833 			       fw_indicator & ~FW_IND_EVENT_PENDING);
4834 		if (Q_TARGET_ACCESS_END(scn) < 0)
4835 			return ATH_ISR_SCHED;
4836 
4837 		if (hif_state->started) {
4838 			hif_fw_event_handler(hif_state);
4839 		} else {
4840 			/*
4841 			 * Probable Target failure before we're prepared
4842 			 * to handle it.  Generally unexpected.
4843 			 * fw_indicator used as bitmap, and defined as below:
4844 			 *     FW_IND_EVENT_PENDING    0x1
4845 			 *     FW_IND_INITIALIZED      0x2
4846 			 *     FW_IND_NEEDRECOVER      0x4
4847 			 */
4848 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
4849 				("%s: Early firmware event indicated 0x%x\n",
4850 				 __func__, fw_indicator));
4851 		}
4852 	} else {
4853 		if (Q_TARGET_ACCESS_END(scn) < 0)
4854 			return ATH_ISR_SCHED;
4855 	}
4856 
4857 	return ATH_ISR_SCHED;
4858 }
4859 #else
4860 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
4861 {
4862 	return ATH_ISR_SCHED;
4863 }
#endif /* #ifndef QCA_WIFI_3_0 */
4865 
4866 
4867 /**
4868  * hif_wlan_disable(): call the platform driver to disable wlan
4869  * @scn: HIF Context
4870  *
4871  * This function passes the con_mode to platform driver to disable
4872  * wlan.
4873  *
4874  * Return: void
4875  */
4876 void hif_wlan_disable(struct hif_softc *scn)
4877 {
4878 	enum pld_driver_mode mode;
4879 	uint32_t con_mode = hif_get_conparam(scn);
4880 
4881 	if (scn->target_status == TARGET_STATUS_RESET)
4882 		return;
4883 
4884 	if (QDF_GLOBAL_FTM_MODE == con_mode)
4885 		mode = PLD_FTM;
4886 	else if (QDF_IS_EPPING_ENABLED(con_mode))
4887 		mode = PLD_EPPING;
4888 	else
4889 		mode = PLD_MISSION;
4890 
4891 	pld_wlan_disable(scn->qdf_dev->dev, mode);
4892 }
4893 
4894 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
4895 {
4896 	int status;
4897 	uint8_t ul_pipe, dl_pipe;
4898 	int ul_is_polled, dl_is_polled;
4899 
4900 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
4901 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
4902 					 HTC_CTRL_RSVD_SVC,
4903 					 &ul_pipe, &dl_pipe,
4904 					 &ul_is_polled, &dl_is_polled);
4905 	if (status) {
4906 		hif_err("Failed to map pipe: %d", status);
4907 		return status;
4908 	}
4909 
4910 	*ce_id = dl_pipe;
4911 
4912 	return 0;
4913 }
4914 
4915 int hif_get_fw_diag_ce_id(struct hif_softc *scn, uint8_t *ce_id)
4916 {
4917 	int status;
4918 	uint8_t ul_pipe, dl_pipe;
4919 	int ul_is_polled, dl_is_polled;
4920 
4921 	/* DL pipe for WMI_CONTROL_DIAG_SVC should map to the FW DIAG CE_ID */
4922 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
4923 					 WMI_CONTROL_DIAG_SVC,
4924 					 &ul_pipe, &dl_pipe,
4925 					 &ul_is_polled, &dl_is_polled);
4926 	if (status) {
4927 		hif_err("Failed to map pipe: %d", status);
4928 		return status;
4929 	}
4930 
4931 	*ce_id = dl_pipe;
4932 
4933 	return 0;
4934 }
4935 
4936 #ifdef HIF_CE_LOG_INFO
4937 /**
4938  * ce_get_index_info(): Get CE index info
4939  * @scn: HIF Context
4940  * @ce_state: CE opaque handle
4941  * @info: CE info
4942  *
4943  * Return: 0 for success and non zero for failure
4944  */
4945 static
4946 int ce_get_index_info(struct hif_softc *scn, void *ce_state,
4947 		      struct ce_index *info)
4948 {
4949 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4950 
4951 	return hif_state->ce_services->ce_get_index_info(scn, ce_state, info);
4952 }
4953 
4954 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
4955 		     unsigned int *offset)
4956 {
4957 	struct hang_event_info info = {0};
4958 	static uint32_t tracked_ce = BIT(CE_ID_1) | BIT(CE_ID_2) |
4959 		BIT(CE_ID_3) | BIT(CE_ID_4) | BIT(CE_ID_9) | BIT(CE_ID_10);
4960 	uint8_t curr_index = 0;
4961 	uint8_t i;
4962 	uint16_t size;
4963 
4964 	info.active_tasklet_count = qdf_atomic_read(&scn->active_tasklet_cnt);
4965 	info.active_grp_tasklet_cnt =
4966 				qdf_atomic_read(&scn->active_grp_tasklet_cnt);
4967 
4968 	for (i = 0; i < scn->ce_count; i++) {
4969 		if (!(tracked_ce & BIT(i)) || !scn->ce_id_to_state[i])
4970 			continue;
4971 
4972 		if (ce_get_index_info(scn, scn->ce_id_to_state[i],
4973 				      &info.ce_info[curr_index]))
4974 			continue;
4975 
4976 		curr_index++;
4977 	}
4978 
4979 	info.ce_count = curr_index;
4980 	size = sizeof(info) -
4981 		(CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index);
4982 
4983 	if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
4984 		return;
4985 
4986 	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_CE_INFO,
4987 			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);
4988 
4989 	qdf_mem_copy(data + *offset, &info, size);
4990 	*offset = *offset + size;
4991 }
4992 #endif
4993