xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision 3149adf58a329e17232a4c0e58d460d025edd55a)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 #include "targcfg.h"
28 #include "qdf_lock.h"
29 #include "qdf_status.h"
30 #include "qdf_status.h"
31 #include <qdf_atomic.h>         /* qdf_atomic_read */
32 #include <targaddrs.h>
33 #include "hif_io32.h"
34 #include <hif.h>
35 #include <target_type.h>
36 #include "regtable.h"
37 #define ATH_MODULE_NAME hif
38 #include <a_debug.h>
39 #include "hif_main.h"
40 #include "ce_api.h"
41 #include "qdf_trace.h"
42 #include "pld_common.h"
43 #include "hif_debug.h"
44 #include "ce_internal.h"
45 #include "ce_reg.h"
46 #include "ce_assignment.h"
47 #include "ce_tasklet.h"
48 #ifndef CONFIG_WIN
49 #include "qwlan_version.h"
50 #endif
51 #include "qdf_module.h"
52 
53 #define CE_POLL_TIMEOUT 10      /* ms */
54 
55 #define AGC_DUMP         1
56 #define CHANINFO_DUMP    2
57 #define BB_WATCHDOG_DUMP 3
58 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
59 #define PCIE_ACCESS_DUMP 4
60 #endif
61 #include "mp_dev.h"
62 
63 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
64 	!defined(QCA_WIFI_SUPPORT_SRNG)
65 #define QCA_WIFI_SUPPORT_SRNG
66 #endif
67 
68 /* Forward references */
69 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
70 
71 /*
72  * Fix EV118783, poll to check whether a BMI response comes
73  * other than waiting for the interruption which may be lost.
74  */
75 /* #define BMI_RSP_POLLING */
76 #define BMI_RSP_TO_MILLISEC  1000
77 
78 #ifdef CONFIG_BYPASS_QMI
79 #define BYPASS_QMI 1
80 #else
81 #define BYPASS_QMI 0
82 #endif
83 
84 #ifdef CONFIG_WIN
85 #if ENABLE_10_4_FW_HDR
86 #define WDI_IPA_SERVICE_GROUP 5
87 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
88 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
89 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
90 #endif /* ENABLE_10_4_FW_HDR */
91 #endif
92 
93 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
94 static void hif_config_rri_on_ddr(struct hif_softc *scn);
95 
96 /**
97  * hif_target_access_log_dump() - dump access log
98  *
99  * dump access log
100  *
101  * Return: n/a
102  */
103 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
104 static void hif_target_access_log_dump(void)
105 {
106 	hif_target_dump_access_log();
107 }
108 #endif
109 
110 
111 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
112 		      uint8_t cmd_id, bool start)
113 {
114 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
115 
116 	switch (cmd_id) {
117 	case AGC_DUMP:
118 		if (start)
119 			priv_start_agc(scn);
120 		else
121 			priv_dump_agc(scn);
122 		break;
123 	case CHANINFO_DUMP:
124 		if (start)
125 			priv_start_cap_chaninfo(scn);
126 		else
127 			priv_dump_chaninfo(scn);
128 		break;
129 	case BB_WATCHDOG_DUMP:
130 		priv_dump_bbwatchdog(scn);
131 		break;
132 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
133 	case PCIE_ACCESS_DUMP:
134 		hif_target_access_log_dump();
135 		break;
136 #endif
137 	default:
138 		HIF_ERROR("%s: Invalid htc dump command", __func__);
139 		break;
140 	}
141 }
142 
143 static void ce_poll_timeout(void *arg)
144 {
145 	struct CE_state *CE_state = (struct CE_state *)arg;
146 
147 	if (CE_state->timer_inited) {
148 		ce_per_engine_service(CE_state->scn, CE_state->id);
149 		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
150 	}
151 }
152 
153 static unsigned int roundup_pwr2(unsigned int n)
154 {
155 	int i;
156 	unsigned int test_pwr2;
157 
158 	if (!(n & (n - 1)))
159 		return n; /* already a power of 2 */
160 
161 	test_pwr2 = 4;
162 	for (i = 0; i < 29; i++) {
163 		if (test_pwr2 > n)
164 			return test_pwr2;
165 		test_pwr2 = test_pwr2 << 1;
166 	}
167 
168 	QDF_ASSERT(0); /* n too large */
169 	return 0;
170 }
171 
172 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
173 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
174 
175 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
176 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
177 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
178 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
179 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
180 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
181 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
182 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
183 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
184 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
185 #ifdef QCA_WIFI_3_0_ADRASTEA
186 	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
187 	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
188 	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
189 #endif
190 };
191 
192 #ifdef WLAN_FEATURE_EPPING
193 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
194 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
195 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
196 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
197 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
198 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
199 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
200 	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
201 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
202 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
203 };
204 #endif
205 
206 /* CE_PCI TABLE */
207 /*
208  * NOTE: the table below is out of date, though still a useful reference.
209  * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
210  * mapping of HTC services to HIF pipes.
211  */
212 /*
213  * This authoritative table defines Copy Engine configuration and the mapping
214  * of services/endpoints to CEs.  A subset of this information is passed to
215  * the Target during startup as a prerequisite to entering BMI phase.
216  * See:
217  *    target_service_to_ce_map - Target-side mapping
218  *    hif_map_service_to_pipe      - Host-side mapping
219  *    target_ce_config         - Target-side configuration
220  *    host_ce_config           - Host-side configuration
221    ============================================================================
222    Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
223  |                      |      | ctio | Size     | Frequency
224  |                      |      | n    |          |
225    ============================================================================
226    tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
227    descriptor |                      |      |      | O(100B)  | and regular
228    download   |                      |      |      |          |
229    ----------------------------------------------------------------------------
230    rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
231    indication |                      |      |      | O(10B)   | regular
232    upload     |                      |      |      |          |
233    ----------------------------------------------------------------------------
234    MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
235    upload     |                      |      |      | O(1000B) | (frequent
236    e.g. noise |                      |      |      |          | during IP1.0
237    packets    |                      |      |      |          | testing)
238    ----------------------------------------------------------------------------
239    MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
240    download   |                      |      |      | O(1000B) | (frequent
241    e.g.       |                      |      |      |          | during IP1.0
242    misdirecte |                      |      |      |          | testing)
243    d EAPOL    |                      |      |      |          |
244    packets    |                      |      |      |          |
245    ----------------------------------------------------------------------------
246    n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
247  | DATA_VO (uplink)     |      |      |          |
248    ----------------------------------------------------------------------------
249    n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
250  | DATA_VO (downlink)   |      |      |          |
251    ----------------------------------------------------------------------------
252    WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
253  |                      |      |      | O(100B)  |
254    ----------------------------------------------------------------------------
255    WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
256    messages   | (downlink)           |      |      | O(100B)  |
257  |                      |      |      |          |
258    ----------------------------------------------------------------------------
259    n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
260  | HTC_RAW_STREAMS      |      |      |          |
261  | (uplink)             |      |      |          |
262    ----------------------------------------------------------------------------
263    n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
264  | HTC_RAW_STREAMS      |      |      |          |
265  | (downlink)           |      |      |          |
266    ----------------------------------------------------------------------------
267    diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
268  |                      |      |      |          | infrequent
269    ============================================================================
270  */
271 
272 /*
273  * Map from service/endpoint to Copy Engine.
274  * This table is derived from the CE_PCI TABLE, above.
275  * It is passed to the Target at startup for use by firmware.
276  */
277 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
278 	{
279 		WMI_DATA_VO_SVC,
280 		PIPEDIR_OUT,    /* out = UL = host -> target */
281 		3,
282 	},
283 	{
284 		WMI_DATA_VO_SVC,
285 		PIPEDIR_IN,     /* in = DL = target -> host */
286 		2,
287 	},
288 	{
289 		WMI_DATA_BK_SVC,
290 		PIPEDIR_OUT,    /* out = UL = host -> target */
291 		3,
292 	},
293 	{
294 		WMI_DATA_BK_SVC,
295 		PIPEDIR_IN,     /* in = DL = target -> host */
296 		2,
297 	},
298 	{
299 		WMI_DATA_BE_SVC,
300 		PIPEDIR_OUT,    /* out = UL = host -> target */
301 		3,
302 	},
303 	{
304 		WMI_DATA_BE_SVC,
305 		PIPEDIR_IN,     /* in = DL = target -> host */
306 		2,
307 	},
308 	{
309 		WMI_DATA_VI_SVC,
310 		PIPEDIR_OUT,    /* out = UL = host -> target */
311 		3,
312 	},
313 	{
314 		WMI_DATA_VI_SVC,
315 		PIPEDIR_IN,     /* in = DL = target -> host */
316 		2,
317 	},
318 	{
319 		WMI_CONTROL_SVC,
320 		PIPEDIR_OUT,    /* out = UL = host -> target */
321 		3,
322 	},
323 	{
324 		WMI_CONTROL_SVC,
325 		PIPEDIR_IN,     /* in = DL = target -> host */
326 		2,
327 	},
328 	{
329 		HTC_CTRL_RSVD_SVC,
330 		PIPEDIR_OUT,    /* out = UL = host -> target */
331 		0,              /* could be moved to 3 (share with WMI) */
332 	},
333 	{
334 		HTC_CTRL_RSVD_SVC,
335 		PIPEDIR_IN,     /* in = DL = target -> host */
336 		2,
337 	},
338 	{
339 		HTC_RAW_STREAMS_SVC, /* not currently used */
340 		PIPEDIR_OUT,    /* out = UL = host -> target */
341 		0,
342 	},
343 	{
344 		HTC_RAW_STREAMS_SVC, /* not currently used */
345 		PIPEDIR_IN,     /* in = DL = target -> host */
346 		2,
347 	},
348 	{
349 		HTT_DATA_MSG_SVC,
350 		PIPEDIR_OUT,    /* out = UL = host -> target */
351 		4,
352 	},
353 	{
354 		HTT_DATA_MSG_SVC,
355 		PIPEDIR_IN,     /* in = DL = target -> host */
356 		1,
357 	},
358 	{
359 		WDI_IPA_TX_SVC,
360 		PIPEDIR_OUT,    /* in = DL = target -> host */
361 		5,
362 	},
363 #if defined(QCA_WIFI_3_0_ADRASTEA)
364 	{
365 		HTT_DATA2_MSG_SVC,
366 		PIPEDIR_IN,    /* in = DL = target -> host */
367 		9,
368 	},
369 	{
370 		HTT_DATA3_MSG_SVC,
371 		PIPEDIR_IN,    /* in = DL = target -> host */
372 		10,
373 	},
374 	{
375 		PACKET_LOG_SVC,
376 		PIPEDIR_IN,    /* in = DL = target -> host */
377 		11,
378 	},
379 #endif
380 	/* (Additions here) */
381 
382 	{                       /* Must be last */
383 		0,
384 		0,
385 		0,
386 	},
387 };
388 
389 /* PIPEDIR_OUT = HOST to Target */
390 /* PIPEDIR_IN  = TARGET to HOST */
391 #if (defined(QCA_WIFI_QCA8074))
392 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
393 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
394 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
395 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
396 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
397 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
398 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
399 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
400 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
401 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
402 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
403 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
404 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
405 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
406 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
407 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
408 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
409 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
410 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
411 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
412 	/* (Additions here) */
413 	{ 0, 0, 0, },
414 };
415 #else
416 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
417 };
418 #endif
419 
420 #if (defined(QCA_WIFI_QCA6290))
421 #ifdef CONFIG_WIN
422 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
423 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
424 	{ WMI_DATA_VO_SVC, PIPEDIR_IN , 2, },
425 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
426 	{ WMI_DATA_BK_SVC, PIPEDIR_IN , 2, },
427 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
428 	{ WMI_DATA_BE_SVC, PIPEDIR_IN , 2, },
429 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
430 	{ WMI_DATA_VI_SVC, PIPEDIR_IN , 2, },
431 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
432 	{ WMI_CONTROL_SVC, PIPEDIR_IN , 2, },
433 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
434 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN , 2, },
435 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
436 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN , 1, },
437 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
438 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
439 	/* (Additions here) */
440 	{ 0, 0, 0, },
441 };
442 #else
443 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
444 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
445 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
446 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
447 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
448 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
449 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
450 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
451 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
452 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
453 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
454 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
455 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
456 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
457 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
458 	/* (Additions here) */
459 	{ 0, 0, 0, },
460 };
461 #endif
462 #else
463 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
464 };
465 #endif
466 
467 static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
468 	{
469 		WMI_DATA_VO_SVC,
470 		PIPEDIR_OUT,    /* out = UL = host -> target */
471 		3,
472 	},
473 	{
474 		WMI_DATA_VO_SVC,
475 		PIPEDIR_IN,     /* in = DL = target -> host */
476 		2,
477 	},
478 	{
479 		WMI_DATA_BK_SVC,
480 		PIPEDIR_OUT,    /* out = UL = host -> target */
481 		3,
482 	},
483 	{
484 		WMI_DATA_BK_SVC,
485 		PIPEDIR_IN,     /* in = DL = target -> host */
486 		2,
487 	},
488 	{
489 		WMI_DATA_BE_SVC,
490 		PIPEDIR_OUT,    /* out = UL = host -> target */
491 		3,
492 	},
493 	{
494 		WMI_DATA_BE_SVC,
495 		PIPEDIR_IN,     /* in = DL = target -> host */
496 		2,
497 	},
498 	{
499 		WMI_DATA_VI_SVC,
500 		PIPEDIR_OUT,    /* out = UL = host -> target */
501 		3,
502 	},
503 	{
504 		WMI_DATA_VI_SVC,
505 		PIPEDIR_IN,     /* in = DL = target -> host */
506 		2,
507 	},
508 	{
509 		WMI_CONTROL_SVC,
510 		PIPEDIR_OUT,    /* out = UL = host -> target */
511 		3,
512 	},
513 	{
514 		WMI_CONTROL_SVC,
515 		PIPEDIR_IN,     /* in = DL = target -> host */
516 		2,
517 	},
518 	{
519 		HTC_CTRL_RSVD_SVC,
520 		PIPEDIR_OUT,    /* out = UL = host -> target */
521 		0,              /* could be moved to 3 (share with WMI) */
522 	},
523 	{
524 		HTC_CTRL_RSVD_SVC,
525 		PIPEDIR_IN,     /* in = DL = target -> host */
526 		1,
527 	},
528 	{
529 		HTC_RAW_STREAMS_SVC, /* not currently used */
530 		PIPEDIR_OUT,    /* out = UL = host -> target */
531 		0,
532 	},
533 	{
534 		HTC_RAW_STREAMS_SVC, /* not currently used */
535 		PIPEDIR_IN,     /* in = DL = target -> host */
536 		1,
537 	},
538 	{
539 		HTT_DATA_MSG_SVC,
540 		PIPEDIR_OUT,    /* out = UL = host -> target */
541 		4,
542 	},
543 #if WLAN_FEATURE_FASTPATH
544 	{
545 		HTT_DATA_MSG_SVC,
546 		PIPEDIR_IN,     /* in = DL = target -> host */
547 		5,
548 	},
549 #else /* WLAN_FEATURE_FASTPATH */
550 	{
551 		HTT_DATA_MSG_SVC,
552 		PIPEDIR_IN,  /* in = DL = target -> host */
553 		1,
554 	},
555 #endif /* WLAN_FEATURE_FASTPATH */
556 
557 	/* (Additions here) */
558 
559 	{                       /* Must be last */
560 		0,
561 		0,
562 		0,
563 	},
564 };
565 
566 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
567 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
568 
569 #ifdef WLAN_FEATURE_EPPING
570 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
571 	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
572 	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
573 	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
574 	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
575 	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
576 	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
577 	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
578 	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
579 	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
580 	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
581 	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
582 	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
583 	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
584 	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
585 	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
586 	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
587 	{0, 0, 0,},             /* Must be last */
588 };
589 
590 void hif_select_epping_service_to_pipe_map(struct service_to_pipe
591 					   **tgt_svc_map_to_use,
592 					   uint32_t *sz_tgt_svc_map_to_use)
593 {
594 	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
595 	*sz_tgt_svc_map_to_use =
596 			sizeof(target_service_to_ce_map_wlan_epping);
597 }
598 #endif
599 
600 static void hif_select_service_to_pipe_map(struct hif_softc *scn,
601 				    struct service_to_pipe **tgt_svc_map_to_use,
602 				    uint32_t *sz_tgt_svc_map_to_use)
603 {
604 	uint32_t mode = hif_get_conparam(scn);
605 	struct hif_target_info *tgt_info = &scn->target_info;
606 
607 	if (QDF_IS_EPPING_ENABLED(mode)) {
608 		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
609 						      sz_tgt_svc_map_to_use);
610 	} else {
611 		switch (tgt_info->target_type) {
612 		default:
613 			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
614 			*sz_tgt_svc_map_to_use =
615 				sizeof(target_service_to_ce_map_wlan);
616 			break;
617 		case TARGET_TYPE_AR900B:
618 		case TARGET_TYPE_QCA9984:
619 		case TARGET_TYPE_IPQ4019:
620 		case TARGET_TYPE_QCA9888:
621 		case TARGET_TYPE_AR9888:
622 		case TARGET_TYPE_AR9888V2:
623 			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
624 			*sz_tgt_svc_map_to_use =
625 				sizeof(target_service_to_ce_map_ar900b);
626 			break;
627 		case TARGET_TYPE_QCA6290:
628 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
629 			*sz_tgt_svc_map_to_use =
630 				sizeof(target_service_to_ce_map_qca6290);
631 			break;
632 		case TARGET_TYPE_QCA8074:
633 			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
634 			*sz_tgt_svc_map_to_use =
635 				sizeof(target_service_to_ce_map_qca8074);
636 			break;
637 		}
638 	}
639 }
640 
641 /**
642  * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
643  * @ce_state : pointer to the state context of the CE
644  *
645  * Description:
646  *   Sets htt_rx_data attribute of the state structure if the
647  *   CE serves one of the HTT DATA services.
648  *
649  * Return:
650  *  false (attribute set to false)
651  *  true  (attribute set to true);
652  */
653 static bool ce_mark_datapath(struct CE_state *ce_state)
654 {
655 	struct service_to_pipe *svc_map;
656 	uint32_t map_sz, map_len;
657 	int    i;
658 	bool   rc = false;
659 
660 	if (ce_state != NULL) {
661 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
662 					       &map_sz);
663 
664 		map_len = map_sz / sizeof(struct service_to_pipe);
665 		for (i = 0; i < map_len; i++) {
666 			if ((svc_map[i].pipenum == ce_state->id) &&
667 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
668 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
669 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
670 				/* HTT CEs are unidirectional */
671 				if (svc_map[i].pipedir == PIPEDIR_IN)
672 					ce_state->htt_rx_data = true;
673 				else
674 					ce_state->htt_tx_data = true;
675 				rc = true;
676 			}
677 		}
678 	}
679 	return rc;
680 }
681 
682 /**
683  * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
684  * @ce_id: ce in question
685  * @ring: ring state being examined
686  * @type: "src_ring" or "dest_ring" string for identifying the ring
687  *
688  * Warns on non-zero index values.
689  * Causes a kernel panic if the ring is not empty durring initialization.
690  */
691 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
692 					 char *type)
693 {
694 	if (ring->write_index != 0 || ring->sw_index != 0)
695 		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
696 			  ce_id, type, ring->sw_index, ring->write_index);
697 	if (ring->write_index != ring->sw_index)
698 		QDF_BUG(0);
699 }
700 
701 /**
702  * ce_srng_based() - Does this target use srng
703  * @ce_state : pointer to the state context of the CE
704  *
705  * Description:
706  *   returns true if the target is SRNG based
707  *
708  * Return:
709  *  false (attribute set to false)
710  *  true  (attribute set to true);
711  */
712 bool ce_srng_based(struct hif_softc *scn)
713 {
714 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
715 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
716 
717 	switch (tgt_info->target_type) {
718 	case TARGET_TYPE_QCA8074:
719 	case TARGET_TYPE_QCA6290:
720 		return true;
721 	default:
722 		return false;
723 	}
724 	return false;
725 }
726 qdf_export_symbol(ce_srng_based);
727 
728 #ifdef QCA_WIFI_SUPPORT_SRNG
729 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
730 {
731 	if (ce_srng_based(scn))
732 		return ce_services_srng();
733 
734 	return ce_services_legacy();
735 }
736 
737 
738 #else	/* QCA_LITHIUM */
739 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
740 {
741 	return ce_services_legacy();
742 }
743 #endif /* QCA_LITHIUM */
744 
745 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
746 		struct pld_shadow_reg_v2_cfg **shadow_config,
747 		int *num_shadow_registers_configured) {
748 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
749 
750 	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
751 			scn, shadow_config, num_shadow_registers_configured);
752 }
753 
754 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
755 						uint8_t ring_type)
756 {
757 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
758 
759 	return hif_state->ce_services->ce_get_desc_size(ring_type);
760 }
761 
762 
763 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
764 		uint8_t ring_type, uint32_t nentries)
765 {
766 	uint32_t ce_nbytes;
767 	char *ptr;
768 	qdf_dma_addr_t base_addr;
769 	struct CE_ring_state *ce_ring;
770 	uint32_t desc_size;
771 	struct hif_softc *scn = CE_state->scn;
772 
773 	ce_nbytes = sizeof(struct CE_ring_state)
774 		+ (nentries * sizeof(void *));
775 	ptr = qdf_mem_malloc(ce_nbytes);
776 	if (!ptr)
777 		return NULL;
778 
779 	ce_ring = (struct CE_ring_state *)ptr;
780 	ptr += sizeof(struct CE_ring_state);
781 	ce_ring->nentries = nentries;
782 	ce_ring->nentries_mask = nentries - 1;
783 
784 	ce_ring->low_water_mark_nentries = 0;
785 	ce_ring->high_water_mark_nentries = nentries;
786 	ce_ring->per_transfer_context = (void **)ptr;
787 
788 	desc_size = ce_get_desc_size(scn, ring_type);
789 
790 	/* Legacy platforms that do not support cache
791 	 * coherent DMA are unsupported
792 	 */
793 	ce_ring->base_addr_owner_space_unaligned =
794 		qdf_mem_alloc_consistent(scn->qdf_dev,
795 				scn->qdf_dev->dev,
796 				(nentries *
797 				 desc_size +
798 				 CE_DESC_RING_ALIGN),
799 				&base_addr);
800 	if (ce_ring->base_addr_owner_space_unaligned
801 			== NULL) {
802 		HIF_ERROR("%s: ring has no DMA mem",
803 				__func__);
804 		qdf_mem_free(ptr);
805 		return NULL;
806 	}
807 	ce_ring->base_addr_CE_space_unaligned = base_addr;
808 
809 	/* Correctly initialize memory to 0 to
810 	 * prevent garbage data crashing system
811 	 * when download firmware
812 	 */
813 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
814 			nentries * desc_size +
815 			CE_DESC_RING_ALIGN);
816 
817 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
818 
819 		ce_ring->base_addr_CE_space =
820 			(ce_ring->base_addr_CE_space_unaligned +
821 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
822 
823 		ce_ring->base_addr_owner_space = (void *)
824 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
825 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
826 	} else {
827 		ce_ring->base_addr_CE_space =
828 				ce_ring->base_addr_CE_space_unaligned;
829 		ce_ring->base_addr_owner_space =
830 				ce_ring->base_addr_owner_space_unaligned;
831 	}
832 
833 	return ce_ring;
834 }
835 
836 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
837 			uint32_t ce_id, struct CE_ring_state *ring,
838 			struct CE_attr *attr)
839 {
840 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
841 
842 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
843 					      ring, attr);
844 }
845 
846 int hif_ce_bus_early_suspend(struct hif_softc *scn)
847 {
848 	uint8_t ul_pipe, dl_pipe;
849 	int ce_id, status, ul_is_polled, dl_is_polled;
850 	struct CE_state *ce_state;
851 
852 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
853 					 &ul_pipe, &dl_pipe,
854 					 &ul_is_polled, &dl_is_polled);
855 	if (status) {
856 		HIF_ERROR("%s: pipe_mapping failure", __func__);
857 		return status;
858 	}
859 
860 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
861 		if (ce_id == ul_pipe)
862 			continue;
863 		if (ce_id == dl_pipe)
864 			continue;
865 
866 		ce_state = scn->ce_id_to_state[ce_id];
867 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
868 		if (ce_state->state == CE_RUNNING)
869 			ce_state->state = CE_PAUSED;
870 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
871 	}
872 
873 	return status;
874 }
875 
876 int hif_ce_bus_late_resume(struct hif_softc *scn)
877 {
878 	int ce_id;
879 	struct CE_state *ce_state;
880 	int write_index;
881 	bool index_updated;
882 
883 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
884 		ce_state = scn->ce_id_to_state[ce_id];
885 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
886 		if (ce_state->state == CE_PENDING) {
887 			write_index = ce_state->src_ring->write_index;
888 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
889 					write_index);
890 			ce_state->state = CE_RUNNING;
891 			index_updated = true;
892 		} else {
893 			index_updated = false;
894 		}
895 
896 		if (ce_state->state == CE_PAUSED)
897 			ce_state->state = CE_RUNNING;
898 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
899 
900 		if (index_updated)
901 			hif_record_ce_desc_event(scn, ce_id,
902 				RESUME_WRITE_INDEX_UPDATE,
903 				NULL, NULL, write_index, 0);
904 	}
905 
906 	return 0;
907 }
908 
909 /**
910  * ce_oom_recovery() - try to recover rx ce from oom condition
911  * @context: CE_state of the CE with oom rx ring
912  *
913  * the executing work Will continue to be rescheduled untill
914  * at least 1 descriptor is successfully posted to the rx ring.
915  *
916  * return: none
917  */
918 static void ce_oom_recovery(void *context)
919 {
920 	struct CE_state *ce_state = context;
921 	struct hif_softc *scn = ce_state->scn;
922 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
923 	struct HIF_CE_pipe_info *pipe_info =
924 		&ce_softc->pipe_info[ce_state->id];
925 
926 	hif_post_recv_buffers_for_pipe(pipe_info);
927 }
928 
929 #if HIF_CE_DEBUG_DATA_BUF
930 /**
931  * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
932  * the CE descriptors.
933  * Allocate HIF_CE_HISTORY_MAX records by CE_DEBUG_MAX_DATA_BUF_SIZE
934  * @scn: hif scn handle
935  * ce_id: Copy Engine Id
936  *
937  * Return: QDF_STATUS
938  */
939 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
940 {
941 	struct hif_ce_desc_event *event = NULL;
942 	struct hif_ce_desc_event *hist_ev = NULL;
943 	uint32_t index = 0;
944 
945 	hist_ev =
946 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
947 
948 	if (!hist_ev)
949 		return QDF_STATUS_E_NOMEM;
950 
951 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
952 		event = &hist_ev[index];
953 		event->data =
954 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
955 		if (event->data == NULL)
956 			return QDF_STATUS_E_NOMEM;
957 	}
958 	return QDF_STATUS_SUCCESS;
959 }
960 
961 /**
962  * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
963  * the CE descriptors.
964  * @scn: hif scn handle
965  * ce_id: Copy Engine Id
966  *
967  * Return:
968  */
969 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
970 {
971 	struct hif_ce_desc_event *event = NULL;
972 	struct hif_ce_desc_event *hist_ev = NULL;
973 	uint32_t index = 0;
974 
975 	hist_ev =
976 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
977 
978 	if (!hist_ev)
979 		return;
980 
981 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
982 		event = &hist_ev[index];
983 		if (event->data != NULL)
984 			qdf_mem_free(event->data);
985 		event->data = NULL;
986 		event = NULL;
987 	}
988 }
989 #endif /* HIF_CE_DEBUG_DATA_BUF */
990 
991 /*
992  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
993  * for defined here
994  */
995 #if HIF_CE_DEBUG_DATA_BUF
996 /**
997  * alloc_mem_ce_debug_history() - Allocate mem for the CE descriptors storing
998  * @scn: hif scn handle
999  * ce_id: Copy Engine Id
1000  *
1001  * Return: QDF_STATUS
1002  */
1003 static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
1004 						unsigned int CE_id)
1005 {
1006 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
1007 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
1008 
1009 	if (scn->hif_ce_desc_hist.hist_ev[CE_id] == NULL) {
1010 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
1011 		return QDF_STATUS_E_NOMEM;
1012 	} else {
1013 		scn->hif_ce_desc_hist.enable[CE_id] = 1;
1014 		return QDF_STATUS_SUCCESS;
1015 	}
1016 }
1017 
1018 /**
1019  * free_mem_ce_debug_history() - Free mem allocated for the CE descriptors
1020  * storing.
1021  * @scn: hif scn handle
1022  * ce_id: Copy Engine Id
1023  *
1024  * Return:
1025  */
1026 static inline void free_mem_ce_debug_history(struct hif_softc *scn,
1027 						unsigned int CE_id)
1028 {
1029 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1030 	struct hif_ce_desc_event *hist_ev =
1031 			(struct hif_ce_desc_event *)ce_hist->hist_ev[CE_id];
1032 
1033 	if (!hist_ev)
1034 		return;
1035 
1036 #if HIF_CE_DEBUG_DATA_BUF
1037 	if (ce_hist->data_enable[CE_id] == 1) {
1038 		ce_hist->data_enable[CE_id] = 0;
1039 		free_mem_ce_debug_hist_data(scn, CE_id);
1040 	}
1041 #endif
1042 	ce_hist->enable[CE_id] = 0;
1043 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
1044 	ce_hist->hist_ev[CE_id] = NULL;
1045 }
1046 
1047 /**
1048  * reset_ce_debug_history() - reset the index and ce id used for dumping the
1049  * CE records on the console using sysfs.
1050  * @scn: hif scn handle
1051  *
1052  * Return:
1053  */
1054 static inline void reset_ce_debug_history(struct hif_softc *scn)
1055 {
1056 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1057 	/* Initialise the CE debug history sysfs interface inputs ce_id and
1058 	 * index. Disable data storing
1059 	 */
1060 	ce_hist->hist_index = 0;
1061 	ce_hist->hist_id = 0;
1062 }
1063 #else /*Note: #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
1064 static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
1065 						unsigned int CE_id)
1066 {
1067 	return QDF_STATUS_SUCCESS;
1068 }
1069 
1070 static inline void free_mem_ce_debug_history(struct hif_softc *scn,
1071 						unsigned int CE_id)
1072 {
1073 }
1074 
1075 static inline void reset_ce_debug_history(struct hif_softc *scn)
1076 {
1077 }
1078 #endif /*Note: defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
1079 
1080 /*
1081  * Initialize a Copy Engine based on caller-supplied attributes.
1082  * This may be called once to initialize both source and destination
1083  * rings or it may be called twice for separate source and destination
1084  * initialization. It may be that only one side or the other is
1085  * initialized by software/firmware.
1086  *
1087  * This should be called durring the initialization sequence before
1088  * interupts are enabled, so we don't have to worry about thread safety.
1089  */
1090 struct CE_handle *ce_init(struct hif_softc *scn,
1091 			  unsigned int CE_id, struct CE_attr *attr)
1092 {
1093 	struct CE_state *CE_state;
1094 	uint32_t ctrl_addr;
1095 	unsigned int nentries;
1096 	bool malloc_CE_state = false;
1097 	bool malloc_src_ring = false;
1098 	int status;
1099 
1100 	QDF_ASSERT(CE_id < scn->ce_count);
1101 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
1102 	CE_state = scn->ce_id_to_state[CE_id];
1103 
1104 	if (!CE_state) {
1105 		CE_state =
1106 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
1107 		if (!CE_state) {
1108 			HIF_ERROR("%s: CE_state has no mem", __func__);
1109 			return NULL;
1110 		}
1111 		malloc_CE_state = true;
1112 		qdf_spinlock_create(&CE_state->ce_index_lock);
1113 
1114 		CE_state->id = CE_id;
1115 		CE_state->ctrl_addr = ctrl_addr;
1116 		CE_state->state = CE_RUNNING;
1117 		CE_state->attr_flags = attr->flags;
1118 	}
1119 	CE_state->scn = scn;
1120 
1121 	qdf_atomic_init(&CE_state->rx_pending);
1122 	if (attr == NULL) {
1123 		/* Already initialized; caller wants the handle */
1124 		return (struct CE_handle *)CE_state;
1125 	}
1126 
1127 	if (CE_state->src_sz_max)
1128 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
1129 	else
1130 		CE_state->src_sz_max = attr->src_sz_max;
1131 
1132 	ce_init_ce_desc_event_log(scn, CE_id,
1133 				  attr->src_nentries + attr->dest_nentries);
1134 
1135 	/* source ring setup */
1136 	nentries = attr->src_nentries;
1137 	if (nentries) {
1138 		struct CE_ring_state *src_ring;
1139 
1140 		nentries = roundup_pwr2(nentries);
1141 		if (CE_state->src_ring) {
1142 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
1143 		} else {
1144 			src_ring = CE_state->src_ring =
1145 				ce_alloc_ring_state(CE_state,
1146 						CE_RING_SRC,
1147 						nentries);
1148 			if (!src_ring) {
1149 				/* cannot allocate src ring. If the
1150 				 * CE_state is allocated locally free
1151 				 * CE_State and return error.
1152 				 */
1153 				HIF_ERROR("%s: src ring has no mem", __func__);
1154 				if (malloc_CE_state) {
1155 					/* allocated CE_state locally */
1156 					qdf_mem_free(CE_state);
1157 					malloc_CE_state = false;
1158 				}
1159 				return NULL;
1160 			}
1161 			/* we can allocate src ring. Mark that the src ring is
1162 			 * allocated locally
1163 			 */
1164 			malloc_src_ring = true;
1165 
1166 			/*
1167 			 * Also allocate a shadow src ring in
1168 			 * regular mem to use for faster access.
1169 			 */
1170 			src_ring->shadow_base_unaligned =
1171 				qdf_mem_malloc(nentries *
1172 					       sizeof(struct CE_src_desc) +
1173 					       CE_DESC_RING_ALIGN);
1174 			if (src_ring->shadow_base_unaligned == NULL) {
1175 				HIF_ERROR("%s: src ring no shadow_base mem",
1176 					  __func__);
1177 				goto error_no_dma_mem;
1178 			}
1179 			src_ring->shadow_base = (struct CE_src_desc *)
1180 				(((size_t) src_ring->shadow_base_unaligned +
1181 				CE_DESC_RING_ALIGN - 1) &
1182 				 ~(CE_DESC_RING_ALIGN - 1));
1183 
1184 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
1185 					       src_ring, attr);
1186 			if (status < 0)
1187 				goto error_target_access;
1188 
1189 			ce_ring_test_initial_indexes(CE_id, src_ring,
1190 						     "src_ring");
1191 		}
1192 	}
1193 
1194 	/* destination ring setup */
1195 	nentries = attr->dest_nentries;
1196 	if (nentries) {
1197 		struct CE_ring_state *dest_ring;
1198 
1199 		nentries = roundup_pwr2(nentries);
1200 		if (CE_state->dest_ring) {
1201 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
1202 		} else {
1203 			dest_ring = CE_state->dest_ring =
1204 				ce_alloc_ring_state(CE_state,
1205 						CE_RING_DEST,
1206 						nentries);
1207 			if (!dest_ring) {
1208 				/* cannot allocate dst ring. If the CE_state
1209 				 * or src ring is allocated locally free
1210 				 * CE_State and src ring and return error.
1211 				 */
1212 				HIF_ERROR("%s: dest ring has no mem",
1213 					  __func__);
1214 				goto error_no_dma_mem;
1215 			}
1216 
1217 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
1218 				      dest_ring, attr);
1219 			if (status < 0)
1220 				goto error_target_access;
1221 
1222 			ce_ring_test_initial_indexes(CE_id, dest_ring,
1223 						     "dest_ring");
1224 
1225 			/* For srng based target, init status ring here */
1226 			if (ce_srng_based(CE_state->scn)) {
1227 				CE_state->status_ring =
1228 					ce_alloc_ring_state(CE_state,
1229 							CE_RING_STATUS,
1230 							nentries);
1231 				if (CE_state->status_ring == NULL) {
1232 					/*Allocation failed. Cleanup*/
1233 					qdf_mem_free(CE_state->dest_ring);
1234 					if (malloc_src_ring) {
1235 						qdf_mem_free
1236 							(CE_state->src_ring);
1237 						CE_state->src_ring = NULL;
1238 						malloc_src_ring = false;
1239 					}
1240 					if (malloc_CE_state) {
1241 						/* allocated CE_state locally */
1242 						scn->ce_id_to_state[CE_id] =
1243 							NULL;
1244 						qdf_mem_free(CE_state);
1245 						malloc_CE_state = false;
1246 					}
1247 
1248 					return NULL;
1249 				}
1250 
1251 				status = ce_ring_setup(scn, CE_RING_STATUS,
1252 					       CE_id, CE_state->status_ring,
1253 					       attr);
1254 				if (status < 0)
1255 					goto error_target_access;
1256 
1257 			}
1258 
1259 			/* epping */
1260 			/* poll timer */
1261 			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL) ||
1262 					scn->polled_mode_on) {
1263 				qdf_timer_init(scn->qdf_dev,
1264 						       &CE_state->poll_timer,
1265 						       ce_poll_timeout,
1266 						       CE_state,
1267 						       QDF_TIMER_TYPE_SW);
1268 				CE_state->timer_inited = true;
1269 				qdf_timer_mod(&CE_state->poll_timer,
1270 						      CE_POLL_TIMEOUT);
1271 			}
1272 		}
1273 	}
1274 
1275 	if (!ce_srng_based(scn)) {
1276 		/* Enable CE error interrupts */
1277 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1278 			goto error_target_access;
1279 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1280 		if (Q_TARGET_ACCESS_END(scn) < 0)
1281 			goto error_target_access;
1282 	}
1283 
1284 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1285 			ce_oom_recovery, CE_state);
1286 
1287 	/* update the htt_data attribute */
1288 	ce_mark_datapath(CE_state);
1289 	scn->ce_id_to_state[CE_id] = CE_state;
1290 
1291 	alloc_mem_ce_debug_history(scn, CE_id);
1292 
1293 	return (struct CE_handle *)CE_state;
1294 
1295 error_target_access:
1296 error_no_dma_mem:
1297 	ce_fini((struct CE_handle *)CE_state);
1298 	return NULL;
1299 }
1300 
1301 #ifdef WLAN_FEATURE_FASTPATH
1302 /**
1303  * hif_enable_fastpath() Update that we have enabled fastpath mode
1304  * @hif_ctx: HIF context
1305  *
1306  * For use in data path
1307  *
1308  * Retrun: void
1309  */
1310 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1311 {
1312 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1313 
1314 	if (ce_srng_based(scn)) {
1315 		HIF_INFO("%s, srng rings do not support fastpath", __func__);
1316 		return;
1317 	}
1318 	HIF_DBG("%s, Enabling fastpath mode", __func__);
1319 	scn->fastpath_mode_on = true;
1320 }
1321 
1322 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx)
1323 {
1324 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1325 	HIF_DBG("%s, Enabling polled mode", __func__);
1326 
1327 	scn->polled_mode_on = true;
1328 }
1329 
1330 /**
1331  * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled
1332  * @hif_ctx: HIF Context
1333  *
1334  * For use in data path to skip HTC
1335  *
1336  * Return: bool
1337  */
1338 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1339 {
1340 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1341 
1342 	return scn->fastpath_mode_on;
1343 }
1344 
1345 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1346 {
1347 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1348 
1349 	return scn->polled_mode_on;
1350 }
1351 
1352 /**
1353  * hif_get_ce_handle - API to get CE handle for FastPath mode
1354  * @hif_ctx: HIF Context
1355  * @id: CopyEngine Id
1356  *
1357  * API to return CE handle for fastpath mode
1358  *
1359  * Return: void
1360  */
1361 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1362 {
1363 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1364 
1365 	return scn->ce_id_to_state[id];
1366 }
1367 
1368 /**
1369  * ce_h2t_tx_ce_cleanup() Place holder function for H2T CE cleanup.
1370  * No processing is required inside this function.
1371  * @ce_hdl: Cope engine handle
1372  * Using an assert, this function makes sure that,
1373  * the TX CE has been processed completely.
1374  *
1375  * This is called while dismantling CE structures. No other thread
1376  * should be using these structures while dismantling is occuring
1377  * therfore no locking is needed.
1378  *
1379  * Return: none
1380  */
1381 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1382 {
1383 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1384 	struct CE_ring_state *src_ring = ce_state->src_ring;
1385 	struct hif_softc *sc = ce_state->scn;
1386 	uint32_t sw_index, write_index;
1387 
1388 	if (hif_is_nss_wifi_enabled(sc))
1389 		return;
1390 
1391 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
1392 		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
1393 			 __func__, __LINE__);
1394 		sw_index = src_ring->sw_index;
1395 		write_index = src_ring->sw_index;
1396 
1397 		/* At this point Tx CE should be clean */
1398 		qdf_assert_always(sw_index == write_index);
1399 	}
1400 }
1401 
1402 /**
1403  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1404  * @ce_hdl: Handle to CE
1405  *
1406  * These buffers are never allocated on the fly, but
1407  * are allocated only once during HIF start and freed
1408  * only once during HIF stop.
1409  * NOTE:
1410  * The assumption here is there is no in-flight DMA in progress
1411  * currently, so that buffers can be freed up safely.
1412  *
1413  * Return: NONE
1414  */
1415 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1416 {
1417 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1418 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
1419 	qdf_nbuf_t nbuf;
1420 	int i;
1421 
1422 	if (ce_state->scn->fastpath_mode_on == false)
1423 		return;
1424 
1425 	if (!ce_state->htt_rx_data)
1426 		return;
1427 
1428 	/*
1429 	 * when fastpath_mode is on and for datapath CEs. Unlike other CE's,
1430 	 * this CE is completely full: does not leave one blank space, to
1431 	 * distinguish between empty queue & full queue. So free all the
1432 	 * entries.
1433 	 */
1434 	for (i = 0; i < dst_ring->nentries; i++) {
1435 		nbuf = dst_ring->per_transfer_context[i];
1436 
1437 		/*
1438 		 * The reasons for doing this check are:
1439 		 * 1) Protect against calling cleanup before allocating buffers
1440 		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1441 		 *    could have a partially filled ring, because of a memory
1442 		 *    allocation failure in the middle of allocating ring.
1443 		 *    This check accounts for that case, checking
1444 		 *    fastpath_mode_on flag or started flag would not have
1445 		 *    covered that case. This is not in performance path,
1446 		 *    so OK to do this.
1447 		 */
1448 		if (nbuf) {
1449 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1450 					      QDF_DMA_FROM_DEVICE);
1451 			qdf_nbuf_free(nbuf);
1452 		}
1453 	}
1454 }
1455 
1456 /**
1457  * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1458  * @scn: HIF handle
1459  *
1460  * Datapath Rx CEs are special case, where we reuse all the message buffers.
1461  * Hence we have to post all the entries in the pipe, even, in the beginning
1462  * unlike for other CE pipes where one less than dest_nentries are filled in
1463  * the beginning.
1464  *
1465  * Return: None
1466  */
1467 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1468 {
1469 	int pipe_num;
1470 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1471 
1472 	if (scn->fastpath_mode_on == false)
1473 		return;
1474 
1475 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1476 		struct HIF_CE_pipe_info *pipe_info =
1477 			&hif_state->pipe_info[pipe_num];
1478 		struct CE_state *ce_state =
1479 			scn->ce_id_to_state[pipe_info->pipe_num];
1480 
1481 		if (ce_state->htt_rx_data)
1482 			atomic_inc(&pipe_info->recv_bufs_needed);
1483 	}
1484 }
1485 #else
1486 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1487 {
1488 }
1489 
1490 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
1491 {
1492 	return false;
1493 }
1494 
1495 static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
1496 {
1497 	return false;
1498 }
1499 #endif /* WLAN_FEATURE_FASTPATH */
1500 
1501 void ce_fini(struct CE_handle *copyeng)
1502 {
1503 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1504 	unsigned int CE_id = CE_state->id;
1505 	struct hif_softc *scn = CE_state->scn;
1506 	uint32_t desc_size;
1507 
1508 	bool inited = CE_state->timer_inited;
1509 	CE_state->state = CE_UNUSED;
1510 	scn->ce_id_to_state[CE_id] = NULL;
1511 	/* Set the flag to false first to stop processing in ce_poll_timeout */
1512 	CE_state->timer_inited = false;
1513 	qdf_lro_deinit(CE_state->lro_data);
1514 
1515 	if (CE_state->src_ring) {
1516 		/* Cleanup the datapath Tx ring */
1517 		ce_h2t_tx_ce_cleanup(copyeng);
1518 
1519 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
1520 		if (CE_state->src_ring->shadow_base_unaligned)
1521 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
1522 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
1523 			qdf_mem_free_consistent(scn->qdf_dev,
1524 						scn->qdf_dev->dev,
1525 					    (CE_state->src_ring->nentries *
1526 					     desc_size +
1527 					     CE_DESC_RING_ALIGN),
1528 					    CE_state->src_ring->
1529 					    base_addr_owner_space_unaligned,
1530 					    CE_state->src_ring->
1531 					    base_addr_CE_space, 0);
1532 		qdf_mem_free(CE_state->src_ring);
1533 	}
1534 	if (CE_state->dest_ring) {
1535 		/* Cleanup the datapath Rx ring */
1536 		ce_t2h_msg_ce_cleanup(copyeng);
1537 
1538 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
1539 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
1540 			qdf_mem_free_consistent(scn->qdf_dev,
1541 						scn->qdf_dev->dev,
1542 					    (CE_state->dest_ring->nentries *
1543 					     desc_size +
1544 					     CE_DESC_RING_ALIGN),
1545 					    CE_state->dest_ring->
1546 					    base_addr_owner_space_unaligned,
1547 					    CE_state->dest_ring->
1548 					    base_addr_CE_space, 0);
1549 		qdf_mem_free(CE_state->dest_ring);
1550 
1551 		/* epping */
1552 		if (inited) {
1553 			qdf_timer_free(&CE_state->poll_timer);
1554 		}
1555 	}
1556 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
1557 		/* Cleanup the datapath Tx ring */
1558 		ce_h2t_tx_ce_cleanup(copyeng);
1559 
1560 		if (CE_state->status_ring->shadow_base_unaligned)
1561 			qdf_mem_free(
1562 				CE_state->status_ring->shadow_base_unaligned);
1563 
1564 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
1565 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
1566 			qdf_mem_free_consistent(scn->qdf_dev,
1567 						scn->qdf_dev->dev,
1568 					    (CE_state->status_ring->nentries *
1569 					     desc_size +
1570 					     CE_DESC_RING_ALIGN),
1571 					    CE_state->status_ring->
1572 					    base_addr_owner_space_unaligned,
1573 					    CE_state->status_ring->
1574 					    base_addr_CE_space, 0);
1575 		qdf_mem_free(CE_state->status_ring);
1576 	}
1577 
1578 	free_mem_ce_debug_history(scn, CE_id);
1579 	reset_ce_debug_history(scn);
1580 	ce_deinit_ce_desc_event_log(scn, CE_id);
1581 
1582 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
1583 	qdf_mem_free(CE_state);
1584 }
1585 
1586 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
1587 {
1588 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1589 
1590 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
1591 		  sizeof(hif_state->msg_callbacks_pending));
1592 	qdf_mem_zero(&hif_state->msg_callbacks_current,
1593 		  sizeof(hif_state->msg_callbacks_current));
1594 }
1595 
1596 /* Send the first nbytes bytes of the buffer */
1597 QDF_STATUS
1598 hif_send_head(struct hif_opaque_softc *hif_ctx,
1599 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
1600 	      qdf_nbuf_t nbuf, unsigned int data_attr)
1601 {
1602 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1603 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1604 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1605 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
1606 	int bytes = nbytes, nfrags = 0;
1607 	struct ce_sendlist sendlist;
1608 	int status, i = 0;
1609 	unsigned int mux_id = 0;
1610 
1611 	QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));
1612 
1613 	transfer_id =
1614 		(mux_id & MUX_ID_MASK) |
1615 		(transfer_id & TRANSACTION_ID_MASK);
1616 	data_attr &= DESC_DATA_FLAG_MASK;
1617 	/*
1618 	 * The common case involves sending multiple fragments within a
1619 	 * single download (the tx descriptor and the tx frame header).
1620 	 * So, optimize for the case of multiple fragments by not even
1621 	 * checking whether it's necessary to use a sendlist.
1622 	 * The overhead of using a sendlist for a single buffer download
1623 	 * is not a big deal, since it happens rarely (for WMI messages).
1624 	 */
1625 	ce_sendlist_init(&sendlist);
1626 	do {
1627 		qdf_dma_addr_t frag_paddr;
1628 		int frag_bytes;
1629 
1630 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
1631 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
1632 		/*
1633 		 * Clear the packet offset for all but the first CE desc.
1634 		 */
1635 		if (i++ > 0)
1636 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
1637 
1638 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
1639 				    frag_bytes >
1640 				    bytes ? bytes : frag_bytes,
1641 				    qdf_nbuf_get_frag_is_wordstream
1642 				    (nbuf,
1643 				    nfrags) ? 0 :
1644 				    CE_SEND_FLAG_SWAP_DISABLE,
1645 				    data_attr);
1646 		if (status != QDF_STATUS_SUCCESS) {
1647 			HIF_ERROR("%s: error, frag_num %d larger than limit",
1648 				__func__, nfrags);
1649 			return status;
1650 		}
1651 		bytes -= frag_bytes;
1652 		nfrags++;
1653 	} while (bytes > 0);
1654 
1655 	/* Make sure we have resources to handle this request */
1656 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
1657 	if (pipe_info->num_sends_allowed < nfrags) {
1658 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1659 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
1660 		return QDF_STATUS_E_RESOURCES;
1661 	}
1662 	pipe_info->num_sends_allowed -= nfrags;
1663 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1664 
1665 	if (qdf_unlikely(ce_hdl == NULL)) {
1666 		HIF_ERROR("%s: error CE handle is null", __func__);
1667 		return A_ERROR;
1668 	}
1669 
1670 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
1671 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
1672 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
1673 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
1674 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
1675 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
1676 
1677 	return status;
1678 }
1679 
1680 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1681 								int force)
1682 {
1683 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1684 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1685 
1686 	if (!force) {
1687 		int resources;
1688 		/*
1689 		 * Decide whether to actually poll for completions, or just
1690 		 * wait for a later chance. If there seem to be plenty of
1691 		 * resources left, then just wait, since checking involves
1692 		 * reading a CE register, which is a relatively expensive
1693 		 * operation.
1694 		 */
1695 		resources = hif_get_free_queue_number(hif_ctx, pipe);
1696 		/*
1697 		 * If at least 50% of the total resources are still available,
1698 		 * don't bother checking again yet.
1699 		 */
1700 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
1701 									 1))
1702 			return;
1703 	}
1704 #if ATH_11AC_TXCOMPACT
1705 	ce_per_engine_servicereap(scn, pipe);
1706 #else
1707 	ce_per_engine_service(scn, pipe);
1708 #endif
1709 }
1710 
1711 uint16_t
1712 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
1713 {
1714 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1715 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1716 	uint16_t rv;
1717 
1718 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
1719 	rv = pipe_info->num_sends_allowed;
1720 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1721 	return rv;
1722 }
1723 
1724 /* Called by lower (CE) layer when a send to Target completes. */
1725 static void
1726 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
1727 		     void *transfer_context, qdf_dma_addr_t CE_data,
1728 		     unsigned int nbytes, unsigned int transfer_id,
1729 		     unsigned int sw_index, unsigned int hw_index,
1730 		     unsigned int toeplitz_hash_result)
1731 {
1732 	struct HIF_CE_pipe_info *pipe_info =
1733 		(struct HIF_CE_pipe_info *)ce_context;
1734 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
1735 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1736 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
1737 	struct hif_msg_callbacks *msg_callbacks =
1738 		&pipe_info->pipe_callbacks;
1739 
1740 	do {
1741 		/*
1742 		 * The upper layer callback will be triggered
1743 		 * when last fragment is complteted.
1744 		 */
1745 		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
1746 			if (scn->target_status == TARGET_STATUS_RESET) {
1747 
1748 				qdf_nbuf_unmap_single(scn->qdf_dev,
1749 						      transfer_context,
1750 						      QDF_DMA_TO_DEVICE);
1751 				qdf_nbuf_free(transfer_context);
1752 			} else
1753 				msg_callbacks->txCompletionHandler(
1754 					msg_callbacks->Context,
1755 					transfer_context, transfer_id,
1756 					toeplitz_hash_result);
1757 		}
1758 
1759 		qdf_spin_lock(&pipe_info->completion_freeq_lock);
1760 		pipe_info->num_sends_allowed++;
1761 		qdf_spin_unlock(&pipe_info->completion_freeq_lock);
1762 	} while (ce_completed_send_next(copyeng,
1763 			&ce_context, &transfer_context,
1764 			&CE_data, &nbytes, &transfer_id,
1765 			&sw_idx, &hw_idx,
1766 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
1767 }
1768 
1769 /**
1770  * hif_ce_do_recv(): send message from copy engine to upper layers
1771  * @msg_callbacks: structure containing callback and callback context
1772  * @netbuff: skb containing message
1773  * @nbytes: number of bytes in the message
1774  * @pipe_info: used for the pipe_number info
1775  *
1776  * Checks the packet length, configures the lenght in the netbuff,
1777  * and calls the upper layer callback.
1778  *
1779  * return: None
1780  */
1781 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
1782 		qdf_nbuf_t netbuf, int nbytes,
1783 		struct HIF_CE_pipe_info *pipe_info) {
1784 	if (nbytes <= pipe_info->buf_sz) {
1785 		qdf_nbuf_set_pktlen(netbuf, nbytes);
1786 		msg_callbacks->
1787 			rxCompletionHandler(msg_callbacks->Context,
1788 					netbuf, pipe_info->pipe_num);
1789 	} else {
1790 		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
1791 				__func__, netbuf, nbytes);
1792 
1793 		qdf_nbuf_free(netbuf);
1794 	}
1795 }
1796 
1797 /* Called by lower (CE) layer when data is received from the Target. */
1798 static void
1799 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
1800 		     void *transfer_context, qdf_dma_addr_t CE_data,
1801 		     unsigned int nbytes, unsigned int transfer_id,
1802 		     unsigned int flags)
1803 {
1804 	struct HIF_CE_pipe_info *pipe_info =
1805 		(struct HIF_CE_pipe_info *)ce_context;
1806 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
1807 	struct CE_state *ce_state = (struct CE_state *) copyeng;
1808 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1809 #ifdef HIF_PCI
1810 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
1811 #endif
1812 	struct hif_msg_callbacks *msg_callbacks =
1813 		 &pipe_info->pipe_callbacks;
1814 
1815 	do {
1816 #ifdef HIF_PCI
1817 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
1818 #endif
1819 		qdf_nbuf_unmap_single(scn->qdf_dev,
1820 				      (qdf_nbuf_t) transfer_context,
1821 				      QDF_DMA_FROM_DEVICE);
1822 
1823 		atomic_inc(&pipe_info->recv_bufs_needed);
1824 		hif_post_recv_buffers_for_pipe(pipe_info);
1825 		if (scn->target_status == TARGET_STATUS_RESET)
1826 			qdf_nbuf_free(transfer_context);
1827 		else
1828 			hif_ce_do_recv(msg_callbacks, transfer_context,
1829 				nbytes, pipe_info);
1830 
1831 		/* Set up force_break flag if num of receices reaches
1832 		 * MAX_NUM_OF_RECEIVES
1833 		 */
1834 		ce_state->receive_count++;
1835 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
1836 			ce_state->force_break = 1;
1837 			break;
1838 		}
1839 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
1840 					&CE_data, &nbytes, &transfer_id,
1841 					&flags) == QDF_STATUS_SUCCESS);
1842 
1843 }
1844 
1845 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
1846 
1847 void
1848 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
1849 	      struct hif_msg_callbacks *callbacks)
1850 {
1851 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1852 
1853 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
1854 	spin_lock_init(&pcie_access_log_lock);
1855 #endif
1856 	/* Save callbacks for later installation */
1857 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
1858 		 sizeof(hif_state->msg_callbacks_pending));
1859 
1860 }
1861 
1862 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
1863 {
1864 	struct CE_handle *ce_diag = hif_state->ce_diag;
1865 	int pipe_num;
1866 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1867 	struct hif_msg_callbacks *hif_msg_callbacks =
1868 		&hif_state->msg_callbacks_current;
1869 
1870 	/* daemonize("hif_compl_thread"); */
1871 
1872 	if (scn->ce_count == 0) {
1873 		HIF_ERROR("%s: Invalid ce_count", __func__);
1874 		return -EINVAL;
1875 	}
1876 
1877 	if (!hif_msg_callbacks ||
1878 			!hif_msg_callbacks->rxCompletionHandler ||
1879 			!hif_msg_callbacks->txCompletionHandler) {
1880 		HIF_ERROR("%s: no completion handler registered", __func__);
1881 		return -EFAULT;
1882 	}
1883 
1884 	A_TARGET_ACCESS_LIKELY(scn);
1885 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1886 		struct CE_attr attr;
1887 		struct HIF_CE_pipe_info *pipe_info;
1888 
1889 		pipe_info = &hif_state->pipe_info[pipe_num];
1890 		if (pipe_info->ce_hdl == ce_diag)
1891 			continue;       /* Handle Diagnostic CE specially */
1892 		attr = hif_state->host_ce_config[pipe_num];
1893 		if (attr.src_nentries) {
1894 			/* pipe used to send to target */
1895 			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
1896 					 __func__, pipe_num, pipe_info);
1897 			ce_send_cb_register(pipe_info->ce_hdl,
1898 					    hif_pci_ce_send_done, pipe_info,
1899 					    attr.flags & CE_ATTR_DISABLE_INTR);
1900 			pipe_info->num_sends_allowed = attr.src_nentries - 1;
1901 		}
1902 		if (attr.dest_nentries) {
1903 			/* pipe used to receive from target */
1904 			ce_recv_cb_register(pipe_info->ce_hdl,
1905 					    hif_pci_ce_recv_data, pipe_info,
1906 					    attr.flags & CE_ATTR_DISABLE_INTR);
1907 		}
1908 
1909 		if (attr.src_nentries)
1910 			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
1911 
1912 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
1913 					sizeof(pipe_info->pipe_callbacks));
1914 	}
1915 
1916 	A_TARGET_ACCESS_UNLIKELY(scn);
1917 	return 0;
1918 }
1919 
1920 /*
1921  * Install pending msg callbacks.
1922  *
1923  * TBDXXX: This hack is needed because upper layers install msg callbacks
1924  * for use with HTC before BMI is done; yet this HIF implementation
1925  * needs to continue to use BMI msg callbacks. Really, upper layers
1926  * should not register HTC callbacks until AFTER BMI phase.
1927  */
1928 static void hif_msg_callbacks_install(struct hif_softc *scn)
1929 {
1930 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1931 
1932 	qdf_mem_copy(&hif_state->msg_callbacks_current,
1933 		 &hif_state->msg_callbacks_pending,
1934 		 sizeof(hif_state->msg_callbacks_pending));
1935 }
1936 
1937 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
1938 							uint8_t *DLPipe)
1939 {
1940 	int ul_is_polled, dl_is_polled;
1941 
1942 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
1943 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
1944 }
1945 
1946 /**
1947  * hif_dump_pipe_debug_count() - Log error count
1948  * @scn: hif_softc pointer.
1949  *
1950  * Output the pipe error counts of each pipe to log file
1951  *
1952  * Return: N/A
1953  */
1954 void hif_dump_pipe_debug_count(struct hif_softc *scn)
1955 {
1956 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1957 	int pipe_num;
1958 
1959 	if (hif_state == NULL) {
1960 		HIF_ERROR("%s hif_state is NULL", __func__);
1961 		return;
1962 	}
1963 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1964 		struct HIF_CE_pipe_info *pipe_info;
1965 
1966 	pipe_info = &hif_state->pipe_info[pipe_num];
1967 
1968 	if (pipe_info->nbuf_alloc_err_count > 0 ||
1969 			pipe_info->nbuf_dma_err_count > 0 ||
1970 			pipe_info->nbuf_ce_enqueue_err_count)
1971 		HIF_ERROR(
1972 			"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
1973 			__func__, pipe_info->pipe_num,
1974 			atomic_read(&pipe_info->recv_bufs_needed),
1975 			pipe_info->nbuf_alloc_err_count,
1976 			pipe_info->nbuf_dma_err_count,
1977 			pipe_info->nbuf_ce_enqueue_err_count);
1978 	}
1979 }
1980 
1981 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
1982 					  void *nbuf, uint32_t *error_cnt,
1983 					  enum hif_ce_event_type failure_type,
1984 					  const char *failure_type_string)
1985 {
1986 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
1987 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
1988 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
1989 	int ce_id = CE_state->id;
1990 	uint32_t error_cnt_tmp;
1991 
1992 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
1993 	error_cnt_tmp = ++(*error_cnt);
1994 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
1995 	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
1996 		  __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
1997 		  failure_type_string);
1998 	hif_record_ce_desc_event(scn, ce_id, failure_type,
1999 				 NULL, nbuf, bufs_needed_tmp, 0);
2000 	/* if we fail to allocate the last buffer for an rx pipe,
2001 	 *	there is no trigger to refill the ce and we will
2002 	 *	eventually crash
2003 	 */
2004 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
2005 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
2006 
2007 }
2008 
2009 
2010 
2011 
2012 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
2013 {
2014 	struct CE_handle *ce_hdl;
2015 	qdf_size_t buf_sz;
2016 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2017 	QDF_STATUS status;
2018 	uint32_t bufs_posted = 0;
2019 
2020 	buf_sz = pipe_info->buf_sz;
2021 	if (buf_sz == 0) {
2022 		/* Unused Copy Engine */
2023 		return QDF_STATUS_SUCCESS;
2024 	}
2025 
2026 	ce_hdl = pipe_info->ce_hdl;
2027 
2028 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2029 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
2030 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
2031 		qdf_nbuf_t nbuf;
2032 
2033 		atomic_dec(&pipe_info->recv_bufs_needed);
2034 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2035 
2036 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
2037 		if (!nbuf) {
2038 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2039 					&pipe_info->nbuf_alloc_err_count,
2040 					 HIF_RX_NBUF_ALLOC_FAILURE,
2041 					"HIF_RX_NBUF_ALLOC_FAILURE");
2042 			return QDF_STATUS_E_NOMEM;
2043 		}
2044 
2045 		/*
2046 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
2047 		 * CE_data = dma_map_single(dev, data, buf_sz, );
2048 		 * DMA_FROM_DEVICE);
2049 		 */
2050 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
2051 					    QDF_DMA_FROM_DEVICE);
2052 
2053 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2054 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2055 					&pipe_info->nbuf_dma_err_count,
2056 					 HIF_RX_NBUF_MAP_FAILURE,
2057 					"HIF_RX_NBUF_MAP_FAILURE");
2058 			qdf_nbuf_free(nbuf);
2059 			return status;
2060 		}
2061 
2062 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
2063 
2064 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
2065 					       buf_sz, DMA_FROM_DEVICE);
2066 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
2067 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2068 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2069 					&pipe_info->nbuf_ce_enqueue_err_count,
2070 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
2071 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
2072 
2073 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2074 						QDF_DMA_FROM_DEVICE);
2075 			qdf_nbuf_free(nbuf);
2076 			return status;
2077 		}
2078 
2079 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2080 		bufs_posted++;
2081 	}
2082 	pipe_info->nbuf_alloc_err_count =
2083 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
2084 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2085 	pipe_info->nbuf_dma_err_count =
2086 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
2087 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2088 	pipe_info->nbuf_ce_enqueue_err_count =
2089 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
2090 	pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
2091 
2092 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2093 
2094 	return QDF_STATUS_SUCCESS;
2095 }
2096 
2097 /*
2098  * Try to post all desired receive buffers for all pipes.
2099  * Returns 0 for non fastpath rx copy engine as
2100  * oom_allocation_work will be scheduled to recover any
2101  * failures, non-zero if unable to completely replenish
2102  * receive buffers for fastpath rx Copy engine.
2103  */
2104 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
2105 {
2106 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2107 	int pipe_num;
2108 	struct CE_state *ce_state;
2109 	QDF_STATUS qdf_status;
2110 
2111 	A_TARGET_ACCESS_LIKELY(scn);
2112 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2113 		struct HIF_CE_pipe_info *pipe_info;
2114 
2115 		ce_state = scn->ce_id_to_state[pipe_num];
2116 		pipe_info = &hif_state->pipe_info[pipe_num];
2117 
2118 		if (hif_is_nss_wifi_enabled(scn) &&
2119 		    ce_state && (ce_state->htt_rx_data))
2120 			continue;
2121 
2122 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
2123 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) &&
2124 			ce_state->htt_rx_data &&
2125 			scn->fastpath_mode_on) {
2126 			A_TARGET_ACCESS_UNLIKELY(scn);
2127 			return qdf_status;
2128 		}
2129 	}
2130 
2131 	A_TARGET_ACCESS_UNLIKELY(scn);
2132 
2133 	return QDF_STATUS_SUCCESS;
2134 }
2135 
2136 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
2137 {
2138 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2139 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2140 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
2141 
2142 	hif_update_fastpath_recv_bufs_cnt(scn);
2143 
2144 	hif_msg_callbacks_install(scn);
2145 
2146 	if (hif_completion_thread_startup(hif_state))
2147 		return QDF_STATUS_E_FAILURE;
2148 
2149 	/* enable buffer cleanup */
2150 	hif_state->started = true;
2151 
2152 	/* Post buffers once to start things off. */
2153 	qdf_status = hif_post_recv_buffers(scn);
2154 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2155 		/* cleanup is done in hif_ce_disable */
2156 		HIF_ERROR("%s:failed to post buffers", __func__);
2157 		return qdf_status;
2158 	}
2159 
2160 	return qdf_status;
2161 }
2162 
2163 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2164 {
2165 	struct hif_softc *scn;
2166 	struct CE_handle *ce_hdl;
2167 	uint32_t buf_sz;
2168 	struct HIF_CE_state *hif_state;
2169 	qdf_nbuf_t netbuf;
2170 	qdf_dma_addr_t CE_data;
2171 	void *per_CE_context;
2172 
2173 	buf_sz = pipe_info->buf_sz;
2174 	/* Unused Copy Engine */
2175 	if (buf_sz == 0)
2176 		return;
2177 
2178 
2179 	hif_state = pipe_info->HIF_CE_state;
2180 	if (!hif_state->started)
2181 		return;
2182 
2183 	scn = HIF_GET_SOFTC(hif_state);
2184 	ce_hdl = pipe_info->ce_hdl;
2185 
2186 	if (scn->qdf_dev == NULL)
2187 		return;
2188 	while (ce_revoke_recv_next
2189 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
2190 			&CE_data) == QDF_STATUS_SUCCESS) {
2191 		if (netbuf) {
2192 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2193 					      QDF_DMA_FROM_DEVICE);
2194 			qdf_nbuf_free(netbuf);
2195 		}
2196 	}
2197 }
2198 
2199 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2200 {
2201 	struct CE_handle *ce_hdl;
2202 	struct HIF_CE_state *hif_state;
2203 	struct hif_softc *scn;
2204 	qdf_nbuf_t netbuf;
2205 	void *per_CE_context;
2206 	qdf_dma_addr_t CE_data;
2207 	unsigned int nbytes;
2208 	unsigned int id;
2209 	uint32_t buf_sz;
2210 	uint32_t toeplitz_hash_result;
2211 
2212 	buf_sz = pipe_info->buf_sz;
2213 	if (buf_sz == 0) {
2214 		/* Unused Copy Engine */
2215 		return;
2216 	}
2217 
2218 	hif_state = pipe_info->HIF_CE_state;
2219 	if (!hif_state->started) {
2220 		return;
2221 	}
2222 
2223 	scn = HIF_GET_SOFTC(hif_state);
2224 
2225 	ce_hdl = pipe_info->ce_hdl;
2226 
2227 	while (ce_cancel_send_next
2228 		       (ce_hdl, &per_CE_context,
2229 		       (void **)&netbuf, &CE_data, &nbytes,
2230 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2231 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2232 			/*
2233 			 * Packets enqueued by htt_h2t_ver_req_msg() and
2234 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2235 			 * freed in htt_htc_misc_pkt_pool_free() in
2236 			 * wlantl_close(), so do not free them here again
2237 			 * by checking whether it's the endpoint
2238 			 * which they are queued in.
2239 			 */
2240 			if (id == scn->htc_htt_tx_endpoint)
2241 				return;
2242 			/* Indicate the completion to higher
2243 			 * layer to free the buffer
2244 			 */
2245 			if (pipe_info->pipe_callbacks.txCompletionHandler)
2246 				pipe_info->pipe_callbacks.
2247 				    txCompletionHandler(pipe_info->
2248 					    pipe_callbacks.Context,
2249 					    netbuf, id, toeplitz_hash_result);
2250 		}
2251 	}
2252 }
2253 
2254 /*
2255  * Cleanup residual buffers for device shutdown:
2256  *    buffers that were enqueued for receive
2257  *    buffers that were to be sent
2258  * Note: Buffers that had completed but which were
2259  * not yet processed are on a completion queue. They
2260  * are handled when the completion thread shuts down.
2261  */
2262 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
2263 {
2264 	int pipe_num;
2265 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2266 	struct CE_state *ce_state;
2267 
2268 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2269 		struct HIF_CE_pipe_info *pipe_info;
2270 
2271 		ce_state = scn->ce_id_to_state[pipe_num];
2272 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2273 				((ce_state->htt_tx_data) ||
2274 				 (ce_state->htt_rx_data))) {
2275 			continue;
2276 		}
2277 
2278 		pipe_info = &hif_state->pipe_info[pipe_num];
2279 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
2280 		hif_send_buffer_cleanup_on_pipe(pipe_info);
2281 	}
2282 }
2283 
2284 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
2285 {
2286 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2287 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2288 
2289 	hif_buffer_cleanup(hif_state);
2290 }
2291 
2292 static void hif_destroy_oom_work(struct hif_softc *scn)
2293 {
2294 	struct CE_state *ce_state;
2295 	int ce_id;
2296 
2297 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2298 		ce_state = scn->ce_id_to_state[ce_id];
2299 		if (ce_state)
2300 			qdf_destroy_work(scn->qdf_dev,
2301 					 &ce_state->oom_allocation_work);
2302 	}
2303 }
2304 
2305 void hif_ce_stop(struct hif_softc *scn)
2306 {
2307 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2308 	int pipe_num;
2309 
2310 	/*
2311 	 * before cleaning up any memory, ensure irq &
2312 	 * bottom half contexts will not be re-entered
2313 	 */
2314 	hif_disable_isr(&scn->osc);
2315 	hif_destroy_oom_work(scn);
2316 	scn->hif_init_done = false;
2317 
2318 	/*
2319 	 * At this point, asynchronous threads are stopped,
2320 	 * The Target should not DMA nor interrupt, Host code may
2321 	 * not initiate anything more.  So we just need to clean
2322 	 * up Host-side state.
2323 	 */
2324 
2325 	if (scn->athdiag_procfs_inited) {
2326 		athdiag_procfs_remove();
2327 		scn->athdiag_procfs_inited = false;
2328 	}
2329 
2330 	hif_buffer_cleanup(hif_state);
2331 
2332 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2333 		struct HIF_CE_pipe_info *pipe_info;
2334 		struct CE_attr attr;
2335 		struct CE_handle *ce_diag = hif_state->ce_diag;
2336 
2337 		pipe_info = &hif_state->pipe_info[pipe_num];
2338 		if (pipe_info->ce_hdl) {
2339 			if (pipe_info->ce_hdl != ce_diag) {
2340 				attr = hif_state->host_ce_config[pipe_num];
2341 				if (attr.src_nentries)
2342 					qdf_spinlock_destroy(&pipe_info->
2343 							completion_freeq_lock);
2344 			}
2345 			ce_fini(pipe_info->ce_hdl);
2346 			pipe_info->ce_hdl = NULL;
2347 			pipe_info->buf_sz = 0;
2348 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2349 		}
2350 	}
2351 
2352 	if (hif_state->sleep_timer_init) {
2353 		qdf_timer_stop(&hif_state->sleep_timer);
2354 		qdf_timer_free(&hif_state->sleep_timer);
2355 		hif_state->sleep_timer_init = false;
2356 	}
2357 
2358 	hif_state->started = false;
2359 }
2360 
2361 
2362 /**
2363  * hif_get_target_ce_config() - get copy engine configuration
2364  * @target_ce_config_ret: basic copy engine configuration
2365  * @target_ce_config_sz_ret: size of the basic configuration in bytes
2366  * @target_service_to_ce_map_ret: service mapping for the copy engines
2367  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2368  * @target_shadow_reg_cfg_ret: shadow register configuration
2369  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2370  *
2371  * providing accessor to these values outside of this file.
2372  * currently these are stored in static pointers to const sections.
2373  * there are multiple configurations that are selected from at compile time.
2374  * Runtime selection would need to consider mode, target type and bus type.
2375  *
2376  * Return: return by parameter.
2377  */
2378 void hif_get_target_ce_config(struct hif_softc *scn,
2379 		struct CE_pipe_config **target_ce_config_ret,
2380 		uint32_t *target_ce_config_sz_ret,
2381 		struct service_to_pipe **target_service_to_ce_map_ret,
2382 		uint32_t *target_service_to_ce_map_sz_ret,
2383 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
2384 		uint32_t *shadow_cfg_sz_ret)
2385 {
2386 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2387 
2388 	*target_ce_config_ret = hif_state->target_ce_config;
2389 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
2390 
2391 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2392 				       target_service_to_ce_map_sz_ret);
2393 
2394 	if (target_shadow_reg_cfg_ret)
2395 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2396 
2397 	if (shadow_cfg_sz_ret)
2398 		*shadow_cfg_sz_ret = shadow_cfg_sz;
2399 }
2400 
2401 #ifdef CONFIG_SHADOW_V2
2402 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2403 {
2404 	int i;
2405 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2406 		  "%s: num_config %d\n", __func__, cfg->num_shadow_reg_v2_cfg);
2407 
2408 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2409 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2410 		     "%s: i %d, val %x\n", __func__, i,
2411 		     cfg->shadow_reg_v2_cfg[i].addr);
2412 	}
2413 }
2414 
2415 #else
2416 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2417 {
2418 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2419 		  "%s: CONFIG_SHADOW_V2 not defined\n", __func__);
2420 }
2421 #endif
2422 
2423 /**
2424  * hif_wlan_enable(): call the platform driver to enable wlan
2425  * @scn: HIF Context
2426  *
2427  * This function passes the con_mode and CE configuration to
2428  * platform driver to enable wlan.
2429  *
2430  * Return: linux error code
2431  */
2432 int hif_wlan_enable(struct hif_softc *scn)
2433 {
2434 	struct pld_wlan_enable_cfg cfg;
2435 	enum pld_driver_mode mode;
2436 	uint32_t con_mode = hif_get_conparam(scn);
2437 
2438 	hif_get_target_ce_config(scn,
2439 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
2440 			&cfg.num_ce_tgt_cfg,
2441 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
2442 			&cfg.num_ce_svc_pipe_cfg,
2443 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
2444 			&cfg.num_shadow_reg_cfg);
2445 
2446 	/* translate from structure size to array size */
2447 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
2448 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
2449 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
2450 
2451 	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
2452 			      &cfg.num_shadow_reg_v2_cfg);
2453 
2454 	hif_print_hal_shadow_register_cfg(&cfg);
2455 
2456 	if (QDF_GLOBAL_FTM_MODE == con_mode)
2457 		mode = PLD_FTM;
2458 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
2459 		mode = PLD_COLDBOOT_CALIBRATION;
2460 	else if (QDF_IS_EPPING_ENABLED(con_mode))
2461 		mode = PLD_EPPING;
2462 	else
2463 		mode = PLD_MISSION;
2464 
2465 	if (BYPASS_QMI)
2466 		return 0;
2467 	else
2468 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
2469 				       mode, QWLAN_VERSIONSTR);
2470 }
2471 
2472 #ifdef WLAN_FEATURE_EPPING
2473 
2474 #define CE_EPPING_USES_IRQ true
2475 
2476 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
2477 {
2478 	if (CE_EPPING_USES_IRQ)
2479 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
2480 	else
2481 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
2482 	hif_state->target_ce_config = target_ce_config_wlan_epping;
2483 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
2484 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
2485 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
2486 }
2487 #endif
2488 
2489 /**
2490  * hif_ce_prepare_config() - load the correct static tables.
2491  * @scn: hif context
2492  *
2493  * Epping uses different static attribute tables than mission mode.
2494  */
2495 void hif_ce_prepare_config(struct hif_softc *scn)
2496 {
2497 	uint32_t mode = hif_get_conparam(scn);
2498 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2499 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
2500 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2501 
2502 	hif_state->ce_services = ce_services_attach(scn);
2503 
2504 	scn->ce_count = HOST_CE_COUNT;
2505 	/* if epping is enabled we need to use the epping configuration. */
2506 	if (QDF_IS_EPPING_ENABLED(mode)) {
2507 		hif_ce_prepare_epping_config(hif_state);
2508 	}
2509 
2510 	switch (tgt_info->target_type) {
2511 	default:
2512 		hif_state->host_ce_config = host_ce_config_wlan;
2513 		hif_state->target_ce_config = target_ce_config_wlan;
2514 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
2515 		break;
2516 	case TARGET_TYPE_AR900B:
2517 	case TARGET_TYPE_QCA9984:
2518 	case TARGET_TYPE_IPQ4019:
2519 	case TARGET_TYPE_QCA9888:
2520 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
2521 			hif_state->host_ce_config =
2522 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
2523 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2524 			hif_state->host_ce_config =
2525 				host_lowdesc_ce_cfg_wlan_ar900b;
2526 		} else {
2527 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
2528 		}
2529 
2530 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
2531 		hif_state->target_ce_config_sz =
2532 				sizeof(target_ce_config_wlan_ar900b);
2533 
2534 		break;
2535 
2536 	case TARGET_TYPE_AR9888:
2537 	case TARGET_TYPE_AR9888V2:
2538 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2539 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
2540 		} else {
2541 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
2542 		}
2543 
2544 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
2545 		hif_state->target_ce_config_sz =
2546 					sizeof(target_ce_config_wlan_ar9888);
2547 
2548 		break;
2549 
2550 	case TARGET_TYPE_QCA8074:
2551 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
2552 			hif_state->host_ce_config =
2553 					host_ce_config_wlan_qca8074_pci;
2554 			hif_state->target_ce_config =
2555 				target_ce_config_wlan_qca8074_pci;
2556 			hif_state->target_ce_config_sz =
2557 				sizeof(target_ce_config_wlan_qca8074_pci);
2558 		} else {
2559 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
2560 			hif_state->target_ce_config =
2561 					target_ce_config_wlan_qca8074;
2562 			hif_state->target_ce_config_sz =
2563 				sizeof(target_ce_config_wlan_qca8074);
2564 		}
2565 		break;
2566 	case TARGET_TYPE_QCA6290:
2567 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
2568 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
2569 		hif_state->target_ce_config_sz =
2570 					sizeof(target_ce_config_wlan_qca6290);
2571 
2572 		scn->ce_count = QCA_6290_CE_COUNT;
2573 		break;
2574 	}
2575 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
2576 }
2577 
2578 /**
2579  * hif_ce_open() - do ce specific allocations
2580  * @hif_sc: pointer to hif context
2581  *
2582  * return: 0 for success or QDF_STATUS_E_NOMEM
2583  */
2584 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
2585 {
2586 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2587 
2588 	qdf_spinlock_create(&hif_state->irq_reg_lock);
2589 	qdf_spinlock_create(&hif_state->keep_awake_lock);
2590 	return QDF_STATUS_SUCCESS;
2591 }
2592 
2593 /**
2594  * hif_ce_close() - do ce specific free
2595  * @hif_sc: pointer to hif context
2596  */
2597 void hif_ce_close(struct hif_softc *hif_sc)
2598 {
2599 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2600 
2601 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
2602 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
2603 }
2604 
2605 /**
2606  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
2607  * @hif_sc: hif context
2608  *
2609  * uses state variables to support cleaning up when hif_config_ce fails.
2610  */
2611 void hif_unconfig_ce(struct hif_softc *hif_sc)
2612 {
2613 	int pipe_num;
2614 	struct HIF_CE_pipe_info *pipe_info;
2615 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2616 
2617 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
2618 		pipe_info = &hif_state->pipe_info[pipe_num];
2619 		if (pipe_info->ce_hdl) {
2620 			ce_unregister_irq(hif_state, (1 << pipe_num));
2621 			ce_fini(pipe_info->ce_hdl);
2622 			pipe_info->ce_hdl = NULL;
2623 			pipe_info->buf_sz = 0;
2624 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2625 		}
2626 	}
2627 	if (hif_sc->athdiag_procfs_inited) {
2628 		athdiag_procfs_remove();
2629 		hif_sc->athdiag_procfs_inited = false;
2630 	}
2631 }
2632 
2633 #ifdef CONFIG_BYPASS_QMI
2634 #define FW_SHARED_MEM (2 * 1024 * 1024)
2635 
2636 /**
2637  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
2638  * @scn: pointer to HIF structure
2639  *
2640  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
2641  *
2642  * Return: void
2643  */
2644 static void hif_post_static_buf_to_target(struct hif_softc *scn)
2645 {
2646 	void *target_va;
2647 	phys_addr_t target_pa;
2648 
2649 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2650 				FW_SHARED_MEM, &target_pa);
2651 	if (NULL == target_va) {
2652 		HIF_TRACE("Memory allocation failed could not post target buf");
2653 		return;
2654 	}
2655 	hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
2656 	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
2657 }
2658 #else
2659 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
2660 {
2661 }
2662 #endif
2663 
2664 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
2665 				bool wait_for_it)
2666 {
2667 	/* todo */
2668 	return 0;
2669 }
2670 
2671 /**
2672  * hif_config_ce() - configure copy engines
2673  * @scn: hif context
2674  *
2675  * Prepares fw, copy engine hardware and host sw according
2676  * to the attributes selected by hif_ce_prepare_config.
2677  *
2678  * also calls athdiag_procfs_init
2679  *
2680  * return: 0 for success nonzero for failure.
2681  */
2682 int hif_config_ce(struct hif_softc *scn)
2683 {
2684 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2685 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2686 	struct HIF_CE_pipe_info *pipe_info;
2687 	int pipe_num;
2688 	struct CE_state *ce_state;
2689 
2690 #ifdef ADRASTEA_SHADOW_REGISTERS
2691 	int i;
2692 #endif
2693 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
2694 
2695 	scn->notice_send = true;
2696 
2697 	hif_post_static_buf_to_target(scn);
2698 
2699 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
2700 
2701 	hif_config_rri_on_ddr(scn);
2702 
2703 	if (ce_srng_based(scn))
2704 		scn->bus_ops.hif_target_sleep_state_adjust =
2705 			&hif_srng_sleep_state_adjust;
2706 
2707 	/* Initialise the CE debug history sysfs interface inputs ce_id and
2708 	 * index. Disable data storing
2709 	 */
2710 	reset_ce_debug_history(scn);
2711 
2712 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2713 		struct CE_attr *attr;
2714 
2715 		pipe_info = &hif_state->pipe_info[pipe_num];
2716 		pipe_info->pipe_num = pipe_num;
2717 		pipe_info->HIF_CE_state = hif_state;
2718 		attr = &hif_state->host_ce_config[pipe_num];
2719 
2720 		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
2721 		ce_state = scn->ce_id_to_state[pipe_num];
2722 		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
2723 		QDF_ASSERT(pipe_info->ce_hdl != NULL);
2724 		if (pipe_info->ce_hdl == NULL) {
2725 			rv = QDF_STATUS_E_FAILURE;
2726 			A_TARGET_ACCESS_UNLIKELY(scn);
2727 			goto err;
2728 		}
2729 
2730 		ce_state->lro_data = qdf_lro_init();
2731 
2732 		if (attr->flags & CE_ATTR_DIAG) {
2733 			/* Reserve the ultimate CE for
2734 			 * Diagnostic Window support
2735 			 */
2736 			hif_state->ce_diag = pipe_info->ce_hdl;
2737 			continue;
2738 		}
2739 
2740 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2741 				(ce_state->htt_rx_data))
2742 			continue;
2743 
2744 		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
2745 		if (attr->dest_nentries > 0) {
2746 			atomic_set(&pipe_info->recv_bufs_needed,
2747 				   init_buffer_count(attr->dest_nentries - 1));
2748 			/*SRNG based CE has one entry less */
2749 			if (ce_srng_based(scn))
2750 				atomic_dec(&pipe_info->recv_bufs_needed);
2751 		} else {
2752 			atomic_set(&pipe_info->recv_bufs_needed, 0);
2753 		}
2754 		ce_tasklet_init(hif_state, (1 << pipe_num));
2755 		ce_register_irq(hif_state, (1 << pipe_num));
2756 	}
2757 
2758 	if (athdiag_procfs_init(scn) != 0) {
2759 		A_TARGET_ACCESS_UNLIKELY(scn);
2760 		goto err;
2761 	}
2762 	scn->athdiag_procfs_inited = true;
2763 
2764 	HIF_DBG("%s: ce_init done", __func__);
2765 
2766 	init_tasklet_workers(hif_hdl);
2767 
2768 	HIF_DBG("%s: X, ret = %d", __func__, rv);
2769 
2770 #ifdef ADRASTEA_SHADOW_REGISTERS
2771 	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
2772 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
2773 		HIF_DBG("%s Shadow Register%d is mapped to address %x",
2774 			  __func__, i,
2775 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
2776 	}
2777 #endif
2778 
2779 	return rv != QDF_STATUS_SUCCESS;
2780 
2781 err:
2782 	/* Failure, so clean up */
2783 	hif_unconfig_ce(scn);
2784 	HIF_TRACE("%s: X, ret = %d", __func__, rv);
2785 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
2786 }
2787 
2788 #ifdef WLAN_FEATURE_FASTPATH
2789 /**
2790  * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
2791  * @handler: Callback funtcion
2792  * @context: handle for callback function
2793  *
2794  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
2795  */
2796 int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
2797 				fastpath_msg_handler handler,
2798 				void *context)
2799 {
2800 	struct CE_state *ce_state;
2801 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2802 	int i;
2803 
2804 	if (!scn) {
2805 		HIF_ERROR("%s: scn is NULL", __func__);
2806 		QDF_ASSERT(0);
2807 		return QDF_STATUS_E_FAILURE;
2808 	}
2809 
2810 	if (!scn->fastpath_mode_on) {
2811 		HIF_WARN("%s: Fastpath mode disabled", __func__);
2812 		return QDF_STATUS_E_FAILURE;
2813 	}
2814 
2815 	for (i = 0; i < scn->ce_count; i++) {
2816 		ce_state = scn->ce_id_to_state[i];
2817 		if (ce_state->htt_rx_data) {
2818 			ce_state->fastpath_handler = handler;
2819 			ce_state->context = context;
2820 		}
2821 	}
2822 
2823 	return QDF_STATUS_SUCCESS;
2824 }
2825 qdf_export_symbol(hif_ce_fastpath_cb_register);
2826 #endif
2827 
2828 #ifdef IPA_OFFLOAD
2829 /**
2830  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
2831  * @scn: bus context
2832  * @ce_sr_base_paddr: copyengine source ring base physical address
2833  * @ce_sr_ring_size: copyengine source ring size
2834  * @ce_reg_paddr: copyengine register physical address
2835  *
2836  * IPA micro controller data path offload feature enabled,
2837  * HIF should release copy engine related resource information to IPA UC
2838  * IPA UC will access hardware resource with released information
2839  *
2840  * Return: None
2841  */
2842 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
2843 			     qdf_dma_addr_t *ce_sr_base_paddr,
2844 			     uint32_t *ce_sr_ring_size,
2845 			     qdf_dma_addr_t *ce_reg_paddr)
2846 {
2847 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2848 	struct HIF_CE_pipe_info *pipe_info =
2849 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
2850 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2851 
2852 	ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
2853 			    ce_reg_paddr);
2854 }
2855 #endif /* IPA_OFFLOAD */
2856 
2857 
2858 #ifdef ADRASTEA_SHADOW_REGISTERS
2859 
2860 /*
2861  * Current shadow register config
2862  *
2863  * -----------------------------------------------------------
2864  * Shadow Register      |     CE   |    src/dst write index
2865  * -----------------------------------------------------------
2866  *         0            |     0    |           src
2867  *         1     No Config - Doesn't point to anything
2868  *         2     No Config - Doesn't point to anything
2869  *         3            |     3    |           src
2870  *         4            |     4    |           src
2871  *         5            |     5    |           src
2872  *         6     No Config - Doesn't point to anything
2873  *         7            |     7    |           src
2874  *         8     No Config - Doesn't point to anything
2875  *         9     No Config - Doesn't point to anything
2876  *         10    No Config - Doesn't point to anything
2877  *         11    No Config - Doesn't point to anything
2878  * -----------------------------------------------------------
2879  *         12    No Config - Doesn't point to anything
2880  *         13           |     1    |           dst
2881  *         14           |     2    |           dst
2882  *         15    No Config - Doesn't point to anything
2883  *         16    No Config - Doesn't point to anything
2884  *         17    No Config - Doesn't point to anything
2885  *         18    No Config - Doesn't point to anything
2886  *         19           |     7    |           dst
2887  *         20           |     8    |           dst
2888  *         21    No Config - Doesn't point to anything
2889  *         22    No Config - Doesn't point to anything
2890  *         23    No Config - Doesn't point to anything
2891  * -----------------------------------------------------------
2892  *
2893  *
2894  * ToDo - Move shadow register config to following in the future
2895  * This helps free up a block of shadow registers towards the end.
2896  * Can be used for other purposes
2897  *
2898  * -----------------------------------------------------------
2899  * Shadow Register      |     CE   |    src/dst write index
2900  * -----------------------------------------------------------
2901  *      0            |     0    |           src
2902  *      1            |     3    |           src
2903  *      2            |     4    |           src
2904  *      3            |     5    |           src
2905  *      4            |     7    |           src
2906  * -----------------------------------------------------------
2907  *      5            |     1    |           dst
2908  *      6            |     2    |           dst
2909  *      7            |     7    |           dst
2910  *      8            |     8    |           dst
2911  * -----------------------------------------------------------
2912  *      9     No Config - Doesn't point to anything
2913  *      12    No Config - Doesn't point to anything
2914  *      13    No Config - Doesn't point to anything
2915  *      14    No Config - Doesn't point to anything
2916  *      15    No Config - Doesn't point to anything
2917  *      16    No Config - Doesn't point to anything
2918  *      17    No Config - Doesn't point to anything
2919  *      18    No Config - Doesn't point to anything
2920  *      19    No Config - Doesn't point to anything
2921  *      20    No Config - Doesn't point to anything
2922  *      21    No Config - Doesn't point to anything
2923  *      22    No Config - Doesn't point to anything
2924  *      23    No Config - Doesn't point to anything
2925  * -----------------------------------------------------------
2926 */
2927 
2928 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
2929 {
2930 	u32 addr = 0;
2931 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
2932 
2933 	switch (ce) {
2934 	case 0:
2935 		addr = SHADOW_VALUE0;
2936 		break;
2937 	case 3:
2938 		addr = SHADOW_VALUE3;
2939 		break;
2940 	case 4:
2941 		addr = SHADOW_VALUE4;
2942 		break;
2943 	case 5:
2944 		addr = SHADOW_VALUE5;
2945 		break;
2946 	case 7:
2947 		addr = SHADOW_VALUE7;
2948 		break;
2949 	default:
2950 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
2951 		QDF_ASSERT(0);
2952 	}
2953 	return addr;
2954 
2955 }
2956 
2957 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
2958 {
2959 	u32 addr = 0;
2960 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
2961 
2962 	switch (ce) {
2963 	case 1:
2964 		addr = SHADOW_VALUE13;
2965 		break;
2966 	case 2:
2967 		addr = SHADOW_VALUE14;
2968 		break;
2969 	case 5:
2970 		addr = SHADOW_VALUE17;
2971 		break;
2972 	case 7:
2973 		addr = SHADOW_VALUE19;
2974 		break;
2975 	case 8:
2976 		addr = SHADOW_VALUE20;
2977 		break;
2978 	case 9:
2979 		addr = SHADOW_VALUE21;
2980 		break;
2981 	case 10:
2982 		addr = SHADOW_VALUE22;
2983 		break;
2984 	case 11:
2985 		addr = SHADOW_VALUE23;
2986 		break;
2987 	default:
2988 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
2989 		QDF_ASSERT(0);
2990 	}
2991 
2992 	return addr;
2993 
2994 }
2995 #endif
2996 
2997 #if defined(FEATURE_LRO)
2998 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
2999 {
3000 	struct CE_state *ce_state;
3001 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3002 
3003 	ce_state = scn->ce_id_to_state[ctx_id];
3004 
3005 	return ce_state->lro_data;
3006 }
3007 #endif
3008 
3009 /**
3010  * hif_map_service_to_pipe() - returns  the ce ids pertaining to
3011  * this service
3012  * @scn: hif_softc pointer.
3013  * @svc_id: Service ID for which the mapping is needed.
3014  * @ul_pipe: address of the container in which ul pipe is returned.
3015  * @dl_pipe: address of the container in which dl pipe is returned.
3016  * @ul_is_polled: address of the container in which a bool
3017  *			indicating if the UL CE for this service
3018  *			is polled is returned.
3019  * @dl_is_polled: address of the container in which a bool
3020  *			indicating if the DL CE for this service
3021  *			is polled is returned.
3022  *
3023  * Return: Indicates whether the service has been found in the table.
3024  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
3025  *         There will be warning logs if either leg has not been updated
3026  *         because it missed the entry in the table (but this is not an err).
3027  */
3028 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
3029 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
3030 			int *dl_is_polled)
3031 {
3032 	int status = QDF_STATUS_E_INVAL;
3033 	unsigned int i;
3034 	struct service_to_pipe element;
3035 	struct service_to_pipe *tgt_svc_map_to_use;
3036 	uint32_t sz_tgt_svc_map_to_use;
3037 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3038 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3039 	bool dl_updated = false;
3040 	bool ul_updated = false;
3041 
3042 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
3043 				       &sz_tgt_svc_map_to_use);
3044 
3045 	*dl_is_polled = 0;  /* polling for received messages not supported */
3046 
3047 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
3048 
3049 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
3050 		if (element.service_id == svc_id) {
3051 			if (element.pipedir == PIPEDIR_OUT) {
3052 				*ul_pipe = element.pipenum;
3053 				*ul_is_polled =
3054 					(hif_state->host_ce_config[*ul_pipe].flags &
3055 					 CE_ATTR_DISABLE_INTR) != 0;
3056 				ul_updated = true;
3057 			} else if (element.pipedir == PIPEDIR_IN) {
3058 				*dl_pipe = element.pipenum;
3059 				dl_updated = true;
3060 			}
3061 			status = QDF_STATUS_SUCCESS;
3062 		}
3063 	}
3064 	if (ul_updated == false)
3065 		HIF_INFO("%s: ul pipe is NOT updated for service %d",
3066 			 __func__, svc_id);
3067 	if (dl_updated == false)
3068 		HIF_INFO("%s: dl pipe is NOT updated for service %d",
3069 			 __func__, svc_id);
3070 
3071 	return status;
3072 }
3073 
3074 #ifdef SHADOW_REG_DEBUG
3075 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
3076 		uint32_t CE_ctrl_addr)
3077 {
3078 	uint32_t read_from_hw, srri_from_ddr = 0;
3079 
3080 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
3081 
3082 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3083 
3084 	if (read_from_hw != srri_from_ddr) {
3085 		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3086 		       __func__, srri_from_ddr, read_from_hw,
3087 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3088 		QDF_ASSERT(0);
3089 	}
3090 	return srri_from_ddr;
3091 }
3092 
3093 
3094 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
3095 		uint32_t CE_ctrl_addr)
3096 {
3097 	uint32_t read_from_hw, drri_from_ddr = 0;
3098 
3099 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
3100 
3101 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3102 
3103 	if (read_from_hw != drri_from_ddr) {
3104 		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3105 		       drri_from_ddr, read_from_hw,
3106 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3107 		QDF_ASSERT(0);
3108 	}
3109 	return drri_from_ddr;
3110 }
3111 
3112 #endif
3113 
3114 #ifdef ADRASTEA_RRI_ON_DDR
3115 /**
3116  * hif_get_src_ring_read_index(): Called to get the SRRI
3117  *
3118  * @scn: hif_softc pointer
3119  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3120  *
3121  * This function returns the SRRI to the caller. For CEs that
3122  * dont have interrupts enabled, we look at the DDR based SRRI
3123  *
3124  * Return: SRRI
3125  */
3126 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
3127 		uint32_t CE_ctrl_addr)
3128 {
3129 	struct CE_attr attr;
3130 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3131 
3132 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3133 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3134 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3135 	} else {
3136 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3137 			return A_TARGET_READ(scn,
3138 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
3139 		else
3140 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
3141 					CE_ctrl_addr);
3142 	}
3143 }
3144 
3145 /**
3146  * hif_get_dst_ring_read_index(): Called to get the DRRI
3147  *
3148  * @scn: hif_softc pointer
3149  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3150  *
3151  * This function returns the DRRI to the caller. For CEs that
3152  * dont have interrupts enabled, we look at the DDR based DRRI
3153  *
3154  * Return: DRRI
3155  */
3156 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
3157 		uint32_t CE_ctrl_addr)
3158 {
3159 	struct CE_attr attr;
3160 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3161 
3162 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3163 
3164 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3165 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3166 	} else {
3167 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3168 			return A_TARGET_READ(scn,
3169 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
3170 		else
3171 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
3172 					CE_ctrl_addr);
3173 	}
3174 }
3175 
3176 /**
3177  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3178  *
3179  * @scn: hif_softc pointer
3180  *
3181  * This function allocates non cached memory on ddr and sends
3182  * the physical address of this memory to the CE hardware. The
3183  * hardware updates the RRI on this particular location.
3184  *
3185  * Return: None
3186  */
3187 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3188 {
3189 	unsigned int i;
3190 	qdf_dma_addr_t paddr_rri_on_ddr;
3191 	uint32_t high_paddr, low_paddr;
3192 
3193 	scn->vaddr_rri_on_ddr =
3194 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3195 		scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)),
3196 		&paddr_rri_on_ddr);
3197 
3198 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
3199 	low_paddr  = BITS0_TO_31(paddr_rri_on_ddr);
3200 	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
3201 
3202 	HIF_DBG("%s using srri and drri from DDR", __func__);
3203 
3204 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
3205 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
3206 
3207 	for (i = 0; i < CE_COUNT; i++)
3208 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
3209 
3210 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
3211 
3212 }
3213 #else
3214 
3215 /**
3216  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3217  *
3218  * @scn: hif_softc pointer
3219  *
3220  * This is a dummy implementation for platforms that don't
3221  * support this functionality.
3222  *
3223  * Return: None
3224  */
3225 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3226 {
3227 }
3228 #endif
3229 
3230 /**
3231  * hif_dump_ce_registers() - dump ce registers
3232  * @scn: hif_opaque_softc pointer.
3233  *
3234  * Output the copy engine registers
3235  *
3236  * Return: 0 for success or error code
3237  */
3238 int hif_dump_ce_registers(struct hif_softc *scn)
3239 {
3240 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3241 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
3242 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
3243 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
3244 	uint16_t i;
3245 	QDF_STATUS status;
3246 
3247 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
3248 		if (scn->ce_id_to_state[i] == NULL) {
3249 			HIF_DBG("CE%d not used.", i);
3250 			continue;
3251 		}
3252 
3253 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
3254 					   (uint8_t *) &ce_reg_values[0],
3255 					   ce_reg_word_size * sizeof(uint32_t));
3256 
3257 		if (status != QDF_STATUS_SUCCESS) {
3258 			HIF_ERROR("Dumping CE register failed!");
3259 			return -EACCES;
3260 		}
3261 		HIF_ERROR("CE%d=>\n", i);
3262 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
3263 				   (uint8_t *) &ce_reg_values[0],
3264 				   ce_reg_word_size * sizeof(uint32_t));
3265 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d\n", (ce_reg_address
3266 				+ SR_WR_INDEX_ADDRESS),
3267 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
3268 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d\n", (ce_reg_address
3269 				+ CURRENT_SRRI_ADDRESS),
3270 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
3271 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d\n", (ce_reg_address
3272 				+ DST_WR_INDEX_ADDRESS),
3273 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
3274 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d\n", (ce_reg_address
3275 				+ CURRENT_DRRI_ADDRESS),
3276 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
3277 		qdf_print("---\n");
3278 	}
3279 	return 0;
3280 }
3281 qdf_export_symbol(hif_dump_ce_registers);
3282 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
3283 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
3284 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
3285 {
3286 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3287 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3288 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
3289 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3290 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3291 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
3292 	struct CE_ring_state *src_ring = ce_state->src_ring;
3293 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
3294 
3295 	if (src_ring) {
3296 		hif_info->ul_pipe.nentries = src_ring->nentries;
3297 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
3298 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
3299 		hif_info->ul_pipe.write_index = src_ring->write_index;
3300 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
3301 		hif_info->ul_pipe.base_addr_CE_space =
3302 			src_ring->base_addr_CE_space;
3303 		hif_info->ul_pipe.base_addr_owner_space =
3304 			src_ring->base_addr_owner_space;
3305 	}
3306 
3307 
3308 	if (dest_ring) {
3309 		hif_info->dl_pipe.nentries = dest_ring->nentries;
3310 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
3311 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
3312 		hif_info->dl_pipe.write_index = dest_ring->write_index;
3313 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
3314 		hif_info->dl_pipe.base_addr_CE_space =
3315 			dest_ring->base_addr_CE_space;
3316 		hif_info->dl_pipe.base_addr_owner_space =
3317 			dest_ring->base_addr_owner_space;
3318 	}
3319 
3320 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
3321 	hif_info->ctrl_addr = ce_state->ctrl_addr;
3322 
3323 	return hif_info;
3324 }
3325 qdf_export_symbol(hif_get_addl_pipe_info);
3326 
3327 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
3328 {
3329 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3330 
3331 	scn->nss_wifi_ol_mode = mode;
3332 	return 0;
3333 }
3334 qdf_export_symbol(hif_set_nss_wifiol_mode);
3335 #endif
3336 
3337 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
3338 {
3339 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3340 	scn->hif_attribute = hif_attrib;
3341 }
3342 
3343 
3344 /* disable interrupts (only applicable for legacy copy engine currently */
3345 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
3346 {
3347 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3348 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
3349 	uint32_t ctrl_addr = CE_state->ctrl_addr;
3350 
3351 	Q_TARGET_ACCESS_BEGIN(scn);
3352 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
3353 	Q_TARGET_ACCESS_END(scn);
3354 }
3355 qdf_export_symbol(hif_disable_interrupt);
3356 
3357 /**
3358  * hif_fw_event_handler() - hif fw event handler
3359  * @hif_state: pointer to hif ce state structure
3360  *
3361  * Process fw events and raise HTC callback to process fw events.
3362  *
3363  * Return: none
3364  */
3365 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
3366 {
3367 	struct hif_msg_callbacks *msg_callbacks =
3368 		&hif_state->msg_callbacks_current;
3369 
3370 	if (!msg_callbacks->fwEventHandler)
3371 		return;
3372 
3373 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
3374 			QDF_STATUS_E_FAILURE);
3375 }
3376 
3377 #ifndef QCA_WIFI_3_0
3378 /**
3379  * hif_fw_interrupt_handler() - FW interrupt handler
3380  * @irq: irq number
3381  * @arg: the user pointer
3382  *
3383  * Called from the PCI interrupt handler when a
3384  * firmware-generated interrupt to the Host.
3385  *
3386  * only registered for legacy ce devices
3387  *
3388  * Return: status of handled irq
3389  */
3390 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3391 {
3392 	struct hif_softc *scn = arg;
3393 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3394 	uint32_t fw_indicator_address, fw_indicator;
3395 
3396 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
3397 		return ATH_ISR_NOSCHED;
3398 
3399 	fw_indicator_address = hif_state->fw_indicator_address;
3400 	/* For sudden unplug this will return ~0 */
3401 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
3402 
3403 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
3404 		/* ACK: clear Target-side pending event */
3405 		A_TARGET_WRITE(scn, fw_indicator_address,
3406 			       fw_indicator & ~FW_IND_EVENT_PENDING);
3407 		if (Q_TARGET_ACCESS_END(scn) < 0)
3408 			return ATH_ISR_SCHED;
3409 
3410 		if (hif_state->started) {
3411 			hif_fw_event_handler(hif_state);
3412 		} else {
3413 			/*
3414 			 * Probable Target failure before we're prepared
3415 			 * to handle it.  Generally unexpected.
3416 			 */
3417 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
3418 				("%s: Early firmware event indicated\n",
3419 				 __func__));
3420 		}
3421 	} else {
3422 		if (Q_TARGET_ACCESS_END(scn) < 0)
3423 			return ATH_ISR_SCHED;
3424 	}
3425 
3426 	return ATH_ISR_SCHED;
3427 }
3428 #else
3429 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3430 {
3431 	return ATH_ISR_SCHED;
3432 }
3433 #endif /* #ifdef QCA_WIFI_3_0 */
3434 
3435 
3436 /**
3437  * hif_wlan_disable(): call the platform driver to disable wlan
3438  * @scn: HIF Context
3439  *
3440  * This function passes the con_mode to platform driver to disable
3441  * wlan.
3442  *
3443  * Return: void
3444  */
3445 void hif_wlan_disable(struct hif_softc *scn)
3446 {
3447 	enum pld_driver_mode mode;
3448 	uint32_t con_mode = hif_get_conparam(scn);
3449 
3450 	if (scn->target_status == TARGET_STATUS_RESET)
3451 		return;
3452 
3453 	if (QDF_GLOBAL_FTM_MODE == con_mode)
3454 		mode = PLD_FTM;
3455 	else if (QDF_IS_EPPING_ENABLED(con_mode))
3456 		mode = PLD_EPPING;
3457 	else
3458 		mode = PLD_MISSION;
3459 
3460 	pld_wlan_disable(scn->qdf_dev->dev, mode);
3461 }
3462 
3463 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
3464 {
3465 	QDF_STATUS status;
3466 	uint8_t ul_pipe, dl_pipe;
3467 	int ul_is_polled, dl_is_polled;
3468 
3469 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
3470 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
3471 					 HTC_CTRL_RSVD_SVC,
3472 					 &ul_pipe, &dl_pipe,
3473 					 &ul_is_polled, &dl_is_polled);
3474 	if (status) {
3475 		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
3476 		return qdf_status_to_os_return(status);
3477 	}
3478 
3479 	*ce_id = dl_pipe;
3480 
3481 	return 0;
3482 }
3483