xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision 27d564647e9b50e713c60b0d7e5ea2a9b0a3ae74)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "ce_api.h"
33 #include "qdf_trace.h"
34 #include "pld_common.h"
35 #include "hif_debug.h"
36 #include "ce_internal.h"
37 #include "ce_reg.h"
38 #include "ce_assignment.h"
39 #include "ce_tasklet.h"
40 #ifndef CONFIG_WIN
41 #include "qwlan_version.h"
42 #endif
43 #include "qdf_module.h"
44 
45 #define CE_POLL_TIMEOUT 10      /* ms */
46 
47 #define AGC_DUMP         1
48 #define CHANINFO_DUMP    2
49 #define BB_WATCHDOG_DUMP 3
50 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
51 #define PCIE_ACCESS_DUMP 4
52 #endif
53 #include "mp_dev.h"
54 
55 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
56 	!defined(QCA_WIFI_SUPPORT_SRNG)
57 #define QCA_WIFI_SUPPORT_SRNG
58 #endif
59 
60 /* Forward references */
61 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
62 
63 /*
64  * Fix EV118783: poll to check whether a BMI response has arrived,
65  * rather than only waiting for the interrupt, which may be lost.
66  */
67 /* #define BMI_RSP_POLLING */
68 #define BMI_RSP_TO_MILLISEC  1000
69 
70 #ifdef CONFIG_BYPASS_QMI
71 #define BYPASS_QMI 1
72 #else
73 #define BYPASS_QMI 0
74 #endif
75 
76 #ifdef CONFIG_WIN
77 #if ENABLE_10_4_FW_HDR
78 #define WDI_IPA_SERVICE_GROUP 5
79 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
80 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
81 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
82 #endif /* ENABLE_10_4_FW_HDR */
83 #endif
84 
85 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
86 static void hif_config_rri_on_ddr(struct hif_softc *scn);
87 
88 /**
89  * hif_target_access_log_dump() - dump the target access log
90  *
91  * Dumps the recorded target access log.
92  *
93  * Return: n/a
94  */
95 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
96 static void hif_target_access_log_dump(void)
97 {
98 	hif_target_dump_access_log();
99 }
100 #endif
101 
102 
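/**
 * hif_trigger_dump() - trigger a dump command on the target
 * @hif_ctx: opaque hif context
 * @cmd_id: one of AGC_DUMP, CHANINFO_DUMP, BB_WATCHDOG_DUMP or
 *          PCIE_ACCESS_DUMP
 * @start: for the AGC and chaninfo dumps, true starts the capture and
 *         false dumps the captured data
 */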
103 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
104 		      uint8_t cmd_id, bool start)
105 {
106 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
107 
108 	switch (cmd_id) {
109 	case AGC_DUMP:
110 		if (start)
111 			priv_start_agc(scn);
112 		else
113 			priv_dump_agc(scn);
114 		break;
115 	case CHANINFO_DUMP:
116 		if (start)
117 			priv_start_cap_chaninfo(scn);
118 		else
119 			priv_dump_chaninfo(scn);
120 		break;
121 	case BB_WATCHDOG_DUMP:
122 		priv_dump_bbwatchdog(scn);
123 		break;
124 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
125 	case PCIE_ACCESS_DUMP:
126 		hif_target_access_log_dump();
127 		break;
128 #endif
129 	default:
130 		HIF_ERROR("%s: Invalid htc dump command", __func__);
131 		break;
132 	}
133 }
134 
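/**
 * ce_poll_timeout() - CE poll timer callback
 * @arg: opaque pointer to the CE_state being polled
 *
 * Services the copy engine and re-arms the poll timer as long as polling
 * (timer_inited) is still enabled for this CE.
 */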
135 static void ce_poll_timeout(void *arg)
136 {
137 	struct CE_state *CE_state = (struct CE_state *)arg;
138 
139 	if (CE_state->timer_inited) {
140 		ce_per_engine_service(CE_state->scn, CE_state->id);
141 		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
142 	}
143 }
144 
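/*
 * roundup_pwr2() - round a ring entry count up to a power of two
 *
 * Returns n unchanged if it is already a power of two; otherwise returns
 * the next power of two (minimum 4). Asserts and returns 0 if n is too
 * large to round up.
 */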
145 static unsigned int roundup_pwr2(unsigned int n)
146 {
147 	int i;
148 	unsigned int test_pwr2;
149 
150 	if (!(n & (n - 1)))
151 		return n; /* already a power of 2 */
152 
153 	test_pwr2 = 4;
154 	for (i = 0; i < 29; i++) {
155 		if (test_pwr2 > n)
156 			return test_pwr2;
157 		test_pwr2 = test_pwr2 << 1;
158 	}
159 
160 	QDF_ASSERT(0); /* n too large */
161 	return 0;
162 }
163 
164 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
165 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
166 
167 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
168 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
169 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
170 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
171 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
172 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
173 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
174 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
175 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
176 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
177 #ifdef QCA_WIFI_3_0_ADRASTEA
178 	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
179 	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
180 	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
181 #endif
182 };
183 
184 #ifdef QCN7605_SUPPORT
185 static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
186 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
187 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
188 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
189 	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
190 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
191 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
192 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
193 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
194 };
195 #endif
196 
197 #ifdef WLAN_FEATURE_EPPING
198 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
199 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
200 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
201 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
202 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
203 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
204 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
205 	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
206 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
207 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
208 };
209 #endif
210 
211 /* CE_PCI TABLE */
212 /*
213  * NOTE: the table below is out of date, though still a useful reference.
214  * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
215  * mapping of HTC services to HIF pipes.
216  */
217 /*
218  * This table defines the Copy Engine configuration and the mapping of
219  * services/endpoints to CEs.  A subset of this information is passed to
220  * the Target during startup as a prerequisite to entering the BMI phase.
221  * See:
222  *    target_service_to_ce_map - Target-side mapping
223  *    hif_map_service_to_pipe  - Host-side mapping
224  *    target_ce_config         - Target-side configuration
225  *    host_ce_config           - Host-side configuration
226    ============================================================================
227    Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
228               |                      |      | ctio | Size     | Frequency
229               |                      |      | n    |          |
230    ============================================================================
231    tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
232    descriptor |                      |      |      | O(100B)  | and regular
233    download   |                      |      |      |          |
234    ----------------------------------------------------------------------------
235    rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
236    indication |                      |      |      | O(10B)   | regular
237    upload     |                      |      |      |          |
238    ----------------------------------------------------------------------------
239    MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
240    upload     |                      |      |      | O(1000B) | (frequent
241    e.g. noise |                      |      |      |          | during IP1.0
242    packets    |                      |      |      |          | testing)
243    ----------------------------------------------------------------------------
244    MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
245    download   |                      |      |      | O(1000B) | (frequent
246    e.g.       |                      |      |      |          | during IP1.0
247    misdirecte |                      |      |      |          | testing)
248    d EAPOL    |                      |      |      |          |
249    packets    |                      |      |      |          |
250    ----------------------------------------------------------------------------
251    n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
252               | DATA_VO (uplink)     |      |      |          |
253    ----------------------------------------------------------------------------
254    n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
255               | DATA_VO (downlink)   |      |      |          |
256    ----------------------------------------------------------------------------
257    WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
258               |                      |      |      | O(100B)  |
259    ----------------------------------------------------------------------------
260    WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
261    messages   | (downlink)           |      |      | O(100B)  |
262               |                      |      |      |          |
263    ----------------------------------------------------------------------------
264    n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
265               | HTC_RAW_STREAMS      |      |      |          |
266               | (uplink)             |      |      |          |
267    ----------------------------------------------------------------------------
268    n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
269               | HTC_RAW_STREAMS      |      |      |          |
270               | (downlink)           |      |      |          |
271    ----------------------------------------------------------------------------
272    diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
273               |                      |      |      |          | infrequent
274    ============================================================================
275  */
276 
277 /*
278  * Map from service/endpoint to Copy Engine.
279  * This table is derived from the CE_PCI TABLE, above.
280  * It is passed to the Target at startup for use by firmware.
281  */
282 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
283 	{
284 		WMI_DATA_VO_SVC,
285 		PIPEDIR_OUT,    /* out = UL = host -> target */
286 		3,
287 	},
288 	{
289 		WMI_DATA_VO_SVC,
290 		PIPEDIR_IN,     /* in = DL = target -> host */
291 		2,
292 	},
293 	{
294 		WMI_DATA_BK_SVC,
295 		PIPEDIR_OUT,    /* out = UL = host -> target */
296 		3,
297 	},
298 	{
299 		WMI_DATA_BK_SVC,
300 		PIPEDIR_IN,     /* in = DL = target -> host */
301 		2,
302 	},
303 	{
304 		WMI_DATA_BE_SVC,
305 		PIPEDIR_OUT,    /* out = UL = host -> target */
306 		3,
307 	},
308 	{
309 		WMI_DATA_BE_SVC,
310 		PIPEDIR_IN,     /* in = DL = target -> host */
311 		2,
312 	},
313 	{
314 		WMI_DATA_VI_SVC,
315 		PIPEDIR_OUT,    /* out = UL = host -> target */
316 		3,
317 	},
318 	{
319 		WMI_DATA_VI_SVC,
320 		PIPEDIR_IN,     /* in = DL = target -> host */
321 		2,
322 	},
323 	{
324 		WMI_CONTROL_SVC,
325 		PIPEDIR_OUT,    /* out = UL = host -> target */
326 		3,
327 	},
328 	{
329 		WMI_CONTROL_SVC,
330 		PIPEDIR_IN,     /* in = DL = target -> host */
331 		2,
332 	},
333 	{
334 		HTC_CTRL_RSVD_SVC,
335 		PIPEDIR_OUT,    /* out = UL = host -> target */
336 		0,              /* could be moved to 3 (share with WMI) */
337 	},
338 	{
339 		HTC_CTRL_RSVD_SVC,
340 		PIPEDIR_IN,     /* in = DL = target -> host */
341 		2,
342 	},
343 	{
344 		HTC_RAW_STREAMS_SVC, /* not currently used */
345 		PIPEDIR_OUT,    /* out = UL = host -> target */
346 		0,
347 	},
348 	{
349 		HTC_RAW_STREAMS_SVC, /* not currently used */
350 		PIPEDIR_IN,     /* in = DL = target -> host */
351 		2,
352 	},
353 	{
354 		HTT_DATA_MSG_SVC,
355 		PIPEDIR_OUT,    /* out = UL = host -> target */
356 		4,
357 	},
358 	{
359 		HTT_DATA_MSG_SVC,
360 		PIPEDIR_IN,     /* in = DL = target -> host */
361 		1,
362 	},
363 	{
364 		WDI_IPA_TX_SVC,
365 		PIPEDIR_OUT,    /* out = UL = host -> target */
366 		5,
367 	},
368 #if defined(QCA_WIFI_3_0_ADRASTEA)
369 	{
370 		HTT_DATA2_MSG_SVC,
371 		PIPEDIR_IN,    /* in = DL = target -> host */
372 		9,
373 	},
374 	{
375 		HTT_DATA3_MSG_SVC,
376 		PIPEDIR_IN,    /* in = DL = target -> host */
377 		10,
378 	},
379 	{
380 		PACKET_LOG_SVC,
381 		PIPEDIR_IN,    /* in = DL = target -> host */
382 		11,
383 	},
384 #endif
385 	/* (Additions here) */
386 
387 	{                       /* Must be last */
388 		0,
389 		0,
390 		0,
391 	},
392 };
393 
394 /* PIPEDIR_OUT = HOST to Target */
395 /* PIPEDIR_IN  = TARGET to HOST */
396 #if (defined(QCA_WIFI_QCA8074))
397 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
398 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
399 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
400 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
401 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
402 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
403 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
404 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
405 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
406 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
407 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
408 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
409 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
410 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
411 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
412 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
413 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
414 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
415 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
416 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
417 	/* (Additions here) */
418 	{ 0, 0, 0, },
419 };
420 #else
421 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
422 };
423 #endif
424 
425 #if (defined(QCA_WIFI_QCA8074V2))
426 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
427 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
428 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
429 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
430 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
431 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
432 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
433 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
434 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
435 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
436 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
437 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
438 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
439 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
440 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
441 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
442 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
443 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
444 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
445 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
446 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
447 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
448 	/* (Additions here) */
449 	{ 0, 0, 0, },
450 };
451 #else
452 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
453 };
454 #endif
455 
456 /* PIPEDIR_OUT = HOST to Target */
457 /* PIPEDIR_IN  = TARGET to HOST */
458 #ifdef QCN7605_SUPPORT
459 static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
460 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
461 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
462 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
463 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
464 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
465 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
466 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
467 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
468 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
469 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
470 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
471 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
472 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
473 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
474 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
475 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
476 	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
477 #ifdef IPA_OFFLOAD
478 	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
479 #else
480 	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
481 #endif
482 	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
483 	/* (Additions here) */
484 	{ 0, 0, 0, },
485 };
486 #endif
487 
488 #if (defined(QCA_WIFI_QCA6290))
489 #ifdef CONFIG_WIN
490 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
491 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
492 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
493 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
494 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
495 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
496 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
497 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
498 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
499 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
500 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
501 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
502 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
503 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
504 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
505 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
506 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
507 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
508 	/* (Additions here) */
509 	{ 0, 0, 0, },
510 };
511 #else
512 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
513 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
514 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
515 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
516 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
517 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
518 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
519 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
520 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
521 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
522 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
523 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
524 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
525 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
526 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
527 	/* (Additions here) */
528 	{ 0, 0, 0, },
529 };
530 #endif
531 #else
532 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
533 };
534 #endif
535 
536 #if (defined(QCA_WIFI_QCA6390))
537 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
538 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
539 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
540 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
541 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
542 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
543 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
544 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
545 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
546 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
547 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
548 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
549 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
550 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
551 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
552 	/* (Additions here) */
553 	{ 0, 0, 0, },
554 };
555 #else
556 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
557 };
558 #endif
559 
560 static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
561 	{
562 		WMI_DATA_VO_SVC,
563 		PIPEDIR_OUT,    /* out = UL = host -> target */
564 		3,
565 	},
566 	{
567 		WMI_DATA_VO_SVC,
568 		PIPEDIR_IN,     /* in = DL = target -> host */
569 		2,
570 	},
571 	{
572 		WMI_DATA_BK_SVC,
573 		PIPEDIR_OUT,    /* out = UL = host -> target */
574 		3,
575 	},
576 	{
577 		WMI_DATA_BK_SVC,
578 		PIPEDIR_IN,     /* in = DL = target -> host */
579 		2,
580 	},
581 	{
582 		WMI_DATA_BE_SVC,
583 		PIPEDIR_OUT,    /* out = UL = host -> target */
584 		3,
585 	},
586 	{
587 		WMI_DATA_BE_SVC,
588 		PIPEDIR_IN,     /* in = DL = target -> host */
589 		2,
590 	},
591 	{
592 		WMI_DATA_VI_SVC,
593 		PIPEDIR_OUT,    /* out = UL = host -> target */
594 		3,
595 	},
596 	{
597 		WMI_DATA_VI_SVC,
598 		PIPEDIR_IN,     /* in = DL = target -> host */
599 		2,
600 	},
601 	{
602 		WMI_CONTROL_SVC,
603 		PIPEDIR_OUT,    /* out = UL = host -> target */
604 		3,
605 	},
606 	{
607 		WMI_CONTROL_SVC,
608 		PIPEDIR_IN,     /* in = DL = target -> host */
609 		2,
610 	},
611 	{
612 		HTC_CTRL_RSVD_SVC,
613 		PIPEDIR_OUT,    /* out = UL = host -> target */
614 		0,              /* could be moved to 3 (share with WMI) */
615 	},
616 	{
617 		HTC_CTRL_RSVD_SVC,
618 		PIPEDIR_IN,     /* in = DL = target -> host */
619 		1,
620 	},
621 	{
622 		HTC_RAW_STREAMS_SVC, /* not currently used */
623 		PIPEDIR_OUT,    /* out = UL = host -> target */
624 		0,
625 	},
626 	{
627 		HTC_RAW_STREAMS_SVC, /* not currently used */
628 		PIPEDIR_IN,     /* in = DL = target -> host */
629 		1,
630 	},
631 	{
632 		HTT_DATA_MSG_SVC,
633 		PIPEDIR_OUT,    /* out = UL = host -> target */
634 		4,
635 	},
636 #ifdef WLAN_FEATURE_FASTPATH
637 	{
638 		HTT_DATA_MSG_SVC,
639 		PIPEDIR_IN,     /* in = DL = target -> host */
640 		5,
641 	},
642 #else /* WLAN_FEATURE_FASTPATH */
643 	{
644 		HTT_DATA_MSG_SVC,
645 		PIPEDIR_IN,  /* in = DL = target -> host */
646 		1,
647 	},
648 #endif /* WLAN_FEATURE_FASTPATH */
649 
650 	/* (Additions here) */
651 
652 	{                       /* Must be last */
653 		0,
654 		0,
655 		0,
656 	},
657 };
658 
659 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
660 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
661 
662 #ifdef WLAN_FEATURE_EPPING
663 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
664 	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
665 	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
666 	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
667 	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
668 	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
669 	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
670 	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
671 	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
672 	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
673 	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
674 	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
675 	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
676 	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
677 	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
678 	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
679 	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
680 	{0, 0, 0,},             /* Must be last */
681 };
682 
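/**
 * hif_select_epping_service_to_pipe_map() - get the epping service map
 * @tgt_svc_map_to_use: returned pointer to the epping service-to-pipe map
 * @sz_tgt_svc_map_to_use: returned size of the map in bytes
 *
 * Selects the epping (loopback test) service-to-CE mapping table.
 */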
683 void hif_select_epping_service_to_pipe_map(struct service_to_pipe
684 					   **tgt_svc_map_to_use,
685 					   uint32_t *sz_tgt_svc_map_to_use)
686 {
687 	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
688 	*sz_tgt_svc_map_to_use =
689 			sizeof(target_service_to_ce_map_wlan_epping);
690 }
691 #endif
692 
693 #ifdef QCN7605_SUPPORT
694 static inline
695 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
696 			       uint32_t *sz_tgt_svc_map_to_use)
697 {
698 	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
699 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
700 }
701 #else
702 static inline
703 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
704 			       uint32_t *sz_tgt_svc_map_to_use)
705 {
706 	HIF_ERROR("%s: QCN7605 not supported", __func__);
707 }
708 #endif
709 
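/**
 * hif_select_service_to_pipe_map() - select a service-to-pipe map
 * @scn: hif context
 * @tgt_svc_map_to_use: returned pointer to the selected service map
 * @sz_tgt_svc_map_to_use: returned size of the selected map in bytes
 *
 * Picks the service-to-CE mapping table based on the configured mode
 * (epping vs. mission mode) and the target type.
 */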
710 static void hif_select_service_to_pipe_map(struct hif_softc *scn,
711 				    struct service_to_pipe **tgt_svc_map_to_use,
712 				    uint32_t *sz_tgt_svc_map_to_use)
713 {
714 	uint32_t mode = hif_get_conparam(scn);
715 	struct hif_target_info *tgt_info = &scn->target_info;
716 
717 	if (QDF_IS_EPPING_ENABLED(mode)) {
718 		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
719 						      sz_tgt_svc_map_to_use);
720 	} else {
721 		switch (tgt_info->target_type) {
722 		default:
723 			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
724 			*sz_tgt_svc_map_to_use =
725 				sizeof(target_service_to_ce_map_wlan);
726 			break;
727 		case TARGET_TYPE_QCN7605:
728 			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
729 						  sz_tgt_svc_map_to_use);
730 			break;
731 		case TARGET_TYPE_AR900B:
732 		case TARGET_TYPE_QCA9984:
733 		case TARGET_TYPE_IPQ4019:
734 		case TARGET_TYPE_QCA9888:
735 		case TARGET_TYPE_AR9888:
736 		case TARGET_TYPE_AR9888V2:
737 			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
738 			*sz_tgt_svc_map_to_use =
739 				sizeof(target_service_to_ce_map_ar900b);
740 			break;
741 		case TARGET_TYPE_QCA6290:
742 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
743 			*sz_tgt_svc_map_to_use =
744 				sizeof(target_service_to_ce_map_qca6290);
745 			break;
746 		case TARGET_TYPE_QCA6390:
747 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
748 			*sz_tgt_svc_map_to_use =
749 				sizeof(target_service_to_ce_map_qca6390);
750 			break;
751 		case TARGET_TYPE_QCA8074:
752 			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
753 			*sz_tgt_svc_map_to_use =
754 				sizeof(target_service_to_ce_map_qca8074);
755 			break;
756 		case TARGET_TYPE_QCA8074V2:
757 			*tgt_svc_map_to_use =
758 				target_service_to_ce_map_qca8074_v2;
759 			*sz_tgt_svc_map_to_use =
760 				sizeof(target_service_to_ce_map_qca8074_v2);
761 			break;
762 		}
763 	}
764 }
765 
766 /**
767  * ce_mark_datapath() - mark a CE that serves the HTT data path
768  * @ce_state: pointer to the state context of the CE
769  *
770  * Description:
771  *   Sets the htt_rx_data or htt_tx_data attribute of the state structure
772  *   if the CE serves one of the HTT DATA services, depending on the
773  *   pipe direction.
774  *
775  * Return: true if the CE serves an HTT DATA service, false otherwise
777  */
778 static bool ce_mark_datapath(struct CE_state *ce_state)
779 {
780 	struct service_to_pipe *svc_map;
781 	uint32_t map_sz, map_len;
782 	int    i;
783 	bool   rc = false;
784 
785 	if (ce_state != NULL) {
786 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
787 					       &map_sz);
788 
789 		map_len = map_sz / sizeof(struct service_to_pipe);
790 		for (i = 0; i < map_len; i++) {
791 			if ((svc_map[i].pipenum == ce_state->id) &&
792 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
793 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
794 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
795 				/* HTT CEs are unidirectional */
796 				if (svc_map[i].pipedir == PIPEDIR_IN)
797 					ce_state->htt_rx_data = true;
798 				else
799 					ce_state->htt_tx_data = true;
800 				rc = true;
801 			}
802 		}
803 	}
804 	return rc;
805 }
806 
807 /**
808  * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
809  * @ce_id: ce in question
810  * @ring: ring state being examined
811  * @type: "src_ring" or "dest_ring" string for identifying the ring
812  *
813  * Warns on non-zero index values.
814  * Causes a kernel panic if the ring is not empty during initialization.
815  */
816 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
817 					 char *type)
818 {
819 	if (ring->write_index != 0 || ring->sw_index != 0)
820 		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
821 			  ce_id, type, ring->sw_index, ring->write_index);
822 	if (ring->write_index != ring->sw_index)
823 		QDF_BUG(0);
824 }
825 
826 #ifdef IPA_OFFLOAD
827 /**
828  * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
829  * @scn: softc instance
830  * @CE_id: CE in question
831  * @base_addr: pointer to copyengine ring base address
832  * @ce_ring: copyengine instance
833  * @nentries: number of entries to be allocated
834  * @desc_size: ce desc size
835  *
836  * Return: QDF_STATUS_SUCCESS - for success
837  */
838 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
839 				     qdf_dma_addr_t *base_addr,
840 				     struct CE_ring_state *ce_ring,
841 				     unsigned int nentries, uint32_t desc_size)
842 {
843 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
844 	    !ce_srng_based(scn)) {
845 		if (!scn->ipa_ce_ring) {
846 			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
847 				scn->qdf_dev,
848 				nentries * desc_size + CE_DESC_RING_ALIGN);
849 			if (!scn->ipa_ce_ring) {
850 				HIF_ERROR(
851 				"%s: Failed to allocate memory for IPA ce ring",
852 				__func__);
853 				return QDF_STATUS_E_NOMEM;
854 			}
855 		}
856 		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
857 						&scn->ipa_ce_ring->mem_info);
858 		ce_ring->base_addr_owner_space_unaligned =
859 						scn->ipa_ce_ring->vaddr;
860 	} else {
861 		ce_ring->base_addr_owner_space_unaligned =
862 			qdf_mem_alloc_consistent(scn->qdf_dev,
863 						 scn->qdf_dev->dev,
864 						 (nentries * desc_size +
865 						 CE_DESC_RING_ALIGN),
866 						 base_addr);
867 		if (!ce_ring->base_addr_owner_space_unaligned) {
868 			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
869 				  __func__, CE_id);
870 			return QDF_STATUS_E_NOMEM;
871 		}
872 	}
873 	return QDF_STATUS_SUCCESS;
874 }
875 
876 /**
877  * ce_free_desc_ring() - Frees copyengine descriptor ring
878  * @scn: softc instance
879  * @CE_id: CE in question
880  * @ce_ring: copyengine instance
881  * @desc_size: ce desc size
882  *
883  * Return: None
884  */
885 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
886 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
887 {
888 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
889 	    !ce_srng_based(scn)) {
890 		if (scn->ipa_ce_ring) {
891 			qdf_mem_shared_mem_free(scn->qdf_dev,
892 						scn->ipa_ce_ring);
893 			scn->ipa_ce_ring = NULL;
894 		}
895 		ce_ring->base_addr_owner_space_unaligned = NULL;
896 	} else {
897 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
898 			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
899 			ce_ring->base_addr_owner_space_unaligned,
900 			ce_ring->base_addr_CE_space, 0);
901 		ce_ring->base_addr_owner_space_unaligned = NULL;
902 	}
903 }
904 #else
905 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
906 				     qdf_dma_addr_t *base_addr,
907 				     struct CE_ring_state *ce_ring,
908 				     unsigned int nentries, uint32_t desc_size)
909 {
910 	ce_ring->base_addr_owner_space_unaligned =
911 		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
912 					 (nentries * desc_size +
913 					 CE_DESC_RING_ALIGN), base_addr);
914 	if (!ce_ring->base_addr_owner_space_unaligned) {
915 		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
916 			  __func__, CE_id);
917 		return QDF_STATUS_E_NOMEM;
918 	}
919 	return QDF_STATUS_SUCCESS;
920 }
921 
922 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
923 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
924 {
925 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
926 		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
927 		ce_ring->base_addr_owner_space_unaligned,
928 		ce_ring->base_addr_CE_space, 0);
929 	ce_ring->base_addr_owner_space_unaligned = NULL;
930 }
931 #endif /* IPA_OFFLOAD */
932 
933 /*
934  * TODO: Need to explore the possibility of having this as part of a
935  * target context instead of a global array.
936  */
937 static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);
938 
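/**
 * ce_service_register_module() - register a CE services attach callback
 * @target_type: CE target type (legacy or SRNG) being registered
 * @ce_attach: function returning the ce_ops table for that target type
 *
 * Stores the attach callback in ce_attach_register so that
 * ce_services_attach() can later pick the right CE ops implementation.
 */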
939 void ce_service_register_module(enum ce_target_type target_type,
940 				struct ce_ops* (*ce_attach)(void))
941 {
942 	if (target_type < CE_MAX_TARGET_TYPE)
943 		ce_attach_register[target_type] = ce_attach;
944 }
945 
946 qdf_export_symbol(ce_service_register_module);
947 
948 /**
949  * ce_srng_based() - Does this target use srng
950  * @scn: pointer to the hif context
951  *
952  * Description:
953  *   returns true if the target is SRNG based
954  *
955  * Return: true if the target is SRNG based, false otherwise
958  */
959 bool ce_srng_based(struct hif_softc *scn)
960 {
961 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
962 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
963 
964 	switch (tgt_info->target_type) {
965 	case TARGET_TYPE_QCA8074:
966 	case TARGET_TYPE_QCA8074V2:
967 	case TARGET_TYPE_QCA6290:
968 	case TARGET_TYPE_QCA6390:
969 		return true;
970 	default:
971 		return false;
972 	}
973 	return false;
974 }
975 qdf_export_symbol(ce_srng_based);
976 
977 #ifdef QCA_WIFI_SUPPORT_SRNG
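/**
 * ce_services_attach() - attach the CE service implementation for this target
 * @scn: hif context
 *
 * Returns the SRNG based ce_ops for SRNG targets, otherwise the legacy
 * ce_ops, provided the corresponding attach callback has been registered.
 *
 * Return: pointer to the selected ce_ops, or NULL if none is registered
 */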
978 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
979 {
980 	struct ce_ops *ops = NULL;
981 
982 	if (ce_srng_based(scn)) {
983 		if (ce_attach_register[CE_SVC_SRNG])
984 			ops = ce_attach_register[CE_SVC_SRNG]();
985 	} else if (ce_attach_register[CE_SVC_LEGACY]) {
986 		ops = ce_attach_register[CE_SVC_LEGACY]();
987 	}
988 
989 	return ops;
990 }
991 
992 
993 #else	/* QCA_WIFI_SUPPORT_SRNG */
994 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
995 {
996 	if (ce_attach_register[CE_SVC_LEGACY])
997 		return ce_attach_register[CE_SVC_LEGACY]();
998 
999 	return NULL;
1000 }
1001 #endif /* QCA_WIFI_SUPPORT_SRNG */
1002 
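/**
 * hif_prepare_hal_shadow_register_cfg() - prepare shadow register v2 config
 * @scn: hif context
 * @shadow_config: returned pointer to the shadow register configuration
 * @num_shadow_registers_configured: returned number of configured registers
 *
 * Delegates to the CE service's ce_prepare_shadow_register_v2_cfg callback.
 */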
1003 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
1004 		struct pld_shadow_reg_v2_cfg **shadow_config,
1005 		int *num_shadow_registers_configured) {
1006 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1007 
1008 	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
1009 			scn, shadow_config, num_shadow_registers_configured);
1010 }
1011 
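/**
 * ce_get_desc_size() - get the CE descriptor size for a ring type
 * @scn: hif context
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 *
 * Return: descriptor size in bytes, as reported by the CE service in use
 */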
1012 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
1013 						uint8_t ring_type)
1014 {
1015 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1016 
1017 	return hif_state->ce_services->ce_get_desc_size(ring_type);
1018 }
1019 
1020 
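/**
 * ce_alloc_ring_state() - allocate and initialize a CE ring state
 * @CE_state: CE state the ring belongs to
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 * @nentries: number of ring entries (must be a power of two)
 *
 * Allocates the CE_ring_state structure together with its per-transfer
 * context array, allocates the DMA descriptor memory, zeroes it and
 * computes the aligned owner-space and CE-space base addresses.
 *
 * Return: pointer to the new ring state, or NULL on allocation failure
 */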
1021 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
1022 		uint8_t ring_type, uint32_t nentries)
1023 {
1024 	uint32_t ce_nbytes;
1025 	char *ptr;
1026 	qdf_dma_addr_t base_addr;
1027 	struct CE_ring_state *ce_ring;
1028 	uint32_t desc_size;
1029 	struct hif_softc *scn = CE_state->scn;
1030 
1031 	ce_nbytes = sizeof(struct CE_ring_state)
1032 		+ (nentries * sizeof(void *));
1033 	ptr = qdf_mem_malloc(ce_nbytes);
1034 	if (!ptr)
1035 		return NULL;
1036 
1037 	ce_ring = (struct CE_ring_state *)ptr;
1038 	ptr += sizeof(struct CE_ring_state);
1039 	ce_ring->nentries = nentries;
1040 	ce_ring->nentries_mask = nentries - 1;
1041 
1042 	ce_ring->low_water_mark_nentries = 0;
1043 	ce_ring->high_water_mark_nentries = nentries;
1044 	ce_ring->per_transfer_context = (void **)ptr;
1045 
1046 	desc_size = ce_get_desc_size(scn, ring_type);
1047 
1048 	/* Legacy platforms that do not support cache
1049 	 * coherent DMA are unsupported
1050 	 */
1051 	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
1052 			       ce_ring, nentries,
1053 			       desc_size) !=
1054 	    QDF_STATUS_SUCCESS) {
1055 		HIF_ERROR("%s: ring has no DMA mem",
1056 				__func__);
1057 		qdf_mem_free(ce_ring);
1058 		return NULL;
1059 	}
1060 	ce_ring->base_addr_CE_space_unaligned = base_addr;
1061 
1062 	/* Correctly initialize memory to 0 to
1063 	 * prevent garbage data crashing system
1064 	 * when download firmware
1065 	 */
1066 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
1067 			nentries * desc_size +
1068 			CE_DESC_RING_ALIGN);
1069 
1070 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
1071 
1072 		ce_ring->base_addr_CE_space =
1073 			(ce_ring->base_addr_CE_space_unaligned +
1074 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
1075 
1076 		ce_ring->base_addr_owner_space = (void *)
1077 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
1078 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
1079 	} else {
1080 		ce_ring->base_addr_CE_space =
1081 				ce_ring->base_addr_CE_space_unaligned;
1082 		ce_ring->base_addr_owner_space =
1083 				ce_ring->base_addr_owner_space_unaligned;
1084 	}
1085 
1086 	return ce_ring;
1087 }
1088 
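/**
 * ce_ring_setup() - hook a CE ring up to the hardware
 * @scn: hif context
 * @ring_type: CE_RING_SRC, CE_RING_DEST or CE_RING_STATUS
 * @ce_id: CE in question
 * @ring: ring state to program
 * @attr: CE attributes
 *
 * Return: status from the CE service's ce_ring_setup callback,
 *         negative on failure
 */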
1089 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
1090 			uint32_t ce_id, struct CE_ring_state *ring,
1091 			struct CE_attr *attr)
1092 {
1093 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1094 
1095 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
1096 					      ring, attr);
1097 }
1098 
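/**
 * hif_ce_bus_early_suspend() - pause non-WMI copy engines for bus suspend
 * @scn: hif context
 *
 * Marks every running CE as paused, except the pipes used by the WMI
 * control service, so that traffic stops before the bus suspends.
 *
 * Return: 0 on success, error from hif_map_service_to_pipe() otherwise
 */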
1099 int hif_ce_bus_early_suspend(struct hif_softc *scn)
1100 {
1101 	uint8_t ul_pipe, dl_pipe;
1102 	int ce_id, status, ul_is_polled, dl_is_polled;
1103 	struct CE_state *ce_state;
1104 
1105 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
1106 					 &ul_pipe, &dl_pipe,
1107 					 &ul_is_polled, &dl_is_polled);
1108 	if (status) {
1109 		HIF_ERROR("%s: pipe_mapping failure", __func__);
1110 		return status;
1111 	}
1112 
1113 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1114 		if (ce_id == ul_pipe)
1115 			continue;
1116 		if (ce_id == dl_pipe)
1117 			continue;
1118 
1119 		ce_state = scn->ce_id_to_state[ce_id];
1120 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1121 		if (ce_state->state == CE_RUNNING)
1122 			ce_state->state = CE_PAUSED;
1123 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1124 	}
1125 
1126 	return status;
1127 }
1128 
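/**
 * hif_ce_bus_late_resume() - resume copy engines after bus resume
 * @scn: hif context
 *
 * Re-runs paused CEs and, for CEs left in the PENDING state, writes the
 * cached source ring write index back to hardware and records the update.
 *
 * Return: 0
 */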
1129 int hif_ce_bus_late_resume(struct hif_softc *scn)
1130 {
1131 	int ce_id;
1132 	struct CE_state *ce_state;
1133 	int write_index;
1134 	bool index_updated;
1135 
1136 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1137 		ce_state = scn->ce_id_to_state[ce_id];
1138 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1139 		if (ce_state->state == CE_PENDING) {
1140 			write_index = ce_state->src_ring->write_index;
1141 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
1142 					write_index);
1143 			ce_state->state = CE_RUNNING;
1144 			index_updated = true;
1145 		} else {
1146 			index_updated = false;
1147 		}
1148 
1149 		if (ce_state->state == CE_PAUSED)
1150 			ce_state->state = CE_RUNNING;
1151 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1152 
1153 		if (index_updated)
1154 			hif_record_ce_desc_event(scn, ce_id,
1155 				RESUME_WRITE_INDEX_UPDATE,
1156 				NULL, NULL, write_index, 0);
1157 	}
1158 
1159 	return 0;
1160 }
1161 
1162 /**
1163  * ce_oom_recovery() - try to recover rx ce from oom condition
1164  * @context: CE_state of the CE with oom rx ring
1165  *
1166  * The executing work will continue to be rescheduled until
1167  * at least 1 descriptor is successfully posted to the rx ring.
1168  *
1169  * Return: none
1170  */
1171 static void ce_oom_recovery(void *context)
1172 {
1173 	struct CE_state *ce_state = context;
1174 	struct hif_softc *scn = ce_state->scn;
1175 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
1176 	struct HIF_CE_pipe_info *pipe_info =
1177 		&ce_softc->pipe_info[ce_state->id];
1178 
1179 	hif_post_recv_buffers_for_pipe(pipe_info);
1180 }
1181 
1182 #if HIF_CE_DEBUG_DATA_BUF
1183 /**
1184  * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
1185  * the CE descriptors.
1186  * Allocates HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each.
1187  * @scn: hif scn handle
1188  * @ce_id: Copy Engine Id
1189  *
1190  * Return: QDF_STATUS
1191  */
1192 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1193 {
1194 	struct hif_ce_desc_event *event = NULL;
1195 	struct hif_ce_desc_event *hist_ev = NULL;
1196 	uint32_t index = 0;
1197 
1198 	hist_ev =
1199 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1200 
1201 	if (!hist_ev)
1202 		return QDF_STATUS_E_NOMEM;
1203 
1204 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1205 		event = &hist_ev[index];
1206 		event->data =
1207 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
1208 		if (event->data == NULL)
1209 			return QDF_STATUS_E_NOMEM;
1210 	}
1211 	return QDF_STATUS_SUCCESS;
1212 }
1213 
1214 /**
1215  * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
1216  * the CE descriptors.
1217  * @scn: hif scn handle
1218  * @ce_id: Copy Engine Id
1219  *
1220  * Return: None
1221  */
1222 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1223 {
1224 	struct hif_ce_desc_event *event = NULL;
1225 	struct hif_ce_desc_event *hist_ev = NULL;
1226 	uint32_t index = 0;
1227 
1228 	hist_ev =
1229 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1230 
1231 	if (!hist_ev)
1232 		return;
1233 
1234 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1235 		event = &hist_ev[index];
1236 		if (event->data != NULL)
1237 			qdf_mem_free(event->data);
1238 		event->data = NULL;
1239 		event = NULL;
1240 	}
1241 }
1242 #endif /* HIF_CE_DEBUG_DATA_BUF */
1243 
1244 /*
1245  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to be
1246  * checked here
1247  */
1248 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF
1249 /**
1250  * alloc_mem_ce_debug_history() - Allocate mem for storing the CE descriptors
1251  * @scn: hif scn handle
1252  * @CE_id: Copy Engine Id
1253  *
1254  * Return: QDF_STATUS
1255  */
1256 static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
1257 						unsigned int CE_id)
1258 {
1259 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
1260 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
1261 
1262 	if (scn->hif_ce_desc_hist.hist_ev[CE_id] == NULL) {
1263 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
1264 		return QDF_STATUS_E_NOMEM;
1265 	} else {
1266 		scn->hif_ce_desc_hist.enable[CE_id] = 1;
1267 		return QDF_STATUS_SUCCESS;
1268 	}
1269 }
1270 
1271 /**
1272  * free_mem_ce_debug_history() - Free mem allocated for the CE descriptors
1273  * storing.
1274  * @scn: hif scn handle
1275  * @CE_id: Copy Engine Id
1276  *
1277  * Return: None
1278  */
1279 static inline void free_mem_ce_debug_history(struct hif_softc *scn,
1280 						unsigned int CE_id)
1281 {
1282 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1283 	struct hif_ce_desc_event *hist_ev =
1284 			(struct hif_ce_desc_event *)ce_hist->hist_ev[CE_id];
1285 
1286 	if (!hist_ev)
1287 		return;
1288 
1289 #if HIF_CE_DEBUG_DATA_BUF
1290 	if (ce_hist->data_enable[CE_id] == 1) {
1291 		ce_hist->data_enable[CE_id] = 0;
1292 		free_mem_ce_debug_hist_data(scn, CE_id);
1293 	}
1294 #endif
1295 	ce_hist->enable[CE_id] = 0;
1296 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
1297 	ce_hist->hist_ev[CE_id] = NULL;
1298 }
1299 
1300 /**
1301  * reset_ce_debug_history() - reset the index and ce id used for dumping the
1302  * CE records on the console using sysfs.
1303  * @scn: hif scn handle
1304  *
1305  * Return: None
1306  */
1307 static inline void reset_ce_debug_history(struct hif_softc *scn)
1308 {
1309 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1310 	/* Initialise the CE debug history sysfs interface inputs ce_id and
1311 	 * index. Disable data storing
1312 	 */
1313 	ce_hist->hist_index = 0;
1314 	ce_hist->hist_id = 0;
1315 }
1316 #else /*Note: #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
1317 static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
1318 						unsigned int CE_id)
1319 {
1320 	return QDF_STATUS_SUCCESS;
1321 }
1322 
1323 static inline void free_mem_ce_debug_history(struct hif_softc *scn,
1324 						unsigned int CE_id)
1325 {
1326 }
1327 
1328 static inline void reset_ce_debug_history(struct hif_softc *scn)
1329 {
1330 }
1331 #endif /*Note: defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
1332 
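/**
 * ce_enable_polling() - enable the poll timer for a CE
 * @cestate: opaque pointer to the CE_state
 *
 * Only has an effect for CEs configured with CE_ATTR_ENABLE_POLL.
 */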
1333 void ce_enable_polling(void *cestate)
1334 {
1335 	struct CE_state *CE_state = (struct CE_state *)cestate;
1336 
1337 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1338 		CE_state->timer_inited = true;
1339 }
1340 
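/**
 * ce_disable_polling() - disable the poll timer for a CE
 * @cestate: opaque pointer to the CE_state
 *
 * Only has an effect for CEs configured with CE_ATTR_ENABLE_POLL.
 */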
1341 void ce_disable_polling(void *cestate)
1342 {
1343 	struct CE_state *CE_state = (struct CE_state *)cestate;
1344 
1345 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1346 		CE_state->timer_inited = false;
1347 }
1348 
1349 /*
1350  * Initialize a Copy Engine based on caller-supplied attributes.
1351  * This may be called once to initialize both source and destination
1352  * rings or it may be called twice for separate source and destination
1353  * initialization. It may be that only one side or the other is
1354  * initialized by software/firmware.
1355  *
1356  * This should be called during the initialization sequence before
1357  * interrupts are enabled, so we don't have to worry about thread safety.
1358  */
1359 struct CE_handle *ce_init(struct hif_softc *scn,
1360 			  unsigned int CE_id, struct CE_attr *attr)
1361 {
1362 	struct CE_state *CE_state;
1363 	uint32_t ctrl_addr;
1364 	unsigned int nentries;
1365 	bool malloc_CE_state = false;
1366 	bool malloc_src_ring = false;
1367 	int status;
1368 
1369 	QDF_ASSERT(CE_id < scn->ce_count);
1370 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
1371 	CE_state = scn->ce_id_to_state[CE_id];
1372 
1373 	if (!CE_state) {
1374 		CE_state =
1375 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
1376 		if (!CE_state) {
1377 			HIF_ERROR("%s: CE_state has no mem", __func__);
1378 			return NULL;
1379 		}
1380 		malloc_CE_state = true;
1381 		qdf_spinlock_create(&CE_state->ce_index_lock);
1382 
1383 		CE_state->id = CE_id;
1384 		CE_state->ctrl_addr = ctrl_addr;
1385 		CE_state->state = CE_RUNNING;
1386 		CE_state->attr_flags = attr->flags;
1387 	}
1388 	CE_state->scn = scn;
1389 
1390 	qdf_atomic_init(&CE_state->rx_pending);
1391 	if (attr == NULL) {
1392 		/* Already initialized; caller wants the handle */
1393 		return (struct CE_handle *)CE_state;
1394 	}
1395 
1396 	if (CE_state->src_sz_max)
1397 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
1398 	else
1399 		CE_state->src_sz_max = attr->src_sz_max;
1400 
1401 	ce_init_ce_desc_event_log(scn, CE_id,
1402 				  attr->src_nentries + attr->dest_nentries);
1403 
1404 	/* source ring setup */
1405 	nentries = attr->src_nentries;
1406 	if (nentries) {
1407 		struct CE_ring_state *src_ring;
1408 
1409 		nentries = roundup_pwr2(nentries);
1410 		if (CE_state->src_ring) {
1411 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
1412 		} else {
1413 			src_ring = CE_state->src_ring =
1414 				ce_alloc_ring_state(CE_state,
1415 						CE_RING_SRC,
1416 						nentries);
1417 			if (!src_ring) {
1418 				/* cannot allocate src ring. If the
1419 				 * CE_state is allocated locally free
1420 				 * CE_State and return error.
1421 				 */
1422 				HIF_ERROR("%s: src ring has no mem", __func__);
1423 				if (malloc_CE_state) {
1424 					/* allocated CE_state locally */
1425 					qdf_mem_free(CE_state);
1426 					malloc_CE_state = false;
1427 				}
1428 				return NULL;
1429 			}
1430 			/* we can allocate src ring. Mark that the src ring is
1431 			 * allocated locally
1432 			 */
1433 			malloc_src_ring = true;
1434 
1435 			/*
1436 			 * Also allocate a shadow src ring in
1437 			 * regular mem to use for faster access.
1438 			 */
1439 			src_ring->shadow_base_unaligned =
1440 				qdf_mem_malloc(nentries *
1441 					       sizeof(struct CE_src_desc) +
1442 					       CE_DESC_RING_ALIGN);
1443 			if (src_ring->shadow_base_unaligned == NULL) {
1444 				HIF_ERROR("%s: src ring no shadow_base mem",
1445 					  __func__);
1446 				goto error_no_dma_mem;
1447 			}
1448 			src_ring->shadow_base = (struct CE_src_desc *)
1449 				(((size_t) src_ring->shadow_base_unaligned +
1450 				CE_DESC_RING_ALIGN - 1) &
1451 				 ~(CE_DESC_RING_ALIGN - 1));
1452 
1453 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
1454 					       src_ring, attr);
1455 			if (status < 0)
1456 				goto error_target_access;
1457 
1458 			ce_ring_test_initial_indexes(CE_id, src_ring,
1459 						     "src_ring");
1460 		}
1461 	}
1462 
1463 	/* destination ring setup */
1464 	nentries = attr->dest_nentries;
1465 	if (nentries) {
1466 		struct CE_ring_state *dest_ring;
1467 
1468 		nentries = roundup_pwr2(nentries);
1469 		if (CE_state->dest_ring) {
1470 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
1471 		} else {
1472 			dest_ring = CE_state->dest_ring =
1473 				ce_alloc_ring_state(CE_state,
1474 						CE_RING_DEST,
1475 						nentries);
1476 			if (!dest_ring) {
1477 				/* cannot allocate dst ring. If the CE_state
1478 				 * or src ring is allocated locally free
1479 				 * CE_State and src ring and return error.
1480 				 */
1481 				HIF_ERROR("%s: dest ring has no mem",
1482 					  __func__);
1483 				goto error_no_dma_mem;
1484 			}
1485 
1486 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
1487 				      dest_ring, attr);
1488 			if (status < 0)
1489 				goto error_target_access;
1490 
1491 			ce_ring_test_initial_indexes(CE_id, dest_ring,
1492 						     "dest_ring");
1493 
1494 			/* For srng based target, init status ring here */
1495 			if (ce_srng_based(CE_state->scn)) {
1496 				CE_state->status_ring =
1497 					ce_alloc_ring_state(CE_state,
1498 							CE_RING_STATUS,
1499 							nentries);
1500 				if (CE_state->status_ring == NULL) {
1501 					/*Allocation failed. Cleanup*/
1502 					qdf_mem_free(CE_state->dest_ring);
1503 					if (malloc_src_ring) {
1504 						qdf_mem_free
1505 							(CE_state->src_ring);
1506 						CE_state->src_ring = NULL;
1507 						malloc_src_ring = false;
1508 					}
1509 					if (malloc_CE_state) {
1510 						/* allocated CE_state locally */
1511 						scn->ce_id_to_state[CE_id] =
1512 							NULL;
1513 						qdf_mem_free(CE_state);
1514 						malloc_CE_state = false;
1515 					}
1516 
1517 					return NULL;
1518 				}
1519 
1520 				status = ce_ring_setup(scn, CE_RING_STATUS,
1521 					       CE_id, CE_state->status_ring,
1522 					       attr);
1523 				if (status < 0)
1524 					goto error_target_access;
1525 
1526 			}
1527 
1528 			/* epping */
1529 			/* poll timer */
1530 			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
1531 				qdf_timer_init(scn->qdf_dev,
1532 						&CE_state->poll_timer,
1533 						ce_poll_timeout,
1534 						CE_state,
1535 						QDF_TIMER_TYPE_WAKE_APPS);
1536 				ce_enable_polling(CE_state);
1537 				qdf_timer_mod(&CE_state->poll_timer,
1538 						      CE_POLL_TIMEOUT);
1539 			}
1540 		}
1541 	}
1542 
1543 	if (!ce_srng_based(scn)) {
1544 		/* Enable CE error interrupts */
1545 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1546 			goto error_target_access;
1547 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1548 		if (Q_TARGET_ACCESS_END(scn) < 0)
1549 			goto error_target_access;
1550 	}
1551 
1552 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1553 			ce_oom_recovery, CE_state);
1554 
1555 	/* update the htt_data attribute */
1556 	ce_mark_datapath(CE_state);
1557 	scn->ce_id_to_state[CE_id] = CE_state;
1558 
1559 	alloc_mem_ce_debug_history(scn, CE_id);
1560 
1561 	return (struct CE_handle *)CE_state;
1562 
1563 error_target_access:
1564 error_no_dma_mem:
1565 	ce_fini((struct CE_handle *)CE_state);
1566 	return NULL;
1567 }
1568 
1569 #ifdef WLAN_FEATURE_FASTPATH
1570 /**
1571  * hif_enable_fastpath() - update that we have enabled fastpath mode
1572  * @hif_ctx: HIF context
1573  *
1574  * For use in data path
1575  *
1576  * Return: void
1577  */
1578 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1579 {
1580 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1581 
1582 	if (ce_srng_based(scn)) {
1583 		HIF_INFO("%s, srng rings do not support fastpath", __func__);
1584 		return;
1585 	}
1586 	HIF_DBG("%s, Enabling fastpath mode", __func__);
1587 	scn->fastpath_mode_on = true;
1588 }
1589 
1590 /**
1591  * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
1592  * @hif_ctx: HIF Context
1593  *
1594  * For use in data path to skip HTC
1595  *
1596  * Return: bool
1597  */
1598 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1599 {
1600 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1601 
1602 	return scn->fastpath_mode_on;
1603 }
1604 
1605 /**
1606  * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
1607  * @hif_ctx: HIF Context
1608  *
1609  * API to check if polling is enabled on all CEs. Returns true when polling
1610  * is enabled on all CEs.
1611  *
1612  * Return: bool
1613  */
1614 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1615 {
1616 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1617 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1618 	struct CE_attr *attr;
1619 	int id;
1620 
1621 	for (id = 0; id < scn->ce_count; id++) {
1622 		attr = &hif_state->host_ce_config[id];
1623 		if (attr && (attr->dest_nentries) &&
1624 		    !(attr->flags & CE_ATTR_ENABLE_POLL))
1625 			return false;
1626 	}
1627 	return true;
1628 }
1629 qdf_export_symbol(hif_is_polled_mode_enabled);
1630 
1631 /**
1632  * hif_get_ce_handle - API to get CE handle for FastPath mode
1633  * @hif_ctx: HIF Context
1634  * @id: CopyEngine Id
1635  *
1636  * API to return CE handle for fastpath mode
1637  *
1638  * Return: CE state pointer for the given CE id
1639  */
1640 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1641 {
1642 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1643 
1644 	return scn->ce_id_to_state[id];
1645 }
1646 
1647 /**
1648  * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup
1649  * No processing is required inside this function.
1650  * @ce_hdl: Copy engine handle
1651  * Using an assert, this function makes sure that
1652  * the TX CE has been processed completely.
1653  *
1654  * This is called while dismantling CE structures. No other thread
1655  * should be using these structures while dismantling is occurring,
1656  * therefore no locking is needed.
1657  *
1658  * Return: none
1659  */
1660 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1661 {
1662 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1663 	struct CE_ring_state *src_ring = ce_state->src_ring;
1664 	struct hif_softc *sc = ce_state->scn;
1665 	uint32_t sw_index, write_index;
1666 
1667 	if (hif_is_nss_wifi_enabled(sc))
1668 		return;
1669 
1670 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
1671 		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
1672 			 __func__, __LINE__);
1673 		sw_index = src_ring->sw_index;
1674 		write_index = src_ring->sw_index;
1675 		write_index = src_ring->write_index;
1676 		/* At this point Tx CE should be clean */
1677 		qdf_assert_always(sw_index == write_index);
1678 	}
1679 }
1680 
1681 /**
1682  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1683  * @ce_hdl: Handle to CE
1684  *
1685  * These buffers are never allocated on the fly, but
1686  * are allocated only once during HIF start and freed
1687  * only once during HIF stop.
1688  * NOTE:
1689  * The assumption here is there is no in-flight DMA in progress
1690  * currently, so that buffers can be freed up safely.
1691  *
1692  * Return: NONE
1693  */
1694 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1695 {
1696 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1697 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
1698 	qdf_nbuf_t nbuf;
1699 	int i;
1700 
1701 	if (ce_state->scn->fastpath_mode_on == false)
1702 		return;
1703 
1704 	if (!ce_state->htt_rx_data)
1705 		return;
1706 
1707 	/*
1708 	 * When fastpath_mode is on, for datapath CEs: unlike other CEs,
1709 	 * this CE is kept completely full and does not leave one blank
1710 	 * space to distinguish between an empty queue and a full queue,
1711 	 * so free all the entries.
1712 	 */
1713 	for (i = 0; i < dst_ring->nentries; i++) {
1714 		nbuf = dst_ring->per_transfer_context[i];
1715 
1716 		/*
1717 		 * The reasons for doing this check are:
1718 		 * 1) Protect against calling cleanup before allocating buffers
1719 		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1720 		 *    could have a partially filled ring, because of a memory
1721 		 *    allocation failure in the middle of allocating ring.
1722 		 *    This check accounts for that case, checking
1723 		 *    fastpath_mode_on flag or started flag would not have
1724 		 *    covered that case. This is not in performance path,
1725 		 *    so OK to do this.
1726 		 */
1727 		if (nbuf) {
1728 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1729 					      QDF_DMA_FROM_DEVICE);
1730 			qdf_nbuf_free(nbuf);
1731 		}
1732 	}
1733 }
1734 
1735 /**
1736  * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1737  * @scn: HIF handle
1738  *
1739  * Datapath Rx CEs are a special case, where we reuse all the message buffers.
1740  * Hence we have to post all the entries in the pipe even in the beginning,
1741  * unlike for other CE pipes where one less than dest_nentries are filled in
1742  * the beginning.
1743  *
1744  * Return: None
1745  */
1746 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1747 {
1748 	int pipe_num;
1749 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1750 
1751 	if (scn->fastpath_mode_on == false)
1752 		return;
1753 
1754 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1755 		struct HIF_CE_pipe_info *pipe_info =
1756 			&hif_state->pipe_info[pipe_num];
1757 		struct CE_state *ce_state =
1758 			scn->ce_id_to_state[pipe_info->pipe_num];
1759 
1760 		if (ce_state->htt_rx_data)
1761 			atomic_inc(&pipe_info->recv_bufs_needed);
1762 	}
1763 }
1764 #else
1765 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1766 {
1767 }
1768 
1769 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
1770 {
1771 	return false;
1772 }
1773 
1774 static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
1775 {
1776 	return false;
1777 }
1778 #endif /* WLAN_FEATURE_FASTPATH */
1779 
1780 void ce_fini(struct CE_handle *copyeng)
1781 {
1782 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1783 	unsigned int CE_id = CE_state->id;
1784 	struct hif_softc *scn = CE_state->scn;
	uint32_t desc_size;
	bool inited = CE_state->timer_inited;

	CE_state->state = CE_UNUSED;
1789 	scn->ce_id_to_state[CE_id] = NULL;
1790 	/* Set the flag to false first to stop processing in ce_poll_timeout */
1791 	ce_disable_polling(CE_state);
1792 
1793 	qdf_lro_deinit(CE_state->lro_data);
1794 
1795 	if (CE_state->src_ring) {
1796 		/* Cleanup the datapath Tx ring */
1797 		ce_h2t_tx_ce_cleanup(copyeng);
1798 
1799 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
1800 		if (CE_state->src_ring->shadow_base_unaligned)
1801 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
1802 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
1803 			ce_free_desc_ring(scn, CE_state->id,
1804 					  CE_state->src_ring,
1805 					  desc_size);
1806 		qdf_mem_free(CE_state->src_ring);
1807 	}
1808 	if (CE_state->dest_ring) {
1809 		/* Cleanup the datapath Rx ring */
1810 		ce_t2h_msg_ce_cleanup(copyeng);
1811 
1812 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
1813 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
1814 			ce_free_desc_ring(scn, CE_state->id,
1815 					  CE_state->dest_ring,
1816 					  desc_size);
1817 		qdf_mem_free(CE_state->dest_ring);
1818 
		/* epping: free the poll timer if it was initialized */
1820 		if (inited) {
1821 			qdf_timer_free(&CE_state->poll_timer);
1822 		}
1823 	}
1824 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
1825 		/* Cleanup the datapath Tx ring */
1826 		ce_h2t_tx_ce_cleanup(copyeng);
1827 
1828 		if (CE_state->status_ring->shadow_base_unaligned)
1829 			qdf_mem_free(
1830 				CE_state->status_ring->shadow_base_unaligned);
1831 
1832 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
1833 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
1834 			ce_free_desc_ring(scn, CE_state->id,
1835 					  CE_state->status_ring,
1836 					  desc_size);
1837 		qdf_mem_free(CE_state->status_ring);
1838 	}
1839 
1840 	free_mem_ce_debug_history(scn, CE_id);
1841 	reset_ce_debug_history(scn);
1842 	ce_deinit_ce_desc_event_log(scn, CE_id);
1843 
1844 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
1845 	qdf_mem_free(CE_state);
1846 }
1847 
1848 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
1849 {
1850 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1851 
1852 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
1853 		  sizeof(hif_state->msg_callbacks_pending));
1854 	qdf_mem_zero(&hif_state->msg_callbacks_current,
1855 		  sizeof(hif_state->msg_callbacks_current));
1856 }
1857 
1858 /* Send the first nbytes bytes of the buffer */
1859 QDF_STATUS
1860 hif_send_head(struct hif_opaque_softc *hif_ctx,
1861 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
1862 	      qdf_nbuf_t nbuf, unsigned int data_attr)
1863 {
1864 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1865 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1866 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1867 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
1868 	int bytes = nbytes, nfrags = 0;
1869 	struct ce_sendlist sendlist;
1870 	int status, i = 0;
1871 	unsigned int mux_id = 0;
1872 
1873 	if (nbytes > qdf_nbuf_len(nbuf)) {
1874 		HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
1875 			  (uint32_t)qdf_nbuf_len(nbuf));
1876 		QDF_ASSERT(0);
1877 	}
1878 
1879 	transfer_id =
1880 		(mux_id & MUX_ID_MASK) |
1881 		(transfer_id & TRANSACTION_ID_MASK);
1882 	data_attr &= DESC_DATA_FLAG_MASK;
1883 	/*
1884 	 * The common case involves sending multiple fragments within a
1885 	 * single download (the tx descriptor and the tx frame header).
1886 	 * So, optimize for the case of multiple fragments by not even
1887 	 * checking whether it's necessary to use a sendlist.
1888 	 * The overhead of using a sendlist for a single buffer download
1889 	 * is not a big deal, since it happens rarely (for WMI messages).
1890 	 */
1891 	ce_sendlist_init(&sendlist);
1892 	do {
1893 		qdf_dma_addr_t frag_paddr;
1894 		int frag_bytes;
1895 
1896 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
1897 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
1898 		/*
1899 		 * Clear the packet offset for all but the first CE desc.
1900 		 */
1901 		if (i++ > 0)
1902 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
1903 
1904 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
1905 				    frag_bytes >
1906 				    bytes ? bytes : frag_bytes,
1907 				    qdf_nbuf_get_frag_is_wordstream
1908 				    (nbuf,
1909 				    nfrags) ? 0 :
1910 				    CE_SEND_FLAG_SWAP_DISABLE,
1911 				    data_attr);
1912 		if (status != QDF_STATUS_SUCCESS) {
1913 			HIF_ERROR("%s: error, frag_num %d larger than limit",
1914 				__func__, nfrags);
1915 			return status;
1916 		}
1917 		bytes -= frag_bytes;
1918 		nfrags++;
1919 	} while (bytes > 0);
1920 
1921 	/* Make sure we have resources to handle this request */
1922 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
1923 	if (pipe_info->num_sends_allowed < nfrags) {
1924 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1925 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
1926 		return QDF_STATUS_E_RESOURCES;
1927 	}
1928 	pipe_info->num_sends_allowed -= nfrags;
1929 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1930 
1931 	if (qdf_unlikely(ce_hdl == NULL)) {
1932 		HIF_ERROR("%s: error CE handle is null", __func__);
1933 		return A_ERROR;
1934 	}
1935 
1936 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
1937 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
1938 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
1939 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
1940 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
1941 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
1942 
1943 	return status;
1944 }
1945 
1946 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1947 								int force)
1948 {
1949 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1950 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1951 
1952 	if (!force) {
1953 		int resources;
1954 		/*
1955 		 * Decide whether to actually poll for completions, or just
1956 		 * wait for a later chance. If there seem to be plenty of
1957 		 * resources left, then just wait, since checking involves
1958 		 * reading a CE register, which is a relatively expensive
1959 		 * operation.
1960 		 */
1961 		resources = hif_get_free_queue_number(hif_ctx, pipe);
1962 		/*
1963 		 * If at least 50% of the total resources are still available,
1964 		 * don't bother checking again yet.
1965 		 */
1966 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
1967 									 1))
1968 			return;
1969 	}
1970 #if ATH_11AC_TXCOMPACT
1971 	ce_per_engine_servicereap(scn, pipe);
1972 #else
1973 	ce_per_engine_service(scn, pipe);
1974 #endif
1975 }
1976 
1977 uint16_t
1978 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
1979 {
1980 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1981 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1982 	uint16_t rv;
1983 
1984 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
1985 	rv = pipe_info->num_sends_allowed;
1986 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1987 	return rv;
1988 }
1989 
1990 /* Called by lower (CE) layer when a send to Target completes. */
1991 static void
1992 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
1993 		     void *transfer_context, qdf_dma_addr_t CE_data,
1994 		     unsigned int nbytes, unsigned int transfer_id,
1995 		     unsigned int sw_index, unsigned int hw_index,
1996 		     unsigned int toeplitz_hash_result)
1997 {
1998 	struct HIF_CE_pipe_info *pipe_info =
1999 		(struct HIF_CE_pipe_info *)ce_context;
2000 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
2001 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2002 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
2003 	struct hif_msg_callbacks *msg_callbacks =
2004 		&pipe_info->pipe_callbacks;
2005 
2006 	do {
2007 		/*
2008 		 * The upper layer callback will be triggered
2009 		 * when last fragment is complteted.
2010 		 */
2011 		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
2012 			if (scn->target_status == TARGET_STATUS_RESET) {
2013 
2014 				qdf_nbuf_unmap_single(scn->qdf_dev,
2015 						      transfer_context,
2016 						      QDF_DMA_TO_DEVICE);
2017 				qdf_nbuf_free(transfer_context);
2018 			} else
2019 				msg_callbacks->txCompletionHandler(
2020 					msg_callbacks->Context,
2021 					transfer_context, transfer_id,
2022 					toeplitz_hash_result);
2023 		}
2024 
2025 		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2026 		pipe_info->num_sends_allowed++;
2027 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2028 	} while (ce_completed_send_next(copyeng,
2029 			&ce_context, &transfer_context,
2030 			&CE_data, &nbytes, &transfer_id,
2031 			&sw_idx, &hw_idx,
2032 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
2033 }
2034 
2035 /**
2036  * hif_ce_do_recv(): send message from copy engine to upper layers
2037  * @msg_callbacks: structure containing callback and callback context
2038  * @netbuff: skb containing message
2039  * @nbytes: number of bytes in the message
2040  * @pipe_info: used for the pipe_number info
2041  *
2042  * Checks the packet length, configures the length in the netbuff,
2043  * and calls the upper layer callback.
2044  *
2045  * return: None
2046  */
2047 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
2048 		qdf_nbuf_t netbuf, int nbytes,
2049 		struct HIF_CE_pipe_info *pipe_info) {
2050 	if (nbytes <= pipe_info->buf_sz) {
2051 		qdf_nbuf_set_pktlen(netbuf, nbytes);
2052 		msg_callbacks->
2053 			rxCompletionHandler(msg_callbacks->Context,
2054 					netbuf, pipe_info->pipe_num);
2055 	} else {
2056 		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
2057 				__func__, netbuf, nbytes);
2058 
2059 		qdf_nbuf_free(netbuf);
2060 	}
2061 }
2062 
2063 /* Called by lower (CE) layer when data is received from the Target. */
2064 static void
2065 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
2066 		     void *transfer_context, qdf_dma_addr_t CE_data,
2067 		     unsigned int nbytes, unsigned int transfer_id,
2068 		     unsigned int flags)
2069 {
2070 	struct HIF_CE_pipe_info *pipe_info =
2071 		(struct HIF_CE_pipe_info *)ce_context;
2072 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
2073 	struct CE_state *ce_state = (struct CE_state *) copyeng;
2074 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2075 #ifdef HIF_PCI
2076 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
2077 #endif
2078 	struct hif_msg_callbacks *msg_callbacks =
2079 		 &pipe_info->pipe_callbacks;
2080 
2081 	do {
2082 #ifdef HIF_PCI
2083 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2084 #endif
2085 		qdf_nbuf_unmap_single(scn->qdf_dev,
2086 				      (qdf_nbuf_t) transfer_context,
2087 				      QDF_DMA_FROM_DEVICE);
2088 
2089 		atomic_inc(&pipe_info->recv_bufs_needed);
2090 		hif_post_recv_buffers_for_pipe(pipe_info);
2091 		if (scn->target_status == TARGET_STATUS_RESET)
2092 			qdf_nbuf_free(transfer_context);
2093 		else
2094 			hif_ce_do_recv(msg_callbacks, transfer_context,
2095 				nbytes, pipe_info);
2096 
		/* Set up force_break flag if num of receives reaches
		 * MAX_NUM_OF_RECEIVES
		 */
2100 		ce_state->receive_count++;
2101 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
2102 			ce_state->force_break = 1;
2103 			break;
2104 		}
2105 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
2106 					&CE_data, &nbytes, &transfer_id,
2107 					&flags) == QDF_STATUS_SUCCESS);
2108 
2109 }
2110 
2111 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
2112 
2113 void
2114 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
2115 	      struct hif_msg_callbacks *callbacks)
2116 {
2117 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2118 
2119 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2120 	spin_lock_init(&pcie_access_log_lock);
2121 #endif
2122 	/* Save callbacks for later installation */
2123 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
2124 		 sizeof(hif_state->msg_callbacks_pending));
2125 
2126 }
2127 
2128 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
2129 {
2130 	struct CE_handle *ce_diag = hif_state->ce_diag;
2131 	int pipe_num;
2132 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2133 	struct hif_msg_callbacks *hif_msg_callbacks =
2134 		&hif_state->msg_callbacks_current;
2135 
2136 	/* daemonize("hif_compl_thread"); */
2137 
2138 	if (scn->ce_count == 0) {
2139 		HIF_ERROR("%s: Invalid ce_count", __func__);
2140 		return -EINVAL;
2141 	}
2142 
2143 	if (!hif_msg_callbacks ||
2144 			!hif_msg_callbacks->rxCompletionHandler ||
2145 			!hif_msg_callbacks->txCompletionHandler) {
2146 		HIF_ERROR("%s: no completion handler registered", __func__);
2147 		return -EFAULT;
2148 	}
2149 
2150 	A_TARGET_ACCESS_LIKELY(scn);
2151 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2152 		struct CE_attr attr;
2153 		struct HIF_CE_pipe_info *pipe_info;
2154 
2155 		pipe_info = &hif_state->pipe_info[pipe_num];
2156 		if (pipe_info->ce_hdl == ce_diag)
2157 			continue;       /* Handle Diagnostic CE specially */
2158 		attr = hif_state->host_ce_config[pipe_num];
2159 		if (attr.src_nentries) {
2160 			/* pipe used to send to target */
2161 			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
2162 					 __func__, pipe_num, pipe_info);
2163 			ce_send_cb_register(pipe_info->ce_hdl,
2164 					    hif_pci_ce_send_done, pipe_info,
2165 					    attr.flags & CE_ATTR_DISABLE_INTR);
2166 			pipe_info->num_sends_allowed = attr.src_nentries - 1;
2167 		}
2168 		if (attr.dest_nentries) {
2169 			/* pipe used to receive from target */
2170 			ce_recv_cb_register(pipe_info->ce_hdl,
2171 					    hif_pci_ce_recv_data, pipe_info,
2172 					    attr.flags & CE_ATTR_DISABLE_INTR);
2173 		}
2174 
2175 		if (attr.src_nentries)
2176 			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
2177 
2178 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
2179 					sizeof(pipe_info->pipe_callbacks));
2180 	}
2181 
2182 	A_TARGET_ACCESS_UNLIKELY(scn);
2183 	return 0;
2184 }
2185 
2186 /*
2187  * Install pending msg callbacks.
2188  *
2189  * TBDXXX: This hack is needed because upper layers install msg callbacks
2190  * for use with HTC before BMI is done; yet this HIF implementation
2191  * needs to continue to use BMI msg callbacks. Really, upper layers
2192  * should not register HTC callbacks until AFTER BMI phase.
2193  */
2194 static void hif_msg_callbacks_install(struct hif_softc *scn)
2195 {
2196 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2197 
2198 	qdf_mem_copy(&hif_state->msg_callbacks_current,
2199 		 &hif_state->msg_callbacks_pending,
2200 		 sizeof(hif_state->msg_callbacks_pending));
2201 }
2202 
2203 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2204 							uint8_t *DLPipe)
2205 {
2206 	int ul_is_polled, dl_is_polled;
2207 
2208 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
2209 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2210 }
2211 
2212 /**
2213  * hif_dump_pipe_debug_count() - Log error count
2214  * @scn: hif_softc pointer.
2215  *
2216  * Output the pipe error counts of each pipe to log file
2217  *
2218  * Return: N/A
2219  */
2220 void hif_dump_pipe_debug_count(struct hif_softc *scn)
2221 {
2222 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2223 	int pipe_num;
2224 
2225 	if (hif_state == NULL) {
2226 		HIF_ERROR("%s hif_state is NULL", __func__);
2227 		return;
2228 	}
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];

		if (pipe_info->nbuf_alloc_err_count > 0 ||
				pipe_info->nbuf_dma_err_count > 0 ||
				pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
	}
2245 }
2246 
2247 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2248 					  void *nbuf, uint32_t *error_cnt,
2249 					  enum hif_ce_event_type failure_type,
2250 					  const char *failure_type_string)
2251 {
2252 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2253 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2254 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2255 	int ce_id = CE_state->id;
2256 	uint32_t error_cnt_tmp;
2257 
2258 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2259 	error_cnt_tmp = ++(*error_cnt);
2260 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2261 	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
2262 		  __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2263 		  failure_type_string);
2264 	hif_record_ce_desc_event(scn, ce_id, failure_type,
2265 				 NULL, nbuf, bufs_needed_tmp, 0);
	/* if we fail to allocate the last buffer for an rx pipe,
	 * there is no trigger to refill the ce and we will
	 * eventually crash
	 */
2270 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
}

2278 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
2279 {
2280 	struct CE_handle *ce_hdl;
2281 	qdf_size_t buf_sz;
2282 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2283 	QDF_STATUS status;
2284 	uint32_t bufs_posted = 0;
2285 
2286 	buf_sz = pipe_info->buf_sz;
2287 	if (buf_sz == 0) {
2288 		/* Unused Copy Engine */
2289 		return QDF_STATUS_SUCCESS;
2290 	}
2291 
2292 	ce_hdl = pipe_info->ce_hdl;
2293 
2294 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2295 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
2296 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
2297 		qdf_nbuf_t nbuf;
2298 
2299 		atomic_dec(&pipe_info->recv_bufs_needed);
2300 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2301 
2302 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
2303 		if (!nbuf) {
2304 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2305 					&pipe_info->nbuf_alloc_err_count,
2306 					 HIF_RX_NBUF_ALLOC_FAILURE,
2307 					"HIF_RX_NBUF_ALLOC_FAILURE");
2308 			return QDF_STATUS_E_NOMEM;
2309 		}
2310 
2311 		/*
2312 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
2313 		 * CE_data = dma_map_single(dev, data, buf_sz, );
2314 		 * DMA_FROM_DEVICE);
2315 		 */
2316 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
2317 					    QDF_DMA_FROM_DEVICE);
2318 
2319 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2320 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2321 					&pipe_info->nbuf_dma_err_count,
2322 					 HIF_RX_NBUF_MAP_FAILURE,
2323 					"HIF_RX_NBUF_MAP_FAILURE");
2324 			qdf_nbuf_free(nbuf);
2325 			return status;
2326 		}
2327 
2328 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
2329 
2330 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
2331 					       buf_sz, DMA_FROM_DEVICE);
2332 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
2333 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2334 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2335 					&pipe_info->nbuf_ce_enqueue_err_count,
2336 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
2337 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
2338 
2339 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2340 						QDF_DMA_FROM_DEVICE);
2341 			qdf_nbuf_free(nbuf);
2342 			return status;
2343 		}
2344 
2345 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2346 		bufs_posted++;
2347 	}
2348 	pipe_info->nbuf_alloc_err_count =
2349 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
2350 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2351 	pipe_info->nbuf_dma_err_count =
2352 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
2353 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
	pipe_info->nbuf_ce_enqueue_err_count =
		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
2357 
2358 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2359 
2360 	return QDF_STATUS_SUCCESS;
2361 }
2362 
2363 /*
2364  * Try to post all desired receive buffers for all pipes.
2365  * Returns 0 for non fastpath rx copy engine as
2366  * oom_allocation_work will be scheduled to recover any
2367  * failures, non-zero if unable to completely replenish
2368  * receive buffers for fastpath rx Copy engine.
2369  */
2370 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
2371 {
2372 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2373 	int pipe_num;
2374 	struct CE_state *ce_state = NULL;
2375 	QDF_STATUS qdf_status;
2376 
2377 	A_TARGET_ACCESS_LIKELY(scn);
2378 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2379 		struct HIF_CE_pipe_info *pipe_info;
2380 
2381 		ce_state = scn->ce_id_to_state[pipe_num];
2382 		pipe_info = &hif_state->pipe_info[pipe_num];
2383 
2384 		if (hif_is_nss_wifi_enabled(scn) &&
2385 		    ce_state && (ce_state->htt_rx_data))
2386 			continue;
2387 
2388 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
2389 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
2390 			ce_state->htt_rx_data &&
2391 			scn->fastpath_mode_on) {
2392 			A_TARGET_ACCESS_UNLIKELY(scn);
2393 			return qdf_status;
2394 		}
2395 	}
2396 
2397 	A_TARGET_ACCESS_UNLIKELY(scn);
2398 
2399 	return QDF_STATUS_SUCCESS;
2400 }
2401 
2402 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
2403 {
2404 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2405 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2406 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
2407 
2408 	hif_update_fastpath_recv_bufs_cnt(scn);
2409 
2410 	hif_msg_callbacks_install(scn);
2411 
2412 	if (hif_completion_thread_startup(hif_state))
2413 		return QDF_STATUS_E_FAILURE;
2414 
2415 	/* enable buffer cleanup */
2416 	hif_state->started = true;
2417 
2418 	/* Post buffers once to start things off. */
2419 	qdf_status = hif_post_recv_buffers(scn);
2420 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2421 		/* cleanup is done in hif_ce_disable */
2422 		HIF_ERROR("%s:failed to post buffers", __func__);
2423 		return qdf_status;
2424 	}
2425 
2426 	return qdf_status;
2427 }
2428 
2429 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2430 {
2431 	struct hif_softc *scn;
2432 	struct CE_handle *ce_hdl;
2433 	uint32_t buf_sz;
2434 	struct HIF_CE_state *hif_state;
2435 	qdf_nbuf_t netbuf;
2436 	qdf_dma_addr_t CE_data;
2437 	void *per_CE_context;
2438 
2439 	buf_sz = pipe_info->buf_sz;
2440 	/* Unused Copy Engine */
2441 	if (buf_sz == 0)
		return;

2445 	hif_state = pipe_info->HIF_CE_state;
2446 	if (!hif_state->started)
2447 		return;
2448 
2449 	scn = HIF_GET_SOFTC(hif_state);
2450 	ce_hdl = pipe_info->ce_hdl;
2451 
2452 	if (scn->qdf_dev == NULL)
2453 		return;
2454 	while (ce_revoke_recv_next
2455 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
2456 			&CE_data) == QDF_STATUS_SUCCESS) {
2457 		if (netbuf) {
2458 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2459 					      QDF_DMA_FROM_DEVICE);
2460 			qdf_nbuf_free(netbuf);
2461 		}
2462 	}
2463 }
2464 
2465 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2466 {
2467 	struct CE_handle *ce_hdl;
2468 	struct HIF_CE_state *hif_state;
2469 	struct hif_softc *scn;
2470 	qdf_nbuf_t netbuf;
2471 	void *per_CE_context;
2472 	qdf_dma_addr_t CE_data;
2473 	unsigned int nbytes;
2474 	unsigned int id;
2475 	uint32_t buf_sz;
2476 	uint32_t toeplitz_hash_result;
2477 
2478 	buf_sz = pipe_info->buf_sz;
2479 	if (buf_sz == 0) {
2480 		/* Unused Copy Engine */
2481 		return;
2482 	}
2483 
2484 	hif_state = pipe_info->HIF_CE_state;
2485 	if (!hif_state->started) {
2486 		return;
2487 	}
2488 
2489 	scn = HIF_GET_SOFTC(hif_state);
2490 
2491 	ce_hdl = pipe_info->ce_hdl;
2492 
2493 	while (ce_cancel_send_next
2494 		       (ce_hdl, &per_CE_context,
2495 		       (void **)&netbuf, &CE_data, &nbytes,
2496 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2497 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2498 			/*
2499 			 * Packets enqueued by htt_h2t_ver_req_msg() and
2500 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2501 			 * freed in htt_htc_misc_pkt_pool_free() in
2502 			 * wlantl_close(), so do not free them here again
2503 			 * by checking whether it's the endpoint
2504 			 * which they are queued in.
2505 			 */
2506 			if (id == scn->htc_htt_tx_endpoint)
2507 				return;
2508 			/* Indicate the completion to higher
2509 			 * layer to free the buffer
2510 			 */
2511 			if (pipe_info->pipe_callbacks.txCompletionHandler)
2512 				pipe_info->pipe_callbacks.
2513 				    txCompletionHandler(pipe_info->
2514 					    pipe_callbacks.Context,
2515 					    netbuf, id, toeplitz_hash_result);
2516 		}
2517 	}
2518 }
2519 
2520 /*
2521  * Cleanup residual buffers for device shutdown:
2522  *    buffers that were enqueued for receive
2523  *    buffers that were to be sent
2524  * Note: Buffers that had completed but which were
2525  * not yet processed are on a completion queue. They
2526  * are handled when the completion thread shuts down.
2527  */
2528 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
2529 {
2530 	int pipe_num;
2531 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2532 	struct CE_state *ce_state;
2533 
2534 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2535 		struct HIF_CE_pipe_info *pipe_info;
2536 
2537 		ce_state = scn->ce_id_to_state[pipe_num];
2538 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2539 				((ce_state->htt_tx_data) ||
2540 				 (ce_state->htt_rx_data))) {
2541 			continue;
2542 		}
2543 
2544 		pipe_info = &hif_state->pipe_info[pipe_num];
2545 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
2546 		hif_send_buffer_cleanup_on_pipe(pipe_info);
2547 	}
2548 }
2549 
2550 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
2551 {
2552 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2553 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2554 
2555 	hif_buffer_cleanup(hif_state);
2556 }
2557 
2558 static void hif_destroy_oom_work(struct hif_softc *scn)
2559 {
2560 	struct CE_state *ce_state;
2561 	int ce_id;
2562 
2563 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2564 		ce_state = scn->ce_id_to_state[ce_id];
2565 		if (ce_state)
2566 			qdf_destroy_work(scn->qdf_dev,
2567 					 &ce_state->oom_allocation_work);
2568 	}
2569 }
2570 
2571 void hif_ce_stop(struct hif_softc *scn)
2572 {
2573 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2574 	int pipe_num;
2575 
2576 	/*
2577 	 * before cleaning up any memory, ensure irq &
2578 	 * bottom half contexts will not be re-entered
2579 	 */
2580 	hif_disable_isr(&scn->osc);
2581 	hif_destroy_oom_work(scn);
2582 	scn->hif_init_done = false;
2583 
2584 	/*
2585 	 * At this point, asynchronous threads are stopped,
2586 	 * The Target should not DMA nor interrupt, Host code may
2587 	 * not initiate anything more.  So we just need to clean
2588 	 * up Host-side state.
2589 	 */
2590 
2591 	if (scn->athdiag_procfs_inited) {
2592 		athdiag_procfs_remove();
2593 		scn->athdiag_procfs_inited = false;
2594 	}
2595 
2596 	hif_buffer_cleanup(hif_state);
2597 
2598 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2599 		struct HIF_CE_pipe_info *pipe_info;
2600 		struct CE_attr attr;
2601 		struct CE_handle *ce_diag = hif_state->ce_diag;
2602 
2603 		pipe_info = &hif_state->pipe_info[pipe_num];
2604 		if (pipe_info->ce_hdl) {
2605 			if (pipe_info->ce_hdl != ce_diag) {
2606 				attr = hif_state->host_ce_config[pipe_num];
2607 				if (attr.src_nentries)
2608 					qdf_spinlock_destroy(&pipe_info->
2609 							completion_freeq_lock);
2610 			}
2611 			ce_fini(pipe_info->ce_hdl);
2612 			pipe_info->ce_hdl = NULL;
2613 			pipe_info->buf_sz = 0;
2614 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2615 		}
2616 	}
2617 
2618 	if (hif_state->sleep_timer_init) {
2619 		qdf_timer_stop(&hif_state->sleep_timer);
2620 		qdf_timer_free(&hif_state->sleep_timer);
2621 		hif_state->sleep_timer_init = false;
2622 	}
2623 
2624 	hif_state->started = false;
2625 }
2626 
2627 #ifdef QCN7605_SUPPORT
2628 static inline
2629 void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg
2630 				    **target_shadow_reg_cfg_ret,
2631 				    uint32_t *shadow_cfg_sz_ret)
2632 {
2633 	if (target_shadow_reg_cfg_ret)
2634 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg_map_qcn7605;
2635 	if (shadow_cfg_sz_ret)
2636 		*shadow_cfg_sz_ret = sizeof(target_shadow_reg_cfg_map_qcn7605);
2637 }
2638 #else
2639 static inline
2640 void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg
2641 				    **target_shadow_reg_cfg_ret,
2642 				    uint32_t *shadow_cfg_sz_ret)
2643 {
2644 	HIF_ERROR("QCN7605 not supported");
2645 }
2646 #endif
2647 
2648 static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
2649 				   struct shadow_reg_cfg
2650 				   **target_shadow_reg_cfg_ret,
2651 				   uint32_t *shadow_cfg_sz_ret)
2652 {
2653 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2654 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
2655 
2656 	switch (tgt_info->target_type) {
2657 	case TARGET_TYPE_QCN7605:
2658 		hif_get_shadow_reg_cfg_qcn7605(target_shadow_reg_cfg_ret,
2659 					       shadow_cfg_sz_ret);
2660 		break;
2661 	default:
2662 		if (target_shadow_reg_cfg_ret)
2663 			*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2664 		if (shadow_cfg_sz_ret)
2665 			*shadow_cfg_sz_ret = shadow_cfg_sz;
2666 	}
2667 }
2668 
2669 /**
2670  * hif_get_target_ce_config() - get copy engine configuration
2671  * @target_ce_config_ret: basic copy engine configuration
2672  * @target_ce_config_sz_ret: size of the basic configuration in bytes
2673  * @target_service_to_ce_map_ret: service mapping for the copy engines
2674  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2675  * @target_shadow_reg_cfg_ret: shadow register configuration
2676  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2677  *
2678  * providing accessor to these values outside of this file.
2679  * currently these are stored in static pointers to const sections.
2680  * there are multiple configurations that are selected from at compile time.
2681  * Runtime selection would need to consider mode, target type and bus type.
2682  *
2683  * Return: return by parameter.
2684  */
2685 void hif_get_target_ce_config(struct hif_softc *scn,
2686 		struct CE_pipe_config **target_ce_config_ret,
2687 		uint32_t *target_ce_config_sz_ret,
2688 		struct service_to_pipe **target_service_to_ce_map_ret,
2689 		uint32_t *target_service_to_ce_map_sz_ret,
2690 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
2691 		uint32_t *shadow_cfg_sz_ret)
2692 {
2693 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2694 
2695 	*target_ce_config_ret = hif_state->target_ce_config;
2696 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
2697 
2698 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2699 				       target_service_to_ce_map_sz_ret);
2700 	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
2701 			       shadow_cfg_sz_ret);
2702 }
2703 
2704 #ifdef CONFIG_SHADOW_V2
2705 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2706 {
2707 	int i;
2708 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2709 		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
2710 
2711 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2712 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2713 		     "%s: i %d, val %x", __func__, i,
2714 		     cfg->shadow_reg_v2_cfg[i].addr);
2715 	}
2716 }
2717 
2718 #else
2719 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2720 {
2721 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2722 		  "%s: CONFIG_SHADOW_V2 not defined", __func__);
2723 }
2724 #endif
2725 
2726 /**
2727  * hif_wlan_enable(): call the platform driver to enable wlan
2728  * @scn: HIF Context
2729  *
2730  * This function passes the con_mode and CE configuration to
2731  * platform driver to enable wlan.
2732  *
2733  * Return: linux error code
2734  */
2735 int hif_wlan_enable(struct hif_softc *scn)
2736 {
2737 	struct pld_wlan_enable_cfg cfg;
2738 	enum pld_driver_mode mode;
2739 	uint32_t con_mode = hif_get_conparam(scn);
2740 
2741 	hif_get_target_ce_config(scn,
2742 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
2743 			&cfg.num_ce_tgt_cfg,
2744 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
2745 			&cfg.num_ce_svc_pipe_cfg,
2746 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
2747 			&cfg.num_shadow_reg_cfg);
2748 
2749 	/* translate from structure size to array size */
2750 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
2751 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
2752 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
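	/*
	 * Example: hif_get_target_ce_config() reports num_ce_tgt_cfg as a
	 * byte count (e.g. sizeof(target_ce_config_wlan)); the divisions
	 * above convert the byte counts into the number of entries that
	 * the platform driver expects.
	 */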
2753 
2754 	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
2755 			      &cfg.num_shadow_reg_v2_cfg);
2756 
2757 	hif_print_hal_shadow_register_cfg(&cfg);
2758 
2759 	if (QDF_GLOBAL_FTM_MODE == con_mode)
2760 		mode = PLD_FTM;
2761 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
2762 		mode = PLD_COLDBOOT_CALIBRATION;
2763 	else if (QDF_IS_EPPING_ENABLED(con_mode))
2764 		mode = PLD_EPPING;
2765 	else
2766 		mode = PLD_MISSION;
2767 
2768 	if (BYPASS_QMI)
2769 		return 0;
2770 	else
2771 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
2772 				       mode, QWLAN_VERSIONSTR);
2773 }
2774 
2775 #ifdef WLAN_FEATURE_EPPING
2776 
2777 #define CE_EPPING_USES_IRQ true
2778 
2779 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
2780 {
2781 	if (CE_EPPING_USES_IRQ)
2782 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
2783 	else
2784 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
2785 	hif_state->target_ce_config = target_ce_config_wlan_epping;
2786 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
2787 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
2788 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
2789 }
2790 #endif
2791 
2792 #ifdef QCN7605_SUPPORT
2793 static inline
2794 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
2795 			       struct HIF_CE_state *hif_state)
2796 {
2797 	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
2798 	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
2799 	hif_state->target_ce_config_sz =
2800 				 sizeof(target_ce_config_wlan_qcn7605);
2801 	scn->ce_count = QCN7605_CE_COUNT;
2802 }
2803 #else
2804 static inline
2805 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
2806 			       struct HIF_CE_state *hif_state)
2807 {
2808 	HIF_ERROR("QCN7605 not supported");
2809 }
2810 #endif
2811 
2812 #ifdef CE_SVC_CMN_INIT
2813 #ifdef QCA_WIFI_SUPPORT_SRNG
2814 static inline void hif_ce_service_init(void)
2815 {
2816 	ce_service_srng_init();
2817 }
2818 #else
2819 static inline void hif_ce_service_init(void)
2820 {
2821 	ce_service_legacy_init();
2822 }
2823 #endif
2824 #else
2825 static inline void hif_ce_service_init(void)
2826 {
2827 }
2828 #endif
2829 
2830 
2831 /**
2832  * hif_ce_prepare_config() - load the correct static tables.
2833  * @scn: hif context
2834  *
2835  * Epping uses different static attribute tables than mission mode.
2836  */
2837 void hif_ce_prepare_config(struct hif_softc *scn)
2838 {
2839 	uint32_t mode = hif_get_conparam(scn);
2840 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2841 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
2842 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2843 
2844 	hif_ce_service_init();
2845 	hif_state->ce_services = ce_services_attach(scn);
2846 
2847 	scn->ce_count = HOST_CE_COUNT;
2848 	/* if epping is enabled we need to use the epping configuration. */
2849 	if (QDF_IS_EPPING_ENABLED(mode)) {
2850 		hif_ce_prepare_epping_config(hif_state);
2851 	}
2852 
2853 	switch (tgt_info->target_type) {
2854 	default:
2855 		hif_state->host_ce_config = host_ce_config_wlan;
2856 		hif_state->target_ce_config = target_ce_config_wlan;
2857 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
2858 		break;
2859 	case TARGET_TYPE_QCN7605:
2860 		hif_set_ce_config_qcn7605(scn, hif_state);
2861 		break;
2862 	case TARGET_TYPE_AR900B:
2863 	case TARGET_TYPE_QCA9984:
2864 	case TARGET_TYPE_IPQ4019:
2865 	case TARGET_TYPE_QCA9888:
2866 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
2867 			hif_state->host_ce_config =
2868 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
2869 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2870 			hif_state->host_ce_config =
2871 				host_lowdesc_ce_cfg_wlan_ar900b;
2872 		} else {
2873 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
2874 		}
2875 
2876 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
2877 		hif_state->target_ce_config_sz =
2878 				sizeof(target_ce_config_wlan_ar900b);
2879 
2880 		break;
2881 
2882 	case TARGET_TYPE_AR9888:
2883 	case TARGET_TYPE_AR9888V2:
2884 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2885 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
2886 		} else {
2887 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
2888 		}
2889 
2890 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
2891 		hif_state->target_ce_config_sz =
2892 					sizeof(target_ce_config_wlan_ar9888);
2893 
2894 		break;
2895 
2896 	case TARGET_TYPE_QCA8074:
2897 	case TARGET_TYPE_QCA8074V2:
2898 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
2899 			hif_state->host_ce_config =
2900 					host_ce_config_wlan_qca8074_pci;
2901 			hif_state->target_ce_config =
2902 				target_ce_config_wlan_qca8074_pci;
2903 			hif_state->target_ce_config_sz =
2904 				sizeof(target_ce_config_wlan_qca8074_pci);
2905 		} else {
2906 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
2907 			hif_state->target_ce_config =
2908 					target_ce_config_wlan_qca8074;
2909 			hif_state->target_ce_config_sz =
2910 				sizeof(target_ce_config_wlan_qca8074);
2911 		}
2912 		break;
2913 	case TARGET_TYPE_QCA6290:
2914 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
2915 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
2916 		hif_state->target_ce_config_sz =
2917 					sizeof(target_ce_config_wlan_qca6290);
2918 
2919 		scn->ce_count = QCA_6290_CE_COUNT;
2920 		break;
2921 	case TARGET_TYPE_QCA6390:
2922 		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
2923 		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
2924 		hif_state->target_ce_config_sz =
2925 					sizeof(target_ce_config_wlan_qca6390);
2926 
2927 		scn->ce_count = QCA_6390_CE_COUNT;
2928 		break;
2929 	}
2930 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
2931 }
2932 
2933 /**
2934  * hif_ce_open() - do ce specific allocations
2935  * @hif_sc: pointer to hif context
2936  *
2937  * return: 0 for success or QDF_STATUS_E_NOMEM
2938  */
2939 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
2940 {
2941 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2942 
2943 	qdf_spinlock_create(&hif_state->irq_reg_lock);
2944 	qdf_spinlock_create(&hif_state->keep_awake_lock);
2945 	return QDF_STATUS_SUCCESS;
2946 }
2947 
2948 /**
2949  * hif_ce_close() - do ce specific free
2950  * @hif_sc: pointer to hif context
2951  */
2952 void hif_ce_close(struct hif_softc *hif_sc)
2953 {
2954 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2955 
2956 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
2957 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
2958 }
2959 
2960 /**
2961  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
2962  * @hif_sc: hif context
2963  *
2964  * uses state variables to support cleaning up when hif_config_ce fails.
2965  */
2966 void hif_unconfig_ce(struct hif_softc *hif_sc)
2967 {
2968 	int pipe_num;
2969 	struct HIF_CE_pipe_info *pipe_info;
2970 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2971 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
2972 
2973 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
2974 		pipe_info = &hif_state->pipe_info[pipe_num];
2975 		if (pipe_info->ce_hdl) {
2976 			ce_unregister_irq(hif_state, (1 << pipe_num));
2977 		}
2978 	}
2979 	deinit_tasklet_workers(hif_hdl);
2980 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
2981 		pipe_info = &hif_state->pipe_info[pipe_num];
2982 		if (pipe_info->ce_hdl) {
2983 			ce_fini(pipe_info->ce_hdl);
2984 			pipe_info->ce_hdl = NULL;
2985 			pipe_info->buf_sz = 0;
2986 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2987 		}
2988 	}
2989 	if (hif_sc->athdiag_procfs_inited) {
2990 		athdiag_procfs_remove();
2991 		hif_sc->athdiag_procfs_inited = false;
2992 	}
2993 }
2994 
2995 #ifdef CONFIG_BYPASS_QMI
2996 #define FW_SHARED_MEM (2 * 1024 * 1024)
2997 
2998 /**
2999  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3000  * @scn: pointer to HIF structure
3001  *
3002  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3003  *
3004  * Return: void
3005  */
3006 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3007 {
3008 	void *target_va;
3009 	phys_addr_t target_pa;
3010 
3011 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
3012 				FW_SHARED_MEM, &target_pa);
3013 	if (NULL == target_va) {
3014 		HIF_TRACE("Memory allocation failed could not post target buf");
3015 		return;
3016 	}
3017 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3018 	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
3019 }
3020 #else
3021 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
3022 {
3023 }
3024 #endif
3025 
3026 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
3027 				bool wait_for_it)
3028 {
3029 	/* todo */
3030 	return 0;
3031 }
3032 
3033 /**
3034  * hif_config_ce() - configure copy engines
3035  * @scn: hif context
3036  *
3037  * Prepares fw, copy engine hardware and host sw according
3038  * to the attributes selected by hif_ce_prepare_config.
3039  *
3040  * also calls athdiag_procfs_init
3041  *
 * return: 0 for success, nonzero for failure.
3043  */
3044 int hif_config_ce(struct hif_softc *scn)
3045 {
3046 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3047 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3048 	struct HIF_CE_pipe_info *pipe_info;
3049 	int pipe_num;
3050 	struct CE_state *ce_state = NULL;
3051 
3052 #ifdef ADRASTEA_SHADOW_REGISTERS
3053 	int i;
3054 #endif
3055 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
3056 
3057 	scn->notice_send = true;
3058 	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
3059 
3060 	hif_post_static_buf_to_target(scn);
3061 
3062 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
3063 
3064 	hif_config_rri_on_ddr(scn);
3065 
3066 	if (ce_srng_based(scn))
3067 		scn->bus_ops.hif_target_sleep_state_adjust =
3068 			&hif_srng_sleep_state_adjust;
3069 
	/* Initialise the CE debug history sysfs interface inputs (ce_id and
	 * index) and disable data storing.
	 */
3073 	reset_ce_debug_history(scn);
3074 
3075 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3076 		struct CE_attr *attr;
3077 
3078 		pipe_info = &hif_state->pipe_info[pipe_num];
3079 		pipe_info->pipe_num = pipe_num;
3080 		pipe_info->HIF_CE_state = hif_state;
3081 		attr = &hif_state->host_ce_config[pipe_num];
3082 
3083 		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
3084 		ce_state = scn->ce_id_to_state[pipe_num];
3085 		if (!ce_state) {
3086 			A_TARGET_ACCESS_UNLIKELY(scn);
3087 			goto err;
3088 		}
3089 		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
3090 		QDF_ASSERT(pipe_info->ce_hdl != NULL);
3091 		if (pipe_info->ce_hdl == NULL) {
3092 			rv = QDF_STATUS_E_FAILURE;
3093 			A_TARGET_ACCESS_UNLIKELY(scn);
3094 			goto err;
3095 		}
3096 
3097 		ce_state->lro_data = qdf_lro_init();
3098 
3099 		if (attr->flags & CE_ATTR_DIAG) {
			/* Reserve this CE (flagged CE_ATTR_DIAG) for
			 * Diagnostic Window support
			 */
3103 			hif_state->ce_diag = pipe_info->ce_hdl;
3104 			continue;
3105 		}
3106 
3107 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3108 				(ce_state->htt_rx_data))
3109 			continue;
3110 
3111 		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
3112 		if (attr->dest_nentries > 0) {
3113 			atomic_set(&pipe_info->recv_bufs_needed,
3114 				   init_buffer_count(attr->dest_nentries - 1));
			/* SRNG based CE has one entry less */
3116 			if (ce_srng_based(scn))
3117 				atomic_dec(&pipe_info->recv_bufs_needed);
3118 		} else {
3119 			atomic_set(&pipe_info->recv_bufs_needed, 0);
3120 		}
3121 		ce_tasklet_init(hif_state, (1 << pipe_num));
3122 		ce_register_irq(hif_state, (1 << pipe_num));
3123 	}
3124 
3125 	if (athdiag_procfs_init(scn) != 0) {
3126 		A_TARGET_ACCESS_UNLIKELY(scn);
3127 		goto err;
3128 	}
3129 	scn->athdiag_procfs_inited = true;
3130 
3131 	HIF_DBG("%s: ce_init done", __func__);
3132 
3133 	init_tasklet_workers(hif_hdl);
3134 
3135 	HIF_DBG("%s: X, ret = %d", __func__, rv);
3136 
3137 #ifdef ADRASTEA_SHADOW_REGISTERS
3138 	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
3139 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
3140 		HIF_DBG("%s Shadow Register%d is mapped to address %x",
3141 			  __func__, i,
3142 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
3143 	}
3144 #endif
3145 
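	/* plain int contract: 0 on success, nonzero on failure */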
3146 	return rv != QDF_STATUS_SUCCESS;
3147 
3148 err:
3149 	/* Failure, so clean up */
3150 	hif_unconfig_ce(scn);
3151 	HIF_TRACE("%s: X, ret = %d", __func__, rv);
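	/* always evaluates to a nonzero value, signalling failure */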
3152 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
3153 }
3154 
3155 #ifdef WLAN_FEATURE_FASTPATH
3156 /**
3157  * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
3158  * @handler: Callback funtcion
3159  * @context: handle for callback function
3160  *
3161  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
3162  */
3163 int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
3164 				fastpath_msg_handler handler,
3165 				void *context)
3166 {
3167 	struct CE_state *ce_state;
3168 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3169 	int i;
3170 
3171 	if (!scn) {
3172 		HIF_ERROR("%s: scn is NULL", __func__);
3173 		QDF_ASSERT(0);
3174 		return QDF_STATUS_E_FAILURE;
3175 	}
3176 
3177 	if (!scn->fastpath_mode_on) {
3178 		HIF_WARN("%s: Fastpath mode disabled", __func__);
3179 		return QDF_STATUS_E_FAILURE;
3180 	}
3181 
3182 	for (i = 0; i < scn->ce_count; i++) {
3183 		ce_state = scn->ce_id_to_state[i];
3184 		if (ce_state->htt_rx_data) {
3185 			ce_state->fastpath_handler = handler;
3186 			ce_state->context = context;
3187 		}
3188 	}
3189 
3190 	return QDF_STATUS_SUCCESS;
3191 }
3192 qdf_export_symbol(hif_ce_fastpath_cb_register);
3193 #endif
3194 
3195 #ifdef IPA_OFFLOAD
3196 /**
3197  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
3198  * @scn: bus context
3199  * @ce_sr_base_paddr: copyengine source ring base physical address
3200  * @ce_sr_ring_size: copyengine source ring size
3201  * @ce_reg_paddr: copyengine register physical address
3202  *
3203  * IPA micro controller data path offload feature enabled,
3204  * HIF should release copy engine related resource information to IPA UC
3205  * IPA UC will access hardware resource with released information
3206  *
3207  * Return: None
3208  */
3209 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
3210 			     qdf_shared_mem_t **ce_sr,
3211 			     uint32_t *ce_sr_ring_size,
3212 			     qdf_dma_addr_t *ce_reg_paddr)
3213 {
3214 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3215 	struct HIF_CE_pipe_info *pipe_info =
3216 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
3217 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3218 
3219 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
3220 			    ce_reg_paddr);
3221 }
3222 #endif /* IPA_OFFLOAD */
3223 
3224 
3225 #ifdef ADRASTEA_SHADOW_REGISTERS
3226 
3227 /*
3228  * Current shadow register config
3229  *
3230  * -----------------------------------------------------------
3231  * Shadow Register      |     CE   |    src/dst write index
3232  * -----------------------------------------------------------
3233  *         0            |     0    |           src
3234  *         1     No Config - Doesn't point to anything
3235  *         2     No Config - Doesn't point to anything
3236  *         3            |     3    |           src
3237  *         4            |     4    |           src
3238  *         5            |     5    |           src
3239  *         6     No Config - Doesn't point to anything
3240  *         7            |     7    |           src
3241  *         8     No Config - Doesn't point to anything
3242  *         9     No Config - Doesn't point to anything
3243  *         10    No Config - Doesn't point to anything
3244  *         11    No Config - Doesn't point to anything
3245  * -----------------------------------------------------------
3246  *         12    No Config - Doesn't point to anything
3247  *         13           |     1    |           dst
3248  *         14           |     2    |           dst
3249  *         15    No Config - Doesn't point to anything
3250  *         16    No Config - Doesn't point to anything
3251  *         17    No Config - Doesn't point to anything
3252  *         18    No Config - Doesn't point to anything
3253  *         19           |     7    |           dst
3254  *         20           |     8    |           dst
3255  *         21    No Config - Doesn't point to anything
3256  *         22    No Config - Doesn't point to anything
3257  *         23    No Config - Doesn't point to anything
3258  * -----------------------------------------------------------
3259  *
3260  *
3261  * ToDo - Move shadow register config to following in the future
3262  * This helps free up a block of shadow registers towards the end.
3263  * Can be used for other purposes
3264  *
3265  * -----------------------------------------------------------
3266  * Shadow Register      |     CE   |    src/dst write index
3267  * -----------------------------------------------------------
3268  *      0            |     0    |           src
3269  *      1            |     3    |           src
3270  *      2            |     4    |           src
3271  *      3            |     5    |           src
3272  *      4            |     7    |           src
3273  * -----------------------------------------------------------
3274  *      5            |     1    |           dst
3275  *      6            |     2    |           dst
3276  *      7            |     7    |           dst
3277  *      8            |     8    |           dst
3278  * -----------------------------------------------------------
3279  *      9     No Config - Doesn't point to anything
3280  *      12    No Config - Doesn't point to anything
3281  *      13    No Config - Doesn't point to anything
3282  *      14    No Config - Doesn't point to anything
3283  *      15    No Config - Doesn't point to anything
3284  *      16    No Config - Doesn't point to anything
3285  *      17    No Config - Doesn't point to anything
3286  *      18    No Config - Doesn't point to anything
3287  *      19    No Config - Doesn't point to anything
3288  *      20    No Config - Doesn't point to anything
3289  *      21    No Config - Doesn't point to anything
3290  *      22    No Config - Doesn't point to anything
3291  *      23    No Config - Doesn't point to anything
3292  * -----------------------------------------------------------
3293 */
3294 #ifndef QCN7605_SUPPORT
3295 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3296 {
3297 	u32 addr = 0;
3298 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3299 
3300 	switch (ce) {
3301 	case 0:
3302 		addr = SHADOW_VALUE0;
3303 		break;
3304 	case 3:
3305 		addr = SHADOW_VALUE3;
3306 		break;
3307 	case 4:
3308 		addr = SHADOW_VALUE4;
3309 		break;
3310 	case 5:
3311 		addr = SHADOW_VALUE5;
3312 		break;
3313 	case 7:
3314 		addr = SHADOW_VALUE7;
3315 		break;
3316 	default:
3317 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3318 		QDF_ASSERT(0);
3319 	}
3320 	return addr;
3321 
3322 }
3323 
3324 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3325 {
3326 	u32 addr = 0;
3327 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3328 
3329 	switch (ce) {
3330 	case 1:
3331 		addr = SHADOW_VALUE13;
3332 		break;
3333 	case 2:
3334 		addr = SHADOW_VALUE14;
3335 		break;
3336 	case 5:
3337 		addr = SHADOW_VALUE17;
3338 		break;
3339 	case 7:
3340 		addr = SHADOW_VALUE19;
3341 		break;
3342 	case 8:
3343 		addr = SHADOW_VALUE20;
3344 		break;
3345 	case 9:
3346 		addr = SHADOW_VALUE21;
3347 		break;
3348 	case 10:
3349 		addr = SHADOW_VALUE22;
3350 		break;
3351 	case 11:
3352 		addr = SHADOW_VALUE23;
3353 		break;
3354 	default:
3355 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3356 		QDF_ASSERT(0);
3357 	}
3358 
3359 	return addr;
3360 
3361 }
3362 #else
3363 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3364 {
3365 	u32 addr = 0;
3366 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3367 
3368 	switch (ce) {
3369 	case 0:
3370 		addr = SHADOW_VALUE0;
3371 		break;
3372 	case 4:
3373 		addr = SHADOW_VALUE4;
3374 		break;
3375 	case 5:
3376 		addr = SHADOW_VALUE5;
3377 		break;
3378 	default:
3379 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3380 		QDF_ASSERT(0);
3381 	}
3382 	return addr;
3383 }
3384 
3385 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3386 {
3387 	u32 addr = 0;
3388 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3389 
3390 	switch (ce) {
3391 	case 1:
3392 		addr = SHADOW_VALUE13;
3393 		break;
3394 	case 2:
3395 		addr = SHADOW_VALUE14;
3396 		break;
3397 	case 3:
3398 		addr = SHADOW_VALUE15;
3399 		break;
3400 	case 5:
3401 		addr = SHADOW_VALUE17;
3402 		break;
3403 	case 7:
3404 		addr = SHADOW_VALUE19;
3405 		break;
3406 	case 8:
3407 		addr = SHADOW_VALUE20;
3408 		break;
3409 	case 9:
3410 		addr = SHADOW_VALUE21;
3411 		break;
3412 	case 10:
3413 		addr = SHADOW_VALUE22;
3414 		break;
3415 	case 11:
3416 		addr = SHADOW_VALUE23;
3417 		break;
3418 	default:
3419 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3420 		QDF_ASSERT(0);
3421 	}
3422 
3423 	return addr;
3424 }
3425 #endif
3426 #endif
3427 
3428 #if defined(FEATURE_LRO)
3429 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
3430 {
3431 	struct CE_state *ce_state;
3432 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3433 
3434 	ce_state = scn->ce_id_to_state[ctx_id];
3435 
3436 	return ce_state->lro_data;
3437 }
3438 #endif
3439 
3440 /**
3441  * hif_map_service_to_pipe() - returns  the ce ids pertaining to
3442  * this service
3443  * @scn: hif_softc pointer.
3444  * @svc_id: Service ID for which the mapping is needed.
3445  * @ul_pipe: address of the container in which ul pipe is returned.
3446  * @dl_pipe: address of the container in which dl pipe is returned.
3447  * @ul_is_polled: address of the container in which a bool
3448  *			indicating if the UL CE for this service
3449  *			is polled is returned.
3450  * @dl_is_polled: address of the container in which a bool
3451  *			indicating if the DL CE for this service
3452  *			is polled is returned.
3453  *
3454  * Return: Indicates whether the service has been found in the table.
3455  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
3456  *         A debug log notes any leg that is not updated because the
3457  *         table has no entry for it (not an error); see the sketch below.
3458  */
3459 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
3460 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
3461 			int *dl_is_polled)
3462 {
3463 	int status = QDF_STATUS_E_INVAL;
3464 	unsigned int i;
3465 	struct service_to_pipe element;
3466 	struct service_to_pipe *tgt_svc_map_to_use;
3467 	uint32_t sz_tgt_svc_map_to_use;
3468 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3469 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3470 	bool dl_updated = false;
3471 	bool ul_updated = false;
3472 
3473 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
3474 				       &sz_tgt_svc_map_to_use);
3475 
3476 	*dl_is_polled = 0;  /* polling for received messages not supported */
3477 
3478 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
3479 
3480 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
3481 		if (element.service_id == svc_id) {
3482 			if (element.pipedir == PIPEDIR_OUT) {
3483 				*ul_pipe = element.pipenum;
3484 				*ul_is_polled =
3485 					(hif_state->host_ce_config[*ul_pipe].flags &
3486 					 CE_ATTR_DISABLE_INTR) != 0;
3487 				ul_updated = true;
3488 			} else if (element.pipedir == PIPEDIR_IN) {
3489 				*dl_pipe = element.pipenum;
3490 				dl_updated = true;
3491 			}
3492 			status = QDF_STATUS_SUCCESS;
3493 		}
3494 	}
3495 	if (!ul_updated)
3496 		HIF_DBG("ul pipe is NOT updated for service %d", svc_id);
3497 	if (!dl_updated)
3498 		HIF_DBG("dl pipe is NOT updated for service %d", svc_id);
3499 
3500 	return status;
3501 }
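/*
 * Illustrative usage sketch (not part of the driver): a caller such as
 * the HTC layer would typically resolve a service to its pipes before
 * using them. The service id and error handling below are examples
 * only; nothing beyond hif_map_service_to_pipe() itself is assumed.
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_is_polled, dl_is_polled;
 *
 *	if (hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC,
 *				    &ul_pipe, &dl_pipe,
 *				    &ul_is_polled, &dl_is_polled))
 *		return -EINVAL;
 *
 * On success the pipe numbers can be used to index
 * hif_state->pipe_info[], and ul_is_polled indicates whether the UL
 * copy engine runs with interrupts disabled (i.e. is polled).
 */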
3502 
3503 #ifdef SHADOW_REG_DEBUG
3504 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
3505 		uint32_t CE_ctrl_addr)
3506 {
3507 	uint32_t read_from_hw, srri_from_ddr = 0;
3508 
3509 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
3510 
3511 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3512 
3513 	if (read_from_hw != srri_from_ddr) {
3514 		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3515 		       __func__, srri_from_ddr, read_from_hw,
3516 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3517 		QDF_ASSERT(0);
3518 	}
3519 	return srri_from_ddr;
3520 }
3521 
3522 
3523 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
3524 		uint32_t CE_ctrl_addr)
3525 {
3526 	uint32_t read_from_hw, drri_from_ddr = 0;
3527 
3528 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
3529 
3530 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3531 
3532 	if (read_from_hw != drri_from_ddr) {
3533 		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3534 		       __func__, drri_from_ddr, read_from_hw,
3535 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3536 		QDF_ASSERT(0);
3537 	}
3538 	return drri_from_ddr;
3539 }
3540 
3541 #endif
3542 
3543 #ifdef ADRASTEA_RRI_ON_DDR
3544 /**
3545  * hif_get_src_ring_read_index(): Called to get the SRRI
3546  *
3547  * @scn: hif_softc pointer
3548  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3549  *
3550  * This function returns the SRRI to the caller. For CEs that
3551  * dont have interrupts enabled, we look at the DDR based SRRI
3552  *
3553  * Return: SRRI
3554  */
3555 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
3556 		uint32_t CE_ctrl_addr)
3557 {
3558 	struct CE_attr attr;
3559 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3560 
3561 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3562 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3563 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3564 	} else {
3565 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3566 			return A_TARGET_READ(scn,
3567 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
3568 		else
3569 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
3570 					CE_ctrl_addr);
3571 	}
3572 }
3573 
3574 /**
3575  * hif_get_dst_ring_read_index(): Called to get the DRRI
3576  *
3577  * @scn: hif_softc pointer
3578  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3579  *
3580  * This function returns the DRRI to the caller. For CEs that
3581  * dont have interrupts enabled, we look at the DDR based DRRI
3582  *
3583  * Return: DRRI
3584  */
3585 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
3586 		uint32_t CE_ctrl_addr)
3587 {
3588 	struct CE_attr attr;
3589 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3590 
3591 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3592 
3593 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3594 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3595 	} else {
3596 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3597 			return A_TARGET_READ(scn,
3598 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
3599 		else
3600 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
3601 					CE_ctrl_addr);
3602 	}
3603 }
3604 
3605 /**
3606  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3607  *
3608  * @scn: hif_softc pointer
3609  *
3610  * This function allocates non cached memory on ddr and sends
3611  * the physical address of this memory to the CE hardware. The
3612  * hardware updates the RRI on this particular location.
3613  *
3614  * Return: None
3615  */
3616 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3617 {
3618 	unsigned int i;
3619 	qdf_dma_addr_t paddr_rri_on_ddr;
3620 	uint32_t high_paddr, low_paddr;
3621 
3622 	scn->vaddr_rri_on_ddr =
3623 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3624 		scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)),
3625 		&paddr_rri_on_ddr);
3626 
3627 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
3628 	low_paddr  = BITS0_TO_31(paddr_rri_on_ddr);
3629 	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
3630 
3631 	HIF_DBG("%s using srri and drri from DDR", __func__);
3632 
3633 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
3634 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
3635 
3636 	for (i = 0; i < CE_COUNT; i++)
3637 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
3638 
3639 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
3640 
3641 }
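/*
 * Layout note (sketch, inferred from the CE_COUNT * sizeof(uint32_t)
 * allocation above): the DDR area holds one 32-bit word per copy
 * engine, so the word for a given CE can be viewed as
 *
 *	uint32_t rri = scn->vaddr_rri_on_ddr[COPY_ENGINE_ID(ctrl_addr)];
 *
 * SRRI_FROM_DDR_ADDR() and DRRI_FROM_DDR_ADDR() then extract the
 * source and destination read indices from that word; see those macro
 * definitions for the exact bit split.
 */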
3642 #else
3643 
3644 /**
3645  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3646  *
3647  * @scn: hif_softc pointer
3648  *
3649  * This is a dummy implementation for platforms that don't
3650  * support this functionality.
3651  *
3652  * Return: None
3653  */
3654 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3655 {
3656 }
3657 #endif
3658 
3659 /**
3660  * hif_dump_ce_registers() - dump ce registers
3661  * @scn: hif_opaque_softc pointer.
3662  *
3663  * Output the copy engine registers
3664  *
3665  * Return: 0 for success or error code
3666  */
3667 int hif_dump_ce_registers(struct hif_softc *scn)
3668 {
3669 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3670 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
3671 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
3672 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
3673 	uint16_t i;
3674 	QDF_STATUS status;
3675 
3676 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
3677 		if (scn->ce_id_to_state[i] == NULL) {
3678 			HIF_DBG("CE%d not used.", i);
3679 			continue;
3680 		}
3681 
3682 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
3683 					   (uint8_t *) &ce_reg_values[0],
3684 					   ce_reg_word_size * sizeof(uint32_t));
3685 
3686 		if (status != QDF_STATUS_SUCCESS) {
3687 			HIF_ERROR("Dumping CE register failed!");
3688 			return -EACCES;
3689 		}
3690 		HIF_ERROR("CE%d=>\n", i);
3691 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
3692 				   (uint8_t *) &ce_reg_values[0],
3693 				   ce_reg_word_size * sizeof(uint32_t));
3694 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
3695 				+ SR_WR_INDEX_ADDRESS),
3696 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
3697 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
3698 				+ CURRENT_SRRI_ADDRESS),
3699 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
3700 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
3701 				+ DST_WR_INDEX_ADDRESS),
3702 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
3703 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
3704 				+ CURRENT_DRRI_ADDRESS),
3705 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
3706 		qdf_print("---");
3707 	}
3708 	return 0;
3709 }
3710 qdf_export_symbol(hif_dump_ce_registers);
3711 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
3712 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
3713 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
3714 {
3715 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3716 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3717 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
3718 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3719 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3720 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
3721 	struct CE_ring_state *src_ring = ce_state->src_ring;
3722 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
3723 
3724 	if (src_ring) {
3725 		hif_info->ul_pipe.nentries = src_ring->nentries;
3726 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
3727 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
3728 		hif_info->ul_pipe.write_index = src_ring->write_index;
3729 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
3730 		hif_info->ul_pipe.base_addr_CE_space =
3731 			src_ring->base_addr_CE_space;
3732 		hif_info->ul_pipe.base_addr_owner_space =
3733 			src_ring->base_addr_owner_space;
3734 	}
3735 
3736 
3737 	if (dest_ring) {
3738 		hif_info->dl_pipe.nentries = dest_ring->nentries;
3739 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
3740 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
3741 		hif_info->dl_pipe.write_index = dest_ring->write_index;
3742 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
3743 		hif_info->dl_pipe.base_addr_CE_space =
3744 			dest_ring->base_addr_CE_space;
3745 		hif_info->dl_pipe.base_addr_owner_space =
3746 			dest_ring->base_addr_owner_space;
3747 	}
3748 
3749 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
3750 	hif_info->ctrl_addr = ce_state->ctrl_addr;
3751 
3752 	return hif_info;
3753 }
3754 qdf_export_symbol(hif_get_addl_pipe_info);
3755 
3756 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
3757 {
3758 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3759 
3760 	scn->nss_wifi_ol_mode = mode;
3761 	return 0;
3762 }
3763 qdf_export_symbol(hif_set_nss_wifiol_mode);
3764 #endif
3765 
3766 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
3767 {
3768 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3769 	scn->hif_attribute = hif_attrib;
3770 }
3771 
3772 
3773 /* Disable interrupts (currently only applicable to the legacy copy engine) */
3774 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
3775 {
3776 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3777 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
3778 	uint32_t ctrl_addr = CE_state->ctrl_addr;
3779 
3780 	Q_TARGET_ACCESS_BEGIN(scn);
3781 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
3782 	Q_TARGET_ACCESS_END(scn);
3783 }
3784 qdf_export_symbol(hif_disable_interrupt);
3785 
3786 /**
3787  * hif_fw_event_handler() - hif fw event handler
3788  * @hif_state: pointer to hif ce state structure
3789  *
3790  * Process fw events and raise HTC callback to process fw events.
3791  *
3792  * Return: none
3793  */
3794 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
3795 {
3796 	struct hif_msg_callbacks *msg_callbacks =
3797 		&hif_state->msg_callbacks_current;
3798 
3799 	if (!msg_callbacks->fwEventHandler)
3800 		return;
3801 
3802 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
3803 			QDF_STATUS_E_FAILURE);
3804 }
3805 
3806 #ifndef QCA_WIFI_3_0
3807 /**
3808  * hif_fw_interrupt_handler() - FW interrupt handler
3809  * @irq: irq number
3810  * @arg: the user pointer
3811  *
3812  * Called from the PCI interrupt handler when the firmware
3813  * raises an interrupt to the Host.
3814  *
3815  * Only registered for legacy CE devices.
3816  *
3817  * Return: status of handled irq
3818  */
3819 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3820 {
3821 	struct hif_softc *scn = arg;
3822 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3823 	uint32_t fw_indicator_address, fw_indicator;
3824 
3825 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
3826 		return ATH_ISR_NOSCHED;
3827 
3828 	fw_indicator_address = hif_state->fw_indicator_address;
3829 	/* For sudden unplug this will return ~0 */
3830 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
3831 
3832 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
3833 		/* ACK: clear Target-side pending event */
3834 		A_TARGET_WRITE(scn, fw_indicator_address,
3835 			       fw_indicator & ~FW_IND_EVENT_PENDING);
3836 		if (Q_TARGET_ACCESS_END(scn) < 0)
3837 			return ATH_ISR_SCHED;
3838 
3839 		if (hif_state->started) {
3840 			hif_fw_event_handler(hif_state);
3841 		} else {
3842 			/*
3843 			 * Probable Target failure before we're prepared
3844 			 * to handle it.  Generally unexpected.
3845 			 * fw_indicator used as bitmap, and defined as below:
3846 			 *     FW_IND_EVENT_PENDING    0x1
3847 			 *     FW_IND_INITIALIZED      0x2
3848 			 *     FW_IND_NEEDRECOVER      0x4
3849 			 */
3850 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
3851 				("%s: Early firmware event indicated 0x%x\n",
3852 				 __func__, fw_indicator));
3853 		}
3854 	} else {
3855 		if (Q_TARGET_ACCESS_END(scn) < 0)
3856 			return ATH_ISR_SCHED;
3857 	}
3858 
3859 	return ATH_ISR_SCHED;
3860 }
3861 #else
3862 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3863 {
3864 	return ATH_ISR_SCHED;
3865 }
3866 #endif /* #ifndef QCA_WIFI_3_0 */
3867 
3868 
3869 /**
3870  * hif_wlan_disable(): call the platform driver to disable wlan
3871  * @scn: HIF Context
3872  *
3873  * This function passes the con_mode to platform driver to disable
3874  * wlan.
3875  *
3876  * Return: void
3877  */
3878 void hif_wlan_disable(struct hif_softc *scn)
3879 {
3880 	enum pld_driver_mode mode;
3881 	uint32_t con_mode = hif_get_conparam(scn);
3882 
3883 	if (scn->target_status == TARGET_STATUS_RESET)
3884 		return;
3885 
3886 	if (QDF_GLOBAL_FTM_MODE == con_mode)
3887 		mode = PLD_FTM;
3888 	else if (QDF_IS_EPPING_ENABLED(con_mode))
3889 		mode = PLD_EPPING;
3890 	else
3891 		mode = PLD_MISSION;
3892 
3893 	pld_wlan_disable(scn->qdf_dev->dev, mode);
3894 }
3895 
3896 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
3897 {
3898 	QDF_STATUS status;
3899 	uint8_t ul_pipe, dl_pipe;
3900 	int ul_is_polled, dl_is_polled;
3901 
3902 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
3903 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
3904 					 HTC_CTRL_RSVD_SVC,
3905 					 &ul_pipe, &dl_pipe,
3906 					 &ul_is_polled, &dl_is_polled);
3907 	if (status) {
3908 		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
3909 		return qdf_status_to_os_return(status);
3910 	}
3911 
3912 	*ce_id = dl_pipe;
3913 
3914 	return 0;
3915 }
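/*
 * Illustrative usage sketch (hypothetical caller): a bus layer that
 * needs a wakeup-capable interrupt could use the wake CE id to pick
 * the matching CE IRQ. Everything here other than hif_get_wake_ce_id()
 * itself is an assumption.
 *
 *	uint8_t wake_ce_id;
 *
 *	if (hif_get_wake_ce_id(scn, &wake_ce_id) == 0)
 *		enable wake on the IRQ that services wake_ce_id
 */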
3916