xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
22 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "ce_api.h"
33 #include "qdf_trace.h"
34 #include "pld_common.h"
35 #include "hif_debug.h"
36 #include "ce_internal.h"
37 #include "ce_reg.h"
38 #include "ce_assignment.h"
39 #include "ce_tasklet.h"
40 #include "qdf_module.h"
41 
42 #define CE_POLL_TIMEOUT 10      /* ms */
43 
44 #define AGC_DUMP         1
45 #define CHANINFO_DUMP    2
46 #define BB_WATCHDOG_DUMP 3
47 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
48 #define PCIE_ACCESS_DUMP 4
49 #endif
50 #include "mp_dev.h"
51 #ifdef HIF_CE_LOG_INFO
52 #include "qdf_hang_event_notifier.h"
53 #endif
54 
55 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
56 	defined(QCA_WIFI_QCA6018) || defined(QCA_WIFI_QCA5018)) && \
57 	!defined(QCA_WIFI_SUPPORT_SRNG)
58 #define QCA_WIFI_SUPPORT_SRNG
59 #endif
60 
61 #ifdef QCA_WIFI_SUPPORT_SRNG
62 #include <hal_api.h>
63 #endif
64 
65 /* Forward references */
66 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
67 
68 /*
69  * Fix EV118783: poll to check whether a BMI response has arrived,
70  * rather than waiting for an interrupt that may be lost.
71  */
72 /* #define BMI_RSP_POLLING */
73 #define BMI_RSP_TO_MILLISEC  1000
74 
75 #ifdef CONFIG_BYPASS_QMI
76 #define BYPASS_QMI 1
77 #else
78 #define BYPASS_QMI 0
79 #endif
80 
81 #ifdef ENABLE_10_4_FW_HDR
82 #if (ENABLE_10_4_FW_HDR == 1)
83 #define WDI_IPA_SERVICE_GROUP 5
84 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
85 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
86 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
87 #endif /* ENABLE_10_4_FW_HDR == 1 */
88 #endif /* ENABLE_10_4_FW_HDR */
89 
90 static void hif_config_rri_on_ddr(struct hif_softc *scn);
91 
92 /**
93  * hif_target_access_log_dump() - dump the target access log
94  *
95  * Dump the PCIe target access log (CONFIG_ATH_PCIE_ACCESS_DEBUG builds).
96  *
97  * Return: n/a
98  */
99 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
100 static void hif_target_access_log_dump(void)
101 {
102 	hif_target_dump_access_log();
103 }
104 #endif
105 
106 
107 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
108 		      uint8_t cmd_id, bool start)
109 {
110 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
111 
112 	switch (cmd_id) {
113 	case AGC_DUMP:
114 		if (start)
115 			priv_start_agc(scn);
116 		else
117 			priv_dump_agc(scn);
118 		break;
119 	case CHANINFO_DUMP:
120 		if (start)
121 			priv_start_cap_chaninfo(scn);
122 		else
123 			priv_dump_chaninfo(scn);
124 		break;
125 	case BB_WATCHDOG_DUMP:
126 		priv_dump_bbwatchdog(scn);
127 		break;
128 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
129 	case PCIE_ACCESS_DUMP:
130 		hif_target_access_log_dump();
131 		break;
132 #endif
133 	default:
134 		hif_err("Invalid htc dump command: %d", cmd_id);
135 		break;
136 	}
137 }
138 
139 static void ce_poll_timeout(void *arg)
140 {
141 	struct CE_state *CE_state = (struct CE_state *)arg;
142 
143 	if (CE_state->timer_inited) {
144 		ce_per_engine_service(CE_state->scn, CE_state->id);
145 		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
146 	}
147 }
148 
149 static unsigned int roundup_pwr2(unsigned int n)
150 {
151 	int i;
152 	unsigned int test_pwr2;
153 
154 	if (!(n & (n - 1)))
155 		return n; /* already a power of 2 */
156 
157 	test_pwr2 = 4;
158 	for (i = 0; i < 29; i++) {
159 		if (test_pwr2 > n)
160 			return test_pwr2;
161 		test_pwr2 = test_pwr2 << 1;
162 	}
163 
164 	QDF_ASSERT(0); /* n too large */
165 	return 0;
166 }
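
/*
 * Illustrative behavior of roundup_pwr2() (comment only, nothing compiled):
 * 1 -> 1, 2 -> 2, 3 -> 4, 5 -> 8, 1000 -> 1024, 1024 -> 1024.  Powers of 2
 * pass the early (n & (n - 1)) test and are returned unchanged; note that
 * n == 0 also passes that test and yields 0 rather than a power of 2.
 */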
167 
168 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
169 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
170 
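/*
 * Each entry below pairs a CE pipe number with the offset of that pipe's
 * write-index register inside the target's shadow register block: source
 * rings shadow ADRASTEA_SRC_WR_INDEX_OFFSET, destination rings
 * ADRASTEA_DST_WR_INDEX_OFFSET.
 */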
171 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
172 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
173 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
174 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
175 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
176 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
177 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
178 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
179 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
180 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
181 #ifdef QCA_WIFI_3_0_ADRASTEA
182 	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
183 	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
184 	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
185 #endif
186 };
187 
188 #ifdef QCN7605_SUPPORT
189 static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
190 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
191 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
192 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
193 	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
194 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
195 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
196 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
197 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
198 };
199 #endif
200 
201 #ifdef WLAN_FEATURE_EPPING
202 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
203 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
204 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
205 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
206 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
207 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
208 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
209 	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
210 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
211 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
212 };
213 #endif
214 
215 /* CE_PCI TABLE */
216 /*
217  * NOTE: the table below is out of date, though still a useful reference.
218  * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
219  * mapping of HTC services to HIF pipes.
220  */
221 /*
222  * This table defines the Copy Engine configuration and the mapping of
223  * services/endpoints to CEs.  A subset of this information is passed to
224  * the Target during startup as a prerequisite to entering the BMI phase.
225  * See:
226  *    target_service_to_ce_map - Target-side mapping
227  *    hif_map_service_to_pipe  - Host-side mapping
228  *    target_ce_config         - Target-side configuration
229  *    host_ce_config           - Host-side configuration
230    ============================================================================
231    Purpose    | Service / Endpoint   | CE   | Dir  | Xfer     | Xfer
232               |                      |      |      | Size     | Frequency
233               |                      |      |      |          |
234    ============================================================================
235    tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
236    descriptor |                      |      |      | O(100B)  | and regular
237    download   |                      |      |      |          |
238    ----------------------------------------------------------------------------
239    rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
240    indication |                      |      |      | O(10B)   | regular
241    upload     |                      |      |      |          |
242    ----------------------------------------------------------------------------
243    MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
244    upload     |                      |      |      | O(1000B) | (frequent
245    e.g. noise |                      |      |      |          | during IP1.0
246    packets    |                      |      |      |          | testing)
247    ----------------------------------------------------------------------------
248    MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
249    download   |                      |      |      | O(1000B) | (frequent
250    e.g.       |                      |      |      |          | during IP1.0
251    misdirected|                      |      |      |          | testing)
252    EAPOL      |                      |      |      |          |
253    packets    |                      |      |      |          |
254    ----------------------------------------------------------------------------
255    n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
256               | DATA_VO (uplink)     |      |      |          |
257    ----------------------------------------------------------------------------
258    n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
259               | DATA_VO (downlink)   |      |      |          |
260    ----------------------------------------------------------------------------
261    WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
262               |                      |      |      | O(100B)  |
263    ----------------------------------------------------------------------------
264    WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
265    messages   | (downlink)           |      |      | O(100B)  |
266               |                      |      |      |          |
267    ----------------------------------------------------------------------------
268    n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
269               | HTC_RAW_STREAMS      |      |      |          |
270               | (uplink)             |      |      |          |
271    ----------------------------------------------------------------------------
272    n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
273               | HTC_RAW_STREAMS      |      |      |          |
274               | (downlink)           |      |      |          |
275    ----------------------------------------------------------------------------
276    diag       | none (raw CE)        | CE 7 | t<>h | 4 bytes  | Diag Window
277               |                      |      |      |          | infrequent
278    ============================================================================
279  */
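
/*
 * Illustrative only (kept out of the build): how a client queries the
 * host-side mapping at runtime.  hif_map_service_to_pipe() is the lookup
 * API already used later in this file (see hif_ce_bus_early_suspend()).
 */
#if 0
static void example_service_lookup(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;
	int status;

	/* for target_service_to_ce_map_wlan this yields ul_pipe 4, dl_pipe 1 */
	status = hif_map_service_to_pipe(&scn->osc, HTT_DATA_MSG_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
}
#endif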
280 
281 /*
282  * Map from service/endpoint to Copy Engine.
283  * This table is derived from the CE_PCI TABLE, above.
284  * It is passed to the Target at startup for use by firmware.
285  */
286 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
287 	{
288 		WMI_DATA_VO_SVC,
289 		PIPEDIR_OUT,    /* out = UL = host -> target */
290 		3,
291 	},
292 	{
293 		WMI_DATA_VO_SVC,
294 		PIPEDIR_IN,     /* in = DL = target -> host */
295 		2,
296 	},
297 	{
298 		WMI_DATA_BK_SVC,
299 		PIPEDIR_OUT,    /* out = UL = host -> target */
300 		3,
301 	},
302 	{
303 		WMI_DATA_BK_SVC,
304 		PIPEDIR_IN,     /* in = DL = target -> host */
305 		2,
306 	},
307 	{
308 		WMI_DATA_BE_SVC,
309 		PIPEDIR_OUT,    /* out = UL = host -> target */
310 		3,
311 	},
312 	{
313 		WMI_DATA_BE_SVC,
314 		PIPEDIR_IN,     /* in = DL = target -> host */
315 		2,
316 	},
317 	{
318 		WMI_DATA_VI_SVC,
319 		PIPEDIR_OUT,    /* out = UL = host -> target */
320 		3,
321 	},
322 	{
323 		WMI_DATA_VI_SVC,
324 		PIPEDIR_IN,     /* in = DL = target -> host */
325 		2,
326 	},
327 	{
328 		WMI_CONTROL_SVC,
329 		PIPEDIR_OUT,    /* out = UL = host -> target */
330 		3,
331 	},
332 	{
333 		WMI_CONTROL_SVC,
334 		PIPEDIR_IN,     /* in = DL = target -> host */
335 		2,
336 	},
337 	{
338 		HTC_CTRL_RSVD_SVC,
339 		PIPEDIR_OUT,    /* out = UL = host -> target */
340 		0,              /* could be moved to 3 (share with WMI) */
341 	},
342 	{
343 		HTC_CTRL_RSVD_SVC,
344 		PIPEDIR_IN,     /* in = DL = target -> host */
345 		2,
346 	},
347 	{
348 		HTC_RAW_STREAMS_SVC, /* not currently used */
349 		PIPEDIR_OUT,    /* out = UL = host -> target */
350 		0,
351 	},
352 	{
353 		HTC_RAW_STREAMS_SVC, /* not currently used */
354 		PIPEDIR_IN,     /* in = DL = target -> host */
355 		2,
356 	},
357 	{
358 		HTT_DATA_MSG_SVC,
359 		PIPEDIR_OUT,    /* out = UL = host -> target */
360 		4,
361 	},
362 	{
363 		HTT_DATA_MSG_SVC,
364 		PIPEDIR_IN,     /* in = DL = target -> host */
365 		1,
366 	},
367 	{
368 		WDI_IPA_TX_SVC,
369 		PIPEDIR_OUT,    /* out = UL = host -> target */
370 		5,
371 	},
372 #if defined(QCA_WIFI_3_0_ADRASTEA)
373 	{
374 		HTT_DATA2_MSG_SVC,
375 		PIPEDIR_IN,    /* in = DL = target -> host */
376 		9,
377 	},
378 	{
379 		HTT_DATA3_MSG_SVC,
380 		PIPEDIR_IN,    /* in = DL = target -> host */
381 		10,
382 	},
383 	{
384 		PACKET_LOG_SVC,
385 		PIPEDIR_IN,    /* in = DL = target -> host */
386 		11,
387 	},
388 #endif
389 	/* (Additions here) */
390 
391 	{                       /* Must be last */
392 		0,
393 		0,
394 		0,
395 	},
396 };
397 
398 /* PIPEDIR_OUT = HOST to Target */
399 /* PIPEDIR_IN  = TARGET to HOST */
400 #if (defined(QCA_WIFI_QCA8074))
401 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
402 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
403 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
404 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
405 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
406 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
407 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
408 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
409 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
410 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
411 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
412 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
413 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
414 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
415 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
416 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
417 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
418 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
419 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
420 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
421 	/* (Additions here) */
422 	{ 0, 0, 0, },
423 };
424 #else
425 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
426 };
427 #endif
428 
429 #if (defined(QCA_WIFI_QCA8074V2))
430 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
431 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
432 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
433 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
434 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
435 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
436 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
437 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
438 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
439 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
440 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
441 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
442 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
443 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
444 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
445 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
446 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
447 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
448 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
449 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
450 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
451 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
452 	/* (Additions here) */
453 	{ 0, 0, 0, },
454 };
455 #else
456 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
457 };
458 #endif
459 
460 #if (defined(QCA_WIFI_QCA6018))
461 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
462 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
463 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
464 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
465 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
466 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
467 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
468 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
469 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
470 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
471 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
472 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
473 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
474 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
475 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
476 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
477 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
478 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
479 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
480 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
481 	/* (Additions here) */
482 	{ 0, 0, 0, },
483 };
484 #else
485 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
486 };
487 #endif
488 
489 #if (defined(QCA_WIFI_QCN9000))
490 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
491 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
492 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
493 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
494 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
495 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
496 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
497 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
498 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
499 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
500 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
501 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
502 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
503 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
504 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
505 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
506 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
507 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
508 	/* (Additions here) */
509 	{ 0, 0, 0, },
510 };
511 #else
512 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
513 };
514 #endif
515 
516 #if (defined(QCA_WIFI_QCA5018))
517 static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
518 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
519 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
520 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
521 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
522 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
523 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
524 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
525 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
526 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
527 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
528 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
529 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
530 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
531 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
532 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
533 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
534 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
535 	/* (Additions here) */
536 	{ 0, 0, 0, },
537 };
538 #else
539 static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
540 };
541 #endif
542 
543 /* PIPEDIR_OUT = HOST to Target */
544 /* PIPEDIR_IN  = TARGET to HOST */
545 #ifdef QCN7605_SUPPORT
546 static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
547 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
548 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
549 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
550 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
551 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
552 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
553 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
554 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
555 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
556 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
557 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
558 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
559 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
560 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
561 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
562 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
563 	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
564 #ifdef IPA_OFFLOAD
565 	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
566 #else
567 	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
568 #endif
569 	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
570 	/* (Additions here) */
571 	{ 0, 0, 0, },
572 };
573 #endif
574 
575 #if (defined(QCA_WIFI_QCA6290))
576 #ifdef QCA_6290_AP_MODE
577 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
578 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
579 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
580 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
581 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
582 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
583 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
584 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
585 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
586 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
587 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
588 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
589 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
590 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
591 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
592 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
593 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
594 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
595 	/* (Additions here) */
596 	{ 0, 0, 0, },
597 };
598 #else
599 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
600 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
601 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
602 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
603 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
604 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
605 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
606 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
607 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
608 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
609 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
610 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
611 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
612 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
613 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
614 	/* (Additions here) */
615 	{ 0, 0, 0, },
616 };
617 #endif
618 #else
619 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
620 };
621 #endif
622 
623 #if (defined(QCA_WIFI_QCA6390))
624 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
625 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
626 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
627 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
628 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
629 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
630 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
631 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
632 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
633 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
634 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
635 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
636 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
637 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
638 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
639 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
640 	/* (Additions here) */
641 	{ 0, 0, 0, },
642 };
643 #else
644 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
645 };
646 #endif
647 
648 static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
649 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
650 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
651 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
652 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
653 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
654 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
655 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
656 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
657 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
658 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
659 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
660 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
661 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
662 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
663 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
664 	/* (Additions here) */
665 	{ 0, 0, 0, },
666 };
667 
668 #if (defined(QCA_WIFI_QCA6750))
669 static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
670 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
671 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
672 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
673 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
674 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
675 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
676 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
677 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
678 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
679 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
680 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
681 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
682 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
683 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
684 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
685 #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
686 	{ WMI_CONTROL_DIAG_SVC, PIPEDIR_IN, 7, },
687 #endif
688 	/* (Additions here) */
689 	{ 0, 0, 0, },
690 };
691 #else
692 static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
693 };
694 #endif
695 
696 static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
697 	{
698 		WMI_DATA_VO_SVC,
699 		PIPEDIR_OUT,    /* out = UL = host -> target */
700 		3,
701 	},
702 	{
703 		WMI_DATA_VO_SVC,
704 		PIPEDIR_IN,     /* in = DL = target -> host */
705 		2,
706 	},
707 	{
708 		WMI_DATA_BK_SVC,
709 		PIPEDIR_OUT,    /* out = UL = host -> target */
710 		3,
711 	},
712 	{
713 		WMI_DATA_BK_SVC,
714 		PIPEDIR_IN,     /* in = DL = target -> host */
715 		2,
716 	},
717 	{
718 		WMI_DATA_BE_SVC,
719 		PIPEDIR_OUT,    /* out = UL = host -> target */
720 		3,
721 	},
722 	{
723 		WMI_DATA_BE_SVC,
724 		PIPEDIR_IN,     /* in = DL = target -> host */
725 		2,
726 	},
727 	{
728 		WMI_DATA_VI_SVC,
729 		PIPEDIR_OUT,    /* out = UL = host -> target */
730 		3,
731 	},
732 	{
733 		WMI_DATA_VI_SVC,
734 		PIPEDIR_IN,     /* in = DL = target -> host */
735 		2,
736 	},
737 	{
738 		WMI_CONTROL_SVC,
739 		PIPEDIR_OUT,    /* out = UL = host -> target */
740 		3,
741 	},
742 	{
743 		WMI_CONTROL_SVC,
744 		PIPEDIR_IN,     /* in = DL = target -> host */
745 		2,
746 	},
747 	{
748 		HTC_CTRL_RSVD_SVC,
749 		PIPEDIR_OUT,    /* out = UL = host -> target */
750 		0,              /* could be moved to 3 (share with WMI) */
751 	},
752 	{
753 		HTC_CTRL_RSVD_SVC,
754 		PIPEDIR_IN,     /* in = DL = target -> host */
755 		1,
756 	},
757 	{
758 		HTC_RAW_STREAMS_SVC, /* not currently used */
759 		PIPEDIR_OUT,    /* out = UL = host -> target */
760 		0,
761 	},
762 	{
763 		HTC_RAW_STREAMS_SVC, /* not currently used */
764 		PIPEDIR_IN,     /* in = DL = target -> host */
765 		1,
766 	},
767 	{
768 		HTT_DATA_MSG_SVC,
769 		PIPEDIR_OUT,    /* out = UL = host -> target */
770 		4,
771 	},
772 #ifdef WLAN_FEATURE_FASTPATH
773 	{
774 		HTT_DATA_MSG_SVC,
775 		PIPEDIR_IN,     /* in = DL = target -> host */
776 		5,
777 	},
778 #else /* WLAN_FEATURE_FASTPATH */
779 	{
780 		HTT_DATA_MSG_SVC,
781 		PIPEDIR_IN,  /* in = DL = target -> host */
782 		1,
783 	},
784 #endif /* WLAN_FEATURE_FASTPATH */
785 
786 	/* (Additions here) */
787 
788 	{                       /* Must be last */
789 		0,
790 		0,
791 		0,
792 	},
793 };
794 
795 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
796 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
797 
798 #ifdef WLAN_FEATURE_EPPING
799 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
800 	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
801 	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
802 	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
803 	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
804 	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
805 	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
806 	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
807 	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
808 	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
809 	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
810 	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
811 	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
812 	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
813 	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
814 	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
815 	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
816 	{0, 0, 0,},             /* Must be last */
817 };
818 
819 void hif_select_epping_service_to_pipe_map(struct service_to_pipe
820 					   **tgt_svc_map_to_use,
821 					   uint32_t *sz_tgt_svc_map_to_use)
822 {
823 	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
824 	*sz_tgt_svc_map_to_use =
825 			sizeof(target_service_to_ce_map_wlan_epping);
826 }
827 #endif
828 
829 #ifdef QCN7605_SUPPORT
830 static inline
831 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
832 			       uint32_t *sz_tgt_svc_map_to_use)
833 {
834 	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
835 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
836 }
837 #else
838 static inline
839 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
840 			       uint32_t *sz_tgt_svc_map_to_use)
841 {
842 	hif_err("QCN7605 not supported");
843 }
844 #endif
845 
846 static void hif_select_service_to_pipe_map(struct hif_softc *scn,
847 				    struct service_to_pipe **tgt_svc_map_to_use,
848 				    uint32_t *sz_tgt_svc_map_to_use)
849 {
850 	uint32_t mode = hif_get_conparam(scn);
851 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
852 	struct hif_target_info *tgt_info = &scn->target_info;
853 
854 	if (QDF_IS_EPPING_ENABLED(mode)) {
855 		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
856 						      sz_tgt_svc_map_to_use);
857 	} else {
858 		switch (tgt_info->target_type) {
859 		default:
860 			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
861 			*sz_tgt_svc_map_to_use =
862 				sizeof(target_service_to_ce_map_wlan);
863 			break;
864 		case TARGET_TYPE_QCN7605:
865 			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
866 						  sz_tgt_svc_map_to_use);
867 			break;
868 		case TARGET_TYPE_AR900B:
869 		case TARGET_TYPE_QCA9984:
870 		case TARGET_TYPE_IPQ4019:
871 		case TARGET_TYPE_QCA9888:
872 		case TARGET_TYPE_AR9888:
873 		case TARGET_TYPE_AR9888V2:
874 			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
875 			*sz_tgt_svc_map_to_use =
876 				sizeof(target_service_to_ce_map_ar900b);
877 			break;
878 		case TARGET_TYPE_QCA6290:
879 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
880 			*sz_tgt_svc_map_to_use =
881 				sizeof(target_service_to_ce_map_qca6290);
882 			break;
883 		case TARGET_TYPE_QCA6390:
884 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
885 			*sz_tgt_svc_map_to_use =
886 				sizeof(target_service_to_ce_map_qca6390);
887 			break;
888 		case TARGET_TYPE_QCA6490:
889 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
890 			*sz_tgt_svc_map_to_use =
891 				sizeof(target_service_to_ce_map_qca6490);
892 			break;
893 		case TARGET_TYPE_QCA6750:
894 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6750;
895 			*sz_tgt_svc_map_to_use =
896 				sizeof(target_service_to_ce_map_qca6750);
897 			break;
898 		case TARGET_TYPE_QCA8074:
899 			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
900 			*sz_tgt_svc_map_to_use =
901 				sizeof(target_service_to_ce_map_qca8074);
902 			break;
903 		case TARGET_TYPE_QCA8074V2:
904 			*tgt_svc_map_to_use =
905 				target_service_to_ce_map_qca8074_v2;
906 			*sz_tgt_svc_map_to_use =
907 				sizeof(target_service_to_ce_map_qca8074_v2);
908 			break;
909 		case TARGET_TYPE_QCA6018:
910 			*tgt_svc_map_to_use =
911 				target_service_to_ce_map_qca6018;
912 			*sz_tgt_svc_map_to_use =
913 				sizeof(target_service_to_ce_map_qca6018);
914 			break;
915 		case TARGET_TYPE_QCN9000:
916 			*tgt_svc_map_to_use =
917 				target_service_to_ce_map_qcn9000;
918 			*sz_tgt_svc_map_to_use =
919 				sizeof(target_service_to_ce_map_qcn9000);
920 			break;
921 		case TARGET_TYPE_QCA5018:
922 		case TARGET_TYPE_QCN9100:
923 			*tgt_svc_map_to_use =
924 				target_service_to_ce_map_qca5018;
925 			*sz_tgt_svc_map_to_use =
926 				sizeof(target_service_to_ce_map_qca5018);
927 			break;
928 		}
929 	}
930 	hif_state->tgt_svc_map = *tgt_svc_map_to_use;
931 	hif_state->sz_tgt_svc_map = *sz_tgt_svc_map_to_use /
932 					sizeof(struct service_to_pipe);
933 }
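
/*
 * Example resolution (per the tables above): for TARGET_TYPE_QCA6490,
 * WMI_CONTROL_SVC resolves to pipe 3 for PIPEDIR_OUT and pipe 2 for
 * PIPEDIR_IN, and hif_state->sz_tgt_svc_map becomes the number of
 * entries in target_service_to_ce_map_qca6490.
 */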
934 
935 /**
936  * ce_mark_datapath() - mark a CE that serves the HTT datapath
937  * @ce_state: pointer to the state context of the CE
938  *
939  * Description:
940  *   Sets the htt_rx_data (PIPEDIR_IN) or htt_tx_data (PIPEDIR_OUT)
941  *   attribute of the state structure if the CE serves one of the
942  *   HTT DATA services.
943  *
944  * Return:
945  *  true if the CE serves an HTT DATA service, false otherwise
946  */
947 static bool ce_mark_datapath(struct CE_state *ce_state)
948 {
949 	struct service_to_pipe *svc_map;
950 	uint32_t map_sz, map_len;
951 	int    i;
952 	bool   rc = false;
953 
954 	if (ce_state) {
955 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
956 					       &map_sz);
957 
958 		map_len = map_sz / sizeof(struct service_to_pipe);
959 		for (i = 0; i < map_len; i++) {
960 			if ((svc_map[i].pipenum == ce_state->id) &&
961 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
962 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
963 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
964 				/* HTT CEs are unidirectional */
965 				if (svc_map[i].pipedir == PIPEDIR_IN)
966 					ce_state->htt_rx_data = true;
967 				else
968 					ce_state->htt_tx_data = true;
969 				rc = true;
970 			}
971 		}
972 	}
973 	return rc;
974 }
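
/*
 * With the default target_service_to_ce_map_wlan above, ce_mark_datapath()
 * marks CE 1 (HTT_DATA_MSG_SVC, PIPEDIR_IN) as htt_rx_data and CE 4
 * (HTT_DATA_MSG_SVC, PIPEDIR_OUT) as htt_tx_data.
 */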
975 
976 /**
977  * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
978  * @ce_id: ce in question
979  * @ring: ring state being examined
980  * @type: "src_ring" or "dest_ring" string for identifying the ring
981  *
982  * Warns on non-zero index values.
983  * Causes a kernel panic if the ring is not empty during initialization.
984  */
985 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
986 					 char *type)
987 {
988 	if (ring->write_index != 0 || ring->sw_index != 0)
989 		hif_err("ce %d, %s, initial sw_index = %d, initial write_index = %d",
990 			ce_id, type, ring->sw_index, ring->write_index);
991 	if (ring->write_index != ring->sw_index)
992 		QDF_BUG(0);
993 }
994 
995 #ifdef IPA_OFFLOAD
996 /**
997  * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
998  * @scn: softc instance
999  * @CE_id: ce in question
1000  * @base_addr: pointer to copyengine ring base address
1001  * @ce_ring: copyengine instance
1002  * @nentries: number of entries to be allocated
1003  * @desc_size: ce desc size
1004  *
1005  * Return: QDF_STATUS_SUCCESS - for success
1006  */
1007 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1008 				     qdf_dma_addr_t *base_addr,
1009 				     struct CE_ring_state *ce_ring,
1010 				     unsigned int nentries, uint32_t desc_size)
1011 {
1012 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
1013 	    !ce_srng_based(scn)) {
1014 		if (!scn->ipa_ce_ring) {
1015 			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
1016 				scn->qdf_dev,
1017 				nentries * desc_size + CE_DESC_RING_ALIGN);
1018 			if (!scn->ipa_ce_ring) {
1019 				hif_err(
1020 				"Failed to allocate memory for IPA ce ring");
1021 				return QDF_STATUS_E_NOMEM;
1022 			}
1023 		}
1024 		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
1025 						&scn->ipa_ce_ring->mem_info);
1026 		ce_ring->base_addr_owner_space_unaligned =
1027 						scn->ipa_ce_ring->vaddr;
1028 	} else {
1029 		ce_ring->base_addr_owner_space_unaligned =
1030 			hif_mem_alloc_consistent_unaligned
1031 					(scn,
1032 					 (nentries * desc_size +
1033 					  CE_DESC_RING_ALIGN),
1034 					 base_addr,
1035 					 ce_ring->hal_ring_type,
1036 					 &ce_ring->is_ring_prealloc);
1037 
1038 		if (!ce_ring->base_addr_owner_space_unaligned) {
1039 			hif_err("Failed to allocate DMA memory for ce ring id: %u",
1040 			       CE_id);
1041 			return QDF_STATUS_E_NOMEM;
1042 		}
1043 	}
1044 	return QDF_STATUS_SUCCESS;
1045 }
1046 
1047 /**
1048  * ce_free_desc_ring() - Frees copyengine descriptor ring
1049  * @scn: softc instance
1050  * @CE_id: ce in question
1051  * @ce_ring: copyengine instance
1052  * @desc_size: ce desc size
1053  *
1054  * Return: None
1055  */
1056 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1057 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
1058 {
1059 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
1060 	    !ce_srng_based(scn)) {
1061 		if (scn->ipa_ce_ring) {
1062 			qdf_mem_shared_mem_free(scn->qdf_dev,
1063 						scn->ipa_ce_ring);
1064 			scn->ipa_ce_ring = NULL;
1065 		}
1066 		ce_ring->base_addr_owner_space_unaligned = NULL;
1067 	} else {
1068 		hif_mem_free_consistent_unaligned
1069 			(scn,
1070 			 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
1071 			 ce_ring->base_addr_owner_space_unaligned,
1072 			 ce_ring->base_addr_CE_space, 0,
1073 			 ce_ring->is_ring_prealloc);
1074 		ce_ring->base_addr_owner_space_unaligned = NULL;
1075 	}
1076 }
1077 #else
1078 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1079 				     qdf_dma_addr_t *base_addr,
1080 				     struct CE_ring_state *ce_ring,
1081 				     unsigned int nentries, uint32_t desc_size)
1082 {
1083 	ce_ring->base_addr_owner_space_unaligned =
1084 			hif_mem_alloc_consistent_unaligned
1085 					(scn,
1086 					 (nentries * desc_size +
1087 					  CE_DESC_RING_ALIGN),
1088 					 base_addr,
1089 					 ce_ring->hal_ring_type,
1090 					 &ce_ring->is_ring_prealloc);
1091 
1092 	if (!ce_ring->base_addr_owner_space_unaligned) {
1093 		hif_err("Failed to allocate DMA memory for ce ring id: %u",
1094 		       CE_id);
1095 		return QDF_STATUS_E_NOMEM;
1096 	}
1097 	return QDF_STATUS_SUCCESS;
1098 }
1099 
1100 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1101 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
1102 {
1103 	hif_mem_free_consistent_unaligned
1104 		(scn,
1105 		 ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
1106 		 ce_ring->base_addr_owner_space_unaligned,
1107 		 ce_ring->base_addr_CE_space, 0,
1108 		 ce_ring->is_ring_prealloc);
1109 	ce_ring->base_addr_owner_space_unaligned = NULL;
1110 }
1111 #endif /* IPA_OFFLOAD */
1112 
1113 /*
1114  * TODO: Need to explore the possibility of having this as part of a
1115  * target context instead of a global array.
1116  */
1117 static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);
1118 
1119 void ce_service_register_module(enum ce_target_type target_type,
1120 				struct ce_ops* (*ce_attach)(void))
1121 {
1122 	if (target_type < CE_MAX_TARGET_TYPE)
1123 		ce_attach_register[target_type] = ce_attach;
1124 }
1125 
1126 qdf_export_symbol(ce_service_register_module);
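
/*
 * Illustrative only: a CE service implementation registers its ops
 * constructor through the hook above, typically from its module init path.
 * The constructor names below are assumptions for this sketch, not symbols
 * defined in this file.
 */
#if 0
	ce_service_register_module(CE_SVC_SRNG, ce_services_srng);
	ce_service_register_module(CE_SVC_LEGACY, ce_services_legacy);
#endif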
1127 
1128 /**
1129  * ce_srng_based() - Does this target use srng
1130  * @scn: pointer to the hif context
1131  *
1132  * Description:
1133  *   returns true if the target is SRNG based
1134  *
1135  * Return:
1136  *  true  - the target is SRNG based
1137  *  false - otherwise
1138  */
1139 bool ce_srng_based(struct hif_softc *scn)
1140 {
1141 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1142 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1143 
1144 	switch (tgt_info->target_type) {
1145 	case TARGET_TYPE_QCA8074:
1146 	case TARGET_TYPE_QCA8074V2:
1147 	case TARGET_TYPE_QCA6290:
1148 	case TARGET_TYPE_QCA6390:
1149 	case TARGET_TYPE_QCA6490:
1150 	case TARGET_TYPE_QCA6750:
1151 	case TARGET_TYPE_QCA6018:
1152 	case TARGET_TYPE_QCN9000:
1153 	case TARGET_TYPE_QCN9100:
1154 	case TARGET_TYPE_QCA5018:
1155 		return true;
1156 	default:
1157 		return false;
1158 	}
1159 	return false;
1160 }
1161 qdf_export_symbol(ce_srng_based);
1162 
1163 #ifdef QCA_WIFI_SUPPORT_SRNG
1164 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1165 {
1166 	struct ce_ops *ops = NULL;
1167 
1168 	if (ce_srng_based(scn)) {
1169 		if (ce_attach_register[CE_SVC_SRNG])
1170 			ops = ce_attach_register[CE_SVC_SRNG]();
1171 	} else if (ce_attach_register[CE_SVC_LEGACY]) {
1172 		ops = ce_attach_register[CE_SVC_LEGACY]();
1173 	}
1174 
1175 	return ops;
1176 }
1177 
1178 
1179 #else	/* QCA_WIFI_SUPPORT_SRNG */
1180 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1181 {
1182 	if (ce_attach_register[CE_SVC_LEGACY])
1183 		return ce_attach_register[CE_SVC_LEGACY]();
1184 
1185 	return NULL;
1186 }
1187 #endif /* QCA_WIFI_SUPPORT_SRNG */
1188 
1189 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
1190 		struct pld_shadow_reg_v2_cfg **shadow_config,
1191 		int *num_shadow_registers_configured) {
1192 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1193 
1194 	hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
1195 			scn, shadow_config, num_shadow_registers_configured);
1196 
1199 
1200 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
1201 						uint8_t ring_type)
1202 {
1203 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1204 
1205 	return hif_state->ce_services->ce_get_desc_size(ring_type);
1206 }
1207 
1208 #ifdef QCA_WIFI_SUPPORT_SRNG
1209 static inline int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
1210 {
1211 	switch (ce_ring_type) {
1212 	case CE_RING_SRC:
1213 		return CE_SRC;
1214 	case CE_RING_DEST:
1215 		return CE_DST;
1216 	case CE_RING_STATUS:
1217 		return CE_DST_STATUS;
1218 	default:
1219 		return -EINVAL;
1220 	}
1221 }
1222 #else
1223 static int32_t ce_ring_type_to_hal_ring_type(uint32_t ce_ring_type)
1224 {
1225 	return 0;
1226 }
1227 #endif
1228 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
1229 		uint8_t ring_type, uint32_t nentries)
1230 {
1231 	uint32_t ce_nbytes;
1232 	char *ptr;
1233 	qdf_dma_addr_t base_addr;
1234 	struct CE_ring_state *ce_ring;
1235 	uint32_t desc_size;
1236 	struct hif_softc *scn = CE_state->scn;
1237 
1238 	ce_nbytes = sizeof(struct CE_ring_state)
1239 		+ (nentries * sizeof(void *));
1240 	ptr = qdf_mem_malloc(ce_nbytes);
1241 	if (!ptr)
1242 		return NULL;
1243 
1244 	ce_ring = (struct CE_ring_state *)ptr;
1245 	ptr += sizeof(struct CE_ring_state);
1246 	ce_ring->nentries = nentries;
1247 	ce_ring->nentries_mask = nentries - 1;
1248 
1249 	ce_ring->low_water_mark_nentries = 0;
1250 	ce_ring->high_water_mark_nentries = nentries;
1251 	ce_ring->per_transfer_context = (void **)ptr;
1252 	ce_ring->hal_ring_type = ce_ring_type_to_hal_ring_type(ring_type);
1253 
1254 	desc_size = ce_get_desc_size(scn, ring_type);
1255 
1256 	/* Legacy platforms that do not support
1257 	 * cache-coherent DMA are not supported
1258 	 */
1259 	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
1260 			       ce_ring, nentries,
1261 			       desc_size) !=
1262 	    QDF_STATUS_SUCCESS) {
1263 		hif_err("ring has no DMA mem");
1264 		qdf_mem_free(ce_ring);
1265 		return NULL;
1266 	}
1267 	ce_ring->base_addr_CE_space_unaligned = base_addr;
1268 
1269 	/* Initialize the memory to 0 to prevent
1270 	 * garbage data from crashing the system
1271 	 * during firmware download
1272 	 */
1273 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
1274 			nentries * desc_size +
1275 			CE_DESC_RING_ALIGN);
1276 
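	/*
	 * Align both the device-visible and CPU-visible base addresses to
	 * CE_DESC_RING_ALIGN with the usual (addr + align - 1) & ~(align - 1)
	 * round-up; e.g. with an alignment of 8, an unaligned base of 0x1003
	 * becomes (0x1003 + 7) & ~7 = 0x1008.  The same offset is applied to
	 * both addresses so the two views stay in sync.
	 */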
1277 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
1278 
1279 		ce_ring->base_addr_CE_space =
1280 			(ce_ring->base_addr_CE_space_unaligned +
1281 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
1282 
1283 		ce_ring->base_addr_owner_space = (void *)
1284 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
1285 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
1286 	} else {
1287 		ce_ring->base_addr_CE_space =
1288 				ce_ring->base_addr_CE_space_unaligned;
1289 		ce_ring->base_addr_owner_space =
1290 				ce_ring->base_addr_owner_space_unaligned;
1291 	}
1292 
1293 	return ce_ring;
1294 }
1295 
1296 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
1297 			uint32_t ce_id, struct CE_ring_state *ring,
1298 			struct CE_attr *attr)
1299 {
1300 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1301 
1302 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
1303 					      ring, attr);
1304 }
1305 
1306 int hif_ce_bus_early_suspend(struct hif_softc *scn)
1307 {
1308 	uint8_t ul_pipe, dl_pipe;
1309 	int ce_id, status, ul_is_polled, dl_is_polled;
1310 	struct CE_state *ce_state;
1311 
1312 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
1313 					 &ul_pipe, &dl_pipe,
1314 					 &ul_is_polled, &dl_is_polled);
1315 	if (status) {
1316 		hif_err("pipe_mapping failure");
1317 		return status;
1318 	}
1319 
1320 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1321 		if (ce_id == ul_pipe)
1322 			continue;
1323 		if (ce_id == dl_pipe)
1324 			continue;
1325 
1326 		ce_state = scn->ce_id_to_state[ce_id];
1327 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1328 		if (ce_state->state == CE_RUNNING)
1329 			ce_state->state = CE_PAUSED;
1330 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1331 	}
1332 
1333 	return status;
1334 }
1335 
1336 int hif_ce_bus_late_resume(struct hif_softc *scn)
1337 {
1338 	int ce_id;
1339 	struct CE_state *ce_state;
1340 	int write_index = 0;
1341 	bool index_updated;
1342 
1343 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1344 		ce_state = scn->ce_id_to_state[ce_id];
1345 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1346 		if (ce_state->state == CE_PENDING) {
1347 			write_index = ce_state->src_ring->write_index;
1348 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
1349 					write_index);
1350 			ce_state->state = CE_RUNNING;
1351 			index_updated = true;
1352 		} else {
1353 			index_updated = false;
1354 		}
1355 
1356 		if (ce_state->state == CE_PAUSED)
1357 			ce_state->state = CE_RUNNING;
1358 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1359 
1360 		if (index_updated)
1361 			hif_record_ce_desc_event(scn, ce_id,
1362 				RESUME_WRITE_INDEX_UPDATE,
1363 				NULL, NULL, write_index, 0);
1364 	}
1365 
1366 	return 0;
1367 }
1368 
1369 /**
1370  * ce_oom_recovery() - try to recover rx ce from oom condition
1371  * @context: CE_state of the CE with oom rx ring
1372  *
1373  * The executing work will continue to be rescheduled until
1374  * at least 1 descriptor is successfully posted to the rx ring.
1375  *
1376  * Return: none
1377  */
1378 static void ce_oom_recovery(void *context)
1379 {
1380 	struct CE_state *ce_state = context;
1381 	struct hif_softc *scn = ce_state->scn;
1382 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
1383 	struct HIF_CE_pipe_info *pipe_info =
1384 		&ce_softc->pipe_info[ce_state->id];
1385 
1386 	hif_post_recv_buffers_for_pipe(pipe_info);
1387 }
1388 
1389 #ifdef HIF_CE_DEBUG_DATA_BUF
1390 /**
1391  * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
1392  * the CE descriptors.
1393  * Allocate HIF_CE_HISTORY_MAX records by CE_DEBUG_MAX_DATA_BUF_SIZE
1394  * @scn: hif scn handle
1395  * ce_id: Copy Engine Id
1396  *
1397  * Return: QDF_STATUS
1398  */
1399 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1400 {
1401 	struct hif_ce_desc_event *event = NULL;
1402 	struct hif_ce_desc_event *hist_ev = NULL;
1403 	uint32_t index = 0;
1404 
1405 	hist_ev =
1406 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1407 
1408 	if (!hist_ev)
1409 		return QDF_STATUS_E_NOMEM;
1410 
1411 	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
1412 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1413 		event = &hist_ev[index];
1414 		event->data =
1415 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
1416 		if (!event->data) {
1417 			hif_err_rl("ce debug data alloc failed");
1418 			return QDF_STATUS_E_NOMEM;
1419 		}
1420 	}
1421 	return QDF_STATUS_SUCCESS;
1422 }
1423 
1424 /**
1425  * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
1426  * the CE descriptors.
1427  * @scn: hif scn handle
1428  * @ce_id: Copy Engine Id
1429  *
1430  * Return: None
1431  */
1432 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1433 {
1434 	struct hif_ce_desc_event *event = NULL;
1435 	struct hif_ce_desc_event *hist_ev = NULL;
1436 	uint32_t index = 0;
1437 
1438 	hist_ev =
1439 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1440 
1441 	if (!hist_ev)
1442 		return;
1443 
1444 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1445 		event = &hist_ev[index];
1446 		if (event->data)
1447 			qdf_mem_free(event->data);
1448 		event->data = NULL;
1449 		event = NULL;
1450 	}
1452 }
1453 #endif /* HIF_CE_DEBUG_DATA_BUF */
1454 
1455 #ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
1456 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1457 struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];
1458 
1459 /**
1460  * alloc_mem_ce_debug_history() - Allocate CE descriptor history
1461  * @scn: hif scn handle
1462  * @ce_id: Copy Engine Id
1463  * @src_nentries: source ce ring entries
1464  * Return: QDF_STATUS
1465  */
1466 static QDF_STATUS
1467 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
1468 			   uint32_t src_nentries)
1469 {
1470 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1471 
1472 	ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
1473 	ce_hist->enable[ce_id] = 1;
1474 
1475 	if (src_nentries)
1476 		alloc_mem_ce_debug_hist_data(scn, ce_id);
1477 	else
1478 		ce_hist->data_enable[ce_id] = false;
1479 
1480 	return QDF_STATUS_SUCCESS;
1481 }
1482 
1483 /**
1484  * free_mem_ce_debug_history() - Free CE descriptor history
1485  * @scn: hif scn handle
1486  * @ce_id: Copy Engine Id
1487  *
1488  * Return: None
1489  */
1490 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
1491 {
1492 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1493 
1494 	ce_hist->enable[ce_id] = 0;
1495 	if (ce_hist->data_enable[ce_id]) {
1496 		ce_hist->data_enable[ce_id] = false;
1497 		free_mem_ce_debug_hist_data(scn, ce_id);
1498 	}
1499 	ce_hist->hist_ev[ce_id] = NULL;
1500 }
1501 #else
1502 static inline QDF_STATUS
1503 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1504 			   uint32_t src_nentries)
1505 {
1506 	return QDF_STATUS_SUCCESS;
1507 }
1508 
1509 static inline void
1510 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
1511 #endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
1512 #else
1513 #if defined(HIF_CE_DEBUG_DATA_BUF)
1514 
1515 static QDF_STATUS
1516 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1517 			   uint32_t src_nentries)
1518 {
1519 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
1520 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
1521 
1522 	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
1523 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
1524 		return QDF_STATUS_E_NOMEM;
1525 	} else {
1526 		scn->hif_ce_desc_hist.enable[CE_id] = 1;
1527 		return QDF_STATUS_SUCCESS;
1528 	}
1529 }
1530 
1531 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
1532 {
1533 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1534 	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];
1535 
1536 	if (!hist_ev)
1537 		return;
1538 
1539 	if (ce_hist->data_enable[CE_id]) {
1540 		ce_hist->data_enable[CE_id] = false;
1541 		free_mem_ce_debug_hist_data(scn, CE_id);
1542 	}
1543 
1544 	ce_hist->enable[CE_id] = 0;
1545 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
1546 	ce_hist->hist_ev[CE_id] = NULL;
1547 }
1548 
1549 #else
1550 
1551 static inline QDF_STATUS
1552 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1553 			   uint32_t src_nentries)
1554 {
1555 	return QDF_STATUS_SUCCESS;
1556 }
1557 
1558 static inline void
1559 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
1560 #endif /* HIF_CE_DEBUG_DATA_BUF */
1561 #endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */
1562 
1563 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1564 /**
1565  * reset_ce_debug_history() - reset the index and ce id used for dumping the
1566  * CE records on the console using sysfs.
1567  * @scn: hif scn handle
1568  *
1569  * Return:
1570  */
1571 static inline void reset_ce_debug_history(struct hif_softc *scn)
1572 {
1573 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1574 	/* Initialise the CE debug history sysfs interface inputs ce_id and
1575 	 * index. Disable data storing
1576 	 */
1577 	ce_hist->hist_index = 0;
1578 	ce_hist->hist_id = 0;
1579 }
1580 #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1581 static inline void reset_ce_debug_history(struct hif_softc *scn) { }
1582 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1583 
1584 void ce_enable_polling(void *cestate)
1585 {
1586 	struct CE_state *CE_state = (struct CE_state *)cestate;
1587 
1588 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1589 		CE_state->timer_inited = true;
1590 }
1591 
1592 void ce_disable_polling(void *cestate)
1593 {
1594 	struct CE_state *CE_state = (struct CE_state *)cestate;
1595 
1596 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1597 		CE_state->timer_inited = false;
1598 }
1599 
1600 /*
1601  * Initialize a Copy Engine based on caller-supplied attributes.
1602  * This may be called once to initialize both source and destination
1603  * rings or it may be called twice for separate source and destination
1604  * initialization. It may be that only one side or the other is
1605  * initialized by software/firmware.
1606  *
1607  * This should be called during the initialization sequence before
1608  * interrupts are enabled, so we don't have to worry about thread safety.
1609  */
1610 struct CE_handle *ce_init(struct hif_softc *scn,
1611 			  unsigned int CE_id, struct CE_attr *attr)
1612 {
1613 	struct CE_state *CE_state;
1614 	uint32_t ctrl_addr;
1615 	unsigned int nentries;
1616 	bool malloc_CE_state = false;
1617 	bool malloc_src_ring = false;
1618 	int status;
1619 
1620 	QDF_ASSERT(CE_id < scn->ce_count);
1621 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
1622 	CE_state = scn->ce_id_to_state[CE_id];
1623 
1624 	if (!CE_state) {
1625 		CE_state =
1626 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
1627 		if (!CE_state)
1628 			return NULL;
1629 
1630 		malloc_CE_state = true;
1631 		qdf_spinlock_create(&CE_state->ce_index_lock);
1632 
1633 		CE_state->id = CE_id;
1634 		CE_state->ctrl_addr = ctrl_addr;
1635 		CE_state->state = CE_RUNNING;
1636 		CE_state->attr_flags = attr->flags;
1637 	}
1638 	CE_state->scn = scn;
1639 	CE_state->service = ce_engine_service_reg;
1640 
1641 	qdf_atomic_init(&CE_state->rx_pending);
1642 	if (!attr) {
1643 		/* Already initialized; caller wants the handle */
1644 		return (struct CE_handle *)CE_state;
1645 	}
1646 
1647 	if (CE_state->src_sz_max)
1648 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
1649 	else
1650 		CE_state->src_sz_max = attr->src_sz_max;
1651 
1652 	ce_init_ce_desc_event_log(scn, CE_id,
1653 				  attr->src_nentries + attr->dest_nentries);
1654 
1655 	/* source ring setup */
1656 	nentries = attr->src_nentries;
1657 	if (nentries) {
1658 		struct CE_ring_state *src_ring;
1659 
1660 		nentries = roundup_pwr2(nentries);
1661 		if (CE_state->src_ring) {
1662 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
1663 		} else {
1664 			src_ring = CE_state->src_ring =
1665 				ce_alloc_ring_state(CE_state,
1666 						CE_RING_SRC,
1667 						nentries);
1668 			if (!src_ring) {
1669 				/* cannot allocate src ring. If the
1670 				 * CE_state is allocated locally free
1671 				 * CE_State and return error.
1672 				 */
1673 				hif_err("src ring has no mem");
1674 				if (malloc_CE_state) {
1675 					/* allocated CE_state locally */
1676 					qdf_mem_free(CE_state);
1677 					malloc_CE_state = false;
1678 				}
1679 				return NULL;
1680 			}
1681 			/* we can allocate src ring. Mark that the src ring is
1682 			 * allocated locally
1683 			 */
1684 			malloc_src_ring = true;
1685 
1686 			/*
1687 			 * Also allocate a shadow src ring in
1688 			 * regular mem to use for faster access.
1689 			 */
1690 			src_ring->shadow_base_unaligned =
1691 				qdf_mem_malloc(nentries *
1692 					       sizeof(struct CE_src_desc) +
1693 					       CE_DESC_RING_ALIGN);
1694 			if (!src_ring->shadow_base_unaligned)
1695 				goto error_no_dma_mem;
1696 
1697 			src_ring->shadow_base = (struct CE_src_desc *)
1698 				(((size_t) src_ring->shadow_base_unaligned +
1699 				CE_DESC_RING_ALIGN - 1) &
1700 				 ~(CE_DESC_RING_ALIGN - 1));
1701 
1702 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
1703 					       src_ring, attr);
1704 			if (status < 0)
1705 				goto error_target_access;
1706 
1707 			ce_ring_test_initial_indexes(CE_id, src_ring,
1708 						     "src_ring");
1709 		}
1710 	}
1711 
1712 	/* destination ring setup */
1713 	nentries = attr->dest_nentries;
1714 	if (nentries) {
1715 		struct CE_ring_state *dest_ring;
1716 
1717 		nentries = roundup_pwr2(nentries);
1718 		if (CE_state->dest_ring) {
1719 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
1720 		} else {
1721 			dest_ring = CE_state->dest_ring =
1722 				ce_alloc_ring_state(CE_state,
1723 						CE_RING_DEST,
1724 						nentries);
1725 			if (!dest_ring) {
1726 				/* Cannot allocate the dest ring. Any
1727 				 * locally allocated CE_state and src
1728 				 * ring are freed on the error path.
1729 				 */
1730 				hif_err("dest ring has no mem");
1731 				goto error_no_dma_mem;
1732 			}
1733 
1734 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
1735 				      dest_ring, attr);
1736 			if (status < 0)
1737 				goto error_target_access;
1738 
1739 			ce_ring_test_initial_indexes(CE_id, dest_ring,
1740 						     "dest_ring");
1741 
1742 			/* For srng based target, init status ring here */
1743 			if (ce_srng_based(CE_state->scn)) {
1744 				CE_state->status_ring =
1745 					ce_alloc_ring_state(CE_state,
1746 							CE_RING_STATUS,
1747 							nentries);
1748 				if (!CE_state->status_ring) {
1749 					/* Allocation failed; clean up */
1750 					qdf_mem_free(CE_state->dest_ring);
1751 					if (malloc_src_ring) {
1752 						qdf_mem_free
1753 							(CE_state->src_ring);
1754 						CE_state->src_ring = NULL;
1755 						malloc_src_ring = false;
1756 					}
1757 					if (malloc_CE_state) {
1758 						/* allocated CE_state locally */
1759 						scn->ce_id_to_state[CE_id] =
1760 							NULL;
1761 						qdf_mem_free(CE_state);
1762 						malloc_CE_state = false;
1763 					}
1764 
1765 					return NULL;
1766 				}
1767 
1768 				status = ce_ring_setup(scn, CE_RING_STATUS,
1769 					       CE_id, CE_state->status_ring,
1770 					       attr);
1771 				if (status < 0)
1772 					goto error_target_access;
1773 
1774 			}
1775 
1776 			/* epping */
1777 			/* poll timer */
1778 			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
1779 				qdf_timer_init(scn->qdf_dev,
1780 						&CE_state->poll_timer,
1781 						ce_poll_timeout,
1782 						CE_state,
1783 						QDF_TIMER_TYPE_WAKE_APPS);
1784 				ce_enable_polling(CE_state);
1785 				qdf_timer_mod(&CE_state->poll_timer,
1786 						      CE_POLL_TIMEOUT);
1787 			}
1788 		}
1789 	}
1790 
1791 	if (!ce_srng_based(scn)) {
1792 		/* Enable CE error interrupts */
1793 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1794 			goto error_target_access;
1795 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1796 		if (Q_TARGET_ACCESS_END(scn) < 0)
1797 			goto error_target_access;
1798 	}
1799 
1800 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1801 			ce_oom_recovery, CE_state);
1802 
1803 	/* update the htt_data attribute */
1804 	ce_mark_datapath(CE_state);
1805 	scn->ce_id_to_state[CE_id] = CE_state;
1806 
1807 	alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries);
1808 
1809 	return (struct CE_handle *)CE_state;
1810 
1811 error_target_access:
1812 error_no_dma_mem:
1813 	ce_fini((struct CE_handle *)CE_state);
1814 	return NULL;
1815 }
1816 
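/*
 * Example (illustrative sketch; every value below is an assumption
 * chosen for illustration): a minimal ce_init() call for a
 * bidirectional pipe.
 */
#if 0
static struct CE_handle *example_ce_bringup(struct hif_softc *scn)
{
	struct CE_attr attr = {
		.flags = 0,
		.src_nentries = 32,	/* rounded up to a power of 2 */
		.src_sz_max = 2048,	/* max bytes per source transfer */
		.dest_nentries = 32,
	};

	/* CE id 4 is arbitrary here; it must be < scn->ce_count */
	return ce_init(scn, 4, &attr);
}
#endif
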
1817 /**
1818  * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
1819  * @hif_ctx: HIF Context
1820  *
1821  * API to check if polling is enabled on all CEs. Returns true when polling
1822  * is enabled on all CEs.
1823  *
1824  * Return: bool
1825  */
1826 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1827 {
1828 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1829 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1830 	struct CE_attr *attr;
1831 	int id;
1832 
1833 	for (id = 0; id < scn->ce_count; id++) {
1834 		attr = &hif_state->host_ce_config[id];
1835 		if (attr && (attr->dest_nentries) &&
1836 		    !(attr->flags & CE_ATTR_ENABLE_POLL))
1837 			return false;
1838 	}
1839 	return true;
1840 }
1841 qdf_export_symbol(hif_is_polled_mode_enabled);
1842 
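/*
 * Example (illustrative): a caller can use the query above to pick a
 * completion strategy. The wrapper function below is hypothetical.
 */
#if 0
static void example_pick_service_model(struct hif_opaque_softc *hif_ctx)
{
	if (hif_is_polled_mode_enabled(hif_ctx)) {
		/* all CEs are polled: rely on the CE_POLL_TIMEOUT timers */
	} else {
		/* at least one CE is interrupt driven */
	}
}
#endif
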
1843 static int hif_get_pktlog_ce_num(struct hif_softc *scn)
1844 {
1845 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1846 	int id;
1847 
1848 	for (id = 0; id < hif_state->sz_tgt_svc_map; id++) {
1849 		if (hif_state->tgt_svc_map[id].service_id ==  PACKET_LOG_SVC)
1850 			return hif_state->tgt_svc_map[id].pipenum;
1851 	}
1852 	return -EINVAL;
1853 }
1854 
1855 #ifdef WLAN_FEATURE_FASTPATH
1856 /**
1857  * hif_enable_fastpath() - enable fastpath mode
1858  * @hif_ctx: HIF context
1859  *
1860  * For use in data path
1861  *
1862  * Return: void
1863  */
1864 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1865 {
1866 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1867 
1868 	if (ce_srng_based(scn)) {
1869 		hif_warn("srng rings do not support fastpath");
1870 		return;
1871 	}
1872 	hif_debug("Enabling fastpath mode");
1873 	scn->fastpath_mode_on = true;
1874 }
1875 
1876 /**
1877  * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
1878  * @hif_ctx: HIF Context
1879  *
1880  * For use in data path to skip HTC
1881  *
1882  * Return: bool
1883  */
1884 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1885 {
1886 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1887 
1888 	return scn->fastpath_mode_on;
1889 }
1890 
1891 /**
1892  * hif_get_ce_handle - API to get CE handle for FastPath mode
1893  * @hif_ctx: HIF Context
1894  * @id: CopyEngine Id
1895  *
1896  * API to return CE handle for fastpath mode
1897  *
1898  * Return: CE handle for the given copy engine id
1899  */
1900 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1901 {
1902 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1903 
1904 	return scn->ce_id_to_state[id];
1905 }
1906 qdf_export_symbol(hif_get_ce_handle);
1907 
1908 /**
1909  * ce_h2t_tx_ce_cleanup() - placeholder for H2T CE cleanup
1910  * @ce_hdl: copy engine handle
1911  *
1912  * No processing is required inside this function; it only asserts
1913  * that the TX CE has been processed completely.
1914  *
1915  * This is called while dismantling CE structures. No other thread
1916  * should be using these structures while dismantling is occurring,
1917  * therefore no locking is needed.
1918  *
1919  * Return: none
1920  */
1921 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1922 {
1923 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1924 	struct CE_ring_state *src_ring = ce_state->src_ring;
1925 	struct hif_softc *sc = ce_state->scn;
1926 	uint32_t sw_index, write_index;
1927 
1928 	if (hif_is_nss_wifi_enabled(sc))
1929 		return;
1930 
1931 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
1932 		hif_debug("Fastpath mode ON, Cleaning up HTT Tx CE");
1933 		sw_index = src_ring->sw_index;
1934 		write_index = src_ring->write_index;
1935 
1936 		/* At this point Tx CE should be clean */
1937 		qdf_assert_always(sw_index == write_index);
1938 	}
1939 }
1940 
1941 /**
1942  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1943  * @ce_hdl: Handle to CE
1944  *
1945  * These buffers are never allocated on the fly, but
1946  * are allocated only once during HIF start and freed
1947  * only once during HIF stop.
1948  * NOTE:
1949  * The assumption here is there is no in-flight DMA in progress
1950  * currently, so that buffers can be freed up safely.
1951  *
1952  * Return: NONE
1953  */
1954 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1955 {
1956 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1957 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
1958 	qdf_nbuf_t nbuf;
1959 	int i;
1960 
1961 	if (!ce_state->scn->fastpath_mode_on)
1962 		return;
1963 
1964 	if (!ce_state->htt_rx_data)
1965 		return;
1966 
1967 	/*
1968 	 * When fastpath mode is on, the datapath CEs are kept
1969 	 * completely full: unlike other CEs, no blank entry is left to
1970 	 * distinguish an empty queue from a full one. So free all the
1971 	 * entries.
1972 	 */
1973 	for (i = 0; i < dst_ring->nentries; i++) {
1974 		nbuf = dst_ring->per_transfer_context[i];
1975 
1976 		/*
1977 		 * The reasons for doing this check are:
1978 		 * 1) Protect against calling cleanup before allocating buffers
1979 		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1980 		 *    could have a partially filled ring, because of a memory
1981 		 *    allocation failure in the middle of allocating ring.
1982 		 *    This check accounts for that case, checking
1983 		 *    fastpath_mode_on flag or started flag would not have
1984 		 *    covered that case. This is not in performance path,
1985 		 *    so OK to do this.
1986 		 */
1987 		if (nbuf) {
1988 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1989 					      QDF_DMA_FROM_DEVICE);
1990 			qdf_nbuf_free(nbuf);
1991 		}
1992 	}
1993 }
1994 
1995 /**
1996  * hif_update_fastpath_recv_bufs_cnt() - bump rx buf counts for datapath CEs
1997  * @scn: HIF handle
1998  *
1999  * Datapath Rx CEs are special case, where we reuse all the message buffers.
2000  * Hence we have to post all the entries in the pipe, even, in the beginning
2001  * unlike for other CE pipes where one less than dest_nentries are filled in
2002  * the beginning.
2003  *
2004  * Return: None
2005  */
2006 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
2007 {
2008 	int pipe_num;
2009 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2010 
2011 	if (!scn->fastpath_mode_on)
2012 		return;
2013 
2014 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2015 		struct HIF_CE_pipe_info *pipe_info =
2016 			&hif_state->pipe_info[pipe_num];
2017 		struct CE_state *ce_state =
2018 			scn->ce_id_to_state[pipe_info->pipe_num];
2019 
2020 		if (ce_state->htt_rx_data)
2021 			atomic_inc(&pipe_info->recv_bufs_needed);
2022 	}
2023 }
2024 #else
2025 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
2026 {
2027 }
2028 
2029 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
2030 {
2031 	return false;
2032 }
2033 #endif /* WLAN_FEATURE_FASTPATH */
2034 
2035 void ce_fini(struct CE_handle *copyeng)
2036 {
2037 	struct CE_state *CE_state = (struct CE_state *)copyeng;
2038 	unsigned int CE_id = CE_state->id;
2039 	struct hif_softc *scn = CE_state->scn;
2040 	uint32_t desc_size;
2041 	bool inited = CE_state->timer_inited;
2042 
2043 	CE_state->state = CE_UNUSED;
2044 	scn->ce_id_to_state[CE_id] = NULL;
2045 	/* Set the flag to false first to stop processing in ce_poll_timeout */
2046 	ce_disable_polling(CE_state);
2047 
2048 	qdf_lro_deinit(CE_state->lro_data);
2049 
2050 	if (CE_state->src_ring) {
2051 		/* Cleanup the datapath Tx ring */
2052 		ce_h2t_tx_ce_cleanup(copyeng);
2053 
2054 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
2055 		if (CE_state->src_ring->shadow_base_unaligned)
2056 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
2057 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
2058 			ce_free_desc_ring(scn, CE_state->id,
2059 					  CE_state->src_ring,
2060 					  desc_size);
2061 		qdf_mem_free(CE_state->src_ring);
2062 	}
2063 	if (CE_state->dest_ring) {
2064 		/* Cleanup the datapath Rx ring */
2065 		ce_t2h_msg_ce_cleanup(copyeng);
2066 
2067 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
2068 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
2069 			ce_free_desc_ring(scn, CE_state->id,
2070 					  CE_state->dest_ring,
2071 					  desc_size);
2072 		qdf_mem_free(CE_state->dest_ring);
2073 
2074 		/* epping */
2075 		if (inited)
2076 			qdf_timer_free(&CE_state->poll_timer);
2078 	}
2079 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
2080 		/* Ensure the datapath Tx ring is clean */
2081 		ce_h2t_tx_ce_cleanup(copyeng);
2082 
2083 		if (CE_state->status_ring->shadow_base_unaligned)
2084 			qdf_mem_free(
2085 				CE_state->status_ring->shadow_base_unaligned);
2086 
2087 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
2088 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
2089 			ce_free_desc_ring(scn, CE_state->id,
2090 					  CE_state->status_ring,
2091 					  desc_size);
2092 		qdf_mem_free(CE_state->status_ring);
2093 	}
2094 
2095 	free_mem_ce_debug_history(scn, CE_id);
2096 	reset_ce_debug_history(scn);
2097 	ce_deinit_ce_desc_event_log(scn, CE_id);
2098 
2099 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
2100 	qdf_mem_free(CE_state);
2101 }
2102 
2103 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
2104 {
2105 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2106 
2107 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
2108 		  sizeof(hif_state->msg_callbacks_pending));
2109 	qdf_mem_zero(&hif_state->msg_callbacks_current,
2110 		  sizeof(hif_state->msg_callbacks_current));
2111 }
2112 
2113 /* Send the first nbytes bytes of the buffer */
2114 QDF_STATUS
2115 hif_send_head(struct hif_opaque_softc *hif_ctx,
2116 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
2117 	      qdf_nbuf_t nbuf, unsigned int data_attr)
2118 {
2119 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2120 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2121 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2122 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2123 	int bytes = nbytes, nfrags = 0;
2124 	struct ce_sendlist sendlist;
2125 	int i = 0;
2126 	QDF_STATUS status;
2127 	unsigned int mux_id = 0;
2128 
2129 	if (nbytes > qdf_nbuf_len(nbuf)) {
2130 		hif_err("nbytes: %u nbuf_len: %u", nbytes,
2131 		       (uint32_t)qdf_nbuf_len(nbuf));
2132 		QDF_ASSERT(0);
2133 	}
2134 
2135 	transfer_id =
2136 		(mux_id & MUX_ID_MASK) |
2137 		(transfer_id & TRANSACTION_ID_MASK);
2138 	data_attr &= DESC_DATA_FLAG_MASK;
2139 	/*
2140 	 * The common case involves sending multiple fragments within a
2141 	 * single download (the tx descriptor and the tx frame header).
2142 	 * So, optimize for the case of multiple fragments by not even
2143 	 * checking whether it's necessary to use a sendlist.
2144 	 * The overhead of using a sendlist for a single buffer download
2145 	 * is not a big deal, since it happens rarely (for WMI messages).
2146 	 */
2147 	ce_sendlist_init(&sendlist);
2148 	do {
2149 		qdf_dma_addr_t frag_paddr;
2150 		int frag_bytes;
2151 
2152 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
2153 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
2154 		/*
2155 		 * Clear the packet offset for all but the first CE desc.
2156 		 */
2157 		if (i++ > 0)
2158 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
2159 
2160 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
2161 				    frag_bytes >
2162 				    bytes ? bytes : frag_bytes,
2163 				    qdf_nbuf_get_frag_is_wordstream
2164 				    (nbuf,
2165 				    nfrags) ? 0 :
2166 				    CE_SEND_FLAG_SWAP_DISABLE,
2167 				    data_attr);
2168 		if (status != QDF_STATUS_SUCCESS) {
2169 			hif_err("frag_num: %d larger than limit (status=%d)",
2170 			       nfrags, status);
2171 			return status;
2172 		}
2173 		bytes -= frag_bytes;
2174 		nfrags++;
2175 	} while (bytes > 0);
2176 
2177 	/* Make sure we have resources to handle this request */
2178 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2179 	if (pipe_info->num_sends_allowed < nfrags) {
2180 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2181 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
2182 		return QDF_STATUS_E_RESOURCES;
2183 	}
2184 	pipe_info->num_sends_allowed -= nfrags;
2185 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2186 
2187 	if (qdf_unlikely(!ce_hdl)) {
2188 		hif_err("CE handle is null");
2189 		return QDF_STATUS_E_INVAL;
2190 	}
2191 
2192 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
2193 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
2194 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
2195 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
2196 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
2197 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
2198 
2199 	return status;
2200 }
2201 
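/*
 * Example (illustrative sketch): sending a whole nbuf on a pipe. The
 * transfer id of 0 is an assumption; a real caller resolves the upload
 * pipe via hif_map_service_to_pipe().
 */
#if 0
static QDF_STATUS example_send(struct hif_opaque_softc *hif_ctx,
			       uint8_t ul_pipe, qdf_nbuf_t nbuf)
{
	/* transfer id 0 and no descriptor attributes, for simplicity */
	return hif_send_head(hif_ctx, ul_pipe, 0,
			     qdf_nbuf_len(nbuf), nbuf, 0);
}
#endif
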
2202 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
2203 								int force)
2204 {
2205 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2206 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2207 
2208 	if (!force) {
2209 		int resources;
2210 		/*
2211 		 * Decide whether to actually poll for completions, or just
2212 		 * wait for a later chance. If there seem to be plenty of
2213 		 * resources left, then just wait, since checking involves
2214 		 * reading a CE register, which is a relatively expensive
2215 		 * operation.
2216 		 */
2217 		resources = hif_get_free_queue_number(hif_ctx, pipe);
2218 		/*
2219 		 * If at least 50% of the total resources are still available,
2220 		 * don't bother checking again yet.
2221 		 */
2222 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
2223 									 1))
2224 			return;
2225 	}
2226 #if ATH_11AC_TXCOMPACT
2227 	ce_per_engine_servicereap(scn, pipe);
2228 #else
2229 	ce_per_engine_service(scn, pipe);
2230 #endif
2231 }
2232 
2233 uint16_t
2234 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2235 {
2236 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2237 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2238 	uint16_t rv;
2239 
2240 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2241 	rv = pipe_info->num_sends_allowed;
2242 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2243 	return rv;
2244 }
2245 
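/*
 * Example (illustrative): flow control before a multi-fragment send,
 * mirroring the resource check inside hif_send_head(). The helper
 * name is hypothetical.
 */
#if 0
static bool example_can_send(struct hif_opaque_softc *hif_ctx,
			     uint8_t pipe, unsigned int nfrags)
{
	/* each fragment consumes one source-ring entry */
	return hif_get_free_queue_number(hif_ctx, pipe) >= nfrags;
}
#endif
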
2246 /* Called by lower (CE) layer when a send to Target completes. */
2247 static void
2248 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
2249 		     void *transfer_context, qdf_dma_addr_t CE_data,
2250 		     unsigned int nbytes, unsigned int transfer_id,
2251 		     unsigned int sw_index, unsigned int hw_index,
2252 		     unsigned int toeplitz_hash_result)
2253 {
2254 	struct HIF_CE_pipe_info *pipe_info =
2255 		(struct HIF_CE_pipe_info *)ce_context;
2256 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
2257 	struct hif_msg_callbacks *msg_callbacks =
2258 		&pipe_info->pipe_callbacks;
2259 
2260 	do {
2261 		/*
2262 		 * The upper layer callback will be triggered
2263 		 * when the last fragment is completed.
2264 		 */
2265 		if (transfer_context != CE_SENDLIST_ITEM_CTXT)
2266 			msg_callbacks->txCompletionHandler(
2267 				msg_callbacks->Context,
2268 				transfer_context, transfer_id,
2269 				toeplitz_hash_result);
2270 
2271 		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2272 		pipe_info->num_sends_allowed++;
2273 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2274 	} while (ce_completed_send_next(copyeng,
2275 			&ce_context, &transfer_context,
2276 			&CE_data, &nbytes, &transfer_id,
2277 			&sw_idx, &hw_idx,
2278 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
2279 }
2280 
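/*
 * Example (illustrative): a minimal upper-layer tx completion handler
 * of the shape invoked by hif_pci_ce_send_done() above. The signature
 * is inferred from the call site and may differ from hif.h.
 */
#if 0
static void example_tx_done(void *context, qdf_nbuf_t netbuf,
			    unsigned int transfer_id,
			    uint32_t toeplitz_hash_result)
{
	/* the transfer is complete; release the buffer */
	qdf_nbuf_free(netbuf);
}
#endif
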
2281 /**
2282  * hif_ce_do_recv(): send message from copy engine to upper layers
2283  * @msg_callbacks: structure containing callback and callback context
2284  * @netbuff: skb containing message
2285  * @nbytes: number of bytes in the message
2286  * @pipe_info: used for the pipe_number info
2287  *
2288  * Checks the packet length, configures the length in the netbuff,
2289  * and calls the upper layer callback.
2290  *
2291  * return: None
2292  */
2293 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
2294 		qdf_nbuf_t netbuf, int nbytes,
2295 		struct HIF_CE_pipe_info *pipe_info) {
2296 	if (nbytes <= pipe_info->buf_sz) {
2297 		qdf_nbuf_set_pktlen(netbuf, nbytes);
2298 		msg_callbacks->rxCompletionHandler(msg_callbacks->Context,
2299 						   netbuf,
2300 						   pipe_info->pipe_num);
2301 	} else {
2302 		hif_err("Invalid Rx msg buf: %pK nbytes: %d", netbuf, nbytes);
2303 		qdf_nbuf_free(netbuf);
2304 	}
2305 }
2306 
2307 /* Called by lower (CE) layer when data is received from the Target. */
2308 static void
2309 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
2310 		     void *transfer_context, qdf_dma_addr_t CE_data,
2311 		     unsigned int nbytes, unsigned int transfer_id,
2312 		     unsigned int flags)
2313 {
2314 	struct HIF_CE_pipe_info *pipe_info =
2315 		(struct HIF_CE_pipe_info *)ce_context;
2316 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
2317 	struct CE_state *ce_state = (struct CE_state *) copyeng;
2318 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2319 	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
2320 	struct hif_msg_callbacks *msg_callbacks =
2321 		 &pipe_info->pipe_callbacks;
2322 
2323 	do {
2324 		hif_pm_runtime_mark_last_busy(hif_ctx);
2325 		qdf_nbuf_unmap_single(scn->qdf_dev,
2326 				      (qdf_nbuf_t) transfer_context,
2327 				      QDF_DMA_FROM_DEVICE);
2328 
2329 		atomic_inc(&pipe_info->recv_bufs_needed);
2330 		hif_post_recv_buffers_for_pipe(pipe_info);
2331 		if (scn->target_status == TARGET_STATUS_RESET)
2332 			qdf_nbuf_free(transfer_context);
2333 		else
2334 			hif_ce_do_recv(msg_callbacks, transfer_context,
2335 				nbytes, pipe_info);
2336 
2337 		/* Set the force_break flag if the number of receives
2338 		 * reaches MAX_NUM_OF_RECEIVES
2339 		 */
2340 		ce_state->receive_count++;
2341 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
2342 			ce_state->force_break = 1;
2343 			break;
2344 		}
2345 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
2346 					&CE_data, &nbytes, &transfer_id,
2347 					&flags) == QDF_STATUS_SUCCESS);
2349 }
2350 
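/*
 * Example (illustrative): a minimal rxCompletionHandler of the shape
 * hif_ce_do_recv() invokes. The signature is inferred from the call
 * site (context, netbuf, pipe number) and may differ from hif.h.
 */
#if 0
static void example_rx_done(void *context, qdf_nbuf_t netbuf,
			    uint8_t pipe_num)
{
	/* consume the message, then release the buffer */
	qdf_nbuf_free(netbuf);
}
#endif
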
2351 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
2352 
2353 void
2354 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
2355 	      struct hif_msg_callbacks *callbacks)
2356 {
2357 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2358 
2359 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2360 	spin_lock_init(&pcie_access_log_lock);
2361 #endif
2362 	/* Save callbacks for later installation */
2363 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
2364 		 sizeof(hif_state->msg_callbacks_pending));
2366 }
2367 
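/*
 * Example (illustrative): registering the pending callbacks that
 * hif_msg_callbacks_install() later promotes to "current". It reuses
 * the hypothetical handlers sketched above; the NULL Context is an
 * assumption.
 */
#if 0
static void example_register_callbacks(struct hif_opaque_softc *hif_ctx)
{
	struct hif_msg_callbacks cbs = {
		.Context = NULL,	/* caller's private context */
		.txCompletionHandler = example_tx_done,
		.rxCompletionHandler = example_rx_done,
	};

	hif_post_init(hif_ctx, NULL, &cbs);
}
#endif
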
2368 static int hif_completion_thread_startup_by_ceid(struct HIF_CE_state *hif_state,
2369 						 int pipe_num)
2370 {
2371 	struct CE_attr attr;
2372 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2373 	struct hif_msg_callbacks *hif_msg_callbacks =
2374 		&hif_state->msg_callbacks_current;
2375 	struct HIF_CE_pipe_info *pipe_info;
2376 	struct CE_state *ce_state;
2377 
2378 	if (pipe_num >= CE_COUNT_MAX)
2379 		return -EINVAL;
2380 
2381 	pipe_info = &hif_state->pipe_info[pipe_num];
2382 	ce_state = scn->ce_id_to_state[pipe_num];
2383 
2384 	if (!hif_msg_callbacks ||
2385 	    !hif_msg_callbacks->rxCompletionHandler ||
2386 	    !hif_msg_callbacks->txCompletionHandler) {
2387 		hif_err("%s: no completion handler registered", __func__);
2388 		return -EFAULT;
2389 	}
2390 
2391 	attr = hif_state->host_ce_config[pipe_num];
2392 	if (attr.src_nentries) {
2393 		/* pipe used to send to target */
2394 		hif_debug("%s: pipe_num:%d pipe_info:0x%pK\n",
2395 			  __func__, pipe_num, pipe_info);
2396 		ce_send_cb_register(pipe_info->ce_hdl,
2397 				    hif_pci_ce_send_done, pipe_info,
2398 				    attr.flags & CE_ATTR_DISABLE_INTR);
2399 		pipe_info->num_sends_allowed = attr.src_nentries - 1;
2400 	}
2401 	if (attr.dest_nentries) {
2402 		hif_debug("%s: pipe_num:%d pipe_info:0x%pK\n",
2403 			  __func__, pipe_num, pipe_info);
2404 		/* pipe used to receive from target */
2405 		ce_recv_cb_register(pipe_info->ce_hdl,
2406 				    hif_pci_ce_recv_data, pipe_info,
2407 				    attr.flags & CE_ATTR_DISABLE_INTR);
2408 	}
2409 
2410 	if (attr.src_nentries)
2411 		qdf_spinlock_create(&pipe_info->completion_freeq_lock);
2412 
2413 	if (!(ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND))
2414 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
2415 			     sizeof(pipe_info->pipe_callbacks));
2416 
2417 	return 0;
2418 }
2419 
2420 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
2421 {
2422 	struct CE_handle *ce_diag = hif_state->ce_diag;
2423 	int pipe_num, ret;
2424 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2425 
2426 	/* daemonize("hif_compl_thread"); */
2427 
2428 	if (scn->ce_count == 0) {
2429 		hif_err("ce_count is 0");
2430 		return -EINVAL;
2431 	}
2432 
2434 	A_TARGET_ACCESS_LIKELY(scn);
2435 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2436 		struct HIF_CE_pipe_info *pipe_info;
2437 
2438 		pipe_info = &hif_state->pipe_info[pipe_num];
2439 		if (pipe_info->ce_hdl == ce_diag)
2440 			continue;       /* Handle Diagnostic CE specially */
2441 
2442 		ret = hif_completion_thread_startup_by_ceid(hif_state,
2443 							    pipe_num);
2444 		if (ret < 0)
2445 			return ret;
2446 
2447 	}
2448 
2449 	A_TARGET_ACCESS_UNLIKELY(scn);
2450 	return 0;
2451 }
2452 
2453 /*
2454  * Install pending msg callbacks.
2455  *
2456  * TBDXXX: This hack is needed because upper layers install msg callbacks
2457  * for use with HTC before BMI is done; yet this HIF implementation
2458  * needs to continue to use BMI msg callbacks. Really, upper layers
2459  * should not register HTC callbacks until AFTER BMI phase.
2460  */
2461 static void hif_msg_callbacks_install(struct hif_softc *scn)
2462 {
2463 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2464 
2465 	qdf_mem_copy(&hif_state->msg_callbacks_current,
2466 		 &hif_state->msg_callbacks_pending,
2467 		 sizeof(hif_state->msg_callbacks_pending));
2468 }
2469 
2470 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2471 							uint8_t *DLPipe)
2472 {
2473 	int ul_is_polled, dl_is_polled;
2474 
2475 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
2476 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2477 }
2478 
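/*
 * Example (illustrative): resolving the pipe pair for another service,
 * following the same pattern hif_get_default_pipe() uses for
 * HTC_CTRL_RSVD_SVC. HTT_DATA_MSG_SVC is assumed to be a valid
 * service id in this build.
 */
#if 0
static void example_map_htt_pipes(struct hif_opaque_softc *hif_hdl)
{
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	(void)hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC,
				      &ul_pipe, &dl_pipe,
				      &ul_is_polled, &dl_is_polled);
}
#endif
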
2479 /**
2480  * hif_dump_pipe_debug_count() - Log error count
2481  * @scn: hif_softc pointer.
2482  *
2483  * Output the pipe error counts of each pipe to log file
2484  *
2485  * Return: N/A
2486  */
2487 void hif_dump_pipe_debug_count(struct hif_softc *scn)
2488 {
2489 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2490 	int pipe_num;
2491 
2492 	if (!hif_state) {
2493 		hif_err("hif_state is NULL");
2494 		return;
2495 	}
2496 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2497 		struct HIF_CE_pipe_info *pipe_info;
2498 
2499 		pipe_info = &hif_state->pipe_info[pipe_num];
2500 
2501 		if (pipe_info->nbuf_alloc_err_count > 0 ||
2502 		    pipe_info->nbuf_dma_err_count > 0 ||
2503 		    pipe_info->nbuf_ce_enqueue_err_count)
2504 			hif_err(
2505 				"pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
2506 				pipe_info->pipe_num,
2507 				atomic_read(&pipe_info->recv_bufs_needed),
2508 				pipe_info->nbuf_alloc_err_count,
2509 				pipe_info->nbuf_dma_err_count,
2510 				pipe_info->nbuf_ce_enqueue_err_count);
2511 	}
2512 }
2513 
2514 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2515 					  void *nbuf, uint32_t *error_cnt,
2516 					  enum hif_ce_event_type failure_type,
2517 					  const char *failure_type_string)
2518 {
2519 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2520 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2521 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2522 	int ce_id = CE_state->id;
2523 	uint32_t error_cnt_tmp;
2524 
2525 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2526 	error_cnt_tmp = ++(*error_cnt);
2527 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2528 	hif_debug("pipe_num: %d, needed: %d, err_cnt: %u, fail_type: %s",
2529 		  pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2530 		  failure_type_string);
2531 	hif_record_ce_desc_event(scn, ce_id, failure_type,
2532 				 NULL, nbuf, bufs_needed_tmp, 0);
2533 	/* If we fail to allocate the last buffer for an rx pipe,
2534 	 * there is no trigger to refill the CE and we will
2535 	 * eventually crash.
2536 	 */
2537 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1 ||
2538 	    (ce_srng_based(scn) &&
2539 	     bufs_needed_tmp == CE_state->dest_ring->nentries - 2))
2540 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
2541 
2543 }
2544 
2547 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
2548 {
2549 	struct CE_handle *ce_hdl;
2550 	qdf_size_t buf_sz;
2551 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2552 	QDF_STATUS status;
2553 	uint32_t bufs_posted = 0;
2554 	unsigned int ce_id;
2555 
2556 	buf_sz = pipe_info->buf_sz;
2557 	if (buf_sz == 0) {
2558 		/* Unused Copy Engine */
2559 		return QDF_STATUS_SUCCESS;
2560 	}
2561 
2562 	ce_hdl = pipe_info->ce_hdl;
2563 	ce_id = ((struct CE_state *)ce_hdl)->id;
2564 
2565 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2566 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
2567 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
2568 		qdf_nbuf_t nbuf;
2569 
2570 		atomic_dec(&pipe_info->recv_bufs_needed);
2571 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2572 
2573 		hif_record_ce_desc_event(scn, ce_id,
2574 					 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL,
2575 					 0, 0);
2576 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
2577 		if (!nbuf) {
2578 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2579 					&pipe_info->nbuf_alloc_err_count,
2580 					 HIF_RX_NBUF_ALLOC_FAILURE,
2581 					"HIF_RX_NBUF_ALLOC_FAILURE");
2582 			return QDF_STATUS_E_NOMEM;
2583 		}
2584 
2585 		hif_record_ce_desc_event(scn, ce_id,
2586 					 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf,
2587 					 0, 0);
2588 		/*
2589 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
2590 		 * CE_data = dma_map_single(dev, data, buf_sz,
2591 		 *                          DMA_FROM_DEVICE);
2592 		 */
2593 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
2594 					    QDF_DMA_FROM_DEVICE);
2595 
2596 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2597 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2598 					&pipe_info->nbuf_dma_err_count,
2599 					 HIF_RX_NBUF_MAP_FAILURE,
2600 					"HIF_RX_NBUF_MAP_FAILURE");
2601 			qdf_nbuf_free(nbuf);
2602 			return status;
2603 		}
2604 
2605 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
2606 		hif_record_ce_desc_event(scn, ce_id,
2607 					 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf,
2608 					 0, 0);
2609 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
2610 					       buf_sz, DMA_FROM_DEVICE);
2611 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
2612 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2613 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2614 					&pipe_info->nbuf_ce_enqueue_err_count,
2615 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
2616 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
2617 
2618 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2619 						QDF_DMA_FROM_DEVICE);
2620 			qdf_nbuf_free(nbuf);
2621 			return status;
2622 		}
2623 
2624 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2625 		bufs_posted++;
2626 	}
2627 	pipe_info->nbuf_alloc_err_count =
2628 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
2629 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2630 	pipe_info->nbuf_dma_err_count =
2631 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
2632 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2633 	pipe_info->nbuf_ce_enqueue_err_count =
2634 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
2635 		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
2636 
2637 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2638 
2639 	return QDF_STATUS_SUCCESS;
2640 }
2641 
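/*
 * Example (illustrative): the refill pattern used by the rx path in
 * this file -- the needed-count is incremented before reposting, so a
 * failure leaves an accurate deficit for the OOM recovery work.
 */
#if 0
static void example_refill_one(struct HIF_CE_pipe_info *pipe_info)
{
	atomic_inc(&pipe_info->recv_bufs_needed);
	(void)hif_post_recv_buffers_for_pipe(pipe_info);
}
#endif
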
2642 /*
2643  * Try to post all desired receive buffers for all pipes.
2644  * Returns success for non-fastpath rx copy engines, since
2645  * oom_allocation_work will be scheduled to recover any
2646  * failures; returns an error status if unable to completely
2647  * replenish receive buffers for a fastpath rx copy engine.
2648  */
2649 static QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
2650 {
2651 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2652 	int pipe_num;
2653 	struct CE_state *ce_state = NULL;
2654 	QDF_STATUS qdf_status;
2655 
2656 	A_TARGET_ACCESS_LIKELY(scn);
2657 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2658 		struct HIF_CE_pipe_info *pipe_info;
2659 
2660 		ce_state = scn->ce_id_to_state[pipe_num];
2661 		pipe_info = &hif_state->pipe_info[pipe_num];
2662 
2663 		if (!ce_state)
2664 			continue;
2665 
2666 		/* Do not init dynamic CEs during initial load */
2667 		if (ce_state->attr_flags & CE_ATTR_INIT_ON_DEMAND)
2668 			continue;
2669 
2670 		if (hif_is_nss_wifi_enabled(scn) &&
2671 		    ce_state->htt_rx_data)
2672 			continue;
2673 
2674 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
2675 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
2676 			ce_state->htt_rx_data &&
2677 			scn->fastpath_mode_on) {
2678 			A_TARGET_ACCESS_UNLIKELY(scn);
2679 			return qdf_status;
2680 		}
2681 	}
2682 
2683 	A_TARGET_ACCESS_UNLIKELY(scn);
2684 
2685 	return QDF_STATUS_SUCCESS;
2686 }
2687 
2688 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
2689 {
2690 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2691 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2692 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
2693 
2694 	hif_update_fastpath_recv_bufs_cnt(scn);
2695 
2696 	hif_msg_callbacks_install(scn);
2697 
2698 	if (hif_completion_thread_startup(hif_state))
2699 		return QDF_STATUS_E_FAILURE;
2700 
2701 	/* enable buffer cleanup */
2702 	hif_state->started = true;
2703 
2704 	/* Post buffers once to start things off. */
2705 	qdf_status = hif_post_recv_buffers(scn);
2706 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2707 		/* cleanup is done in hif_ce_disable */
2708 		hif_err("Failed to post buffers");
2709 		return qdf_status;
2710 	}
2711 
2712 	return qdf_status;
2713 }
2714 
2715 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2716 {
2717 	struct hif_softc *scn;
2718 	struct CE_handle *ce_hdl;
2719 	uint32_t buf_sz;
2720 	struct HIF_CE_state *hif_state;
2721 	qdf_nbuf_t netbuf;
2722 	qdf_dma_addr_t CE_data;
2723 	void *per_CE_context;
2724 
2725 	buf_sz = pipe_info->buf_sz;
2726 	/* Unused Copy Engine */
2727 	if (buf_sz == 0)
2728 		return;
2729 
2731 	hif_state = pipe_info->HIF_CE_state;
2732 	if (!hif_state->started)
2733 		return;
2734 
2735 	scn = HIF_GET_SOFTC(hif_state);
2736 	ce_hdl = pipe_info->ce_hdl;
2737 
2738 	if (!scn->qdf_dev)
2739 		return;
2740 	while (ce_revoke_recv_next
2741 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
2742 			&CE_data) == QDF_STATUS_SUCCESS) {
2743 		if (netbuf) {
2744 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2745 					      QDF_DMA_FROM_DEVICE);
2746 			qdf_nbuf_free(netbuf);
2747 		}
2748 	}
2749 }
2750 
2751 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2752 {
2753 	struct CE_handle *ce_hdl;
2754 	struct HIF_CE_state *hif_state;
2755 	struct hif_softc *scn;
2756 	qdf_nbuf_t netbuf;
2757 	void *per_CE_context;
2758 	qdf_dma_addr_t CE_data;
2759 	unsigned int nbytes;
2760 	unsigned int id;
2761 	uint32_t buf_sz;
2762 	uint32_t toeplitz_hash_result;
2763 
2764 	buf_sz = pipe_info->buf_sz;
2765 	if (buf_sz == 0) {
2766 		/* Unused Copy Engine */
2767 		return;
2768 	}
2769 
2770 	hif_state = pipe_info->HIF_CE_state;
2771 	if (!hif_state->started)
2772 		return;
2774 
2775 	scn = HIF_GET_SOFTC(hif_state);
2776 
2777 	ce_hdl = pipe_info->ce_hdl;
2778 
2779 	while (ce_cancel_send_next
2780 		       (ce_hdl, &per_CE_context,
2781 		       (void **)&netbuf, &CE_data, &nbytes,
2782 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2783 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2784 			/*
2785 			 * Packets enqueued by htt_h2t_ver_req_msg() and
2786 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2787 			 * freed in htt_htc_misc_pkt_pool_free() in
2788 			 * wlantl_close(), so do not free them here again
2789 			 * by checking whether it's the endpoint
2790 			 * which they are queued in.
2791 			 */
2792 			if (id == scn->htc_htt_tx_endpoint)
2793 				return;
2794 			/* Indicate the completion to higher
2795 			 * layer to free the buffer
2796 			 */
2797 			if (pipe_info->pipe_callbacks.txCompletionHandler)
2798 				pipe_info->pipe_callbacks.
2799 				    txCompletionHandler(pipe_info->
2800 					    pipe_callbacks.Context,
2801 					    netbuf, id, toeplitz_hash_result);
2802 		}
2803 	}
2804 }
2805 
2806 /*
2807  * Cleanup residual buffers for device shutdown:
2808  *    buffers that were enqueued for receive
2809  *    buffers that were to be sent
2810  * Note: Buffers that had completed but which were
2811  * not yet processed are on a completion queue. They
2812  * are handled when the completion thread shuts down.
2813  */
2814 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
2815 {
2816 	int pipe_num;
2817 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2818 	struct CE_state *ce_state;
2819 
2820 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2821 		struct HIF_CE_pipe_info *pipe_info;
2822 
2823 		ce_state = scn->ce_id_to_state[pipe_num];
2824 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2825 				((ce_state->htt_tx_data) ||
2826 				 (ce_state->htt_rx_data))) {
2827 			continue;
2828 		}
2829 
2830 		pipe_info = &hif_state->pipe_info[pipe_num];
2831 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
2832 		hif_send_buffer_cleanup_on_pipe(pipe_info);
2833 	}
2834 }
2835 
2836 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
2837 {
2838 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2839 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2840 
2841 	hif_buffer_cleanup(hif_state);
2842 }
2843 
2844 static void hif_destroy_oom_work(struct hif_softc *scn)
2845 {
2846 	struct CE_state *ce_state;
2847 	int ce_id;
2848 
2849 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2850 		ce_state = scn->ce_id_to_state[ce_id];
2851 		if (ce_state)
2852 			qdf_destroy_work(scn->qdf_dev,
2853 					 &ce_state->oom_allocation_work);
2854 	}
2855 }
2856 
2857 void hif_ce_stop(struct hif_softc *scn)
2858 {
2859 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2860 	int pipe_num;
2861 
2862 	/*
2863 	 * before cleaning up any memory, ensure irq &
2864 	 * bottom half contexts will not be re-entered
2865 	 */
2866 	hif_disable_isr(&scn->osc);
2867 	hif_destroy_oom_work(scn);
2868 	scn->hif_init_done = false;
2869 
2870 	/*
2871 	 * At this point, asynchronous threads are stopped; the Target
2872 	 * should not DMA nor interrupt, and Host code may not
2873 	 * initiate anything more. So we just need to clean
2874 	 * up Host-side state.
2875 	 */
2876 
2877 	if (scn->athdiag_procfs_inited) {
2878 		athdiag_procfs_remove();
2879 		scn->athdiag_procfs_inited = false;
2880 	}
2881 
2882 	hif_buffer_cleanup(hif_state);
2883 
2884 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2885 		struct HIF_CE_pipe_info *pipe_info;
2886 		struct CE_attr attr;
2887 		struct CE_handle *ce_diag = hif_state->ce_diag;
2888 
2889 		pipe_info = &hif_state->pipe_info[pipe_num];
2890 		if (pipe_info->ce_hdl) {
2891 			if (pipe_info->ce_hdl != ce_diag &&
2892 			    hif_state->started) {
2893 				attr = hif_state->host_ce_config[pipe_num];
2894 				if (attr.src_nentries)
2895 					qdf_spinlock_destroy(&pipe_info->
2896 							completion_freeq_lock);
2897 			}
2898 			ce_fini(pipe_info->ce_hdl);
2899 			pipe_info->ce_hdl = NULL;
2900 			pipe_info->buf_sz = 0;
2901 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2902 		}
2903 	}
2904 
2905 	if (hif_state->sleep_timer_init) {
2906 		qdf_timer_stop(&hif_state->sleep_timer);
2907 		qdf_timer_free(&hif_state->sleep_timer);
2908 		hif_state->sleep_timer_init = false;
2909 	}
2910 
2911 	hif_state->started = false;
2912 }
2913 
2914 static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
2915 				   struct shadow_reg_cfg
2916 				   **target_shadow_reg_cfg_ret,
2917 				   uint32_t *shadow_cfg_sz_ret)
2918 {
2919 	if (target_shadow_reg_cfg_ret)
2920 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2921 	if (shadow_cfg_sz_ret)
2922 		*shadow_cfg_sz_ret = shadow_cfg_sz;
2923 }
2924 
2925 /**
2926  * hif_get_target_ce_config() - get copy engine configuration
2927  * @target_ce_config_ret: basic copy engine configuration
2928  * @target_ce_config_sz_ret: size of the basic configuration in bytes
2929  * @target_service_to_ce_map_ret: service mapping for the copy engines
2930  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2931  * @target_shadow_reg_cfg_ret: shadow register configuration
2932  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2933  *
2934  * Provides access to these values outside of this file.
2935  * Currently these are stored in static pointers to const sections.
2936  * There are multiple configurations that are selected from at compile time.
2937  * Runtime selection would need to consider mode, target type and bus type.
2938  *
2939  * Return: none; values are returned through the parameters.
2940  */
2941 void hif_get_target_ce_config(struct hif_softc *scn,
2942 		struct CE_pipe_config **target_ce_config_ret,
2943 		uint32_t *target_ce_config_sz_ret,
2944 		struct service_to_pipe **target_service_to_ce_map_ret,
2945 		uint32_t *target_service_to_ce_map_sz_ret,
2946 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
2947 		uint32_t *shadow_cfg_sz_ret)
2948 {
2949 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2950 
2951 	*target_ce_config_ret = hif_state->target_ce_config;
2952 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
2953 
2954 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2955 				       target_service_to_ce_map_sz_ret);
2956 	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
2957 			       shadow_cfg_sz_ret);
2958 }
2959 
2960 #ifdef CONFIG_SHADOW_V2
2961 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2962 {
2963 	int i;
2964 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2965 		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
2966 
2967 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2968 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2969 		     "%s: i %d, val %x", __func__, i,
2970 		     cfg->shadow_reg_v2_cfg[i].addr);
2971 	}
2972 }
2973 
2974 #else
2975 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2976 {
2977 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2978 		  "%s: CONFIG_SHADOW_V2 not defined", __func__);
2979 }
2980 #endif
2981 
2982 #ifdef ADRASTEA_RRI_ON_DDR
2983 /**
2984  * hif_get_src_ring_read_index(): Called to get the SRRI
2985  *
2986  * @scn: hif_softc pointer
2987  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2988  *
2989  * This function returns the SRRI to the caller. For CEs that
2990  * don't have interrupts enabled, we look at the DDR based SRRI
2991  *
2992  * Return: SRRI
2993  */
2994 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
2995 		uint32_t CE_ctrl_addr)
2996 {
2997 	struct CE_attr attr;
2998 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2999 
3000 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3001 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3002 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3003 	} else {
3004 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3005 			return A_TARGET_READ(scn,
3006 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
3007 		else
3008 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
3009 					CE_ctrl_addr);
3010 	}
3011 }
3012 
3013 /**
3014  * hif_get_dst_ring_read_index(): Called to get the DRRI
3015  *
3016  * @scn: hif_softc pointer
3017  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3018  *
3019  * This function returns the DRRI to the caller. For CEs that
3020  * dont have interrupts enabled, we look at the DDR based DRRI
3021  *
3022  * Return: DRRI
3023  */
3024 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
3025 		uint32_t CE_ctrl_addr)
3026 {
3027 	struct CE_attr attr;
3028 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3029 
3030 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3031 
3032 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3033 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3034 	} else {
3035 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3036 			return A_TARGET_READ(scn,
3037 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
3038 		else
3039 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
3040 					CE_ctrl_addr);
3041 	}
3042 }
3043 
3044 /**
3045  * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
3046  * @scn: hif_softc pointer
3047  *
3048  * Return: qdf status
3049  */
3050 static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
3051 {
3052 	qdf_dma_addr_t paddr_rri_on_ddr = 0;
3053 
3054 	scn->vaddr_rri_on_ddr =
3055 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3056 		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
3057 		&paddr_rri_on_ddr);
3058 
3059 	if (!scn->vaddr_rri_on_ddr) {
3060 		hif_err("dmaable page alloc fail");
3061 		return QDF_STATUS_E_NOMEM;
3062 	}
3063 
3064 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
3065 
3066 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
3067 
3068 	return QDF_STATUS_SUCCESS;
3069 }
3070 #endif
3071 
3072 #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
3073 /**
3074  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3075  *
3076  * @scn: hif_softc pointer
3077  *
3078  * This function allocates non-cached memory on DDR and sends
3079  * the physical address of this memory to the CE hardware. The
3080  * hardware updates the RRI on this particular location.
3081  *
3082  * Return: None
3083  */
3084 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3085 {
3086 	unsigned int i;
3087 	uint32_t high_paddr, low_paddr;
3088 
3089 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3090 		return;
3091 
3092 	low_paddr  = BITS0_TO_31(scn->paddr_rri_on_ddr);
3093 	high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);
3094 
3095 	hif_debug("using srri and drri from DDR");
3096 
3097 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
3098 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
3099 
3100 	for (i = 0; i < CE_COUNT; i++)
3101 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
3102 }
3103 #else
3104 /**
3105  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3106  *
3107  * @scn: hif_softc pointer
3108  *
3109  * This is a dummy implementation for platforms that don't
3110  * support this functionality.
3111  *
3112  * Return: None
3113  */
3114 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3115 {
3116 }
3117 #endif
3118 
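/*
 * Example (illustrative): the address split used when handing the RRI
 * page to the CE hardware. The bit ranges are inferred from the macro
 * names (presumably a 36-bit physical address); this is an assumption.
 */
#if 0
qdf_dma_addr_t paddr = scn->paddr_rri_on_ddr;
uint32_t low_paddr  = BITS0_TO_31(paddr);  /* presumably paddr & 0xffffffff */
uint32_t high_paddr = BITS32_TO_35(paddr); /* presumably (paddr >> 32) & 0xf */
#endif
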
3119 /**
3120  * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
3121  *                                    QMI command
3122  * @scn: hif context
3123  * @cfg: wlan enable config
3124  *
3125  * In case of Genoa, rri_over_ddr memory configuration is passed
3126  * to firmware through QMI configure command.
3127  */
3128 #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
3129 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3130 					   struct pld_wlan_enable_cfg *cfg)
3131 {
3132 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3133 		return;
3134 
3135 	cfg->rri_over_ddr_cfg_valid = true;
3136 	cfg->rri_over_ddr_cfg.base_addr_low =
3137 		 BITS0_TO_31(scn->paddr_rri_on_ddr);
3138 	cfg->rri_over_ddr_cfg.base_addr_high =
3139 		 BITS32_TO_35(scn->paddr_rri_on_ddr);
3140 }
3141 #else
3142 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3143 					   struct pld_wlan_enable_cfg *cfg)
3144 {
3145 }
3146 #endif
3147 
3148 /**
3149  * hif_wlan_enable(): call the platform driver to enable wlan
3150  * @scn: HIF Context
3151  *
3152  * This function passes the con_mode and CE configuration to
3153  * platform driver to enable wlan.
3154  *
3155  * Return: linux error code
3156  */
3157 int hif_wlan_enable(struct hif_softc *scn)
3158 {
3159 	struct pld_wlan_enable_cfg cfg;
3160 	enum pld_driver_mode mode;
3161 	uint32_t con_mode = hif_get_conparam(scn);
3162 
3163 	hif_get_target_ce_config(scn,
3164 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
3165 			&cfg.num_ce_tgt_cfg,
3166 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
3167 			&cfg.num_ce_svc_pipe_cfg,
3168 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
3169 			&cfg.num_shadow_reg_cfg);
3170 
3171 	/* translate from structure size to array size */
3172 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
3173 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
3174 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
3175 
3176 	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
3177 			      &cfg.num_shadow_reg_v2_cfg);
3178 
3179 	hif_print_hal_shadow_register_cfg(&cfg);
3180 
3181 	hif_update_rri_over_ddr_config(scn, &cfg);
3182 
3183 	if (QDF_GLOBAL_FTM_MODE == con_mode)
3184 		mode = PLD_FTM;
3185 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
3186 		mode = PLD_COLDBOOT_CALIBRATION;
3187 	else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode)
3188 		mode = PLD_FTM_COLDBOOT_CALIBRATION;
3189 	else if (QDF_IS_EPPING_ENABLED(con_mode))
3190 		mode = PLD_EPPING;
3191 	else
3192 		mode = PLD_MISSION;
3193 
3194 	if (BYPASS_QMI)
3195 		return 0;
3196 	else
3197 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode);
3198 }
3199 
3200 #ifdef WLAN_FEATURE_EPPING
3201 
3202 #define CE_EPPING_USES_IRQ true
3203 
3204 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
3205 {
3206 	if (CE_EPPING_USES_IRQ)
3207 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
3208 	else
3209 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
3210 	hif_state->target_ce_config = target_ce_config_wlan_epping;
3211 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
3212 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
3213 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
3214 }
3215 #endif
3216 
3217 #ifdef QCN7605_SUPPORT
3218 static inline
3219 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3220 			       struct HIF_CE_state *hif_state)
3221 {
3222 	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
3223 	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
3224 	hif_state->target_ce_config_sz =
3225 				 sizeof(target_ce_config_wlan_qcn7605);
3226 	target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
3227 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
3228 	scn->ce_count = QCN7605_CE_COUNT;
3229 }
3230 #else
3231 static inline
3232 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3233 			       struct HIF_CE_state *hif_state)
3234 {
3235 	hif_err("QCN7605 not supported");
3236 }
3237 #endif
3238 
3239 #ifdef CE_SVC_CMN_INIT
3240 #ifdef QCA_WIFI_SUPPORT_SRNG
3241 static inline void hif_ce_service_init(void)
3242 {
3243 	ce_service_srng_init();
3244 }
3245 #else
3246 static inline void hif_ce_service_init(void)
3247 {
3248 	ce_service_legacy_init();
3249 }
3250 #endif
3251 #else
3252 static inline void hif_ce_service_init(void)
3253 {
3254 }
3255 #endif
3256 
3257 
3258 /**
3259  * hif_ce_prepare_config() - load the correct static tables.
3260  * @scn: hif context
3261  *
3262  * Epping uses different static attribute tables than mission mode.
3263  */
3264 void hif_ce_prepare_config(struct hif_softc *scn)
3265 {
3266 	uint32_t mode = hif_get_conparam(scn);
3267 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3268 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
3269 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3270 
3271 	hif_ce_service_init();
3272 	hif_state->ce_services = ce_services_attach(scn);
3273 
3274 	scn->ce_count = HOST_CE_COUNT;
3275 	/* if epping is enabled we need to use the epping configuration. */
3276 	if (QDF_IS_EPPING_ENABLED(mode)) {
3277 		hif_ce_prepare_epping_config(hif_state);
3278 		return;
3279 	}
3280 
3281 	switch (tgt_info->target_type) {
3282 	default:
3283 		hif_state->host_ce_config = host_ce_config_wlan;
3284 		hif_state->target_ce_config = target_ce_config_wlan;
3285 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
3286 		break;
3287 	case TARGET_TYPE_QCN7605:
3288 		hif_set_ce_config_qcn7605(scn, hif_state);
3289 		break;
3290 	case TARGET_TYPE_AR900B:
3291 	case TARGET_TYPE_QCA9984:
3292 	case TARGET_TYPE_IPQ4019:
3293 	case TARGET_TYPE_QCA9888:
3294 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3295 			hif_state->host_ce_config =
3296 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
3297 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3298 			hif_state->host_ce_config =
3299 				host_lowdesc_ce_cfg_wlan_ar900b;
3300 		} else {
3301 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
3302 		}
3303 
3304 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
3305 		hif_state->target_ce_config_sz =
3306 				sizeof(target_ce_config_wlan_ar900b);
3307 
3308 		break;
3309 
3310 	case TARGET_TYPE_AR9888:
3311 	case TARGET_TYPE_AR9888V2:
3312 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3313 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
3314 		} else {
3315 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
3316 		}
3317 
3318 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
3319 		hif_state->target_ce_config_sz =
3320 					sizeof(target_ce_config_wlan_ar9888);
3321 
3322 		break;
3323 
3324 	case TARGET_TYPE_QCA8074:
3325 	case TARGET_TYPE_QCA8074V2:
3326 	case TARGET_TYPE_QCA6018:
3327 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
3328 			hif_state->host_ce_config =
3329 					host_ce_config_wlan_qca8074_pci;
3330 			hif_state->target_ce_config =
3331 				target_ce_config_wlan_qca8074_pci;
3332 			hif_state->target_ce_config_sz =
3333 				sizeof(target_ce_config_wlan_qca8074_pci);
3334 		} else {
3335 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
3336 			hif_state->target_ce_config =
3337 					target_ce_config_wlan_qca8074;
3338 			hif_state->target_ce_config_sz =
3339 				sizeof(target_ce_config_wlan_qca8074);
3340 		}
3341 		break;
3342 	case TARGET_TYPE_QCA6290:
3343 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
3344 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
3345 		hif_state->target_ce_config_sz =
3346 					sizeof(target_ce_config_wlan_qca6290);
3347 
3348 		scn->ce_count = QCA_6290_CE_COUNT;
3349 		break;
3350 	case TARGET_TYPE_QCN9000:
3351 		hif_state->host_ce_config = host_ce_config_wlan_qcn9000;
3352 		hif_state->target_ce_config = target_ce_config_wlan_qcn9000;
3353 		hif_state->target_ce_config_sz =
3354 					sizeof(target_ce_config_wlan_qcn9000);
3355 		scn->ce_count = QCN_9000_CE_COUNT;
3356 		scn->disable_wake_irq = 1;
3357 		break;
3358 	case TARGET_TYPE_QCN9100:
3359 		hif_state->host_ce_config = host_ce_config_wlan_qcn9100;
3360 		hif_state->target_ce_config = target_ce_config_wlan_qcn9100;
3361 		hif_state->target_ce_config_sz =
3362 					sizeof(target_ce_config_wlan_qcn9100);
3363 		scn->ce_count = QCN_9100_CE_COUNT;
3364 		scn->disable_wake_irq = 1;
3365 		break;
3366 	case TARGET_TYPE_QCA5018:
3367 		hif_state->host_ce_config = host_ce_config_wlan_qca5018;
3368 		hif_state->target_ce_config = target_ce_config_wlan_qca5018;
3369 		hif_state->target_ce_config_sz =
3370 					sizeof(target_ce_config_wlan_qca5018);
3371 		scn->ce_count = QCA_5018_CE_COUNT;
3372 		break;
3373 	case TARGET_TYPE_QCA6390:
3374 		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
3375 		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
3376 		hif_state->target_ce_config_sz =
3377 					sizeof(target_ce_config_wlan_qca6390);
3378 
3379 		scn->ce_count = QCA_6390_CE_COUNT;
3380 		break;
3381 	case TARGET_TYPE_QCA6490:
3382 		hif_state->host_ce_config = host_ce_config_wlan_qca6490;
3383 		hif_state->target_ce_config = target_ce_config_wlan_qca6490;
3384 		hif_state->target_ce_config_sz =
3385 					sizeof(target_ce_config_wlan_qca6490);
3386 
3387 		scn->ce_count = QCA_6490_CE_COUNT;
3388 		break;
3389 	case TARGET_TYPE_QCA6750:
3390 		hif_state->host_ce_config = host_ce_config_wlan_qca6750;
3391 		hif_state->target_ce_config = target_ce_config_wlan_qca6750;
3392 		hif_state->target_ce_config_sz =
3393 					sizeof(target_ce_config_wlan_qca6750);
3394 
3395 		scn->ce_count = QCA_6750_CE_COUNT;
3396 		break;
3397 	case TARGET_TYPE_ADRASTEA:
3398 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3399 			hif_state->host_ce_config =
3400 				host_lowdesc_ce_config_wlan_adrastea_nopktlog;
3401 			hif_state->target_ce_config =
3402 			       target_lowdesc_ce_config_wlan_adrastea_nopktlog;
3403 			hif_state->target_ce_config_sz =
3404 			sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog);
3405 		} else {
3406 			hif_state->host_ce_config =
3407 				host_ce_config_wlan_adrastea;
3408 			hif_state->target_ce_config =
3409 					target_ce_config_wlan_adrastea;
3410 			hif_state->target_ce_config_sz =
3411 					sizeof(target_ce_config_wlan_adrastea);
3412 		}
3413 		break;
3414 
3415 	}
3416 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
3417 }
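
/*
 * Illustrative sketch (not part of the driver): a new target type would
 * plug into the switch above by selecting its CE tables and CE count.
 * TARGET_TYPE_FOO, the *_wlan_foo tables and FOO_CE_COUNT below are
 * hypothetical names used only to show the pattern:
 *
 *	case TARGET_TYPE_FOO:
 *		hif_state->host_ce_config = host_ce_config_wlan_foo;
 *		hif_state->target_ce_config = target_ce_config_wlan_foo;
 *		hif_state->target_ce_config_sz =
 *				sizeof(target_ce_config_wlan_foo);
 *		scn->ce_count = FOO_CE_COUNT;
 *		break;
 */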
3418 
3419 /**
3420  * hif_ce_open() - do ce specific allocations
3421  * @hif_sc: pointer to hif context
3422  *
3423  * Return: QDF_STATUS_SUCCESS
3424  */
3425 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
3426 {
3427 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3428 
3429 	qdf_spinlock_create(&hif_state->irq_reg_lock);
3430 	qdf_spinlock_create(&hif_state->keep_awake_lock);
3431 	return QDF_STATUS_SUCCESS;
3432 }
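
/*
 * Note: the two spinlocks created in hif_ce_open() are the only CE-wide
 * resources allocated at open time; hif_ce_close() below releases them.
 * The heavier per-pipe state is managed separately by hif_config_ce()
 * and hif_unconfig_ce().
 */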
3433 
3434 /**
3435  * hif_ce_close() - do ce specific free
3436  * @hif_sc: pointer to hif context
3437  */
3438 void hif_ce_close(struct hif_softc *hif_sc)
3439 {
3440 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3441 
3442 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
3443 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
3444 }
3445 
3446 /**
3447  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
3448  * @hif_sc: hif context
3449  *
3450  * uses state variables to support cleaning up when hif_config_ce fails.
3451  */
3452 void hif_unconfig_ce(struct hif_softc *hif_sc)
3453 {
3454 	int pipe_num;
3455 	struct HIF_CE_pipe_info *pipe_info;
3456 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3457 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
3458 
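	/* First pass: detach the IRQs of every initialized pipe so that
	 * no handler can run while the rings are torn down below.
	 */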
3459 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3460 		pipe_info = &hif_state->pipe_info[pipe_num];
3461 		if (pipe_info->ce_hdl)
3462 			ce_unregister_irq(hif_state, (1 << pipe_num));
3464 	}
3465 	deinit_tasklet_workers(hif_hdl);
3466 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3467 		pipe_info = &hif_state->pipe_info[pipe_num];
3468 		if (pipe_info->ce_hdl) {
3469 			ce_fini(pipe_info->ce_hdl);
3470 			pipe_info->ce_hdl = NULL;
3471 			pipe_info->buf_sz = 0;
3472 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
3473 		}
3474 	}
3475 	if (hif_sc->athdiag_procfs_inited) {
3476 		athdiag_procfs_remove();
3477 		hif_sc->athdiag_procfs_inited = false;
3478 	}
3479 }
3480 
3481 #ifdef CONFIG_BYPASS_QMI
3482 #ifdef QCN7605_SUPPORT
3483 /**
3484  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3485  * @scn: pointer to HIF structure
3486  *
3487  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3488  *
3489  * Return: void
3490  */
3491 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3492 {
3493 	phys_addr_t target_pa;
3494 	struct ce_info *ce_info_ptr;
3495 	uint32_t msi_data_start;
3496 	uint32_t msi_data_count;
3497 	uint32_t msi_irq_start;
3498 	uint32_t i = 0;
3499 	int ret;
3500 
3501 	scn->vaddr_qmi_bypass =
3502 			(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3503 							     scn->qdf_dev->dev,
3504 							     FW_SHARED_MEM,
3505 							     &target_pa);
3506 	if (!scn->vaddr_qmi_bypass) {
3507 		hif_err("Memory allocation failed could not post target buf");
3508 		return;
3509 	}
3510 
3511 	scn->paddr_qmi_bypass = target_pa;
3512 
3513 	ce_info_ptr = (struct ce_info *)scn->vaddr_qmi_bypass;
3514 
3515 	if (scn->vaddr_rri_on_ddr) {
3516 		ce_info_ptr->rri_over_ddr_low_paddr  =
3517 			 BITS0_TO_31(scn->paddr_rri_on_ddr);
3518 		ce_info_ptr->rri_over_ddr_high_paddr =
3519 			 BITS32_TO_35(scn->paddr_rri_on_ddr);
3520 	}
3521 
3522 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3523 					  &msi_data_count, &msi_data_start,
3524 					  &msi_irq_start);
3525 	if (ret) {
3526 		hif_err("Failed to get CE msi config");
3527 		return;
3528 	}
3529 
3530 	for (i = 0; i < CE_COUNT_MAX; i++) {
3531 		ce_info_ptr->cfg[i].ce_id = i;
3532 		ce_info_ptr->cfg[i].msi_vector =
3533 			 (i % msi_data_count) + msi_irq_start;
3534 	}
3535 
3536 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3537 	hif_info("target va %pK target pa %pa", scn->vaddr_qmi_bypass,
3538 		 &target_pa);
3539 }
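
/*
 * Note on the handshake above: the host fills the shared buffer (the
 * RRI-over-DDR physical address plus a per-CE MSI vector table) and
 * publishes the buffer's physical address through
 * BYPASS_QMI_TEMP_REGISTER; the firmware is expected to read that
 * register at boot and own the region until
 * hif_cleanup_static_buf_to_target() reclaims it.
 */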
3540 
3541 /**
3542  * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW
3543  * @scn: pointer to HIF structure
3544  *
3546  * Return: void
3547  */
3548 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
3549 {
3550 	void *target_va = scn->vaddr_qmi_bypass;
3551 	phys_addr_t target_pa = scn->paddr_qmi_bypass;
3552 
3553 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
3554 				FW_SHARED_MEM, target_va,
3555 				target_pa, 0);
3556 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
3557 }
3558 #else
3559 /**
3560  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3561  * @scn: pointer to HIF structure
3562  *
3563  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3564  *
3565  * Return: void
3566  */
3567 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3568 {
3569 	qdf_dma_addr_t target_pa;
3570 
3571 	scn->vaddr_qmi_bypass =
3572 			(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3573 							     scn->qdf_dev->dev,
3574 							     FW_SHARED_MEM,
3575 							     &target_pa);
3576 	if (!scn->vaddr_qmi_bypass) {
3577 		hif_err("Memory allocation failed could not post target buf");
3578 		return;
3579 	}
3580 
3581 	scn->paddr_qmi_bypass = target_pa;
3582 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3583 }
3584 
3585 /**
3586  * hif_cleanup_static_buf_to_target() - clean up static buffer to WLAN FW
3587  * @scn: pointer to HIF structure
3588  *
3590  * Return: void
3591  */
3592 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
3593 {
3594 	void *target_va = scn->vaddr_qmi_bypass;
3595 	phys_addr_t target_pa = scn->paddr_qmi_bypass;
3596 
3597 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
3598 				FW_SHARED_MEM, target_va,
3599 				target_pa, 0);
3600 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, 0);
3601 }
3602 #endif
3603 
3604 #else
3605 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
3606 {
3607 }
3608 
3609 void hif_cleanup_static_buf_to_target(struct hif_softc *scn)
3610 {
3611 }
3612 #endif
3613 
3614 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
3615 				bool wait_for_it)
3616 {
3617 	/* todo */
3618 	return 0;
3619 }
3620 
3621 int hif_config_ce_by_id(struct hif_softc *scn, int pipe_num)
3622 {
3623 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3624 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3625 	struct HIF_CE_pipe_info *pipe_info;
3626 	struct CE_state *ce_state = NULL;
3627 	struct CE_attr *attr;
3628 	int rv = 0;
3629 
3630 	if (pipe_num >= CE_COUNT_MAX)
3631 		return -EINVAL;
3632 
3633 	pipe_info = &hif_state->pipe_info[pipe_num];
3634 	pipe_info->pipe_num = pipe_num;
3635 	pipe_info->HIF_CE_state = hif_state;
3636 	attr = &hif_state->host_ce_config[pipe_num];
3637 	ce_state = scn->ce_id_to_state[pipe_num];
3638 
3639 	if (ce_state) {
3640 		/* Do not reinitialize the CE if it is done already */
3641 		rv = QDF_STATUS_E_BUSY;
3642 		goto err;
3643 	}
3644 
3645 	pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
3646 	ce_state = scn->ce_id_to_state[pipe_num];
3647 	if (!ce_state) {
3648 		A_TARGET_ACCESS_UNLIKELY(scn);
3649 		return QDF_STATUS_E_FAILURE;
3650 	}
3651 	qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
3652 	QDF_ASSERT(pipe_info->ce_hdl);
3653 	if (!pipe_info->ce_hdl) {
3654 		rv = QDF_STATUS_E_FAILURE;
3655 		A_TARGET_ACCESS_UNLIKELY(scn);
3656 		goto err;
3657 	}
3658 
3659 	ce_state->lro_data = qdf_lro_init();
3660 
3661 	if (attr->flags & CE_ATTR_DIAG) {
3662 		/* Reserve the last CE for
3663 		 * Diagnostic Window support
3664 		 */
3665 		hif_state->ce_diag = pipe_info->ce_hdl;
3666 		goto skip;
3667 	}
3668 
3669 	if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3670 	    (ce_state->htt_rx_data)) {
3671 		goto skip;
3672 	}
3673 
3674 	pipe_info->buf_sz = (qdf_size_t)(attr->src_sz_max);
3675 	if (attr->dest_nentries > 0) {
3676 		atomic_set(&pipe_info->recv_bufs_needed,
3677 			   init_buffer_count(attr->dest_nentries - 1));
3678 		/* SRNG based CE has one entry less */
3679 		if (ce_srng_based(scn))
3680 			atomic_dec(&pipe_info->recv_bufs_needed);
3681 	} else {
3682 		atomic_set(&pipe_info->recv_bufs_needed, 0);
3683 	}
3684 	ce_tasklet_init(hif_state, (1 << pipe_num));
3685 	ce_register_irq(hif_state, (1 << pipe_num));
3686 
3687 	init_tasklet_worker_by_ceid(hif_hdl, pipe_num);
3688 skip:
3689 	return 0;
3690 err:
3691 	return rv;
3692 }
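
/*
 * Usage sketch (illustrative): pipes flagged CE_ATTR_INIT_ON_DEMAND are
 * skipped by hif_config_ce() and brought up individually later, e.g.:
 *
 *	if (hif_config_ce_by_id(scn, pipe_num) == QDF_STATUS_E_BUSY)
 *		;	// CE was already initialized, nothing to do
 *
 * hif_config_ce_pktlog() below follows exactly this pattern.
 */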
3693 
3694 /**
3695  * hif_config_ce() - configure copy engines
3696  * @scn: hif context
3697  *
3698  * Prepares fw, copy engine hardware and host sw according
3699  * to the attributes selected by hif_ce_prepare_config.
3700  *
3701  * Also calls athdiag_procfs_init.
3702  *
3703  * Return: 0 for success, nonzero for failure.
3704  */
3705 int hif_config_ce(struct hif_softc *scn)
3706 {
3707 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3708 	struct HIF_CE_pipe_info *pipe_info;
3709 	int pipe_num;
3710 
3711 #ifdef ADRASTEA_SHADOW_REGISTERS
3712 	int i;
3713 #endif
3714 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
3715 
3716 	scn->notice_send = true;
3717 	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
3718 
3719 	hif_post_static_buf_to_target(scn);
3720 
3721 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
3722 
3723 	hif_config_rri_on_ddr(scn);
3724 
3725 	if (ce_srng_based(scn))
3726 		scn->bus_ops.hif_target_sleep_state_adjust =
3727 			&hif_srng_sleep_state_adjust;
3728 
3729 	/* Initialise the CE debug history sysfs interface inputs
3730 	 * (ce_id and index) and disable data storing.
3731 	 */
3732 	reset_ce_debug_history(scn);
3733 
3734 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3735 		struct CE_attr *attr;
3736 
3737 		pipe_info = &hif_state->pipe_info[pipe_num];
3738 		attr = &hif_state->host_ce_config[pipe_num];
3739 
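		/* On-demand pipes (e.g. the pktlog CE) are configured
		 * later via hif_config_ce_by_id(); skip them here.
		 */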
3740 		if (attr->flags & CE_ATTR_INIT_ON_DEMAND)
3741 			continue;
3742 
3743 		if (hif_config_ce_by_id(scn, pipe_num))
3744 			goto err;
3745 	}
3746 
3747 	if (athdiag_procfs_init(scn) != 0) {
3748 		A_TARGET_ACCESS_UNLIKELY(scn);
3749 		goto err;
3750 	}
3751 	scn->athdiag_procfs_inited = true;
3752 
3753 	hif_debug("ce_init done");
3754 	hif_debug("%s: X, ret = %d", __func__, rv);
3755 
3756 #ifdef ADRASTEA_SHADOW_REGISTERS
3757 	hif_debug("Using Shadow Registers instead of CE Registers");
3758 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
3759 		hif_debug("Shadow Register%d is mapped to address %x",
3760 			  i,
3761 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
3762 	}
3763 #endif
3764 
3765 	return rv != QDF_STATUS_SUCCESS;
3766 err:
3767 	/* Failure, so clean up */
3768 	hif_unconfig_ce(scn);
3769 	hif_info("X, ret = %d", rv);
3770 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
3771 }
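
/*
 * Call-flow sketch (illustrative, assuming a typical bus attach path):
 *
 *	hif_ce_prepare_config(scn);	// select CE tables per target
 *	if (hif_config_ce(scn))		// init pipes, IRQs, diag procfs
 *		goto fail;
 *	...
 *	hif_unconfig_ce(scn);		// teardown mirrors the config
 */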
3772 
3773 /**
3774  * hif_config_ce_pktlog() - configure the pktlog copy engine on demand
3775  * @hif_hdl: hif opaque handle
3776  *
3777  * Initializes the copy engine assigned to pktlog, sets up its
3778  * interrupts and completion thread, and posts receive buffers
3779  * for it. Safe to call when the CE is already initialized.
3780  *
3781  * Return: 0 for success, nonzero for failure.
3783  */
3784 int hif_config_ce_pktlog(struct hif_opaque_softc *hif_hdl)
3785 {
3786 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3787 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3788 	int pipe_num;
3789 	QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
3790 	struct HIF_CE_pipe_info *pipe_info;
3791 
3792 	if (!scn)
3793 		goto err;
3794 
3795 	if (scn->pktlog_init)
3796 		return QDF_STATUS_SUCCESS;
3797 
3798 	pipe_num =  hif_get_pktlog_ce_num(scn);
3799 	if (pipe_num < 0) {
3800 		qdf_status = QDF_STATUS_E_FAILURE;
3801 		goto err;
3802 	}
3803 
3804 	pipe_info = &hif_state->pipe_info[pipe_num];
3805 
3806 	qdf_status = hif_config_ce_by_id(scn, pipe_num);
3807 	/* CE Already initialized. Do not try to reinitialized again */
3808 	if (qdf_status == QDF_STATUS_E_BUSY)
3809 		return QDF_STATUS_SUCCESS;
3810 
3811 	qdf_status = hif_config_irq_by_ceid(scn, pipe_num);
3812 	if (QDF_IS_STATUS_ERROR(qdf_status))
3813 		goto err;
3814 
3815 	qdf_status = hif_completion_thread_startup_by_ceid(hif_state, pipe_num);
3816 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
3817 		hif_err("%s:failed to start hif thread", __func__);
3818 		goto err;
3819 	}
3820 
3821 	/* Post buffers for pktlog copy engine. */
3822 	qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
3823 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
3824 		/* cleanup is done in hif_ce_disable */
3825 		hif_err("%s:failed to post buffers", __func__);
3826 		return qdf_status;
3827 	}
3828 	scn->pktlog_init = true;
3829 	return qdf_status != QDF_STATUS_SUCCESS;
3830 
3831 err:
3832 	hif_debug("%s: X, ret = %d", __func__, qdf_status);
3833 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
3834 }
3835 
3836 #ifdef IPA_OFFLOAD
3837 /**
3838  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
3839  * @scn: bus context
3840  * @ce_sr: copyengine source ring resource info
3841  * @ce_sr_ring_size: copyengine source ring size
3842  * @ce_reg_paddr: copyengine register physical address
3843  *
3844  * When the IPA micro controller data path offload feature is enabled,
3845  * HIF releases the copy engine resource information to the IPA UC,
3846  * which then accesses the hardware resources using that information.
3847  *
3848  * Return: None
3849  */
3850 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
3851 			     qdf_shared_mem_t **ce_sr,
3852 			     uint32_t *ce_sr_ring_size,
3853 			     qdf_dma_addr_t *ce_reg_paddr)
3854 {
3855 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3856 	struct HIF_CE_pipe_info *pipe_info =
3857 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
3858 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3859 
3860 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
3861 			    ce_reg_paddr);
3862 }
3863 #endif /* IPA_OFFLOAD */
3864 
3865 
3866 #ifdef ADRASTEA_SHADOW_REGISTERS
3867 
3868 /*
3869  * Current shadow register config
3870  *
3871  * -----------------------------------------------------------
3872  * Shadow Register      |     CE   |    src/dst write index
3873  * -----------------------------------------------------------
3874  *         0            |     0    |           src
3875  *         1     No Config - Doesn't point to anything
3876  *         2     No Config - Doesn't point to anything
3877  *         3            |     3    |           src
3878  *         4            |     4    |           src
3879  *         5            |     5    |           src
3880  *         6     No Config - Doesn't point to anything
3881  *         7            |     7    |           src
3882  *         8     No Config - Doesn't point to anything
3883  *         9     No Config - Doesn't point to anything
3884  *         10    No Config - Doesn't point to anything
3885  *         11    No Config - Doesn't point to anything
3886  * -----------------------------------------------------------
3887  *         12    No Config - Doesn't point to anything
3888  *         13           |     1    |           dst
3889  *         14           |     2    |           dst
3890  *         15    No Config - Doesn't point to anything
3891  *         16    No Config - Doesn't point to anything
3892  *         17    No Config - Doesn't point to anything
3893  *         18    No Config - Doesn't point to anything
3894  *         19           |     7    |           dst
3895  *         20           |     8    |           dst
3896  *         21    No Config - Doesn't point to anything
3897  *         22    No Config - Doesn't point to anything
3898  *         23    No Config - Doesn't point to anything
3899  * -----------------------------------------------------------
3900  *
3901  *
3902  * ToDo - Move shadow register config to following in the future
3903  * This helps free up a block of shadow registers towards the end.
3904  * Can be used for other purposes
3905  *
3906  * -----------------------------------------------------------
3907  * Shadow Register      |     CE   |    src/dst write index
3908  * -----------------------------------------------------------
3909  *      0            |     0    |           src
3910  *      1            |     3    |           src
3911  *      2            |     4    |           src
3912  *      3            |     5    |           src
3913  *      4            |     7    |           src
3914  * -----------------------------------------------------------
3915  *      5            |     1    |           dst
3916  *      6            |     2    |           dst
3917  *      7            |     7    |           dst
3918  *      8            |     8    |           dst
3919  * -----------------------------------------------------------
3920  *      9     No Config - Doesn't point to anything
3921  *      12    No Config - Doesn't point to anything
3922  *      13    No Config - Doesn't point to anything
3923  *      14    No Config - Doesn't point to anything
3924  *      15    No Config - Doesn't point to anything
3925  *      16    No Config - Doesn't point to anything
3926  *      17    No Config - Doesn't point to anything
3927  *      18    No Config - Doesn't point to anything
3928  *      19    No Config - Doesn't point to anything
3929  *      20    No Config - Doesn't point to anything
3930  *      21    No Config - Doesn't point to anything
3931  *      22    No Config - Doesn't point to anything
3932  *      23    No Config - Doesn't point to anything
3933  * -----------------------------------------------------------
3934 */
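
/*
 * Worked example for the current map: CE 4's source-ring write index is
 * mirrored in shadow register 4, so shadow_sr_wr_ind_addr() resolves a
 * CE 4 ctrl_addr to SHADOW_VALUE4; likewise CE 2's destination-ring
 * write index lives in shadow register 14 (SHADOW_VALUE14).
 */
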
3935 #ifndef QCN7605_SUPPORT
3936 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3937 {
3938 	u32 addr = 0;
3939 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3940 
3941 	switch (ce) {
3942 	case 0:
3943 		addr = SHADOW_VALUE0;
3944 		break;
3945 	case 3:
3946 		addr = SHADOW_VALUE3;
3947 		break;
3948 	case 4:
3949 		addr = SHADOW_VALUE4;
3950 		break;
3951 	case 5:
3952 		addr = SHADOW_VALUE5;
3953 		break;
3954 	case 7:
3955 		addr = SHADOW_VALUE7;
3956 		break;
3957 	default:
3958 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
3959 		QDF_ASSERT(0);
3960 	}
3961 	return addr;
3963 }
3964 
3965 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3966 {
3967 	u32 addr = 0;
3968 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3969 
3970 	switch (ce) {
3971 	case 1:
3972 		addr = SHADOW_VALUE13;
3973 		break;
3974 	case 2:
3975 		addr = SHADOW_VALUE14;
3976 		break;
3977 	case 5:
3978 		addr = SHADOW_VALUE17;
3979 		break;
3980 	case 7:
3981 		addr = SHADOW_VALUE19;
3982 		break;
3983 	case 8:
3984 		addr = SHADOW_VALUE20;
3985 		break;
3986 	case 9:
3987 		addr = SHADOW_VALUE21;
3988 		break;
3989 	case 10:
3990 		addr = SHADOW_VALUE22;
3991 		break;
3992 	case 11:
3993 		addr = SHADOW_VALUE23;
3994 		break;
3995 	default:
3996 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
3997 		QDF_ASSERT(0);
3998 	}
3999 
4000 	return addr;
4002 }
4003 #else
4004 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4005 {
4006 	u32 addr = 0;
4007 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4008 
4009 	switch (ce) {
4010 	case 0:
4011 		addr = SHADOW_VALUE0;
4012 		break;
4013 	case 4:
4014 		addr = SHADOW_VALUE4;
4015 		break;
4016 	case 5:
4017 		addr = SHADOW_VALUE5;
4018 		break;
4019 	default:
4020 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4021 		QDF_ASSERT(0);
4022 	}
4023 	return addr;
4024 }
4025 
4026 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
4027 {
4028 	u32 addr = 0;
4029 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
4030 
4031 	switch (ce) {
4032 	case 1:
4033 		addr = SHADOW_VALUE13;
4034 		break;
4035 	case 2:
4036 		addr = SHADOW_VALUE14;
4037 		break;
4038 	case 3:
4039 		addr = SHADOW_VALUE15;
4040 		break;
4041 	case 5:
4042 		addr = SHADOW_VALUE17;
4043 		break;
4044 	case 7:
4045 		addr = SHADOW_VALUE19;
4046 		break;
4047 	case 8:
4048 		addr = SHADOW_VALUE20;
4049 		break;
4050 	case 9:
4051 		addr = SHADOW_VALUE21;
4052 		break;
4053 	case 10:
4054 		addr = SHADOW_VALUE22;
4055 		break;
4056 	case 11:
4057 		addr = SHADOW_VALUE23;
4058 		break;
4059 	default:
4060 		hif_err("Invalid CE ctrl_addr (CE=%d)", ce);
4061 		QDF_ASSERT(0);
4062 	}
4063 
4064 	return addr;
4065 }
4066 #endif
4067 #endif
4068 
4069 #if defined(FEATURE_LRO)
4070 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
4071 {
4072 	struct CE_state *ce_state;
4073 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
4074 
4075 	ce_state = scn->ce_id_to_state[ctx_id];
4076 
4077 	return ce_state->lro_data;
4078 }
4079 #endif
4080 
4081 /**
4082  * hif_map_service_to_pipe() - returns the ce ids pertaining to
4083  * this service
4084  * @hif_hdl: hif opaque handle
4085  * @svc_id: Service ID for which the mapping is needed.
4086  * @ul_pipe: address of the container in which ul pipe is returned.
4087  * @dl_pipe: address of the container in which dl pipe is returned.
4088  * @ul_is_polled: address of the container in which a bool
4089  *			indicating if the UL CE for this service
4090  *			is polled is returned.
4091  * @dl_is_polled: address of the container in which a bool
4092  *			indicating if the DL CE for this service
4093  *			is polled is returned.
4094  *
4095  * Return: Indicates whether the service has been found in the table.
4096  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
4097  *         There will be warning logs if either leg has not been updated
4098  *         because it missed the entry in the table (but this is not an error).
4099  */
4100 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
4101 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
4102 			int *dl_is_polled)
4103 {
4104 	int status = -EINVAL;
4105 	unsigned int i;
4106 	struct service_to_pipe element;
4107 	struct service_to_pipe *tgt_svc_map_to_use;
4108 	uint32_t sz_tgt_svc_map_to_use;
4109 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
4110 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4111 	bool dl_updated = false;
4112 	bool ul_updated = false;
4113 
4114 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
4115 				       &sz_tgt_svc_map_to_use);
4116 
4117 	*dl_is_polled = 0;  /* polling for received messages not supported */
4118 
4119 	for (i = 0; i < (sz_tgt_svc_map_to_use / sizeof(element)); i++) {
4121 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
4122 		if (element.service_id == svc_id) {
4123 			if (element.pipedir == PIPEDIR_OUT) {
4124 				*ul_pipe = element.pipenum;
4125 				*ul_is_polled =
4126 					(hif_state->host_ce_config[*ul_pipe].flags &
4127 					 CE_ATTR_DISABLE_INTR) != 0;
4128 				ul_updated = true;
4129 			} else if (element.pipedir == PIPEDIR_IN) {
4130 				*dl_pipe = element.pipenum;
4131 				dl_updated = true;
4132 			}
4133 			status = 0;
4134 		}
4135 	}
4136 	if (!ul_updated)
4137 		hif_debug("ul pipe is NOT updated for service %d", svc_id);
4138 	if (!dl_updated)
4139 		hif_debug("dl pipe is NOT updated for service %d", svc_id);
4140 
4141 	return status;
4142 }
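
/*
 * Usage sketch (illustrative): callers pass containers for both legs and
 * may ignore the one they do not need; hif_get_wake_ce_id() below uses
 * the same call to find the wake CE, roughly:
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (!hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
 *				     &ul_pipe, &dl_pipe,
 *				     &ul_polled, &dl_polled))
 *		wake_ce_id = dl_pipe;
 */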
4143 
4144 #ifdef SHADOW_REG_DEBUG
4145 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
4146 		uint32_t CE_ctrl_addr)
4147 {
4148 	uint32_t read_from_hw, srri_from_ddr = 0;
4149 
4150 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
4151 
4152 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
4153 
4154 	if (read_from_hw != srri_from_ddr) {
4155 		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
4156 		       srri_from_ddr, read_from_hw,
4157 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
4158 		QDF_ASSERT(0);
4159 	}
4160 	return srri_from_ddr;
4161 }
4162 
4163 
4164 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
4165 		uint32_t CE_ctrl_addr)
4166 {
4167 	uint32_t read_from_hw, drri_from_ddr = 0;
4168 
4169 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
4170 
4171 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
4172 
4173 	if (read_from_hw != drri_from_ddr) {
4174 		hif_err("read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
4175 		       drri_from_ddr, read_from_hw,
4176 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
4177 		QDF_ASSERT(0);
4178 	}
4179 	return drri_from_ddr;
4180 }
4181 
4182 #endif
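
/*
 * Both debug helpers above implement the same cross-check: the read
 * index mirrored in DDR (via the RRI-over-DDR mechanism) must match
 * what the CE hardware register reports; a mismatch asserts, since it
 * means the DDR mirror and the hardware have diverged.
 */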
4183 
4184 /**
4185  * hif_dump_ce_registers() - dump ce registers
4186  * @scn: hif_softc pointer.
4187  *
4188  * Output the copy engine registers
4189  *
4190  * Return: 0 for success or error code
4191  */
4192 int hif_dump_ce_registers(struct hif_softc *scn)
4193 {
4194 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
4195 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
4196 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
4197 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
4198 	uint16_t i;
4199 	QDF_STATUS status;
4200 
4201 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
4202 		if (!scn->ce_id_to_state[i]) {
4203 			hif_debug("CE%d not used", i);
4204 			continue;
4205 		}
4206 
4207 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
4208 					   (uint8_t *) &ce_reg_values[0],
4209 					   ce_reg_word_size * sizeof(uint32_t));
4210 
4211 		if (status != QDF_STATUS_SUCCESS) {
4212 			hif_err("Dumping CE register failed!");
4213 			return -EACCES;
4214 		}
4215 		hif_debug("CE%d=>", i);
4216 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
4217 				   (uint8_t *) &ce_reg_values[0],
4218 				   ce_reg_word_size * sizeof(uint32_t));
4219 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
4220 				+ SR_WR_INDEX_ADDRESS),
4221 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
4222 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
4223 				+ CURRENT_SRRI_ADDRESS),
4224 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
4225 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
4226 				+ DST_WR_INDEX_ADDRESS),
4227 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
4228 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
4229 				+ CURRENT_DRRI_ADDRESS),
4230 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
4231 		qdf_print("---");
4232 	}
4233 	return 0;
4234 }
4235 qdf_export_symbol(hif_dump_ce_registers);
4236 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
4237 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
4238 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
4239 {
4240 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4241 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
4242 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
4243 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
4244 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
4245 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
4246 	struct CE_ring_state *src_ring = ce_state->src_ring;
4247 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
4248 
4249 	if (src_ring) {
4250 		hif_info->ul_pipe.nentries = src_ring->nentries;
4251 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
4252 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
4253 		hif_info->ul_pipe.write_index = src_ring->write_index;
4254 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
4255 		hif_info->ul_pipe.base_addr_CE_space =
4256 			src_ring->base_addr_CE_space;
4257 		hif_info->ul_pipe.base_addr_owner_space =
4258 			src_ring->base_addr_owner_space;
4259 	}
4260 
4262 	if (dest_ring) {
4263 		hif_info->dl_pipe.nentries = dest_ring->nentries;
4264 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
4265 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
4266 		hif_info->dl_pipe.write_index = dest_ring->write_index;
4267 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
4268 		hif_info->dl_pipe.base_addr_CE_space =
4269 			dest_ring->base_addr_CE_space;
4270 		hif_info->dl_pipe.base_addr_owner_space =
4271 			dest_ring->base_addr_owner_space;
4272 	}
4273 
4274 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
4275 	hif_info->ctrl_addr = ce_state->ctrl_addr;
4276 
4277 	return hif_info;
4278 }
4279 qdf_export_symbol(hif_get_addl_pipe_info);
4280 
4281 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
4282 {
4283 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4284 
4285 	scn->nss_wifi_ol_mode = mode;
4286 	return 0;
4287 }
4288 qdf_export_symbol(hif_set_nss_wifiol_mode);
4289 #endif
4290 
4291 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
4292 {
4293 	struct hif_softc *scn = HIF_GET_SOFTC(osc);

4294 	scn->hif_attribute = hif_attrib;
4295 }
4296 
4298 /* disable interrupts (only applicable for legacy copy engines currently) */
4299 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
4300 {
4301 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4302 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
4303 	uint32_t ctrl_addr = CE_state->ctrl_addr;
4304 
4305 	Q_TARGET_ACCESS_BEGIN(scn);
4306 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
4307 	Q_TARGET_ACCESS_END(scn);
4308 }
4309 qdf_export_symbol(hif_disable_interrupt);
4310 
4311 /**
4312  * hif_fw_event_handler() - hif fw event handler
4313  * @hif_state: pointer to hif ce state structure
4314  *
4315  * Process fw events and raise HTC callback to process fw events.
4316  *
4317  * Return: none
4318  */
4319 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
4320 {
4321 	struct hif_msg_callbacks *msg_callbacks =
4322 		&hif_state->msg_callbacks_current;
4323 
4324 	if (!msg_callbacks->fwEventHandler)
4325 		return;
4326 
4327 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
4328 			QDF_STATUS_E_FAILURE);
4329 }
4330 
4331 #ifndef QCA_WIFI_3_0
4332 /**
4333  * hif_fw_interrupt_handler() - FW interrupt handler
4334  * @irq: irq number
4335  * @arg: the user pointer
4336  *
4337  * Called from the PCI interrupt handler when the Target raises
4338  * a firmware-generated interrupt to the Host.
4339  *
4340  * Only registered for legacy CE devices.
4341  *
4342  * Return: status of handled irq
4343  */
4344 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
4345 {
4346 	struct hif_softc *scn = arg;
4347 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4348 	uint32_t fw_indicator_address, fw_indicator;
4349 
4350 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
4351 		return ATH_ISR_NOSCHED;
4352 
4353 	fw_indicator_address = hif_state->fw_indicator_address;
4354 	/* For sudden unplug this will return ~0 */
4355 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
4356 
4357 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
4358 		/* ACK: clear Target-side pending event */
4359 		A_TARGET_WRITE(scn, fw_indicator_address,
4360 			       fw_indicator & ~FW_IND_EVENT_PENDING);
4361 		if (Q_TARGET_ACCESS_END(scn) < 0)
4362 			return ATH_ISR_SCHED;
4363 
4364 		if (hif_state->started) {
4365 			hif_fw_event_handler(hif_state);
4366 		} else {
4367 			/*
4368 			 * Probable Target failure before we're prepared
4369 			 * to handle it.  Generally unexpected.
4370 			 * fw_indicator used as bitmap, and defined as below:
4371 			 *     FW_IND_EVENT_PENDING    0x1
4372 			 *     FW_IND_INITIALIZED      0x2
4373 			 *     FW_IND_NEEDRECOVER      0x4
4374 			 */
4375 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
4376 				("%s: Early firmware event indicated 0x%x\n",
4377 				 __func__, fw_indicator));
4378 		}
4379 	} else {
4380 		if (Q_TARGET_ACCESS_END(scn) < 0)
4381 			return ATH_ISR_SCHED;
4382 	}
4383 
4384 	return ATH_ISR_SCHED;
4385 }
4386 #else
4387 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
4388 {
4389 	return ATH_ISR_SCHED;
4390 }
4391 #endif /* #ifndef QCA_WIFI_3_0 */
4392 
4394 /**
4395  * hif_wlan_disable(): call the platform driver to disable wlan
4396  * @scn: HIF Context
4397  *
4398  * This function passes the con_mode to platform driver to disable
4399  * wlan.
4400  *
4401  * Return: void
4402  */
4403 void hif_wlan_disable(struct hif_softc *scn)
4404 {
4405 	enum pld_driver_mode mode;
4406 	uint32_t con_mode = hif_get_conparam(scn);
4407 
4408 	if (scn->target_status == TARGET_STATUS_RESET)
4409 		return;
4410 
4411 	if (QDF_GLOBAL_FTM_MODE == con_mode)
4412 		mode = PLD_FTM;
4413 	else if (QDF_IS_EPPING_ENABLED(con_mode))
4414 		mode = PLD_EPPING;
4415 	else
4416 		mode = PLD_MISSION;
4417 
4418 	pld_wlan_disable(scn->qdf_dev->dev, mode);
4419 }
4420 
4421 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
4422 {
4423 	int status;
4424 	uint8_t ul_pipe, dl_pipe;
4425 	int ul_is_polled, dl_is_polled;
4426 
4427 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
4428 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
4429 					 HTC_CTRL_RSVD_SVC,
4430 					 &ul_pipe, &dl_pipe,
4431 					 &ul_is_polled, &dl_is_polled);
4432 	if (status) {
4433 		hif_err("Failed to map pipe: %d", status);
4434 		return status;
4435 	}
4436 
4437 	*ce_id = dl_pipe;
4438 
4439 	return 0;
4440 }
4441 
4442 #ifdef HIF_CE_LOG_INFO
4443 /**
4444  * ce_get_index_info(): Get CE index info
4445  * @scn: HIF Context
4446  * @ce_state: CE opaque handle
4447  * @info: CE info
4448  *
4449  * Return: 0 for success and non zero for failure
4450  */
4451 static
4452 int ce_get_index_info(struct hif_softc *scn, void *ce_state,
4453 		      struct ce_index *info)
4454 {
4455 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4456 
4457 	return hif_state->ce_services->ce_get_index_info(scn, ce_state, info);
4458 }
4459 
4460 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
4461 		     unsigned int *offset)
4462 {
4463 	struct hang_event_info info = {0};
4464 	static uint32_t tracked_ce = BIT(CE_ID_1) | BIT(CE_ID_2) |
4465 		BIT(CE_ID_3) | BIT(CE_ID_4) | BIT(CE_ID_9) | BIT(CE_ID_10);
4466 	uint8_t curr_index = 0;
4467 	uint8_t i;
4468 	uint16_t size;
4469 
4470 	info.active_tasklet_count = qdf_atomic_read(&scn->active_tasklet_cnt);
4471 	info.active_grp_tasklet_cnt =
4472 				qdf_atomic_read(&scn->active_grp_tasklet_cnt);
4473 
4474 	for (i = 0; i < scn->ce_count; i++) {
4475 		if (!(tracked_ce & BIT(i)) || !scn->ce_id_to_state[i])
4476 			continue;
4477 
4478 		if (ce_get_index_info(scn, scn->ce_id_to_state[i],
4479 				      &info.ce_info[curr_index]))
4480 			continue;
4481 
4482 		curr_index++;
4483 	}
4484 
4485 	info.ce_count = curr_index;
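	/* Report only the entries actually captured: trim the unused
	 * tail of ce_info[] from the TLV size below.
	 */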
4486 	size = sizeof(info) -
4487 		(CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index);
4488 
4489 	if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
4490 		return;
4491 
4492 	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_CE_INFO,
4493 			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);
4494 
4495 	qdf_mem_copy(data + *offset, &info, size);
4496 	*offset = *offset + size;
4497 }
4498 #endif
4499