xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision 11f5a63a6cbdda84849a730de22f0a71e635d58c)
1 /*
2  * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
22 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "ce_api.h"
33 #include "qdf_trace.h"
34 #include "pld_common.h"
35 #include "hif_debug.h"
36 #include "ce_internal.h"
37 #include "ce_reg.h"
38 #include "ce_assignment.h"
39 #include "ce_tasklet.h"
40 #include "qdf_module.h"
41 
42 #define CE_POLL_TIMEOUT 10      /* ms */
43 
44 #define AGC_DUMP         1
45 #define CHANINFO_DUMP    2
46 #define BB_WATCHDOG_DUMP 3
47 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
48 #define PCIE_ACCESS_DUMP 4
49 #endif
50 #include "mp_dev.h"
51 
52 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
53 	defined(QCA_WIFI_QCA6018)) && !defined(QCA_WIFI_SUPPORT_SRNG)
54 #define QCA_WIFI_SUPPORT_SRNG
55 #endif
56 
57 /* Forward references */
58 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
59 
60 /*
61  * Fix EV118783, poll to check whether a BMI response comes
62  * other than waiting for the interruption which may be lost.
63  */
64 /* #define BMI_RSP_POLLING */
65 #define BMI_RSP_TO_MILLISEC  1000
66 
67 #ifdef CONFIG_BYPASS_QMI
68 #define BYPASS_QMI 1
69 #else
70 #define BYPASS_QMI 0
71 #endif
72 
73 #ifdef ENABLE_10_4_FW_HDR
74 #if (ENABLE_10_4_FW_HDR == 1)
75 #define WDI_IPA_SERVICE_GROUP 5
76 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
77 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
78 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
79 #endif /* ENABLE_10_4_FW_HDR == 1 */
80 #endif /* ENABLE_10_4_FW_HDR */
81 
82 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
83 static void hif_config_rri_on_ddr(struct hif_softc *scn);
84 
85 /**
86  * hif_target_access_log_dump() - dump access log
87  *
88  * dump access log
89  *
90  * Return: n/a
91  */
92 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
93 static void hif_target_access_log_dump(void)
94 {
95 	hif_target_dump_access_log();
96 }
97 #endif
98 
99 
100 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
101 		      uint8_t cmd_id, bool start)
102 {
103 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
104 
105 	switch (cmd_id) {
106 	case AGC_DUMP:
107 		if (start)
108 			priv_start_agc(scn);
109 		else
110 			priv_dump_agc(scn);
111 		break;
112 	case CHANINFO_DUMP:
113 		if (start)
114 			priv_start_cap_chaninfo(scn);
115 		else
116 			priv_dump_chaninfo(scn);
117 		break;
118 	case BB_WATCHDOG_DUMP:
119 		priv_dump_bbwatchdog(scn);
120 		break;
121 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
122 	case PCIE_ACCESS_DUMP:
123 		hif_target_access_log_dump();
124 		break;
125 #endif
126 	default:
127 		HIF_ERROR("%s: Invalid htc dump command", __func__);
128 		break;
129 	}
130 }
131 
132 static void ce_poll_timeout(void *arg)
133 {
134 	struct CE_state *CE_state = (struct CE_state *)arg;
135 
136 	if (CE_state->timer_inited) {
137 		ce_per_engine_service(CE_state->scn, CE_state->id);
138 		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
139 	}
140 }
141 
142 static unsigned int roundup_pwr2(unsigned int n)
143 {
144 	int i;
145 	unsigned int test_pwr2;
146 
147 	if (!(n & (n - 1)))
148 		return n; /* already a power of 2 */
149 
150 	test_pwr2 = 4;
151 	for (i = 0; i < 29; i++) {
152 		if (test_pwr2 > n)
153 			return test_pwr2;
154 		test_pwr2 = test_pwr2 << 1;
155 	}
156 
157 	QDF_ASSERT(0); /* n too large */
158 	return 0;
159 }
160 
161 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
162 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
163 
164 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
165 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
166 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
167 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
168 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
169 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
170 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
171 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
172 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
173 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
174 #ifdef QCA_WIFI_3_0_ADRASTEA
175 	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
176 	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
177 	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
178 #endif
179 };
180 
181 #ifdef QCN7605_SUPPORT
182 static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
183 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
184 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
185 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
186 	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
187 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
188 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
189 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
190 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
191 };
192 #endif
193 
194 #ifdef WLAN_FEATURE_EPPING
195 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
196 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
197 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
198 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
199 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
200 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
201 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
202 	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
203 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
204 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
205 };
206 #endif
207 
208 /* CE_PCI TABLE */
209 /*
210  * NOTE: the table below is out of date, though still a useful reference.
211  * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
212  * mapping of HTC services to HIF pipes.
213  */
214 /*
215  * This authoritative table defines Copy Engine configuration and the mapping
216  * of services/endpoints to CEs.  A subset of this information is passed to
217  * the Target during startup as a prerequisite to entering BMI phase.
218  * See:
219  *    target_service_to_ce_map - Target-side mapping
220  *    hif_map_service_to_pipe      - Host-side mapping
221  *    target_ce_config         - Target-side configuration
222  *    host_ce_config           - Host-side configuration
223    ============================================================================
224    Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
225  |                      |      | ctio | Size     | Frequency
226  |                      |      | n    |          |
227    ============================================================================
228    tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
229    descriptor |                      |      |      | O(100B)  | and regular
230    download   |                      |      |      |          |
231    ----------------------------------------------------------------------------
232    rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
233    indication |                      |      |      | O(10B)   | regular
234    upload     |                      |      |      |          |
235    ----------------------------------------------------------------------------
236    MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
237    upload     |                      |      |      | O(1000B) | (frequent
238    e.g. noise |                      |      |      |          | during IP1.0
239    packets    |                      |      |      |          | testing)
240    ----------------------------------------------------------------------------
241    MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
242    download   |                      |      |      | O(1000B) | (frequent
243    e.g.       |                      |      |      |          | during IP1.0
244    misdirecte |                      |      |      |          | testing)
245    d EAPOL    |                      |      |      |          |
246    packets    |                      |      |      |          |
247    ----------------------------------------------------------------------------
248    n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
249  | DATA_VO (uplink)     |      |      |          |
250    ----------------------------------------------------------------------------
251    n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
252  | DATA_VO (downlink)   |      |      |          |
253    ----------------------------------------------------------------------------
254    WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
255  |                      |      |      | O(100B)  |
256    ----------------------------------------------------------------------------
257    WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
258    messages   | (downlink)           |      |      | O(100B)  |
259  |                      |      |      |          |
260    ----------------------------------------------------------------------------
261    n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
262  | HTC_RAW_STREAMS      |      |      |          |
263  | (uplink)             |      |      |          |
264    ----------------------------------------------------------------------------
265    n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
266  | HTC_RAW_STREAMS      |      |      |          |
267  | (downlink)           |      |      |          |
268    ----------------------------------------------------------------------------
269    diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
270  |                      |      |      |          | infrequent
271    ============================================================================
272  */
273 
274 /*
275  * Map from service/endpoint to Copy Engine.
276  * This table is derived from the CE_PCI TABLE, above.
277  * It is passed to the Target at startup for use by firmware.
278  */
279 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
280 	{
281 		WMI_DATA_VO_SVC,
282 		PIPEDIR_OUT,    /* out = UL = host -> target */
283 		3,
284 	},
285 	{
286 		WMI_DATA_VO_SVC,
287 		PIPEDIR_IN,     /* in = DL = target -> host */
288 		2,
289 	},
290 	{
291 		WMI_DATA_BK_SVC,
292 		PIPEDIR_OUT,    /* out = UL = host -> target */
293 		3,
294 	},
295 	{
296 		WMI_DATA_BK_SVC,
297 		PIPEDIR_IN,     /* in = DL = target -> host */
298 		2,
299 	},
300 	{
301 		WMI_DATA_BE_SVC,
302 		PIPEDIR_OUT,    /* out = UL = host -> target */
303 		3,
304 	},
305 	{
306 		WMI_DATA_BE_SVC,
307 		PIPEDIR_IN,     /* in = DL = target -> host */
308 		2,
309 	},
310 	{
311 		WMI_DATA_VI_SVC,
312 		PIPEDIR_OUT,    /* out = UL = host -> target */
313 		3,
314 	},
315 	{
316 		WMI_DATA_VI_SVC,
317 		PIPEDIR_IN,     /* in = DL = target -> host */
318 		2,
319 	},
320 	{
321 		WMI_CONTROL_SVC,
322 		PIPEDIR_OUT,    /* out = UL = host -> target */
323 		3,
324 	},
325 	{
326 		WMI_CONTROL_SVC,
327 		PIPEDIR_IN,     /* in = DL = target -> host */
328 		2,
329 	},
330 	{
331 		HTC_CTRL_RSVD_SVC,
332 		PIPEDIR_OUT,    /* out = UL = host -> target */
333 		0,              /* could be moved to 3 (share with WMI) */
334 	},
335 	{
336 		HTC_CTRL_RSVD_SVC,
337 		PIPEDIR_IN,     /* in = DL = target -> host */
338 		2,
339 	},
340 	{
341 		HTC_RAW_STREAMS_SVC, /* not currently used */
342 		PIPEDIR_OUT,    /* out = UL = host -> target */
343 		0,
344 	},
345 	{
346 		HTC_RAW_STREAMS_SVC, /* not currently used */
347 		PIPEDIR_IN,     /* in = DL = target -> host */
348 		2,
349 	},
350 	{
351 		HTT_DATA_MSG_SVC,
352 		PIPEDIR_OUT,    /* out = UL = host -> target */
353 		4,
354 	},
355 	{
356 		HTT_DATA_MSG_SVC,
357 		PIPEDIR_IN,     /* in = DL = target -> host */
358 		1,
359 	},
360 	{
361 		WDI_IPA_TX_SVC,
362 		PIPEDIR_OUT,    /* in = DL = target -> host */
363 		5,
364 	},
365 #if defined(QCA_WIFI_3_0_ADRASTEA)
366 	{
367 		HTT_DATA2_MSG_SVC,
368 		PIPEDIR_IN,    /* in = DL = target -> host */
369 		9,
370 	},
371 	{
372 		HTT_DATA3_MSG_SVC,
373 		PIPEDIR_IN,    /* in = DL = target -> host */
374 		10,
375 	},
376 	{
377 		PACKET_LOG_SVC,
378 		PIPEDIR_IN,    /* in = DL = target -> host */
379 		11,
380 	},
381 #endif
382 	/* (Additions here) */
383 
384 	{                       /* Must be last */
385 		0,
386 		0,
387 		0,
388 	},
389 };
390 
391 /* PIPEDIR_OUT = HOST to Target */
392 /* PIPEDIR_IN  = TARGET to HOST */
393 #if (defined(QCA_WIFI_QCA8074))
394 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
395 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
396 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
397 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
398 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
399 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
400 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
401 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
402 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
403 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
404 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
405 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
406 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
407 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
408 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
409 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
410 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
411 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
412 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
413 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
414 	/* (Additions here) */
415 	{ 0, 0, 0, },
416 };
417 #else
418 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
419 };
420 #endif
421 
422 #if (defined(QCA_WIFI_QCA8074V2))
423 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
424 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
425 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
426 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
427 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
428 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
429 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
430 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
431 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
432 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
433 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
434 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
435 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
436 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
437 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
438 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
439 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
440 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
441 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
442 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
443 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
444 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
445 	/* (Additions here) */
446 	{ 0, 0, 0, },
447 };
448 #else
449 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
450 };
451 #endif
452 
453 #if (defined(QCA_WIFI_QCA6018))
454 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
455 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
456 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
457 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
458 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
459 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
460 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
461 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
462 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
463 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
464 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
465 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
466 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
467 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
468 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
469 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
470 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
471 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
472 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
473 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
474 	/* (Additions here) */
475 	{ 0, 0, 0, },
476 };
477 #else
478 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
479 };
480 #endif
481 
482 #if (defined(QCA_WIFI_QCN9000))
483 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
484 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
485 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
486 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
487 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
488 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
489 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
490 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
491 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
492 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
493 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
494 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
495 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
496 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
497 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
498 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
499 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
500 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
501 	/* (Additions here) */
502 	{ 0, 0, 0, },
503 };
504 #else
505 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
506 };
507 #endif
508 
509 /* PIPEDIR_OUT = HOST to Target */
510 /* PIPEDIR_IN  = TARGET to HOST */
511 #ifdef QCN7605_SUPPORT
512 static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
513 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
514 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
515 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
516 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
517 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
518 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
519 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
520 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
521 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
522 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
523 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
524 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
525 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
526 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
527 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
528 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
529 	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
530 #ifdef IPA_OFFLOAD
531 	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
532 #else
533 	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
534 #endif
535 	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
536 	/* (Additions here) */
537 	{ 0, 0, 0, },
538 };
539 #endif
540 
541 #if (defined(QCA_WIFI_QCA6290))
542 #ifdef QCA_6290_AP_MODE
543 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
544 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
545 	{ WMI_DATA_VO_SVC, PIPEDIR_IN , 2, },
546 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
547 	{ WMI_DATA_BK_SVC, PIPEDIR_IN , 2, },
548 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
549 	{ WMI_DATA_BE_SVC, PIPEDIR_IN , 2, },
550 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
551 	{ WMI_DATA_VI_SVC, PIPEDIR_IN , 2, },
552 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
553 	{ WMI_CONTROL_SVC, PIPEDIR_IN , 2, },
554 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
555 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN , 2, },
556 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
557 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN , 1, },
558 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
559 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
560 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
561 	/* (Additions here) */
562 	{ 0, 0, 0, },
563 };
564 #else
565 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
566 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
567 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
568 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
569 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
570 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
571 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
572 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
573 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
574 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
575 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
576 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
577 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
578 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
579 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
580 	/* (Additions here) */
581 	{ 0, 0, 0, },
582 };
583 #endif
584 #else
585 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
586 };
587 #endif
588 
589 #if (defined(QCA_WIFI_QCA6390))
590 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
591 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
592 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
593 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
594 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
595 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
596 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
597 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
598 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
599 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
600 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
601 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
602 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
603 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
604 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
605 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
606 	/* (Additions here) */
607 	{ 0, 0, 0, },
608 };
609 #else
610 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
611 };
612 #endif
613 
614 static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
615 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
616 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
617 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
618 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
619 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
620 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
621 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
622 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
623 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
624 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
625 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
626 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
627 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
628 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
629 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
630 	/* (Additions here) */
631 	{ 0, 0, 0, },
632 };
633 
634 static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
635 	{
636 		WMI_DATA_VO_SVC,
637 		PIPEDIR_OUT,    /* out = UL = host -> target */
638 		3,
639 	},
640 	{
641 		WMI_DATA_VO_SVC,
642 		PIPEDIR_IN,     /* in = DL = target -> host */
643 		2,
644 	},
645 	{
646 		WMI_DATA_BK_SVC,
647 		PIPEDIR_OUT,    /* out = UL = host -> target */
648 		3,
649 	},
650 	{
651 		WMI_DATA_BK_SVC,
652 		PIPEDIR_IN,     /* in = DL = target -> host */
653 		2,
654 	},
655 	{
656 		WMI_DATA_BE_SVC,
657 		PIPEDIR_OUT,    /* out = UL = host -> target */
658 		3,
659 	},
660 	{
661 		WMI_DATA_BE_SVC,
662 		PIPEDIR_IN,     /* in = DL = target -> host */
663 		2,
664 	},
665 	{
666 		WMI_DATA_VI_SVC,
667 		PIPEDIR_OUT,    /* out = UL = host -> target */
668 		3,
669 	},
670 	{
671 		WMI_DATA_VI_SVC,
672 		PIPEDIR_IN,     /* in = DL = target -> host */
673 		2,
674 	},
675 	{
676 		WMI_CONTROL_SVC,
677 		PIPEDIR_OUT,    /* out = UL = host -> target */
678 		3,
679 	},
680 	{
681 		WMI_CONTROL_SVC,
682 		PIPEDIR_IN,     /* in = DL = target -> host */
683 		2,
684 	},
685 	{
686 		HTC_CTRL_RSVD_SVC,
687 		PIPEDIR_OUT,    /* out = UL = host -> target */
688 		0,              /* could be moved to 3 (share with WMI) */
689 	},
690 	{
691 		HTC_CTRL_RSVD_SVC,
692 		PIPEDIR_IN,     /* in = DL = target -> host */
693 		1,
694 	},
695 	{
696 		HTC_RAW_STREAMS_SVC, /* not currently used */
697 		PIPEDIR_OUT,    /* out = UL = host -> target */
698 		0,
699 	},
700 	{
701 		HTC_RAW_STREAMS_SVC, /* not currently used */
702 		PIPEDIR_IN,     /* in = DL = target -> host */
703 		1,
704 	},
705 	{
706 		HTT_DATA_MSG_SVC,
707 		PIPEDIR_OUT,    /* out = UL = host -> target */
708 		4,
709 	},
710 #ifdef WLAN_FEATURE_FASTPATH
711 	{
712 		HTT_DATA_MSG_SVC,
713 		PIPEDIR_IN,     /* in = DL = target -> host */
714 		5,
715 	},
716 #else /* WLAN_FEATURE_FASTPATH */
717 	{
718 		HTT_DATA_MSG_SVC,
719 		PIPEDIR_IN,  /* in = DL = target -> host */
720 		1,
721 	},
722 #endif /* WLAN_FEATURE_FASTPATH */
723 
724 	/* (Additions here) */
725 
726 	{                       /* Must be last */
727 		0,
728 		0,
729 		0,
730 	},
731 };
732 
733 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
734 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
735 
736 #ifdef WLAN_FEATURE_EPPING
737 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
738 	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
739 	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
740 	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
741 	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
742 	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
743 	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
744 	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
745 	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
746 	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
747 	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
748 	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
749 	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
750 	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
751 	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
752 	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
753 	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
754 	{0, 0, 0,},             /* Must be last */
755 };
756 
757 void hif_select_epping_service_to_pipe_map(struct service_to_pipe
758 					   **tgt_svc_map_to_use,
759 					   uint32_t *sz_tgt_svc_map_to_use)
760 {
761 	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
762 	*sz_tgt_svc_map_to_use =
763 			sizeof(target_service_to_ce_map_wlan_epping);
764 }
765 #endif
766 
767 #ifdef QCN7605_SUPPORT
768 static inline
769 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
770 			       uint32_t *sz_tgt_svc_map_to_use)
771 {
772 	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
773 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
774 }
775 #else
776 static inline
777 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
778 			       uint32_t *sz_tgt_svc_map_to_use)
779 {
780 	HIF_ERROR("%s: QCN7605 not supported", __func__);
781 }
782 #endif
783 
784 static void hif_select_service_to_pipe_map(struct hif_softc *scn,
785 				    struct service_to_pipe **tgt_svc_map_to_use,
786 				    uint32_t *sz_tgt_svc_map_to_use)
787 {
788 	uint32_t mode = hif_get_conparam(scn);
789 	struct hif_target_info *tgt_info = &scn->target_info;
790 
791 	if (QDF_IS_EPPING_ENABLED(mode)) {
792 		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
793 						      sz_tgt_svc_map_to_use);
794 	} else {
795 		switch (tgt_info->target_type) {
796 		default:
797 			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
798 			*sz_tgt_svc_map_to_use =
799 				sizeof(target_service_to_ce_map_wlan);
800 			break;
801 		case TARGET_TYPE_QCN7605:
802 			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
803 						  sz_tgt_svc_map_to_use);
804 			break;
805 		case TARGET_TYPE_AR900B:
806 		case TARGET_TYPE_QCA9984:
807 		case TARGET_TYPE_IPQ4019:
808 		case TARGET_TYPE_QCA9888:
809 		case TARGET_TYPE_AR9888:
810 		case TARGET_TYPE_AR9888V2:
811 			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
812 			*sz_tgt_svc_map_to_use =
813 				sizeof(target_service_to_ce_map_ar900b);
814 			break;
815 		case TARGET_TYPE_QCA6290:
816 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
817 			*sz_tgt_svc_map_to_use =
818 				sizeof(target_service_to_ce_map_qca6290);
819 			break;
820 		case TARGET_TYPE_QCA6390:
821 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
822 			*sz_tgt_svc_map_to_use =
823 				sizeof(target_service_to_ce_map_qca6390);
824 			break;
825 		case TARGET_TYPE_QCA6490:
826 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
827 			*sz_tgt_svc_map_to_use =
828 				sizeof(target_service_to_ce_map_qca6490);
829 			break;
830 		case TARGET_TYPE_QCA8074:
831 			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
832 			*sz_tgt_svc_map_to_use =
833 				sizeof(target_service_to_ce_map_qca8074);
834 			break;
835 		case TARGET_TYPE_QCA8074V2:
836 			*tgt_svc_map_to_use =
837 				target_service_to_ce_map_qca8074_v2;
838 			*sz_tgt_svc_map_to_use =
839 				sizeof(target_service_to_ce_map_qca8074_v2);
840 			break;
841 		case TARGET_TYPE_QCA6018:
842 			*tgt_svc_map_to_use =
843 				target_service_to_ce_map_qca6018;
844 			*sz_tgt_svc_map_to_use =
845 				sizeof(target_service_to_ce_map_qca6018);
846 			break;
847 		case TARGET_TYPE_QCN9000:
848 			*tgt_svc_map_to_use =
849 				target_service_to_ce_map_qcn9000;
850 			*sz_tgt_svc_map_to_use =
851 				sizeof(target_service_to_ce_map_qcn9000);
852 			break;
853 		}
854 	}
855 }
856 
857 /**
858  * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
859  * @ce_state : pointer to the state context of the CE
860  *
861  * Description:
862  *   Sets htt_rx_data attribute of the state structure if the
863  *   CE serves one of the HTT DATA services.
864  *
865  * Return:
866  *  false (attribute set to false)
867  *  true  (attribute set to true);
868  */
869 static bool ce_mark_datapath(struct CE_state *ce_state)
870 {
871 	struct service_to_pipe *svc_map;
872 	uint32_t map_sz, map_len;
873 	int    i;
874 	bool   rc = false;
875 
876 	if (ce_state) {
877 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
878 					       &map_sz);
879 
880 		map_len = map_sz / sizeof(struct service_to_pipe);
881 		for (i = 0; i < map_len; i++) {
882 			if ((svc_map[i].pipenum == ce_state->id) &&
883 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
884 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
885 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
886 				/* HTT CEs are unidirectional */
887 				if (svc_map[i].pipedir == PIPEDIR_IN)
888 					ce_state->htt_rx_data = true;
889 				else
890 					ce_state->htt_tx_data = true;
891 				rc = true;
892 			}
893 		}
894 	}
895 	return rc;
896 }
897 
898 /**
899  * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
900  * @ce_id: ce in question
901  * @ring: ring state being examined
902  * @type: "src_ring" or "dest_ring" string for identifying the ring
903  *
904  * Warns on non-zero index values.
905  * Causes a kernel panic if the ring is not empty durring initialization.
906  */
907 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
908 					 char *type)
909 {
910 	if (ring->write_index != 0 || ring->sw_index != 0)
911 		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
912 			  ce_id, type, ring->sw_index, ring->write_index);
913 	if (ring->write_index != ring->sw_index)
914 		QDF_BUG(0);
915 }
916 
917 #ifdef IPA_OFFLOAD
918 /**
919  * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
920  * @scn: softc instance
921  * @ce_id: ce in question
922  * @base_addr: pointer to copyengine ring base address
923  * @ce_ring: copyengine instance
924  * @nentries: number of entries should be allocated
925  * @desc_size: ce desc size
926  *
927  * Return: QDF_STATUS_SUCCESS - for success
928  */
929 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
930 				     qdf_dma_addr_t *base_addr,
931 				     struct CE_ring_state *ce_ring,
932 				     unsigned int nentries, uint32_t desc_size)
933 {
934 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
935 	    !ce_srng_based(scn)) {
936 		if (!scn->ipa_ce_ring) {
937 			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
938 				scn->qdf_dev,
939 				nentries * desc_size + CE_DESC_RING_ALIGN);
940 			if (!scn->ipa_ce_ring) {
941 				HIF_ERROR(
942 				"%s: Failed to allocate memory for IPA ce ring",
943 				__func__);
944 				return QDF_STATUS_E_NOMEM;
945 			}
946 		}
947 		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
948 						&scn->ipa_ce_ring->mem_info);
949 		ce_ring->base_addr_owner_space_unaligned =
950 						scn->ipa_ce_ring->vaddr;
951 	} else {
952 		ce_ring->base_addr_owner_space_unaligned =
953 			qdf_mem_alloc_consistent(scn->qdf_dev,
954 						 scn->qdf_dev->dev,
955 						 (nentries * desc_size +
956 						 CE_DESC_RING_ALIGN),
957 						 base_addr);
958 		if (!ce_ring->base_addr_owner_space_unaligned) {
959 			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
960 				  __func__, CE_id);
961 			return QDF_STATUS_E_NOMEM;
962 		}
963 	}
964 	return QDF_STATUS_SUCCESS;
965 }
966 
967 /**
968  * ce_free_desc_ring() - Frees copyengine descriptor ring
969  * @scn: softc instance
970  * @ce_id: ce in question
971  * @ce_ring: copyengine instance
972  * @desc_size: ce desc size
973  *
974  * Return: None
975  */
976 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
977 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
978 {
979 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
980 	    !ce_srng_based(scn)) {
981 		if (scn->ipa_ce_ring) {
982 			qdf_mem_shared_mem_free(scn->qdf_dev,
983 						scn->ipa_ce_ring);
984 			scn->ipa_ce_ring = NULL;
985 		}
986 		ce_ring->base_addr_owner_space_unaligned = NULL;
987 	} else {
988 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
989 			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
990 			ce_ring->base_addr_owner_space_unaligned,
991 			ce_ring->base_addr_CE_space, 0);
992 		ce_ring->base_addr_owner_space_unaligned = NULL;
993 	}
994 }
995 #else
996 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
997 				     qdf_dma_addr_t *base_addr,
998 				     struct CE_ring_state *ce_ring,
999 				     unsigned int nentries, uint32_t desc_size)
1000 {
1001 	ce_ring->base_addr_owner_space_unaligned =
1002 		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1003 					 (nentries * desc_size +
1004 					 CE_DESC_RING_ALIGN), base_addr);
1005 	if (!ce_ring->base_addr_owner_space_unaligned) {
1006 		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
1007 			  __func__, CE_id);
1008 		return QDF_STATUS_E_NOMEM;
1009 	}
1010 	return QDF_STATUS_SUCCESS;
1011 }
1012 
1013 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1014 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
1015 {
1016 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1017 		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
1018 		ce_ring->base_addr_owner_space_unaligned,
1019 		ce_ring->base_addr_CE_space, 0);
1020 	ce_ring->base_addr_owner_space_unaligned = NULL;
1021 }
1022 #endif /* IPA_OFFLOAD */
1023 
1024 /*
1025  * TODO: Need to explore the possibility of having this as part of a
1026  * target context instead of a global array.
1027  */
1028 static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);
1029 
1030 void ce_service_register_module(enum ce_target_type target_type,
1031 				struct ce_ops* (*ce_attach)(void))
1032 {
1033 	if (target_type < CE_MAX_TARGET_TYPE)
1034 		ce_attach_register[target_type] = ce_attach;
1035 }
1036 
1037 qdf_export_symbol(ce_service_register_module);
1038 
1039 /**
1040  * ce_srng_based() - Does this target use srng
1041  * @ce_state : pointer to the state context of the CE
1042  *
1043  * Description:
1044  *   returns true if the target is SRNG based
1045  *
1046  * Return:
1047  *  false (attribute set to false)
1048  *  true  (attribute set to true);
1049  */
1050 bool ce_srng_based(struct hif_softc *scn)
1051 {
1052 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1053 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1054 
1055 	switch (tgt_info->target_type) {
1056 	case TARGET_TYPE_QCA8074:
1057 	case TARGET_TYPE_QCA8074V2:
1058 	case TARGET_TYPE_QCA6290:
1059 	case TARGET_TYPE_QCA6390:
1060 	case TARGET_TYPE_QCA6490:
1061 	case TARGET_TYPE_QCA6018:
1062 	case TARGET_TYPE_QCN9000:
1063 		return true;
1064 	default:
1065 		return false;
1066 	}
1067 	return false;
1068 }
1069 qdf_export_symbol(ce_srng_based);
1070 
1071 #ifdef QCA_WIFI_SUPPORT_SRNG
1072 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1073 {
1074 	struct ce_ops *ops = NULL;
1075 
1076 	if (ce_srng_based(scn)) {
1077 		if (ce_attach_register[CE_SVC_SRNG])
1078 			ops = ce_attach_register[CE_SVC_SRNG]();
1079 	} else if (ce_attach_register[CE_SVC_LEGACY]) {
1080 		ops = ce_attach_register[CE_SVC_LEGACY]();
1081 	}
1082 
1083 	return ops;
1084 }
1085 
1086 
1087 #else	/* QCA_LITHIUM */
1088 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1089 {
1090 	if (ce_attach_register[CE_SVC_LEGACY])
1091 		return ce_attach_register[CE_SVC_LEGACY]();
1092 
1093 	return NULL;
1094 }
1095 #endif /* QCA_LITHIUM */
1096 
1097 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
1098 		struct pld_shadow_reg_v2_cfg **shadow_config,
1099 		int *num_shadow_registers_configured) {
1100 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1101 
1102 	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
1103 			scn, shadow_config, num_shadow_registers_configured);
1104 }
1105 
1106 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
1107 						uint8_t ring_type)
1108 {
1109 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1110 
1111 	return hif_state->ce_services->ce_get_desc_size(ring_type);
1112 }
1113 
1114 
1115 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
1116 		uint8_t ring_type, uint32_t nentries)
1117 {
1118 	uint32_t ce_nbytes;
1119 	char *ptr;
1120 	qdf_dma_addr_t base_addr;
1121 	struct CE_ring_state *ce_ring;
1122 	uint32_t desc_size;
1123 	struct hif_softc *scn = CE_state->scn;
1124 
1125 	ce_nbytes = sizeof(struct CE_ring_state)
1126 		+ (nentries * sizeof(void *));
1127 	ptr = qdf_mem_malloc(ce_nbytes);
1128 	if (!ptr)
1129 		return NULL;
1130 
1131 	ce_ring = (struct CE_ring_state *)ptr;
1132 	ptr += sizeof(struct CE_ring_state);
1133 	ce_ring->nentries = nentries;
1134 	ce_ring->nentries_mask = nentries - 1;
1135 
1136 	ce_ring->low_water_mark_nentries = 0;
1137 	ce_ring->high_water_mark_nentries = nentries;
1138 	ce_ring->per_transfer_context = (void **)ptr;
1139 
1140 	desc_size = ce_get_desc_size(scn, ring_type);
1141 
1142 	/* Legacy platforms that do not support cache
1143 	 * coherent DMA are unsupported
1144 	 */
1145 	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
1146 			       ce_ring, nentries,
1147 			       desc_size) !=
1148 	    QDF_STATUS_SUCCESS) {
1149 		HIF_ERROR("%s: ring has no DMA mem",
1150 				__func__);
1151 		qdf_mem_free(ce_ring);
1152 		return NULL;
1153 	}
1154 	ce_ring->base_addr_CE_space_unaligned = base_addr;
1155 
1156 	/* Correctly initialize memory to 0 to
1157 	 * prevent garbage data crashing system
1158 	 * when download firmware
1159 	 */
1160 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
1161 			nentries * desc_size +
1162 			CE_DESC_RING_ALIGN);
1163 
1164 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
1165 
1166 		ce_ring->base_addr_CE_space =
1167 			(ce_ring->base_addr_CE_space_unaligned +
1168 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
1169 
1170 		ce_ring->base_addr_owner_space = (void *)
1171 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
1172 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
1173 	} else {
1174 		ce_ring->base_addr_CE_space =
1175 				ce_ring->base_addr_CE_space_unaligned;
1176 		ce_ring->base_addr_owner_space =
1177 				ce_ring->base_addr_owner_space_unaligned;
1178 	}
1179 
1180 	return ce_ring;
1181 }
1182 
1183 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
1184 			uint32_t ce_id, struct CE_ring_state *ring,
1185 			struct CE_attr *attr)
1186 {
1187 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1188 
1189 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
1190 					      ring, attr);
1191 }
1192 
1193 int hif_ce_bus_early_suspend(struct hif_softc *scn)
1194 {
1195 	uint8_t ul_pipe, dl_pipe;
1196 	int ce_id, status, ul_is_polled, dl_is_polled;
1197 	struct CE_state *ce_state;
1198 
1199 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
1200 					 &ul_pipe, &dl_pipe,
1201 					 &ul_is_polled, &dl_is_polled);
1202 	if (status) {
1203 		HIF_ERROR("%s: pipe_mapping failure", __func__);
1204 		return status;
1205 	}
1206 
1207 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1208 		if (ce_id == ul_pipe)
1209 			continue;
1210 		if (ce_id == dl_pipe)
1211 			continue;
1212 
1213 		ce_state = scn->ce_id_to_state[ce_id];
1214 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1215 		if (ce_state->state == CE_RUNNING)
1216 			ce_state->state = CE_PAUSED;
1217 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1218 	}
1219 
1220 	return status;
1221 }
1222 
1223 int hif_ce_bus_late_resume(struct hif_softc *scn)
1224 {
1225 	int ce_id;
1226 	struct CE_state *ce_state;
1227 	int write_index = 0;
1228 	bool index_updated;
1229 
1230 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1231 		ce_state = scn->ce_id_to_state[ce_id];
1232 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1233 		if (ce_state->state == CE_PENDING) {
1234 			write_index = ce_state->src_ring->write_index;
1235 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
1236 					write_index);
1237 			ce_state->state = CE_RUNNING;
1238 			index_updated = true;
1239 		} else {
1240 			index_updated = false;
1241 		}
1242 
1243 		if (ce_state->state == CE_PAUSED)
1244 			ce_state->state = CE_RUNNING;
1245 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1246 
1247 		if (index_updated)
1248 			hif_record_ce_desc_event(scn, ce_id,
1249 				RESUME_WRITE_INDEX_UPDATE,
1250 				NULL, NULL, write_index, 0);
1251 	}
1252 
1253 	return 0;
1254 }
1255 
1256 /**
1257  * ce_oom_recovery() - try to recover rx ce from oom condition
1258  * @context: CE_state of the CE with oom rx ring
1259  *
1260  * the executing work Will continue to be rescheduled until
1261  * at least 1 descriptor is successfully posted to the rx ring.
1262  *
1263  * return: none
1264  */
1265 static void ce_oom_recovery(void *context)
1266 {
1267 	struct CE_state *ce_state = context;
1268 	struct hif_softc *scn = ce_state->scn;
1269 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
1270 	struct HIF_CE_pipe_info *pipe_info =
1271 		&ce_softc->pipe_info[ce_state->id];
1272 
1273 	hif_post_recv_buffers_for_pipe(pipe_info);
1274 }
1275 
1276 #ifdef HIF_CE_DEBUG_DATA_BUF
1277 /**
1278  * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
1279  * the CE descriptors.
1280  * Allocate HIF_CE_HISTORY_MAX records by CE_DEBUG_MAX_DATA_BUF_SIZE
1281  * @scn: hif scn handle
1282  * ce_id: Copy Engine Id
1283  *
1284  * Return: QDF_STATUS
1285  */
1286 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1287 {
1288 	struct hif_ce_desc_event *event = NULL;
1289 	struct hif_ce_desc_event *hist_ev = NULL;
1290 	uint32_t index = 0;
1291 
1292 	hist_ev =
1293 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1294 
1295 	if (!hist_ev)
1296 		return QDF_STATUS_E_NOMEM;
1297 
1298 	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
1299 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1300 		event = &hist_ev[index];
1301 		event->data =
1302 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
1303 		if (!event->data) {
1304 			hif_err_rl("ce debug data alloc failed");
1305 			return QDF_STATUS_E_NOMEM;
1306 		}
1307 	}
1308 	return QDF_STATUS_SUCCESS;
1309 }
1310 
1311 /**
1312  * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
1313  * the CE descriptors.
1314  * @scn: hif scn handle
1315  * ce_id: Copy Engine Id
1316  *
1317  * Return:
1318  */
1319 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1320 {
1321 	struct hif_ce_desc_event *event = NULL;
1322 	struct hif_ce_desc_event *hist_ev = NULL;
1323 	uint32_t index = 0;
1324 
1325 	hist_ev =
1326 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1327 
1328 	if (!hist_ev)
1329 		return;
1330 
1331 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1332 		event = &hist_ev[index];
1333 		if (event->data)
1334 			qdf_mem_free(event->data);
1335 		event->data = NULL;
1336 		event = NULL;
1337 	}
1338 
1339 }
1340 #endif /* HIF_CE_DEBUG_DATA_BUF */
1341 
1342 #ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
1343 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1344 struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];
1345 
1346 /**
1347  * alloc_mem_ce_debug_history() - Allocate CE descriptor history
1348  * @scn: hif scn handle
1349  * @ce_id: Copy Engine Id
1350  * @src_nentries: source ce ring entries
1351  * Return: QDF_STATUS
1352  */
1353 static QDF_STATUS
1354 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
1355 			   uint32_t src_nentries)
1356 {
1357 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1358 
1359 	ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
1360 	ce_hist->enable[ce_id] = 1;
1361 
1362 	if (src_nentries)
1363 		alloc_mem_ce_debug_hist_data(scn, ce_id);
1364 	else
1365 		ce_hist->data_enable[ce_id] = false;
1366 
1367 	return QDF_STATUS_SUCCESS;
1368 }
1369 
1370 /**
1371  * free_mem_ce_debug_history() - Free CE descriptor history
1372  * @scn: hif scn handle
1373  * @ce_id: Copy Engine Id
1374  *
1375  * Return: None
1376  */
1377 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
1378 {
1379 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1380 
1381 	ce_hist->enable[ce_id] = 0;
1382 	if (ce_hist->data_enable[ce_id]) {
1383 		ce_hist->data_enable[ce_id] = false;
1384 		free_mem_ce_debug_hist_data(scn, ce_id);
1385 	}
1386 	ce_hist->hist_ev[ce_id] = NULL;
1387 }
1388 #else
1389 static inline QDF_STATUS
1390 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1391 			   uint32_t src_nentries)
1392 {
1393 	return QDF_STATUS_SUCCESS;
1394 }
1395 
1396 static inline void
1397 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
1398 #endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
1399 #else
1400 #if defined(HIF_CE_DEBUG_DATA_BUF)
1401 
1402 static QDF_STATUS
1403 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1404 			   uint32_t src_nentries)
1405 {
1406 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
1407 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
1408 
1409 	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
1410 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
1411 		return QDF_STATUS_E_NOMEM;
1412 	} else {
1413 		scn->hif_ce_desc_hist.enable[CE_id] = 1;
1414 		return QDF_STATUS_SUCCESS;
1415 	}
1416 }
1417 
1418 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
1419 {
1420 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1421 	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];
1422 
1423 	if (!hist_ev)
1424 		return;
1425 
1426 	if (ce_hist->data_enable[CE_id]) {
1427 		ce_hist->data_enable[CE_id] = false;
1428 		free_mem_ce_debug_hist_data(scn, CE_id);
1429 	}
1430 
1431 	ce_hist->enable[CE_id] = 0;
1432 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
1433 	ce_hist->hist_ev[CE_id] = NULL;
1434 }
1435 
1436 #else
1437 
1438 static inline QDF_STATUS
1439 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1440 			   uint32_t src_nentries)
1441 {
1442 	return QDF_STATUS_SUCCESS;
1443 }
1444 
1445 static inline void
1446 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
1447 #endif /* HIF_CE_DEBUG_DATA_BUF */
1448 #endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */
1449 
1450 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1451 /**
1452  * reset_ce_debug_history() - reset the index and ce id used for dumping the
1453  * CE records on the console using sysfs.
1454  * @scn: hif scn handle
1455  *
1456  * Return:
1457  */
1458 static inline void reset_ce_debug_history(struct hif_softc *scn)
1459 {
1460 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1461 	/* Initialise the CE debug history sysfs interface inputs ce_id and
1462 	 * index. Disable data storing
1463 	 */
1464 	ce_hist->hist_index = 0;
1465 	ce_hist->hist_id = 0;
1466 }
1467 #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1468 static inline void reset_ce_debug_history(struct hif_softc *scn) { }
1469 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1470 
1471 void ce_enable_polling(void *cestate)
1472 {
1473 	struct CE_state *CE_state = (struct CE_state *)cestate;
1474 
1475 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1476 		CE_state->timer_inited = true;
1477 }
1478 
1479 void ce_disable_polling(void *cestate)
1480 {
1481 	struct CE_state *CE_state = (struct CE_state *)cestate;
1482 
1483 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1484 		CE_state->timer_inited = false;
1485 }
1486 
1487 /*
1488  * Initialize a Copy Engine based on caller-supplied attributes.
1489  * This may be called once to initialize both source and destination
1490  * rings or it may be called twice for separate source and destination
1491  * initialization. It may be that only one side or the other is
1492  * initialized by software/firmware.
1493  *
1494  * This should be called durring the initialization sequence before
1495  * interupts are enabled, so we don't have to worry about thread safety.
1496  */
1497 struct CE_handle *ce_init(struct hif_softc *scn,
1498 			  unsigned int CE_id, struct CE_attr *attr)
1499 {
1500 	struct CE_state *CE_state;
1501 	uint32_t ctrl_addr;
1502 	unsigned int nentries;
1503 	bool malloc_CE_state = false;
1504 	bool malloc_src_ring = false;
1505 	int status;
1506 
1507 	QDF_ASSERT(CE_id < scn->ce_count);
1508 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
1509 	CE_state = scn->ce_id_to_state[CE_id];
1510 
1511 	if (!CE_state) {
1512 		CE_state =
1513 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
1514 		if (!CE_state)
1515 			return NULL;
1516 
1517 		malloc_CE_state = true;
1518 		qdf_spinlock_create(&CE_state->ce_index_lock);
1519 
1520 		CE_state->id = CE_id;
1521 		CE_state->ctrl_addr = ctrl_addr;
1522 		CE_state->state = CE_RUNNING;
1523 		CE_state->attr_flags = attr->flags;
1524 	}
1525 	CE_state->scn = scn;
1526 	CE_state->service = ce_engine_service_reg;
1527 
1528 	qdf_atomic_init(&CE_state->rx_pending);
1529 	if (!attr) {
1530 		/* Already initialized; caller wants the handle */
1531 		return (struct CE_handle *)CE_state;
1532 	}
1533 
1534 	if (CE_state->src_sz_max)
1535 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
1536 	else
1537 		CE_state->src_sz_max = attr->src_sz_max;
1538 
1539 	ce_init_ce_desc_event_log(scn, CE_id,
1540 				  attr->src_nentries + attr->dest_nentries);
1541 
1542 	/* source ring setup */
1543 	nentries = attr->src_nentries;
1544 	if (nentries) {
1545 		struct CE_ring_state *src_ring;
1546 
1547 		nentries = roundup_pwr2(nentries);
1548 		if (CE_state->src_ring) {
1549 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
1550 		} else {
1551 			src_ring = CE_state->src_ring =
1552 				ce_alloc_ring_state(CE_state,
1553 						CE_RING_SRC,
1554 						nentries);
1555 			if (!src_ring) {
1556 				/* cannot allocate src ring. If the
1557 				 * CE_state is allocated locally free
1558 				 * CE_State and return error.
1559 				 */
1560 				HIF_ERROR("%s: src ring has no mem", __func__);
1561 				if (malloc_CE_state) {
1562 					/* allocated CE_state locally */
1563 					qdf_mem_free(CE_state);
1564 					malloc_CE_state = false;
1565 				}
1566 				return NULL;
1567 			}
1568 			/* we can allocate src ring. Mark that the src ring is
1569 			 * allocated locally
1570 			 */
1571 			malloc_src_ring = true;
1572 
1573 			/*
1574 			 * Also allocate a shadow src ring in
1575 			 * regular mem to use for faster access.
1576 			 */
1577 			src_ring->shadow_base_unaligned =
1578 				qdf_mem_malloc(nentries *
1579 					       sizeof(struct CE_src_desc) +
1580 					       CE_DESC_RING_ALIGN);
1581 			if (!src_ring->shadow_base_unaligned)
1582 				goto error_no_dma_mem;
1583 
1584 			src_ring->shadow_base = (struct CE_src_desc *)
1585 				(((size_t) src_ring->shadow_base_unaligned +
1586 				CE_DESC_RING_ALIGN - 1) &
1587 				 ~(CE_DESC_RING_ALIGN - 1));
1588 
1589 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
1590 					       src_ring, attr);
1591 			if (status < 0)
1592 				goto error_target_access;
1593 
1594 			ce_ring_test_initial_indexes(CE_id, src_ring,
1595 						     "src_ring");
1596 		}
1597 	}
1598 
1599 	/* destination ring setup */
1600 	nentries = attr->dest_nentries;
1601 	if (nentries) {
1602 		struct CE_ring_state *dest_ring;
1603 
1604 		nentries = roundup_pwr2(nentries);
1605 		if (CE_state->dest_ring) {
1606 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
1607 		} else {
1608 			dest_ring = CE_state->dest_ring =
1609 				ce_alloc_ring_state(CE_state,
1610 						CE_RING_DEST,
1611 						nentries);
1612 			if (!dest_ring) {
1613 				/* cannot allocate dst ring. If the CE_state
1614 				 * or src ring is allocated locally free
1615 				 * CE_State and src ring and return error.
1616 				 */
1617 				HIF_ERROR("%s: dest ring has no mem",
1618 					  __func__);
1619 				goto error_no_dma_mem;
1620 			}
1621 
1622 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
1623 				      dest_ring, attr);
1624 			if (status < 0)
1625 				goto error_target_access;
1626 
1627 			ce_ring_test_initial_indexes(CE_id, dest_ring,
1628 						     "dest_ring");
1629 
1630 			/* For srng based target, init status ring here */
1631 			if (ce_srng_based(CE_state->scn)) {
1632 				CE_state->status_ring =
1633 					ce_alloc_ring_state(CE_state,
1634 							CE_RING_STATUS,
1635 							nentries);
1636 				if (!CE_state->status_ring) {
1637 					/*Allocation failed. Cleanup*/
1638 					qdf_mem_free(CE_state->dest_ring);
1639 					if (malloc_src_ring) {
1640 						qdf_mem_free
1641 							(CE_state->src_ring);
1642 						CE_state->src_ring = NULL;
1643 						malloc_src_ring = false;
1644 					}
1645 					if (malloc_CE_state) {
1646 						/* allocated CE_state locally */
1647 						scn->ce_id_to_state[CE_id] =
1648 							NULL;
1649 						qdf_mem_free(CE_state);
1650 						malloc_CE_state = false;
1651 					}
1652 
1653 					return NULL;
1654 				}
1655 
1656 				status = ce_ring_setup(scn, CE_RING_STATUS,
1657 					       CE_id, CE_state->status_ring,
1658 					       attr);
1659 				if (status < 0)
1660 					goto error_target_access;
1661 
1662 			}
1663 
1664 			/* epping */
1665 			/* poll timer */
1666 			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
1667 				qdf_timer_init(scn->qdf_dev,
1668 						&CE_state->poll_timer,
1669 						ce_poll_timeout,
1670 						CE_state,
1671 						QDF_TIMER_TYPE_WAKE_APPS);
1672 				ce_enable_polling(CE_state);
1673 				qdf_timer_mod(&CE_state->poll_timer,
1674 						      CE_POLL_TIMEOUT);
1675 			}
1676 		}
1677 	}
1678 
1679 	if (!ce_srng_based(scn)) {
1680 		/* Enable CE error interrupts */
1681 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1682 			goto error_target_access;
1683 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1684 		if (Q_TARGET_ACCESS_END(scn) < 0)
1685 			goto error_target_access;
1686 	}
1687 
1688 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1689 			ce_oom_recovery, CE_state);
1690 
1691 	/* update the htt_data attribute */
1692 	ce_mark_datapath(CE_state);
1693 	scn->ce_id_to_state[CE_id] = CE_state;
1694 
1695 	alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries);
1696 
1697 	return (struct CE_handle *)CE_state;
1698 
1699 error_target_access:
1700 error_no_dma_mem:
1701 	ce_fini((struct CE_handle *)CE_state);
1702 	return NULL;
1703 }
1704 
1705 /**
1706  * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
1707  * @hif_ctx: HIF Context
1708  *
1709  * API to check if polling is enabled on all CEs. Returns true when polling
1710  * is enabled on all CEs.
1711  *
1712  * Return: bool
1713  */
1714 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1715 {
1716 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1717 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1718 	struct CE_attr *attr;
1719 	int id;
1720 
1721 	for (id = 0; id < scn->ce_count; id++) {
1722 		attr = &hif_state->host_ce_config[id];
1723 		if (attr && (attr->dest_nentries) &&
1724 		    !(attr->flags & CE_ATTR_ENABLE_POLL))
1725 			return false;
1726 	}
1727 	return true;
1728 }
1729 qdf_export_symbol(hif_is_polled_mode_enabled);
1730 
1731 #ifdef WLAN_FEATURE_FASTPATH
1732 /**
1733  * hif_enable_fastpath() Update that we have enabled fastpath mode
1734  * @hif_ctx: HIF context
1735  *
1736  * For use in data path
1737  *
1738  * Retrun: void
1739  */
1740 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1741 {
1742 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1743 
1744 	if (ce_srng_based(scn)) {
1745 		HIF_INFO("%s, srng rings do not support fastpath", __func__);
1746 		return;
1747 	}
1748 	HIF_DBG("%s, Enabling fastpath mode", __func__);
1749 	scn->fastpath_mode_on = true;
1750 }
1751 
1752 /**
1753  * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled
1754  * @hif_ctx: HIF Context
1755  *
1756  * For use in data path to skip HTC
1757  *
1758  * Return: bool
1759  */
1760 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1761 {
1762 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1763 
1764 	return scn->fastpath_mode_on;
1765 }
1766 
1767 /**
1768  * hif_get_ce_handle - API to get CE handle for FastPath mode
1769  * @hif_ctx: HIF Context
1770  * @id: CopyEngine Id
1771  *
1772  * API to return CE handle for fastpath mode
1773  *
1774  * Return: void
1775  */
1776 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1777 {
1778 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1779 
1780 	return scn->ce_id_to_state[id];
1781 }
1782 qdf_export_symbol(hif_get_ce_handle);
1783 
1784 /**
1785  * ce_h2t_tx_ce_cleanup() Place holder function for H2T CE cleanup.
1786  * No processing is required inside this function.
1787  * @ce_hdl: Cope engine handle
1788  * Using an assert, this function makes sure that,
1789  * the TX CE has been processed completely.
1790  *
1791  * This is called while dismantling CE structures. No other thread
1792  * should be using these structures while dismantling is occurring
1793  * therfore no locking is needed.
1794  *
1795  * Return: none
1796  */
1797 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1798 {
1799 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1800 	struct CE_ring_state *src_ring = ce_state->src_ring;
1801 	struct hif_softc *sc = ce_state->scn;
1802 	uint32_t sw_index, write_index;
1803 
1804 	if (hif_is_nss_wifi_enabled(sc))
1805 		return;
1806 
1807 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
1808 		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
1809 			 __func__, __LINE__);
1810 		sw_index = src_ring->sw_index;
1811 		write_index = src_ring->sw_index;
1812 
1813 		/* At this point Tx CE should be clean */
1814 		qdf_assert_always(sw_index == write_index);
1815 	}
1816 }
1817 
1818 /**
1819  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1820  * @ce_hdl: Handle to CE
1821  *
1822  * These buffers are never allocated on the fly, but
1823  * are allocated only once during HIF start and freed
1824  * only once during HIF stop.
1825  * NOTE:
1826  * The assumption here is there is no in-flight DMA in progress
1827  * currently, so that buffers can be freed up safely.
1828  *
1829  * Return: NONE
1830  */
1831 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1832 {
1833 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1834 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
1835 	qdf_nbuf_t nbuf;
1836 	int i;
1837 
1838 	if (ce_state->scn->fastpath_mode_on == false)
1839 		return;
1840 
1841 	if (!ce_state->htt_rx_data)
1842 		return;
1843 
1844 	/*
1845 	 * when fastpath_mode is on and for datapath CEs. Unlike other CE's,
1846 	 * this CE is completely full: does not leave one blank space, to
1847 	 * distinguish between empty queue & full queue. So free all the
1848 	 * entries.
1849 	 */
1850 	for (i = 0; i < dst_ring->nentries; i++) {
1851 		nbuf = dst_ring->per_transfer_context[i];
1852 
1853 		/*
1854 		 * The reasons for doing this check are:
1855 		 * 1) Protect against calling cleanup before allocating buffers
1856 		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1857 		 *    could have a partially filled ring, because of a memory
1858 		 *    allocation failure in the middle of allocating ring.
1859 		 *    This check accounts for that case, checking
1860 		 *    fastpath_mode_on flag or started flag would not have
1861 		 *    covered that case. This is not in performance path,
1862 		 *    so OK to do this.
1863 		 */
1864 		if (nbuf) {
1865 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1866 					      QDF_DMA_FROM_DEVICE);
1867 			qdf_nbuf_free(nbuf);
1868 		}
1869 	}
1870 }
1871 
1872 /**
1873  * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1874  * @scn: HIF handle
1875  *
1876  * Datapath Rx CEs are special case, where we reuse all the message buffers.
1877  * Hence we have to post all the entries in the pipe, even, in the beginning
1878  * unlike for other CE pipes where one less than dest_nentries are filled in
1879  * the beginning.
1880  *
1881  * Return: None
1882  */
1883 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1884 {
1885 	int pipe_num;
1886 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1887 
1888 	if (scn->fastpath_mode_on == false)
1889 		return;
1890 
1891 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1892 		struct HIF_CE_pipe_info *pipe_info =
1893 			&hif_state->pipe_info[pipe_num];
1894 		struct CE_state *ce_state =
1895 			scn->ce_id_to_state[pipe_info->pipe_num];
1896 
1897 		if (ce_state->htt_rx_data)
1898 			atomic_inc(&pipe_info->recv_bufs_needed);
1899 	}
1900 }
1901 #else
1902 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1903 {
1904 }
1905 
1906 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
1907 {
1908 	return false;
1909 }
1910 #endif /* WLAN_FEATURE_FASTPATH */
1911 
1912 void ce_fini(struct CE_handle *copyeng)
1913 {
1914 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1915 	unsigned int CE_id = CE_state->id;
1916 	struct hif_softc *scn = CE_state->scn;
1917 	uint32_t desc_size;
1918 
1919 	bool inited = CE_state->timer_inited;
1920 	CE_state->state = CE_UNUSED;
1921 	scn->ce_id_to_state[CE_id] = NULL;
1922 	/* Set the flag to false first to stop processing in ce_poll_timeout */
1923 	ce_disable_polling(CE_state);
1924 
1925 	qdf_lro_deinit(CE_state->lro_data);
1926 
1927 	if (CE_state->src_ring) {
1928 		/* Cleanup the datapath Tx ring */
1929 		ce_h2t_tx_ce_cleanup(copyeng);
1930 
1931 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
1932 		if (CE_state->src_ring->shadow_base_unaligned)
1933 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
1934 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
1935 			ce_free_desc_ring(scn, CE_state->id,
1936 					  CE_state->src_ring,
1937 					  desc_size);
1938 		qdf_mem_free(CE_state->src_ring);
1939 	}
1940 	if (CE_state->dest_ring) {
1941 		/* Cleanup the datapath Rx ring */
1942 		ce_t2h_msg_ce_cleanup(copyeng);
1943 
1944 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
1945 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
1946 			ce_free_desc_ring(scn, CE_state->id,
1947 					  CE_state->dest_ring,
1948 					  desc_size);
1949 		qdf_mem_free(CE_state->dest_ring);
1950 
1951 		/* epping */
1952 		if (inited) {
1953 			qdf_timer_free(&CE_state->poll_timer);
1954 		}
1955 	}
1956 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
1957 		/* Cleanup the datapath Tx ring */
1958 		ce_h2t_tx_ce_cleanup(copyeng);
1959 
1960 		if (CE_state->status_ring->shadow_base_unaligned)
1961 			qdf_mem_free(
1962 				CE_state->status_ring->shadow_base_unaligned);
1963 
1964 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
1965 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
1966 			ce_free_desc_ring(scn, CE_state->id,
1967 					  CE_state->status_ring,
1968 					  desc_size);
1969 		qdf_mem_free(CE_state->status_ring);
1970 	}
1971 
1972 	free_mem_ce_debug_history(scn, CE_id);
1973 	reset_ce_debug_history(scn);
1974 	ce_deinit_ce_desc_event_log(scn, CE_id);
1975 
1976 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
1977 	qdf_mem_free(CE_state);
1978 }
1979 
1980 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
1981 {
1982 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1983 
1984 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
1985 		  sizeof(hif_state->msg_callbacks_pending));
1986 	qdf_mem_zero(&hif_state->msg_callbacks_current,
1987 		  sizeof(hif_state->msg_callbacks_current));
1988 }
1989 
1990 /* Send the first nbytes bytes of the buffer */
1991 QDF_STATUS
1992 hif_send_head(struct hif_opaque_softc *hif_ctx,
1993 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
1994 	      qdf_nbuf_t nbuf, unsigned int data_attr)
1995 {
1996 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1997 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1998 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1999 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2000 	int bytes = nbytes, nfrags = 0;
2001 	struct ce_sendlist sendlist;
2002 	int status, i = 0;
2003 	unsigned int mux_id = 0;
2004 
2005 	if (nbytes > qdf_nbuf_len(nbuf)) {
2006 		HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
2007 			  (uint32_t)qdf_nbuf_len(nbuf));
2008 		QDF_ASSERT(0);
2009 	}
2010 
2011 	transfer_id =
2012 		(mux_id & MUX_ID_MASK) |
2013 		(transfer_id & TRANSACTION_ID_MASK);
2014 	data_attr &= DESC_DATA_FLAG_MASK;
2015 	/*
2016 	 * The common case involves sending multiple fragments within a
2017 	 * single download (the tx descriptor and the tx frame header).
2018 	 * So, optimize for the case of multiple fragments by not even
2019 	 * checking whether it's necessary to use a sendlist.
2020 	 * The overhead of using a sendlist for a single buffer download
2021 	 * is not a big deal, since it happens rarely (for WMI messages).
2022 	 */
2023 	ce_sendlist_init(&sendlist);
2024 	do {
2025 		qdf_dma_addr_t frag_paddr;
2026 		int frag_bytes;
2027 
2028 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
2029 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
2030 		/*
2031 		 * Clear the packet offset for all but the first CE desc.
2032 		 */
2033 		if (i++ > 0)
2034 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
2035 
2036 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
2037 				    frag_bytes >
2038 				    bytes ? bytes : frag_bytes,
2039 				    qdf_nbuf_get_frag_is_wordstream
2040 				    (nbuf,
2041 				    nfrags) ? 0 :
2042 				    CE_SEND_FLAG_SWAP_DISABLE,
2043 				    data_attr);
2044 		if (status != QDF_STATUS_SUCCESS) {
2045 			HIF_ERROR("%s: error, frag_num %d larger than limit",
2046 				__func__, nfrags);
2047 			return status;
2048 		}
2049 		bytes -= frag_bytes;
2050 		nfrags++;
2051 	} while (bytes > 0);
2052 
2053 	/* Make sure we have resources to handle this request */
2054 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2055 	if (pipe_info->num_sends_allowed < nfrags) {
2056 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2057 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
2058 		return QDF_STATUS_E_RESOURCES;
2059 	}
2060 	pipe_info->num_sends_allowed -= nfrags;
2061 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2062 
2063 	if (qdf_unlikely(!ce_hdl)) {
2064 		HIF_ERROR("%s: error CE handle is null", __func__);
2065 		return A_ERROR;
2066 	}
2067 
2068 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
2069 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
2070 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
2071 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
2072 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
2073 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
2074 
2075 	return status;
2076 }
2077 
2078 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
2079 								int force)
2080 {
2081 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2082 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2083 
2084 	if (!force) {
2085 		int resources;
2086 		/*
2087 		 * Decide whether to actually poll for completions, or just
2088 		 * wait for a later chance. If there seem to be plenty of
2089 		 * resources left, then just wait, since checking involves
2090 		 * reading a CE register, which is a relatively expensive
2091 		 * operation.
2092 		 */
2093 		resources = hif_get_free_queue_number(hif_ctx, pipe);
2094 		/*
2095 		 * If at least 50% of the total resources are still available,
2096 		 * don't bother checking again yet.
2097 		 */
2098 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
2099 									 1))
2100 			return;
2101 	}
2102 #if ATH_11AC_TXCOMPACT
2103 	ce_per_engine_servicereap(scn, pipe);
2104 #else
2105 	ce_per_engine_service(scn, pipe);
2106 #endif
2107 }
2108 
2109 uint16_t
2110 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2111 {
2112 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2113 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2114 	uint16_t rv;
2115 
2116 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2117 	rv = pipe_info->num_sends_allowed;
2118 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2119 	return rv;
2120 }
2121 
2122 /* Called by lower (CE) layer when a send to Target completes. */
2123 static void
2124 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
2125 		     void *transfer_context, qdf_dma_addr_t CE_data,
2126 		     unsigned int nbytes, unsigned int transfer_id,
2127 		     unsigned int sw_index, unsigned int hw_index,
2128 		     unsigned int toeplitz_hash_result)
2129 {
2130 	struct HIF_CE_pipe_info *pipe_info =
2131 		(struct HIF_CE_pipe_info *)ce_context;
2132 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
2133 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2134 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
2135 	struct hif_msg_callbacks *msg_callbacks =
2136 		&pipe_info->pipe_callbacks;
2137 
2138 	do {
2139 		/*
2140 		 * The upper layer callback will be triggered
2141 		 * when last fragment is complteted.
2142 		 */
2143 		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
2144 			if (scn->target_status == TARGET_STATUS_RESET) {
2145 
2146 				qdf_nbuf_unmap_single(scn->qdf_dev,
2147 						      transfer_context,
2148 						      QDF_DMA_TO_DEVICE);
2149 				qdf_nbuf_free(transfer_context);
2150 			} else
2151 				msg_callbacks->txCompletionHandler(
2152 					msg_callbacks->Context,
2153 					transfer_context, transfer_id,
2154 					toeplitz_hash_result);
2155 		}
2156 
2157 		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2158 		pipe_info->num_sends_allowed++;
2159 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2160 	} while (ce_completed_send_next(copyeng,
2161 			&ce_context, &transfer_context,
2162 			&CE_data, &nbytes, &transfer_id,
2163 			&sw_idx, &hw_idx,
2164 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
2165 }
2166 
2167 /**
2168  * hif_ce_do_recv(): send message from copy engine to upper layers
2169  * @msg_callbacks: structure containing callback and callback context
2170  * @netbuff: skb containing message
2171  * @nbytes: number of bytes in the message
2172  * @pipe_info: used for the pipe_number info
2173  *
2174  * Checks the packet length, configures the length in the netbuff,
2175  * and calls the upper layer callback.
2176  *
2177  * return: None
2178  */
2179 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
2180 		qdf_nbuf_t netbuf, int nbytes,
2181 		struct HIF_CE_pipe_info *pipe_info) {
2182 	if (nbytes <= pipe_info->buf_sz) {
2183 		qdf_nbuf_set_pktlen(netbuf, nbytes);
2184 		msg_callbacks->
2185 			rxCompletionHandler(msg_callbacks->Context,
2186 					netbuf, pipe_info->pipe_num);
2187 	} else {
2188 		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
2189 				__func__, netbuf, nbytes);
2190 
2191 		qdf_nbuf_free(netbuf);
2192 	}
2193 }
2194 
2195 /* Called by lower (CE) layer when data is received from the Target. */
2196 static void
2197 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
2198 		     void *transfer_context, qdf_dma_addr_t CE_data,
2199 		     unsigned int nbytes, unsigned int transfer_id,
2200 		     unsigned int flags)
2201 {
2202 	struct HIF_CE_pipe_info *pipe_info =
2203 		(struct HIF_CE_pipe_info *)ce_context;
2204 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
2205 	struct CE_state *ce_state = (struct CE_state *) copyeng;
2206 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2207 	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
2208 	struct hif_msg_callbacks *msg_callbacks =
2209 		 &pipe_info->pipe_callbacks;
2210 
2211 	do {
2212 		hif_pm_runtime_mark_last_busy(hif_ctx);
2213 		qdf_nbuf_unmap_single(scn->qdf_dev,
2214 				      (qdf_nbuf_t) transfer_context,
2215 				      QDF_DMA_FROM_DEVICE);
2216 
2217 		atomic_inc(&pipe_info->recv_bufs_needed);
2218 		hif_post_recv_buffers_for_pipe(pipe_info);
2219 		if (scn->target_status == TARGET_STATUS_RESET)
2220 			qdf_nbuf_free(transfer_context);
2221 		else
2222 			hif_ce_do_recv(msg_callbacks, transfer_context,
2223 				nbytes, pipe_info);
2224 
2225 		/* Set up force_break flag if num of receices reaches
2226 		 * MAX_NUM_OF_RECEIVES
2227 		 */
2228 		ce_state->receive_count++;
2229 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
2230 			ce_state->force_break = 1;
2231 			break;
2232 		}
2233 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
2234 					&CE_data, &nbytes, &transfer_id,
2235 					&flags) == QDF_STATUS_SUCCESS);
2236 
2237 }
2238 
2239 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
2240 
2241 void
2242 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
2243 	      struct hif_msg_callbacks *callbacks)
2244 {
2245 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2246 
2247 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2248 	spin_lock_init(&pcie_access_log_lock);
2249 #endif
2250 	/* Save callbacks for later installation */
2251 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
2252 		 sizeof(hif_state->msg_callbacks_pending));
2253 
2254 }
2255 
2256 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
2257 {
2258 	struct CE_handle *ce_diag = hif_state->ce_diag;
2259 	int pipe_num;
2260 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2261 	struct hif_msg_callbacks *hif_msg_callbacks =
2262 		&hif_state->msg_callbacks_current;
2263 
2264 	/* daemonize("hif_compl_thread"); */
2265 
2266 	if (scn->ce_count == 0) {
2267 		HIF_ERROR("%s: Invalid ce_count", __func__);
2268 		return -EINVAL;
2269 	}
2270 
2271 	if (!hif_msg_callbacks ||
2272 			!hif_msg_callbacks->rxCompletionHandler ||
2273 			!hif_msg_callbacks->txCompletionHandler) {
2274 		HIF_ERROR("%s: no completion handler registered", __func__);
2275 		return -EFAULT;
2276 	}
2277 
2278 	A_TARGET_ACCESS_LIKELY(scn);
2279 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2280 		struct CE_attr attr;
2281 		struct HIF_CE_pipe_info *pipe_info;
2282 
2283 		pipe_info = &hif_state->pipe_info[pipe_num];
2284 		if (pipe_info->ce_hdl == ce_diag)
2285 			continue;       /* Handle Diagnostic CE specially */
2286 		attr = hif_state->host_ce_config[pipe_num];
2287 		if (attr.src_nentries) {
2288 			/* pipe used to send to target */
2289 			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
2290 					 __func__, pipe_num, pipe_info);
2291 			ce_send_cb_register(pipe_info->ce_hdl,
2292 					    hif_pci_ce_send_done, pipe_info,
2293 					    attr.flags & CE_ATTR_DISABLE_INTR);
2294 			pipe_info->num_sends_allowed = attr.src_nentries - 1;
2295 		}
2296 		if (attr.dest_nentries) {
2297 			/* pipe used to receive from target */
2298 			ce_recv_cb_register(pipe_info->ce_hdl,
2299 					    hif_pci_ce_recv_data, pipe_info,
2300 					    attr.flags & CE_ATTR_DISABLE_INTR);
2301 		}
2302 
2303 		if (attr.src_nentries)
2304 			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
2305 
2306 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
2307 					sizeof(pipe_info->pipe_callbacks));
2308 	}
2309 
2310 	A_TARGET_ACCESS_UNLIKELY(scn);
2311 	return 0;
2312 }
2313 
2314 /*
2315  * Install pending msg callbacks.
2316  *
2317  * TBDXXX: This hack is needed because upper layers install msg callbacks
2318  * for use with HTC before BMI is done; yet this HIF implementation
2319  * needs to continue to use BMI msg callbacks. Really, upper layers
2320  * should not register HTC callbacks until AFTER BMI phase.
2321  */
2322 static void hif_msg_callbacks_install(struct hif_softc *scn)
2323 {
2324 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2325 
2326 	qdf_mem_copy(&hif_state->msg_callbacks_current,
2327 		 &hif_state->msg_callbacks_pending,
2328 		 sizeof(hif_state->msg_callbacks_pending));
2329 }
2330 
2331 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2332 							uint8_t *DLPipe)
2333 {
2334 	int ul_is_polled, dl_is_polled;
2335 
2336 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
2337 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2338 }
2339 
2340 /**
2341  * hif_dump_pipe_debug_count() - Log error count
2342  * @scn: hif_softc pointer.
2343  *
2344  * Output the pipe error counts of each pipe to log file
2345  *
2346  * Return: N/A
2347  */
2348 void hif_dump_pipe_debug_count(struct hif_softc *scn)
2349 {
2350 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2351 	int pipe_num;
2352 
2353 	if (!hif_state) {
2354 		HIF_ERROR("%s hif_state is NULL", __func__);
2355 		return;
2356 	}
2357 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2358 		struct HIF_CE_pipe_info *pipe_info;
2359 
2360 	pipe_info = &hif_state->pipe_info[pipe_num];
2361 
2362 	if (pipe_info->nbuf_alloc_err_count > 0 ||
2363 			pipe_info->nbuf_dma_err_count > 0 ||
2364 			pipe_info->nbuf_ce_enqueue_err_count)
2365 		HIF_ERROR(
2366 			"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
2367 			__func__, pipe_info->pipe_num,
2368 			atomic_read(&pipe_info->recv_bufs_needed),
2369 			pipe_info->nbuf_alloc_err_count,
2370 			pipe_info->nbuf_dma_err_count,
2371 			pipe_info->nbuf_ce_enqueue_err_count);
2372 	}
2373 }
2374 
2375 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2376 					  void *nbuf, uint32_t *error_cnt,
2377 					  enum hif_ce_event_type failure_type,
2378 					  const char *failure_type_string)
2379 {
2380 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2381 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2382 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2383 	int ce_id = CE_state->id;
2384 	uint32_t error_cnt_tmp;
2385 
2386 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2387 	error_cnt_tmp = ++(*error_cnt);
2388 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2389 	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
2390 		  __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2391 		  failure_type_string);
2392 	hif_record_ce_desc_event(scn, ce_id, failure_type,
2393 				 NULL, nbuf, bufs_needed_tmp, 0);
2394 	/* if we fail to allocate the last buffer for an rx pipe,
2395 	 *	there is no trigger to refill the ce and we will
2396 	 *	eventually crash
2397 	 */
2398 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
2399 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
2400 
2401 }
2402 
2403 
2404 
2405 
2406 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
2407 {
2408 	struct CE_handle *ce_hdl;
2409 	qdf_size_t buf_sz;
2410 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2411 	QDF_STATUS status;
2412 	uint32_t bufs_posted = 0;
2413 	unsigned int ce_id;
2414 
2415 	buf_sz = pipe_info->buf_sz;
2416 	if (buf_sz == 0) {
2417 		/* Unused Copy Engine */
2418 		return QDF_STATUS_SUCCESS;
2419 	}
2420 
2421 	ce_hdl = pipe_info->ce_hdl;
2422 	ce_id = ((struct CE_state *)ce_hdl)->id;
2423 
2424 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2425 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
2426 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
2427 		qdf_nbuf_t nbuf;
2428 
2429 		atomic_dec(&pipe_info->recv_bufs_needed);
2430 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2431 
2432 		hif_record_ce_desc_event(scn, ce_id,
2433 					 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL,
2434 					 0, 0);
2435 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
2436 		if (!nbuf) {
2437 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2438 					&pipe_info->nbuf_alloc_err_count,
2439 					 HIF_RX_NBUF_ALLOC_FAILURE,
2440 					"HIF_RX_NBUF_ALLOC_FAILURE");
2441 			return QDF_STATUS_E_NOMEM;
2442 		}
2443 
2444 		hif_record_ce_desc_event(scn, ce_id,
2445 					 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf,
2446 					 0, 0);
2447 		/*
2448 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
2449 		 * CE_data = dma_map_single(dev, data, buf_sz, );
2450 		 * DMA_FROM_DEVICE);
2451 		 */
2452 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
2453 					    QDF_DMA_FROM_DEVICE);
2454 
2455 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2456 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2457 					&pipe_info->nbuf_dma_err_count,
2458 					 HIF_RX_NBUF_MAP_FAILURE,
2459 					"HIF_RX_NBUF_MAP_FAILURE");
2460 			qdf_nbuf_free(nbuf);
2461 			return status;
2462 		}
2463 
2464 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
2465 		hif_record_ce_desc_event(scn, ce_id,
2466 					 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf,
2467 					 0, 0);
2468 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
2469 					       buf_sz, DMA_FROM_DEVICE);
2470 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
2471 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2472 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2473 					&pipe_info->nbuf_ce_enqueue_err_count,
2474 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
2475 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
2476 
2477 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2478 						QDF_DMA_FROM_DEVICE);
2479 			qdf_nbuf_free(nbuf);
2480 			return status;
2481 		}
2482 
2483 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2484 		bufs_posted++;
2485 	}
2486 	pipe_info->nbuf_alloc_err_count =
2487 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
2488 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2489 	pipe_info->nbuf_dma_err_count =
2490 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
2491 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2492 	pipe_info->nbuf_ce_enqueue_err_count =
2493 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
2494 	pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
2495 
2496 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2497 
2498 	return QDF_STATUS_SUCCESS;
2499 }
2500 
2501 /*
2502  * Try to post all desired receive buffers for all pipes.
2503  * Returns 0 for non fastpath rx copy engine as
2504  * oom_allocation_work will be scheduled to recover any
2505  * failures, non-zero if unable to completely replenish
2506  * receive buffers for fastpath rx Copy engine.
2507  */
2508 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
2509 {
2510 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2511 	int pipe_num;
2512 	struct CE_state *ce_state = NULL;
2513 	QDF_STATUS qdf_status;
2514 
2515 	A_TARGET_ACCESS_LIKELY(scn);
2516 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2517 		struct HIF_CE_pipe_info *pipe_info;
2518 
2519 		ce_state = scn->ce_id_to_state[pipe_num];
2520 		pipe_info = &hif_state->pipe_info[pipe_num];
2521 
2522 		if (hif_is_nss_wifi_enabled(scn) &&
2523 		    ce_state && (ce_state->htt_rx_data))
2524 			continue;
2525 
2526 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
2527 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
2528 			ce_state->htt_rx_data &&
2529 			scn->fastpath_mode_on) {
2530 			A_TARGET_ACCESS_UNLIKELY(scn);
2531 			return qdf_status;
2532 		}
2533 	}
2534 
2535 	A_TARGET_ACCESS_UNLIKELY(scn);
2536 
2537 	return QDF_STATUS_SUCCESS;
2538 }
2539 
2540 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
2541 {
2542 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2543 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2544 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
2545 
2546 	hif_update_fastpath_recv_bufs_cnt(scn);
2547 
2548 	hif_msg_callbacks_install(scn);
2549 
2550 	if (hif_completion_thread_startup(hif_state))
2551 		return QDF_STATUS_E_FAILURE;
2552 
2553 	/* enable buffer cleanup */
2554 	hif_state->started = true;
2555 
2556 	/* Post buffers once to start things off. */
2557 	qdf_status = hif_post_recv_buffers(scn);
2558 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2559 		/* cleanup is done in hif_ce_disable */
2560 		HIF_ERROR("%s:failed to post buffers", __func__);
2561 		return qdf_status;
2562 	}
2563 
2564 	return qdf_status;
2565 }
2566 
2567 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2568 {
2569 	struct hif_softc *scn;
2570 	struct CE_handle *ce_hdl;
2571 	uint32_t buf_sz;
2572 	struct HIF_CE_state *hif_state;
2573 	qdf_nbuf_t netbuf;
2574 	qdf_dma_addr_t CE_data;
2575 	void *per_CE_context;
2576 
2577 	buf_sz = pipe_info->buf_sz;
2578 	/* Unused Copy Engine */
2579 	if (buf_sz == 0)
2580 		return;
2581 
2582 
2583 	hif_state = pipe_info->HIF_CE_state;
2584 	if (!hif_state->started)
2585 		return;
2586 
2587 	scn = HIF_GET_SOFTC(hif_state);
2588 	ce_hdl = pipe_info->ce_hdl;
2589 
2590 	if (!scn->qdf_dev)
2591 		return;
2592 	while (ce_revoke_recv_next
2593 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
2594 			&CE_data) == QDF_STATUS_SUCCESS) {
2595 		if (netbuf) {
2596 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2597 					      QDF_DMA_FROM_DEVICE);
2598 			qdf_nbuf_free(netbuf);
2599 		}
2600 	}
2601 }
2602 
2603 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2604 {
2605 	struct CE_handle *ce_hdl;
2606 	struct HIF_CE_state *hif_state;
2607 	struct hif_softc *scn;
2608 	qdf_nbuf_t netbuf;
2609 	void *per_CE_context;
2610 	qdf_dma_addr_t CE_data;
2611 	unsigned int nbytes;
2612 	unsigned int id;
2613 	uint32_t buf_sz;
2614 	uint32_t toeplitz_hash_result;
2615 
2616 	buf_sz = pipe_info->buf_sz;
2617 	if (buf_sz == 0) {
2618 		/* Unused Copy Engine */
2619 		return;
2620 	}
2621 
2622 	hif_state = pipe_info->HIF_CE_state;
2623 	if (!hif_state->started) {
2624 		return;
2625 	}
2626 
2627 	scn = HIF_GET_SOFTC(hif_state);
2628 
2629 	ce_hdl = pipe_info->ce_hdl;
2630 
2631 	while (ce_cancel_send_next
2632 		       (ce_hdl, &per_CE_context,
2633 		       (void **)&netbuf, &CE_data, &nbytes,
2634 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2635 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2636 			/*
2637 			 * Packets enqueued by htt_h2t_ver_req_msg() and
2638 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2639 			 * freed in htt_htc_misc_pkt_pool_free() in
2640 			 * wlantl_close(), so do not free them here again
2641 			 * by checking whether it's the endpoint
2642 			 * which they are queued in.
2643 			 */
2644 			if (id == scn->htc_htt_tx_endpoint)
2645 				return;
2646 			/* Indicate the completion to higher
2647 			 * layer to free the buffer
2648 			 */
2649 			if (pipe_info->pipe_callbacks.txCompletionHandler)
2650 				pipe_info->pipe_callbacks.
2651 				    txCompletionHandler(pipe_info->
2652 					    pipe_callbacks.Context,
2653 					    netbuf, id, toeplitz_hash_result);
2654 		}
2655 	}
2656 }
2657 
2658 /*
2659  * Cleanup residual buffers for device shutdown:
2660  *    buffers that were enqueued for receive
2661  *    buffers that were to be sent
2662  * Note: Buffers that had completed but which were
2663  * not yet processed are on a completion queue. They
2664  * are handled when the completion thread shuts down.
2665  */
2666 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
2667 {
2668 	int pipe_num;
2669 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2670 	struct CE_state *ce_state;
2671 
2672 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2673 		struct HIF_CE_pipe_info *pipe_info;
2674 
2675 		ce_state = scn->ce_id_to_state[pipe_num];
2676 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2677 				((ce_state->htt_tx_data) ||
2678 				 (ce_state->htt_rx_data))) {
2679 			continue;
2680 		}
2681 
2682 		pipe_info = &hif_state->pipe_info[pipe_num];
2683 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
2684 		hif_send_buffer_cleanup_on_pipe(pipe_info);
2685 	}
2686 }
2687 
2688 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
2689 {
2690 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2691 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2692 
2693 	hif_buffer_cleanup(hif_state);
2694 }
2695 
2696 static void hif_destroy_oom_work(struct hif_softc *scn)
2697 {
2698 	struct CE_state *ce_state;
2699 	int ce_id;
2700 
2701 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2702 		ce_state = scn->ce_id_to_state[ce_id];
2703 		if (ce_state)
2704 			qdf_destroy_work(scn->qdf_dev,
2705 					 &ce_state->oom_allocation_work);
2706 	}
2707 }
2708 
2709 void hif_ce_stop(struct hif_softc *scn)
2710 {
2711 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2712 	int pipe_num;
2713 
2714 	/*
2715 	 * before cleaning up any memory, ensure irq &
2716 	 * bottom half contexts will not be re-entered
2717 	 */
2718 	hif_disable_isr(&scn->osc);
2719 	hif_destroy_oom_work(scn);
2720 	scn->hif_init_done = false;
2721 
2722 	/*
2723 	 * At this point, asynchronous threads are stopped,
2724 	 * The Target should not DMA nor interrupt, Host code may
2725 	 * not initiate anything more.  So we just need to clean
2726 	 * up Host-side state.
2727 	 */
2728 
2729 	if (scn->athdiag_procfs_inited) {
2730 		athdiag_procfs_remove();
2731 		scn->athdiag_procfs_inited = false;
2732 	}
2733 
2734 	hif_buffer_cleanup(hif_state);
2735 
2736 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2737 		struct HIF_CE_pipe_info *pipe_info;
2738 		struct CE_attr attr;
2739 		struct CE_handle *ce_diag = hif_state->ce_diag;
2740 
2741 		pipe_info = &hif_state->pipe_info[pipe_num];
2742 		if (pipe_info->ce_hdl) {
2743 			if (pipe_info->ce_hdl != ce_diag) {
2744 				attr = hif_state->host_ce_config[pipe_num];
2745 				if (attr.src_nentries)
2746 					qdf_spinlock_destroy(&pipe_info->
2747 							completion_freeq_lock);
2748 			}
2749 			ce_fini(pipe_info->ce_hdl);
2750 			pipe_info->ce_hdl = NULL;
2751 			pipe_info->buf_sz = 0;
2752 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2753 		}
2754 	}
2755 
2756 	if (hif_state->sleep_timer_init) {
2757 		qdf_timer_stop(&hif_state->sleep_timer);
2758 		qdf_timer_free(&hif_state->sleep_timer);
2759 		hif_state->sleep_timer_init = false;
2760 	}
2761 
2762 	hif_state->started = false;
2763 }
2764 
2765 static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
2766 				   struct shadow_reg_cfg
2767 				   **target_shadow_reg_cfg_ret,
2768 				   uint32_t *shadow_cfg_sz_ret)
2769 {
2770 	if (target_shadow_reg_cfg_ret)
2771 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2772 	if (shadow_cfg_sz_ret)
2773 		*shadow_cfg_sz_ret = shadow_cfg_sz;
2774 }
2775 
2776 /**
2777  * hif_get_target_ce_config() - get copy engine configuration
2778  * @target_ce_config_ret: basic copy engine configuration
2779  * @target_ce_config_sz_ret: size of the basic configuration in bytes
2780  * @target_service_to_ce_map_ret: service mapping for the copy engines
2781  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2782  * @target_shadow_reg_cfg_ret: shadow register configuration
2783  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2784  *
2785  * providing accessor to these values outside of this file.
2786  * currently these are stored in static pointers to const sections.
2787  * there are multiple configurations that are selected from at compile time.
2788  * Runtime selection would need to consider mode, target type and bus type.
2789  *
2790  * Return: return by parameter.
2791  */
2792 void hif_get_target_ce_config(struct hif_softc *scn,
2793 		struct CE_pipe_config **target_ce_config_ret,
2794 		uint32_t *target_ce_config_sz_ret,
2795 		struct service_to_pipe **target_service_to_ce_map_ret,
2796 		uint32_t *target_service_to_ce_map_sz_ret,
2797 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
2798 		uint32_t *shadow_cfg_sz_ret)
2799 {
2800 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2801 
2802 	*target_ce_config_ret = hif_state->target_ce_config;
2803 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
2804 
2805 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2806 				       target_service_to_ce_map_sz_ret);
2807 	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
2808 			       shadow_cfg_sz_ret);
2809 }
2810 
2811 #ifdef CONFIG_SHADOW_V2
2812 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2813 {
2814 	int i;
2815 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2816 		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
2817 
2818 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2819 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2820 		     "%s: i %d, val %x", __func__, i,
2821 		     cfg->shadow_reg_v2_cfg[i].addr);
2822 	}
2823 }
2824 
2825 #else
2826 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2827 {
2828 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2829 		  "%s: CONFIG_SHADOW_V2 not defined", __func__);
2830 }
2831 #endif
2832 
2833 #ifdef ADRASTEA_RRI_ON_DDR
2834 /**
2835  * hif_get_src_ring_read_index(): Called to get the SRRI
2836  *
2837  * @scn: hif_softc pointer
2838  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2839  *
2840  * This function returns the SRRI to the caller. For CEs that
2841  * dont have interrupts enabled, we look at the DDR based SRRI
2842  *
2843  * Return: SRRI
2844  */
2845 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
2846 		uint32_t CE_ctrl_addr)
2847 {
2848 	struct CE_attr attr;
2849 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2850 
2851 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2852 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
2853 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2854 	} else {
2855 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2856 			return A_TARGET_READ(scn,
2857 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2858 		else
2859 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
2860 					CE_ctrl_addr);
2861 	}
2862 }
2863 
2864 /**
2865  * hif_get_dst_ring_read_index(): Called to get the DRRI
2866  *
2867  * @scn: hif_softc pointer
2868  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2869  *
2870  * This function returns the DRRI to the caller. For CEs that
2871  * dont have interrupts enabled, we look at the DDR based DRRI
2872  *
2873  * Return: DRRI
2874  */
2875 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
2876 		uint32_t CE_ctrl_addr)
2877 {
2878 	struct CE_attr attr;
2879 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2880 
2881 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2882 
2883 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
2884 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2885 	} else {
2886 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2887 			return A_TARGET_READ(scn,
2888 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2889 		else
2890 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
2891 					CE_ctrl_addr);
2892 	}
2893 }
2894 
2895 /**
2896  * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
2897  * @scn: hif_softc pointer
2898  *
2899  * Return: qdf status
2900  */
2901 static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
2902 {
2903 	qdf_dma_addr_t paddr_rri_on_ddr = 0;
2904 
2905 	scn->vaddr_rri_on_ddr =
2906 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
2907 		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
2908 		&paddr_rri_on_ddr);
2909 
2910 	if (!scn->vaddr_rri_on_ddr) {
2911 		hif_err("dmaable page alloc fail");
2912 		return QDF_STATUS_E_NOMEM;
2913 	}
2914 
2915 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
2916 
2917 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
2918 
2919 	return QDF_STATUS_SUCCESS;
2920 }
2921 #endif
2922 
2923 #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
2924 /**
2925  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2926  *
2927  * @scn: hif_softc pointer
2928  *
2929  * This function allocates non cached memory on ddr and sends
2930  * the physical address of this memory to the CE hardware. The
2931  * hardware updates the RRI on this particular location.
2932  *
2933  * Return: None
2934  */
2935 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
2936 {
2937 	unsigned int i;
2938 	uint32_t high_paddr, low_paddr;
2939 
2940 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
2941 		return;
2942 
2943 	low_paddr  = BITS0_TO_31(scn->paddr_rri_on_ddr);
2944 	high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);
2945 
2946 	HIF_DBG("%s using srri and drri from DDR", __func__);
2947 
2948 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
2949 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
2950 
2951 	for (i = 0; i < CE_COUNT; i++)
2952 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
2953 }
2954 #else
2955 /**
2956  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2957  *
2958  * @scn: hif_softc pointer
2959  *
2960  * This is a dummy implementation for platforms that don't
2961  * support this functionality.
2962  *
2963  * Return: None
2964  */
2965 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
2966 {
2967 }
2968 #endif
2969 
2970 /**
2971  * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
2972  *                                    QMI command
2973  * @scn: hif context
2974  * @cfg: wlan enable config
2975  *
2976  * In case of Genoa, rri_over_ddr memory configuration is passed
2977  * to firmware through QMI configure command.
2978  */
2979 #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
2980 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
2981 					   struct pld_wlan_enable_cfg *cfg)
2982 {
2983 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
2984 		return;
2985 
2986 	cfg->rri_over_ddr_cfg_valid = true;
2987 	cfg->rri_over_ddr_cfg.base_addr_low =
2988 		 BITS0_TO_31(scn->paddr_rri_on_ddr);
2989 	cfg->rri_over_ddr_cfg.base_addr_high =
2990 		 BITS32_TO_35(scn->paddr_rri_on_ddr);
2991 }
2992 #else
2993 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
2994 					   struct pld_wlan_enable_cfg *cfg)
2995 {
2996 }
2997 #endif
2998 
2999 /**
3000  * hif_wlan_enable(): call the platform driver to enable wlan
3001  * @scn: HIF Context
3002  *
3003  * This function passes the con_mode and CE configuration to
3004  * platform driver to enable wlan.
3005  *
3006  * Return: linux error code
3007  */
3008 int hif_wlan_enable(struct hif_softc *scn)
3009 {
3010 	struct pld_wlan_enable_cfg cfg;
3011 	enum pld_driver_mode mode;
3012 	uint32_t con_mode = hif_get_conparam(scn);
3013 
3014 	hif_get_target_ce_config(scn,
3015 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
3016 			&cfg.num_ce_tgt_cfg,
3017 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
3018 			&cfg.num_ce_svc_pipe_cfg,
3019 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
3020 			&cfg.num_shadow_reg_cfg);
3021 
3022 	/* translate from structure size to array size */
3023 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
3024 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
3025 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
3026 
3027 	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
3028 			      &cfg.num_shadow_reg_v2_cfg);
3029 
3030 	hif_print_hal_shadow_register_cfg(&cfg);
3031 
3032 	hif_update_rri_over_ddr_config(scn, &cfg);
3033 
3034 	if (QDF_GLOBAL_FTM_MODE == con_mode)
3035 		mode = PLD_FTM;
3036 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
3037 		mode = PLD_COLDBOOT_CALIBRATION;
3038 	else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode)
3039 		mode = PLD_FTM_COLDBOOT_CALIBRATION;
3040 	else if (QDF_IS_EPPING_ENABLED(con_mode))
3041 		mode = PLD_EPPING;
3042 	else
3043 		mode = PLD_MISSION;
3044 
3045 	if (BYPASS_QMI)
3046 		return 0;
3047 	else
3048 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode);
3049 }
3050 
3051 #ifdef WLAN_FEATURE_EPPING
3052 
3053 #define CE_EPPING_USES_IRQ true
3054 
3055 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
3056 {
3057 	if (CE_EPPING_USES_IRQ)
3058 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
3059 	else
3060 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
3061 	hif_state->target_ce_config = target_ce_config_wlan_epping;
3062 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
3063 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
3064 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
3065 }
3066 #endif
3067 
3068 #ifdef QCN7605_SUPPORT
3069 static inline
3070 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3071 			       struct HIF_CE_state *hif_state)
3072 {
3073 	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
3074 	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
3075 	hif_state->target_ce_config_sz =
3076 				 sizeof(target_ce_config_wlan_qcn7605);
3077 	target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
3078 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
3079 	scn->ce_count = QCN7605_CE_COUNT;
3080 }
3081 #else
3082 static inline
3083 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3084 			       struct HIF_CE_state *hif_state)
3085 {
3086 	HIF_ERROR("QCN7605 not supported");
3087 }
3088 #endif
3089 
3090 #ifdef CE_SVC_CMN_INIT
3091 #ifdef QCA_WIFI_SUPPORT_SRNG
3092 static inline void hif_ce_service_init(void)
3093 {
3094 	ce_service_srng_init();
3095 }
3096 #else
3097 static inline void hif_ce_service_init(void)
3098 {
3099 	ce_service_legacy_init();
3100 }
3101 #endif
3102 #else
3103 static inline void hif_ce_service_init(void)
3104 {
3105 }
3106 #endif
3107 
3108 
3109 /**
3110  * hif_ce_prepare_config() - load the correct static tables.
3111  * @scn: hif context
3112  *
3113  * Epping uses different static attribute tables than mission mode.
3114  */
3115 void hif_ce_prepare_config(struct hif_softc *scn)
3116 {
3117 	uint32_t mode = hif_get_conparam(scn);
3118 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3119 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
3120 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3121 
3122 	hif_ce_service_init();
3123 	hif_state->ce_services = ce_services_attach(scn);
3124 
3125 	scn->ce_count = HOST_CE_COUNT;
3126 	/* if epping is enabled we need to use the epping configuration. */
3127 	if (QDF_IS_EPPING_ENABLED(mode)) {
3128 		hif_ce_prepare_epping_config(hif_state);
3129 		return;
3130 	}
3131 
3132 	switch (tgt_info->target_type) {
3133 	default:
3134 		hif_state->host_ce_config = host_ce_config_wlan;
3135 		hif_state->target_ce_config = target_ce_config_wlan;
3136 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
3137 		break;
3138 	case TARGET_TYPE_QCN7605:
3139 		hif_set_ce_config_qcn7605(scn, hif_state);
3140 		break;
3141 	case TARGET_TYPE_AR900B:
3142 	case TARGET_TYPE_QCA9984:
3143 	case TARGET_TYPE_IPQ4019:
3144 	case TARGET_TYPE_QCA9888:
3145 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3146 			hif_state->host_ce_config =
3147 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
3148 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3149 			hif_state->host_ce_config =
3150 				host_lowdesc_ce_cfg_wlan_ar900b;
3151 		} else {
3152 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
3153 		}
3154 
3155 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
3156 		hif_state->target_ce_config_sz =
3157 				sizeof(target_ce_config_wlan_ar900b);
3158 
3159 		break;
3160 
3161 	case TARGET_TYPE_AR9888:
3162 	case TARGET_TYPE_AR9888V2:
3163 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3164 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
3165 		} else {
3166 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
3167 		}
3168 
3169 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
3170 		hif_state->target_ce_config_sz =
3171 					sizeof(target_ce_config_wlan_ar9888);
3172 
3173 		break;
3174 
3175 	case TARGET_TYPE_QCA8074:
3176 	case TARGET_TYPE_QCA8074V2:
3177 	case TARGET_TYPE_QCA6018:
3178 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
3179 			hif_state->host_ce_config =
3180 					host_ce_config_wlan_qca8074_pci;
3181 			hif_state->target_ce_config =
3182 				target_ce_config_wlan_qca8074_pci;
3183 			hif_state->target_ce_config_sz =
3184 				sizeof(target_ce_config_wlan_qca8074_pci);
3185 		} else {
3186 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
3187 			hif_state->target_ce_config =
3188 					target_ce_config_wlan_qca8074;
3189 			hif_state->target_ce_config_sz =
3190 				sizeof(target_ce_config_wlan_qca8074);
3191 		}
3192 		break;
3193 	case TARGET_TYPE_QCA6290:
3194 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
3195 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
3196 		hif_state->target_ce_config_sz =
3197 					sizeof(target_ce_config_wlan_qca6290);
3198 
3199 		scn->ce_count = QCA_6290_CE_COUNT;
3200 		break;
3201 	case TARGET_TYPE_QCN9000:
3202 		hif_state->host_ce_config = host_ce_config_wlan_qcn9000;
3203 		hif_state->target_ce_config = target_ce_config_wlan_qcn9000;
3204 		hif_state->target_ce_config_sz =
3205 					sizeof(target_ce_config_wlan_qcn9000);
3206 		scn->ce_count = QCN_9000_CE_COUNT;
3207 		break;
3208 	case TARGET_TYPE_QCA6390:
3209 		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
3210 		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
3211 		hif_state->target_ce_config_sz =
3212 					sizeof(target_ce_config_wlan_qca6390);
3213 
3214 		scn->ce_count = QCA_6390_CE_COUNT;
3215 		break;
3216 	case TARGET_TYPE_QCA6490:
3217 		hif_state->host_ce_config = host_ce_config_wlan_qca6490;
3218 		hif_state->target_ce_config = target_ce_config_wlan_qca6490;
3219 		hif_state->target_ce_config_sz =
3220 					sizeof(target_ce_config_wlan_qca6490);
3221 
3222 		scn->ce_count = QCA_6490_CE_COUNT;
3223 		break;
3224 	case TARGET_TYPE_ADRASTEA:
3225 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3226 			hif_state->host_ce_config =
3227 				host_lowdesc_ce_config_wlan_adrastea_nopktlog;
3228 			hif_state->target_ce_config =
3229 			       target_lowdesc_ce_config_wlan_adrastea_nopktlog;
3230 			hif_state->target_ce_config_sz =
3231 			sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog);
3232 		} else {
3233 			hif_state->host_ce_config =
3234 				host_ce_config_wlan_adrastea;
3235 			hif_state->target_ce_config =
3236 					target_ce_config_wlan_adrastea;
3237 			hif_state->target_ce_config_sz =
3238 					sizeof(target_ce_config_wlan_adrastea);
3239 		}
3240 		break;
3241 
3242 	}
3243 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
3244 }
3245 
3246 /**
3247  * hif_ce_open() - do ce specific allocations
3248  * @hif_sc: pointer to hif context
3249  *
3250  * return: 0 for success or QDF_STATUS_E_NOMEM
3251  */
3252 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
3253 {
3254 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3255 
3256 	qdf_spinlock_create(&hif_state->irq_reg_lock);
3257 	qdf_spinlock_create(&hif_state->keep_awake_lock);
3258 	return QDF_STATUS_SUCCESS;
3259 }
3260 
3261 /**
3262  * hif_ce_close() - do ce specific free
3263  * @hif_sc: pointer to hif context
3264  */
3265 void hif_ce_close(struct hif_softc *hif_sc)
3266 {
3267 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3268 
3269 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
3270 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
3271 }
3272 
3273 /**
3274  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
3275  * @hif_sc: hif context
3276  *
3277  * uses state variables to support cleaning up when hif_config_ce fails.
3278  */
3279 void hif_unconfig_ce(struct hif_softc *hif_sc)
3280 {
3281 	int pipe_num;
3282 	struct HIF_CE_pipe_info *pipe_info;
3283 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3284 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
3285 
3286 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3287 		pipe_info = &hif_state->pipe_info[pipe_num];
3288 		if (pipe_info->ce_hdl) {
3289 			ce_unregister_irq(hif_state, (1 << pipe_num));
3290 		}
3291 	}
3292 	deinit_tasklet_workers(hif_hdl);
3293 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3294 		pipe_info = &hif_state->pipe_info[pipe_num];
3295 		if (pipe_info->ce_hdl) {
3296 			ce_fini(pipe_info->ce_hdl);
3297 			pipe_info->ce_hdl = NULL;
3298 			pipe_info->buf_sz = 0;
3299 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
3300 		}
3301 	}
3302 	if (hif_sc->athdiag_procfs_inited) {
3303 		athdiag_procfs_remove();
3304 		hif_sc->athdiag_procfs_inited = false;
3305 	}
3306 }
3307 
3308 #ifdef CONFIG_BYPASS_QMI
3309 #ifdef QCN7605_SUPPORT
3310 /**
3311  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3312  * @scn: pointer to HIF structure
3313  *
3314  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3315  *
3316  * Return: void
3317  */
3318 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3319 {
3320 	void *target_va;
3321 	phys_addr_t target_pa;
3322 	struct ce_info *ce_info_ptr;
3323 	uint32_t msi_data_start;
3324 	uint32_t msi_data_count;
3325 	uint32_t msi_irq_start;
3326 	uint32_t i = 0;
3327 	int ret;
3328 
3329 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev,
3330 					     scn->qdf_dev->dev,
3331 					     FW_SHARED_MEM +
3332 					     sizeof(struct ce_info),
3333 					     &target_pa);
3334 	if (!target_va)
3335 		return;
3336 
3337 	ce_info_ptr = (struct ce_info *)target_va;
3338 
3339 	if (scn->vaddr_rri_on_ddr) {
3340 		ce_info_ptr->rri_over_ddr_low_paddr  =
3341 			 BITS0_TO_31(scn->paddr_rri_on_ddr);
3342 		ce_info_ptr->rri_over_ddr_high_paddr =
3343 			 BITS32_TO_35(scn->paddr_rri_on_ddr);
3344 	}
3345 
3346 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3347 					  &msi_data_count, &msi_data_start,
3348 					  &msi_irq_start);
3349 	if (ret) {
3350 		hif_err("Failed to get CE msi config");
3351 		return;
3352 	}
3353 
3354 	for (i = 0; i < CE_COUNT_MAX; i++) {
3355 		ce_info_ptr->cfg[i].ce_id = i;
3356 		ce_info_ptr->cfg[i].msi_vector =
3357 			 (i % msi_data_count) + msi_irq_start;
3358 	}
3359 
3360 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3361 	hif_info("target va %pK target pa %pa", target_va, &target_pa);
3362 }
3363 #else
3364 /**
3365  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3366  * @scn: pointer to HIF structure
3367  *
3368  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3369  *
3370  * Return: void
3371  */
3372 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3373 {
3374 	void *target_va;
3375 	phys_addr_t target_pa;
3376 
3377 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
3378 				FW_SHARED_MEM, &target_pa);
3379 	if (!target_va) {
3380 		HIF_TRACE("Memory allocation failed could not post target buf");
3381 		return;
3382 	}
3383 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3384 	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
3385 }
3386 #endif
3387 
3388 #else
3389 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
3390 {
3391 }
3392 #endif
3393 
3394 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
3395 				bool wait_for_it)
3396 {
3397 	/* todo */
3398 	return 0;
3399 }
3400 
3401 /**
3402  * hif_config_ce() - configure copy engines
3403  * @scn: hif context
3404  *
3405  * Prepares fw, copy engine hardware and host sw according
3406  * to the attributes selected by hif_ce_prepare_config.
3407  *
3408  * also calls athdiag_procfs_init
3409  *
3410  * return: 0 for success nonzero for failure.
3411  */
3412 int hif_config_ce(struct hif_softc *scn)
3413 {
3414 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3415 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3416 	struct HIF_CE_pipe_info *pipe_info;
3417 	int pipe_num;
3418 	struct CE_state *ce_state = NULL;
3419 
3420 #ifdef ADRASTEA_SHADOW_REGISTERS
3421 	int i;
3422 #endif
3423 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
3424 
3425 	scn->notice_send = true;
3426 	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
3427 
3428 	hif_post_static_buf_to_target(scn);
3429 
3430 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
3431 
3432 	hif_config_rri_on_ddr(scn);
3433 
3434 	if (ce_srng_based(scn))
3435 		scn->bus_ops.hif_target_sleep_state_adjust =
3436 			&hif_srng_sleep_state_adjust;
3437 
3438 	/* Initialise the CE debug history sysfs interface inputs ce_id and
3439 	 * index. Disable data storing
3440 	 */
3441 	reset_ce_debug_history(scn);
3442 
3443 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3444 		struct CE_attr *attr;
3445 
3446 		pipe_info = &hif_state->pipe_info[pipe_num];
3447 		pipe_info->pipe_num = pipe_num;
3448 		pipe_info->HIF_CE_state = hif_state;
3449 		attr = &hif_state->host_ce_config[pipe_num];
3450 
3451 		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
3452 		ce_state = scn->ce_id_to_state[pipe_num];
3453 		if (!ce_state) {
3454 			A_TARGET_ACCESS_UNLIKELY(scn);
3455 			goto err;
3456 		}
3457 		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
3458 		QDF_ASSERT(pipe_info->ce_hdl);
3459 		if (!pipe_info->ce_hdl) {
3460 			rv = QDF_STATUS_E_FAILURE;
3461 			A_TARGET_ACCESS_UNLIKELY(scn);
3462 			goto err;
3463 		}
3464 
3465 		ce_state->lro_data = qdf_lro_init();
3466 
3467 		if (attr->flags & CE_ATTR_DIAG) {
3468 			/* Reserve the ultimate CE for
3469 			 * Diagnostic Window support
3470 			 */
3471 			hif_state->ce_diag = pipe_info->ce_hdl;
3472 			continue;
3473 		}
3474 
3475 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3476 				(ce_state->htt_rx_data))
3477 			continue;
3478 
3479 		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
3480 		if (attr->dest_nentries > 0) {
3481 			atomic_set(&pipe_info->recv_bufs_needed,
3482 				   init_buffer_count(attr->dest_nentries - 1));
3483 			/*SRNG based CE has one entry less */
3484 			if (ce_srng_based(scn))
3485 				atomic_dec(&pipe_info->recv_bufs_needed);
3486 		} else {
3487 			atomic_set(&pipe_info->recv_bufs_needed, 0);
3488 		}
3489 		ce_tasklet_init(hif_state, (1 << pipe_num));
3490 		ce_register_irq(hif_state, (1 << pipe_num));
3491 	}
3492 
3493 	if (athdiag_procfs_init(scn) != 0) {
3494 		A_TARGET_ACCESS_UNLIKELY(scn);
3495 		goto err;
3496 	}
3497 	scn->athdiag_procfs_inited = true;
3498 
3499 	HIF_DBG("%s: ce_init done", __func__);
3500 
3501 	init_tasklet_workers(hif_hdl);
3502 
3503 	HIF_DBG("%s: X, ret = %d", __func__, rv);
3504 
3505 #ifdef ADRASTEA_SHADOW_REGISTERS
3506 	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
3507 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
3508 		HIF_DBG("%s Shadow Register%d is mapped to address %x",
3509 			  __func__, i,
3510 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
3511 	}
3512 #endif
3513 
3514 	return rv != QDF_STATUS_SUCCESS;
3515 
3516 err:
3517 	/* Failure, so clean up */
3518 	hif_unconfig_ce(scn);
3519 	HIF_TRACE("%s: X, ret = %d", __func__, rv);
3520 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
3521 }
3522 
3523 #ifdef IPA_OFFLOAD
3524 /**
3525  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
3526  * @scn: bus context
3527  * @ce_sr_base_paddr: copyengine source ring base physical address
3528  * @ce_sr_ring_size: copyengine source ring size
3529  * @ce_reg_paddr: copyengine register physical address
3530  *
3531  * IPA micro controller data path offload feature enabled,
3532  * HIF should release copy engine related resource information to IPA UC
3533  * IPA UC will access hardware resource with released information
3534  *
3535  * Return: None
3536  */
3537 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
3538 			     qdf_shared_mem_t **ce_sr,
3539 			     uint32_t *ce_sr_ring_size,
3540 			     qdf_dma_addr_t *ce_reg_paddr)
3541 {
3542 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3543 	struct HIF_CE_pipe_info *pipe_info =
3544 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
3545 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3546 
3547 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
3548 			    ce_reg_paddr);
3549 }
3550 #endif /* IPA_OFFLOAD */
3551 
3552 
3553 #ifdef ADRASTEA_SHADOW_REGISTERS
3554 
3555 /*
3556  * Current shadow register config
3557  *
3558  * -----------------------------------------------------------
3559  * Shadow Register      |     CE   |    src/dst write index
3560  * -----------------------------------------------------------
3561  *         0            |     0    |           src
3562  *         1     No Config - Doesn't point to anything
3563  *         2     No Config - Doesn't point to anything
3564  *         3            |     3    |           src
3565  *         4            |     4    |           src
3566  *         5            |     5    |           src
3567  *         6     No Config - Doesn't point to anything
3568  *         7            |     7    |           src
3569  *         8     No Config - Doesn't point to anything
3570  *         9     No Config - Doesn't point to anything
3571  *         10    No Config - Doesn't point to anything
3572  *         11    No Config - Doesn't point to anything
3573  * -----------------------------------------------------------
3574  *         12    No Config - Doesn't point to anything
3575  *         13           |     1    |           dst
3576  *         14           |     2    |           dst
3577  *         15    No Config - Doesn't point to anything
3578  *         16    No Config - Doesn't point to anything
3579  *         17    No Config - Doesn't point to anything
3580  *         18    No Config - Doesn't point to anything
3581  *         19           |     7    |           dst
3582  *         20           |     8    |           dst
3583  *         21    No Config - Doesn't point to anything
3584  *         22    No Config - Doesn't point to anything
3585  *         23    No Config - Doesn't point to anything
3586  * -----------------------------------------------------------
3587  *
3588  *
3589  * ToDo - Move shadow register config to following in the future
3590  * This helps free up a block of shadow registers towards the end.
3591  * Can be used for other purposes
3592  *
3593  * -----------------------------------------------------------
3594  * Shadow Register      |     CE   |    src/dst write index
3595  * -----------------------------------------------------------
3596  *      0            |     0    |           src
3597  *      1            |     3    |           src
3598  *      2            |     4    |           src
3599  *      3            |     5    |           src
3600  *      4            |     7    |           src
3601  * -----------------------------------------------------------
3602  *      5            |     1    |           dst
3603  *      6            |     2    |           dst
3604  *      7            |     7    |           dst
3605  *      8            |     8    |           dst
3606  * -----------------------------------------------------------
3607  *      9     No Config - Doesn't point to anything
3608  *      12    No Config - Doesn't point to anything
3609  *      13    No Config - Doesn't point to anything
3610  *      14    No Config - Doesn't point to anything
3611  *      15    No Config - Doesn't point to anything
3612  *      16    No Config - Doesn't point to anything
3613  *      17    No Config - Doesn't point to anything
3614  *      18    No Config - Doesn't point to anything
3615  *      19    No Config - Doesn't point to anything
3616  *      20    No Config - Doesn't point to anything
3617  *      21    No Config - Doesn't point to anything
3618  *      22    No Config - Doesn't point to anything
3619  *      23    No Config - Doesn't point to anything
3620  * -----------------------------------------------------------
3621 */
3622 #ifndef QCN7605_SUPPORT
3623 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3624 {
3625 	u32 addr = 0;
3626 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3627 
3628 	switch (ce) {
3629 	case 0:
3630 		addr = SHADOW_VALUE0;
3631 		break;
3632 	case 3:
3633 		addr = SHADOW_VALUE3;
3634 		break;
3635 	case 4:
3636 		addr = SHADOW_VALUE4;
3637 		break;
3638 	case 5:
3639 		addr = SHADOW_VALUE5;
3640 		break;
3641 	case 7:
3642 		addr = SHADOW_VALUE7;
3643 		break;
3644 	default:
3645 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3646 		QDF_ASSERT(0);
3647 	}
3648 	return addr;
3649 
3650 }
3651 
3652 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3653 {
3654 	u32 addr = 0;
3655 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3656 
3657 	switch (ce) {
3658 	case 1:
3659 		addr = SHADOW_VALUE13;
3660 		break;
3661 	case 2:
3662 		addr = SHADOW_VALUE14;
3663 		break;
3664 	case 5:
3665 		addr = SHADOW_VALUE17;
3666 		break;
3667 	case 7:
3668 		addr = SHADOW_VALUE19;
3669 		break;
3670 	case 8:
3671 		addr = SHADOW_VALUE20;
3672 		break;
3673 	case 9:
3674 		addr = SHADOW_VALUE21;
3675 		break;
3676 	case 10:
3677 		addr = SHADOW_VALUE22;
3678 		break;
3679 	case 11:
3680 		addr = SHADOW_VALUE23;
3681 		break;
3682 	default:
3683 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3684 		QDF_ASSERT(0);
3685 	}
3686 
3687 	return addr;
3688 
3689 }
3690 #else
3691 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3692 {
3693 	u32 addr = 0;
3694 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3695 
3696 	switch (ce) {
3697 	case 0:
3698 		addr = SHADOW_VALUE0;
3699 		break;
3700 	case 4:
3701 		addr = SHADOW_VALUE4;
3702 		break;
3703 	case 5:
3704 		addr = SHADOW_VALUE5;
3705 		break;
3706 	default:
3707 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3708 		QDF_ASSERT(0);
3709 	}
3710 	return addr;
3711 }
3712 
3713 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3714 {
3715 	u32 addr = 0;
3716 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3717 
3718 	switch (ce) {
3719 	case 1:
3720 		addr = SHADOW_VALUE13;
3721 		break;
3722 	case 2:
3723 		addr = SHADOW_VALUE14;
3724 		break;
3725 	case 3:
3726 		addr = SHADOW_VALUE15;
3727 		break;
3728 	case 5:
3729 		addr = SHADOW_VALUE17;
3730 		break;
3731 	case 7:
3732 		addr = SHADOW_VALUE19;
3733 		break;
3734 	case 8:
3735 		addr = SHADOW_VALUE20;
3736 		break;
3737 	case 9:
3738 		addr = SHADOW_VALUE21;
3739 		break;
3740 	case 10:
3741 		addr = SHADOW_VALUE22;
3742 		break;
3743 	case 11:
3744 		addr = SHADOW_VALUE23;
3745 		break;
3746 	default:
3747 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3748 		QDF_ASSERT(0);
3749 	}
3750 
3751 	return addr;
3752 }
3753 #endif
3754 #endif
3755 
3756 #if defined(FEATURE_LRO)
3757 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
3758 {
3759 	struct CE_state *ce_state;
3760 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3761 
3762 	ce_state = scn->ce_id_to_state[ctx_id];
3763 
3764 	return ce_state->lro_data;
3765 }
3766 #endif
3767 
3768 /**
3769  * hif_map_service_to_pipe() - returns  the ce ids pertaining to
3770  * this service
3771  * @scn: hif_softc pointer.
3772  * @svc_id: Service ID for which the mapping is needed.
3773  * @ul_pipe: address of the container in which ul pipe is returned.
3774  * @dl_pipe: address of the container in which dl pipe is returned.
3775  * @ul_is_polled: address of the container in which a bool
3776  *			indicating if the UL CE for this service
3777  *			is polled is returned.
3778  * @dl_is_polled: address of the container in which a bool
3779  *			indicating if the DL CE for this service
3780  *			is polled is returned.
3781  *
3782  * Return: Indicates whether the service has been found in the table.
3783  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
3784  *         There will be warning logs if either leg has not been updated
3785  *         because it missed the entry in the table (but this is not an err).
3786  */
3787 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
3788 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
3789 			int *dl_is_polled)
3790 {
3791 	int status = QDF_STATUS_E_INVAL;
3792 	unsigned int i;
3793 	struct service_to_pipe element;
3794 	struct service_to_pipe *tgt_svc_map_to_use;
3795 	uint32_t sz_tgt_svc_map_to_use;
3796 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3797 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3798 	bool dl_updated = false;
3799 	bool ul_updated = false;
3800 
3801 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
3802 				       &sz_tgt_svc_map_to_use);
3803 
3804 	*dl_is_polled = 0;  /* polling for received messages not supported */
3805 
3806 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
3807 
3808 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
3809 		if (element.service_id == svc_id) {
3810 			if (element.pipedir == PIPEDIR_OUT) {
3811 				*ul_pipe = element.pipenum;
3812 				*ul_is_polled =
3813 					(hif_state->host_ce_config[*ul_pipe].flags &
3814 					 CE_ATTR_DISABLE_INTR) != 0;
3815 				ul_updated = true;
3816 			} else if (element.pipedir == PIPEDIR_IN) {
3817 				*dl_pipe = element.pipenum;
3818 				dl_updated = true;
3819 			}
3820 			status = QDF_STATUS_SUCCESS;
3821 		}
3822 	}
3823 	if (ul_updated == false)
3824 		HIF_DBG("ul pipe is NOT updated for service %d", svc_id);
3825 	if (dl_updated == false)
3826 		HIF_DBG("dl pipe is NOT updated for service %d", svc_id);
3827 
3828 	return status;
3829 }
3830 
3831 #ifdef SHADOW_REG_DEBUG
3832 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
3833 		uint32_t CE_ctrl_addr)
3834 {
3835 	uint32_t read_from_hw, srri_from_ddr = 0;
3836 
3837 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
3838 
3839 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3840 
3841 	if (read_from_hw != srri_from_ddr) {
3842 		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3843 		       __func__, srri_from_ddr, read_from_hw,
3844 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3845 		QDF_ASSERT(0);
3846 	}
3847 	return srri_from_ddr;
3848 }
3849 
3850 
3851 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
3852 		uint32_t CE_ctrl_addr)
3853 {
3854 	uint32_t read_from_hw, drri_from_ddr = 0;
3855 
3856 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
3857 
3858 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3859 
3860 	if (read_from_hw != drri_from_ddr) {
3861 		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3862 		       drri_from_ddr, read_from_hw,
3863 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3864 		QDF_ASSERT(0);
3865 	}
3866 	return drri_from_ddr;
3867 }
3868 
3869 #endif
3870 
3871 /**
3872  * hif_dump_ce_registers() - dump ce registers
3873  * @scn: hif_opaque_softc pointer.
3874  *
3875  * Output the copy engine registers
3876  *
3877  * Return: 0 for success or error code
3878  */
3879 int hif_dump_ce_registers(struct hif_softc *scn)
3880 {
3881 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3882 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
3883 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
3884 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
3885 	uint16_t i;
3886 	QDF_STATUS status;
3887 
3888 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
3889 		if (!scn->ce_id_to_state[i]) {
3890 			HIF_DBG("CE%d not used.", i);
3891 			continue;
3892 		}
3893 
3894 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
3895 					   (uint8_t *) &ce_reg_values[0],
3896 					   ce_reg_word_size * sizeof(uint32_t));
3897 
3898 		if (status != QDF_STATUS_SUCCESS) {
3899 			HIF_ERROR("Dumping CE register failed!");
3900 			return -EACCES;
3901 		}
3902 		HIF_ERROR("CE%d=>\n", i);
3903 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
3904 				   (uint8_t *) &ce_reg_values[0],
3905 				   ce_reg_word_size * sizeof(uint32_t));
3906 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
3907 				+ SR_WR_INDEX_ADDRESS),
3908 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
3909 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
3910 				+ CURRENT_SRRI_ADDRESS),
3911 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
3912 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
3913 				+ DST_WR_INDEX_ADDRESS),
3914 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
3915 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
3916 				+ CURRENT_DRRI_ADDRESS),
3917 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
3918 		qdf_print("---");
3919 	}
3920 	return 0;
3921 }
3922 qdf_export_symbol(hif_dump_ce_registers);
3923 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
3924 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
3925 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
3926 {
3927 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3928 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3929 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
3930 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3931 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3932 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
3933 	struct CE_ring_state *src_ring = ce_state->src_ring;
3934 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
3935 
3936 	if (src_ring) {
3937 		hif_info->ul_pipe.nentries = src_ring->nentries;
3938 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
3939 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
3940 		hif_info->ul_pipe.write_index = src_ring->write_index;
3941 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
3942 		hif_info->ul_pipe.base_addr_CE_space =
3943 			src_ring->base_addr_CE_space;
3944 		hif_info->ul_pipe.base_addr_owner_space =
3945 			src_ring->base_addr_owner_space;
3946 	}
3947 
3948 
3949 	if (dest_ring) {
3950 		hif_info->dl_pipe.nentries = dest_ring->nentries;
3951 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
3952 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
3953 		hif_info->dl_pipe.write_index = dest_ring->write_index;
3954 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
3955 		hif_info->dl_pipe.base_addr_CE_space =
3956 			dest_ring->base_addr_CE_space;
3957 		hif_info->dl_pipe.base_addr_owner_space =
3958 			dest_ring->base_addr_owner_space;
3959 	}
3960 
3961 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
3962 	hif_info->ctrl_addr = ce_state->ctrl_addr;
3963 
3964 	return hif_info;
3965 }
3966 qdf_export_symbol(hif_get_addl_pipe_info);
3967 
3968 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
3969 {
3970 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3971 
3972 	scn->nss_wifi_ol_mode = mode;
3973 	return 0;
3974 }
3975 qdf_export_symbol(hif_set_nss_wifiol_mode);
3976 #endif
3977 
3978 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
3979 {
3980 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3981 	scn->hif_attribute = hif_attrib;
3982 }
3983 
3984 
3985 /* disable interrupts (only applicable for legacy copy engine currently */
3986 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
3987 {
3988 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3989 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
3990 	uint32_t ctrl_addr = CE_state->ctrl_addr;
3991 
3992 	Q_TARGET_ACCESS_BEGIN(scn);
3993 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
3994 	Q_TARGET_ACCESS_END(scn);
3995 }
3996 qdf_export_symbol(hif_disable_interrupt);
3997 
3998 /**
3999  * hif_fw_event_handler() - hif fw event handler
4000  * @hif_state: pointer to hif ce state structure
4001  *
4002  * Process fw events and raise HTC callback to process fw events.
4003  *
4004  * Return: none
4005  */
4006 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
4007 {
4008 	struct hif_msg_callbacks *msg_callbacks =
4009 		&hif_state->msg_callbacks_current;
4010 
4011 	if (!msg_callbacks->fwEventHandler)
4012 		return;
4013 
4014 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
4015 			QDF_STATUS_E_FAILURE);
4016 }
4017 
4018 #ifndef QCA_WIFI_3_0
4019 /**
4020  * hif_fw_interrupt_handler() - FW interrupt handler
4021  * @irq: irq number
4022  * @arg: the user pointer
4023  *
4024  * Called from the PCI interrupt handler when a
4025  * firmware-generated interrupt to the Host.
4026  *
4027  * only registered for legacy ce devices
4028  *
4029  * Return: status of handled irq
4030  */
4031 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
4032 {
4033 	struct hif_softc *scn = arg;
4034 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4035 	uint32_t fw_indicator_address, fw_indicator;
4036 
4037 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
4038 		return ATH_ISR_NOSCHED;
4039 
4040 	fw_indicator_address = hif_state->fw_indicator_address;
4041 	/* For sudden unplug this will return ~0 */
4042 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
4043 
4044 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
4045 		/* ACK: clear Target-side pending event */
4046 		A_TARGET_WRITE(scn, fw_indicator_address,
4047 			       fw_indicator & ~FW_IND_EVENT_PENDING);
4048 		if (Q_TARGET_ACCESS_END(scn) < 0)
4049 			return ATH_ISR_SCHED;
4050 
4051 		if (hif_state->started) {
4052 			hif_fw_event_handler(hif_state);
4053 		} else {
4054 			/*
4055 			 * Probable Target failure before we're prepared
4056 			 * to handle it.  Generally unexpected.
4057 			 * fw_indicator used as bitmap, and defined as below:
4058 			 *     FW_IND_EVENT_PENDING    0x1
4059 			 *     FW_IND_INITIALIZED      0x2
4060 			 *     FW_IND_NEEDRECOVER      0x4
4061 			 */
4062 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
4063 				("%s: Early firmware event indicated 0x%x\n",
4064 				 __func__, fw_indicator));
4065 		}
4066 	} else {
4067 		if (Q_TARGET_ACCESS_END(scn) < 0)
4068 			return ATH_ISR_SCHED;
4069 	}
4070 
4071 	return ATH_ISR_SCHED;
4072 }
4073 #else
4074 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
4075 {
4076 	return ATH_ISR_SCHED;
4077 }
4078 #endif /* #ifdef QCA_WIFI_3_0 */
4079 
4080 
4081 /**
4082  * hif_wlan_disable(): call the platform driver to disable wlan
4083  * @scn: HIF Context
4084  *
4085  * This function passes the con_mode to platform driver to disable
4086  * wlan.
4087  *
4088  * Return: void
4089  */
4090 void hif_wlan_disable(struct hif_softc *scn)
4091 {
4092 	enum pld_driver_mode mode;
4093 	uint32_t con_mode = hif_get_conparam(scn);
4094 
4095 	if (scn->target_status == TARGET_STATUS_RESET)
4096 		return;
4097 
4098 	if (QDF_GLOBAL_FTM_MODE == con_mode)
4099 		mode = PLD_FTM;
4100 	else if (QDF_IS_EPPING_ENABLED(con_mode))
4101 		mode = PLD_EPPING;
4102 	else
4103 		mode = PLD_MISSION;
4104 
4105 	pld_wlan_disable(scn->qdf_dev->dev, mode);
4106 }
4107 
4108 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
4109 {
4110 	QDF_STATUS status;
4111 	uint8_t ul_pipe, dl_pipe;
4112 	int ul_is_polled, dl_is_polled;
4113 
4114 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
4115 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
4116 					 HTC_CTRL_RSVD_SVC,
4117 					 &ul_pipe, &dl_pipe,
4118 					 &ul_is_polled, &dl_is_polled);
4119 	if (status) {
4120 		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
4121 		return qdf_status_to_os_return(status);
4122 	}
4123 
4124 	*ce_id = dl_pipe;
4125 
4126 	return 0;
4127 }
4128