xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision 6d768494e5ce14eb1603a695c86739d12ecc6ec2)
1 /*
2  * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
22 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "ce_api.h"
33 #include "qdf_trace.h"
34 #include "pld_common.h"
35 #include "hif_debug.h"
36 #include "ce_internal.h"
37 #include "ce_reg.h"
38 #include "ce_assignment.h"
39 #include "ce_tasklet.h"
40 #include "qdf_module.h"
41 
42 #define CE_POLL_TIMEOUT 10      /* ms */
43 
44 #define AGC_DUMP         1
45 #define CHANINFO_DUMP    2
46 #define BB_WATCHDOG_DUMP 3
47 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
48 #define PCIE_ACCESS_DUMP 4
49 #endif
50 #include "mp_dev.h"
51 #ifdef HIF_CE_LOG_INFO
52 #include "qdf_hang_event_notifier.h"
53 #endif
54 
55 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
56 	defined(QCA_WIFI_QCA6018) || defined(QCA_WIFI_QCA5018)) && \
57 	!defined(QCA_WIFI_SUPPORT_SRNG)
58 #define QCA_WIFI_SUPPORT_SRNG
59 #endif
60 
61 /* Forward references */
62 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
63 
64 /*
65  * Fix EV118783, poll to check whether a BMI response comes
66  * other than waiting for the interruption which may be lost.
67  */
68 /* #define BMI_RSP_POLLING */
69 #define BMI_RSP_TO_MILLISEC  1000
70 
71 #ifdef CONFIG_BYPASS_QMI
72 #define BYPASS_QMI 1
73 #else
74 #define BYPASS_QMI 0
75 #endif
76 
77 #ifdef ENABLE_10_4_FW_HDR
78 #if (ENABLE_10_4_FW_HDR == 1)
79 #define WDI_IPA_SERVICE_GROUP 5
80 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
81 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
82 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
83 #endif /* ENABLE_10_4_FW_HDR == 1 */
84 #endif /* ENABLE_10_4_FW_HDR */
85 
86 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
87 static void hif_config_rri_on_ddr(struct hif_softc *scn);
88 
89 /**
90  * hif_target_access_log_dump() - dump access log
91  *
92  * dump access log
93  *
94  * Return: n/a
95  */
96 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
97 static void hif_target_access_log_dump(void)
98 {
99 	hif_target_dump_access_log();
100 }
101 #endif
102 
103 
104 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
105 		      uint8_t cmd_id, bool start)
106 {
107 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
108 
109 	switch (cmd_id) {
110 	case AGC_DUMP:
111 		if (start)
112 			priv_start_agc(scn);
113 		else
114 			priv_dump_agc(scn);
115 		break;
116 	case CHANINFO_DUMP:
117 		if (start)
118 			priv_start_cap_chaninfo(scn);
119 		else
120 			priv_dump_chaninfo(scn);
121 		break;
122 	case BB_WATCHDOG_DUMP:
123 		priv_dump_bbwatchdog(scn);
124 		break;
125 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
126 	case PCIE_ACCESS_DUMP:
127 		hif_target_access_log_dump();
128 		break;
129 #endif
130 	default:
131 		HIF_ERROR("%s: Invalid htc dump command", __func__);
132 		break;
133 	}
134 }
135 
136 static void ce_poll_timeout(void *arg)
137 {
138 	struct CE_state *CE_state = (struct CE_state *)arg;
139 
140 	if (CE_state->timer_inited) {
141 		ce_per_engine_service(CE_state->scn, CE_state->id);
142 		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
143 	}
144 }
145 
146 static unsigned int roundup_pwr2(unsigned int n)
147 {
148 	int i;
149 	unsigned int test_pwr2;
150 
151 	if (!(n & (n - 1)))
152 		return n; /* already a power of 2 */
153 
154 	test_pwr2 = 4;
155 	for (i = 0; i < 29; i++) {
156 		if (test_pwr2 > n)
157 			return test_pwr2;
158 		test_pwr2 = test_pwr2 << 1;
159 	}
160 
161 	QDF_ASSERT(0); /* n too large */
162 	return 0;
163 }
164 
165 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
166 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
167 
168 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
169 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
170 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
171 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
172 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
173 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
174 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
175 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
176 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
177 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
178 #ifdef QCA_WIFI_3_0_ADRASTEA
179 	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
180 	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
181 	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
182 #endif
183 };
184 
185 #ifdef QCN7605_SUPPORT
186 static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
187 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
188 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
189 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
190 	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
191 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
192 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
193 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
194 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
195 };
196 #endif
197 
198 #ifdef WLAN_FEATURE_EPPING
199 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
200 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
201 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
202 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
203 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
204 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
205 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
206 	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
207 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
208 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
209 };
210 #endif
211 
212 /* CE_PCI TABLE */
213 /*
214  * NOTE: the table below is out of date, though still a useful reference.
215  * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
216  * mapping of HTC services to HIF pipes.
217  */
218 /*
219  * This authoritative table defines Copy Engine configuration and the mapping
220  * of services/endpoints to CEs.  A subset of this information is passed to
221  * the Target during startup as a prerequisite to entering BMI phase.
222  * See:
223  *    target_service_to_ce_map - Target-side mapping
224  *    hif_map_service_to_pipe      - Host-side mapping
225  *    target_ce_config         - Target-side configuration
226  *    host_ce_config           - Host-side configuration
227    ============================================================================
228    Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
229  |                      |      | ctio | Size     | Frequency
230  |                      |      | n    |          |
231    ============================================================================
232    tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
233    descriptor |                      |      |      | O(100B)  | and regular
234    download   |                      |      |      |          |
235    ----------------------------------------------------------------------------
236    rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
237    indication |                      |      |      | O(10B)   | regular
238    upload     |                      |      |      |          |
239    ----------------------------------------------------------------------------
240    MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
241    upload     |                      |      |      | O(1000B) | (frequent
242    e.g. noise |                      |      |      |          | during IP1.0
243    packets    |                      |      |      |          | testing)
244    ----------------------------------------------------------------------------
245    MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
246    download   |                      |      |      | O(1000B) | (frequent
247    e.g.       |                      |      |      |          | during IP1.0
248    misdirecte |                      |      |      |          | testing)
249    d EAPOL    |                      |      |      |          |
250    packets    |                      |      |      |          |
251    ----------------------------------------------------------------------------
252    n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
253  | DATA_VO (uplink)     |      |      |          |
254    ----------------------------------------------------------------------------
255    n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
256  | DATA_VO (downlink)   |      |      |          |
257    ----------------------------------------------------------------------------
258    WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
259  |                      |      |      | O(100B)  |
260    ----------------------------------------------------------------------------
261    WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
262    messages   | (downlink)           |      |      | O(100B)  |
263  |                      |      |      |          |
264    ----------------------------------------------------------------------------
265    n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
266  | HTC_RAW_STREAMS      |      |      |          |
267  | (uplink)             |      |      |          |
268    ----------------------------------------------------------------------------
269    n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
270  | HTC_RAW_STREAMS      |      |      |          |
271  | (downlink)           |      |      |          |
272    ----------------------------------------------------------------------------
273    diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
274  |                      |      |      |          | infrequent
275    ============================================================================
276  */
277 
278 /*
279  * Map from service/endpoint to Copy Engine.
280  * This table is derived from the CE_PCI TABLE, above.
281  * It is passed to the Target at startup for use by firmware.
282  */
283 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
284 	{
285 		WMI_DATA_VO_SVC,
286 		PIPEDIR_OUT,    /* out = UL = host -> target */
287 		3,
288 	},
289 	{
290 		WMI_DATA_VO_SVC,
291 		PIPEDIR_IN,     /* in = DL = target -> host */
292 		2,
293 	},
294 	{
295 		WMI_DATA_BK_SVC,
296 		PIPEDIR_OUT,    /* out = UL = host -> target */
297 		3,
298 	},
299 	{
300 		WMI_DATA_BK_SVC,
301 		PIPEDIR_IN,     /* in = DL = target -> host */
302 		2,
303 	},
304 	{
305 		WMI_DATA_BE_SVC,
306 		PIPEDIR_OUT,    /* out = UL = host -> target */
307 		3,
308 	},
309 	{
310 		WMI_DATA_BE_SVC,
311 		PIPEDIR_IN,     /* in = DL = target -> host */
312 		2,
313 	},
314 	{
315 		WMI_DATA_VI_SVC,
316 		PIPEDIR_OUT,    /* out = UL = host -> target */
317 		3,
318 	},
319 	{
320 		WMI_DATA_VI_SVC,
321 		PIPEDIR_IN,     /* in = DL = target -> host */
322 		2,
323 	},
324 	{
325 		WMI_CONTROL_SVC,
326 		PIPEDIR_OUT,    /* out = UL = host -> target */
327 		3,
328 	},
329 	{
330 		WMI_CONTROL_SVC,
331 		PIPEDIR_IN,     /* in = DL = target -> host */
332 		2,
333 	},
334 	{
335 		HTC_CTRL_RSVD_SVC,
336 		PIPEDIR_OUT,    /* out = UL = host -> target */
337 		0,              /* could be moved to 3 (share with WMI) */
338 	},
339 	{
340 		HTC_CTRL_RSVD_SVC,
341 		PIPEDIR_IN,     /* in = DL = target -> host */
342 		2,
343 	},
344 	{
345 		HTC_RAW_STREAMS_SVC, /* not currently used */
346 		PIPEDIR_OUT,    /* out = UL = host -> target */
347 		0,
348 	},
349 	{
350 		HTC_RAW_STREAMS_SVC, /* not currently used */
351 		PIPEDIR_IN,     /* in = DL = target -> host */
352 		2,
353 	},
354 	{
355 		HTT_DATA_MSG_SVC,
356 		PIPEDIR_OUT,    /* out = UL = host -> target */
357 		4,
358 	},
359 	{
360 		HTT_DATA_MSG_SVC,
361 		PIPEDIR_IN,     /* in = DL = target -> host */
362 		1,
363 	},
364 	{
365 		WDI_IPA_TX_SVC,
366 		PIPEDIR_OUT,    /* in = DL = target -> host */
367 		5,
368 	},
369 #if defined(QCA_WIFI_3_0_ADRASTEA)
370 	{
371 		HTT_DATA2_MSG_SVC,
372 		PIPEDIR_IN,    /* in = DL = target -> host */
373 		9,
374 	},
375 	{
376 		HTT_DATA3_MSG_SVC,
377 		PIPEDIR_IN,    /* in = DL = target -> host */
378 		10,
379 	},
380 	{
381 		PACKET_LOG_SVC,
382 		PIPEDIR_IN,    /* in = DL = target -> host */
383 		11,
384 	},
385 #endif
386 	/* (Additions here) */
387 
388 	{                       /* Must be last */
389 		0,
390 		0,
391 		0,
392 	},
393 };
394 
395 /* PIPEDIR_OUT = HOST to Target */
396 /* PIPEDIR_IN  = TARGET to HOST */
397 #if (defined(QCA_WIFI_QCA8074))
398 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
399 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
400 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
401 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
402 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
403 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
404 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
405 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
406 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
407 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
408 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
409 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
410 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
411 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
412 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
413 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
414 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
415 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
416 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
417 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
418 	/* (Additions here) */
419 	{ 0, 0, 0, },
420 };
421 #else
422 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
423 };
424 #endif
425 
426 #if (defined(QCA_WIFI_QCA8074V2))
427 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
428 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
429 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
430 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
431 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
432 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
433 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
434 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
435 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
436 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
437 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
438 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
439 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
440 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
441 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
442 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
443 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
444 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
445 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
446 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
447 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
448 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
449 	/* (Additions here) */
450 	{ 0, 0, 0, },
451 };
452 #else
453 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
454 };
455 #endif
456 
457 #if (defined(QCA_WIFI_QCA6018))
458 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
459 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
460 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
461 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
462 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
463 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
464 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
465 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
466 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
467 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
468 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
469 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
470 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
471 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
472 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
473 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
474 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
475 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
476 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
477 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
478 	/* (Additions here) */
479 	{ 0, 0, 0, },
480 };
481 #else
482 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
483 };
484 #endif
485 
486 #if (defined(QCA_WIFI_QCN9000))
487 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
488 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
489 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
490 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
491 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
492 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
493 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
494 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
495 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
496 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
497 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
498 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
499 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
500 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
501 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
502 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
503 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
504 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
505 	/* (Additions here) */
506 	{ 0, 0, 0, },
507 };
508 #else
509 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
510 };
511 #endif
512 
513 #if (defined(QCA_WIFI_QCA5018))
514 static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
515 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
516 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
517 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
518 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
519 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
520 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
521 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
522 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
523 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
524 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
525 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
526 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
527 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
528 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
529 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
530 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
531 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
532 	/* (Additions here) */
533 	{ 0, 0, 0, },
534 };
535 #else
536 static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
537 };
538 #endif
539 
540 /* PIPEDIR_OUT = HOST to Target */
541 /* PIPEDIR_IN  = TARGET to HOST */
542 #ifdef QCN7605_SUPPORT
543 static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
544 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
545 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
546 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
547 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
548 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
549 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
550 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
551 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
552 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
553 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
554 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
555 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
556 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
557 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
558 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
559 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
560 	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
561 #ifdef IPA_OFFLOAD
562 	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
563 #else
564 	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
565 #endif
566 	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
567 	/* (Additions here) */
568 	{ 0, 0, 0, },
569 };
570 #endif
571 
572 #if (defined(QCA_WIFI_QCA6290))
573 #ifdef QCA_6290_AP_MODE
574 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
575 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
576 	{ WMI_DATA_VO_SVC, PIPEDIR_IN , 2, },
577 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
578 	{ WMI_DATA_BK_SVC, PIPEDIR_IN , 2, },
579 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
580 	{ WMI_DATA_BE_SVC, PIPEDIR_IN , 2, },
581 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
582 	{ WMI_DATA_VI_SVC, PIPEDIR_IN , 2, },
583 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
584 	{ WMI_CONTROL_SVC, PIPEDIR_IN , 2, },
585 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
586 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN , 2, },
587 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
588 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN , 1, },
589 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
590 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
591 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
592 	/* (Additions here) */
593 	{ 0, 0, 0, },
594 };
595 #else
596 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
597 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
598 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
599 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
600 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
601 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
602 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
603 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
604 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
605 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
606 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
607 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
608 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
609 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
610 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
611 	/* (Additions here) */
612 	{ 0, 0, 0, },
613 };
614 #endif
615 #else
616 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
617 };
618 #endif
619 
620 #if (defined(QCA_WIFI_QCA6390))
621 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
622 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
623 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
624 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
625 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
626 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
627 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
628 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
629 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
630 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
631 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
632 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
633 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
634 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
635 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
636 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
637 	/* (Additions here) */
638 	{ 0, 0, 0, },
639 };
640 #else
641 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
642 };
643 #endif
644 
645 static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
646 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
647 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
648 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
649 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
650 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
651 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
652 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
653 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
654 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
655 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
656 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
657 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
658 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
659 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
660 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
661 	/* (Additions here) */
662 	{ 0, 0, 0, },
663 };
664 
665 #if (defined(QCA_WIFI_QCA6750))
666 static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
667 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
668 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
669 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
670 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
671 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
672 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
673 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
674 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
675 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
676 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
677 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
678 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
679 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
680 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
681 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
682 	/* (Additions here) */
683 	{ 0, 0, 0, },
684 };
685 #else
686 static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
687 };
688 #endif
689 
690 static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
691 	{
692 		WMI_DATA_VO_SVC,
693 		PIPEDIR_OUT,    /* out = UL = host -> target */
694 		3,
695 	},
696 	{
697 		WMI_DATA_VO_SVC,
698 		PIPEDIR_IN,     /* in = DL = target -> host */
699 		2,
700 	},
701 	{
702 		WMI_DATA_BK_SVC,
703 		PIPEDIR_OUT,    /* out = UL = host -> target */
704 		3,
705 	},
706 	{
707 		WMI_DATA_BK_SVC,
708 		PIPEDIR_IN,     /* in = DL = target -> host */
709 		2,
710 	},
711 	{
712 		WMI_DATA_BE_SVC,
713 		PIPEDIR_OUT,    /* out = UL = host -> target */
714 		3,
715 	},
716 	{
717 		WMI_DATA_BE_SVC,
718 		PIPEDIR_IN,     /* in = DL = target -> host */
719 		2,
720 	},
721 	{
722 		WMI_DATA_VI_SVC,
723 		PIPEDIR_OUT,    /* out = UL = host -> target */
724 		3,
725 	},
726 	{
727 		WMI_DATA_VI_SVC,
728 		PIPEDIR_IN,     /* in = DL = target -> host */
729 		2,
730 	},
731 	{
732 		WMI_CONTROL_SVC,
733 		PIPEDIR_OUT,    /* out = UL = host -> target */
734 		3,
735 	},
736 	{
737 		WMI_CONTROL_SVC,
738 		PIPEDIR_IN,     /* in = DL = target -> host */
739 		2,
740 	},
741 	{
742 		HTC_CTRL_RSVD_SVC,
743 		PIPEDIR_OUT,    /* out = UL = host -> target */
744 		0,              /* could be moved to 3 (share with WMI) */
745 	},
746 	{
747 		HTC_CTRL_RSVD_SVC,
748 		PIPEDIR_IN,     /* in = DL = target -> host */
749 		1,
750 	},
751 	{
752 		HTC_RAW_STREAMS_SVC, /* not currently used */
753 		PIPEDIR_OUT,    /* out = UL = host -> target */
754 		0,
755 	},
756 	{
757 		HTC_RAW_STREAMS_SVC, /* not currently used */
758 		PIPEDIR_IN,     /* in = DL = target -> host */
759 		1,
760 	},
761 	{
762 		HTT_DATA_MSG_SVC,
763 		PIPEDIR_OUT,    /* out = UL = host -> target */
764 		4,
765 	},
766 #ifdef WLAN_FEATURE_FASTPATH
767 	{
768 		HTT_DATA_MSG_SVC,
769 		PIPEDIR_IN,     /* in = DL = target -> host */
770 		5,
771 	},
772 #else /* WLAN_FEATURE_FASTPATH */
773 	{
774 		HTT_DATA_MSG_SVC,
775 		PIPEDIR_IN,  /* in = DL = target -> host */
776 		1,
777 	},
778 #endif /* WLAN_FEATURE_FASTPATH */
779 
780 	/* (Additions here) */
781 
782 	{                       /* Must be last */
783 		0,
784 		0,
785 		0,
786 	},
787 };
788 
789 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
790 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
791 
792 #ifdef WLAN_FEATURE_EPPING
793 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
794 	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
795 	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
796 	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
797 	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
798 	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
799 	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
800 	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
801 	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
802 	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
803 	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
804 	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
805 	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
806 	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
807 	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
808 	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
809 	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
810 	{0, 0, 0,},             /* Must be last */
811 };
812 
813 void hif_select_epping_service_to_pipe_map(struct service_to_pipe
814 					   **tgt_svc_map_to_use,
815 					   uint32_t *sz_tgt_svc_map_to_use)
816 {
817 	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
818 	*sz_tgt_svc_map_to_use =
819 			sizeof(target_service_to_ce_map_wlan_epping);
820 }
821 #endif
822 
823 #ifdef QCN7605_SUPPORT
824 static inline
825 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
826 			       uint32_t *sz_tgt_svc_map_to_use)
827 {
828 	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
829 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
830 }
831 #else
832 static inline
833 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
834 			       uint32_t *sz_tgt_svc_map_to_use)
835 {
836 	HIF_ERROR("%s: QCN7605 not supported", __func__);
837 }
838 #endif
839 
840 static void hif_select_service_to_pipe_map(struct hif_softc *scn,
841 				    struct service_to_pipe **tgt_svc_map_to_use,
842 				    uint32_t *sz_tgt_svc_map_to_use)
843 {
844 	uint32_t mode = hif_get_conparam(scn);
845 	struct hif_target_info *tgt_info = &scn->target_info;
846 
847 	if (QDF_IS_EPPING_ENABLED(mode)) {
848 		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
849 						      sz_tgt_svc_map_to_use);
850 	} else {
851 		switch (tgt_info->target_type) {
852 		default:
853 			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
854 			*sz_tgt_svc_map_to_use =
855 				sizeof(target_service_to_ce_map_wlan);
856 			break;
857 		case TARGET_TYPE_QCN7605:
858 			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
859 						  sz_tgt_svc_map_to_use);
860 			break;
861 		case TARGET_TYPE_AR900B:
862 		case TARGET_TYPE_QCA9984:
863 		case TARGET_TYPE_IPQ4019:
864 		case TARGET_TYPE_QCA9888:
865 		case TARGET_TYPE_AR9888:
866 		case TARGET_TYPE_AR9888V2:
867 			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
868 			*sz_tgt_svc_map_to_use =
869 				sizeof(target_service_to_ce_map_ar900b);
870 			break;
871 		case TARGET_TYPE_QCA6290:
872 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
873 			*sz_tgt_svc_map_to_use =
874 				sizeof(target_service_to_ce_map_qca6290);
875 			break;
876 		case TARGET_TYPE_QCA6390:
877 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
878 			*sz_tgt_svc_map_to_use =
879 				sizeof(target_service_to_ce_map_qca6390);
880 			break;
881 		case TARGET_TYPE_QCA6490:
882 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
883 			*sz_tgt_svc_map_to_use =
884 				sizeof(target_service_to_ce_map_qca6490);
885 			break;
886 		case TARGET_TYPE_QCA6750:
887 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6750;
888 			*sz_tgt_svc_map_to_use =
889 				sizeof(target_service_to_ce_map_qca6750);
890 			break;
891 		case TARGET_TYPE_QCA8074:
892 			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
893 			*sz_tgt_svc_map_to_use =
894 				sizeof(target_service_to_ce_map_qca8074);
895 			break;
896 		case TARGET_TYPE_QCA8074V2:
897 			*tgt_svc_map_to_use =
898 				target_service_to_ce_map_qca8074_v2;
899 			*sz_tgt_svc_map_to_use =
900 				sizeof(target_service_to_ce_map_qca8074_v2);
901 			break;
902 		case TARGET_TYPE_QCA6018:
903 			*tgt_svc_map_to_use =
904 				target_service_to_ce_map_qca6018;
905 			*sz_tgt_svc_map_to_use =
906 				sizeof(target_service_to_ce_map_qca6018);
907 			break;
908 		case TARGET_TYPE_QCN9000:
909 			*tgt_svc_map_to_use =
910 				target_service_to_ce_map_qcn9000;
911 			*sz_tgt_svc_map_to_use =
912 				sizeof(target_service_to_ce_map_qcn9000);
913 			break;
914 		case TARGET_TYPE_QCA5018:
915 			*tgt_svc_map_to_use =
916 				target_service_to_ce_map_qca5018;
917 			*sz_tgt_svc_map_to_use =
918 				sizeof(target_service_to_ce_map_qca5018);
919 			break;
920 		}
921 	}
922 }
923 
924 /**
925  * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
926  * @ce_state : pointer to the state context of the CE
927  *
928  * Description:
929  *   Sets htt_rx_data attribute of the state structure if the
930  *   CE serves one of the HTT DATA services.
931  *
932  * Return:
933  *  false (attribute set to false)
934  *  true  (attribute set to true);
935  */
936 static bool ce_mark_datapath(struct CE_state *ce_state)
937 {
938 	struct service_to_pipe *svc_map;
939 	uint32_t map_sz, map_len;
940 	int    i;
941 	bool   rc = false;
942 
943 	if (ce_state) {
944 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
945 					       &map_sz);
946 
947 		map_len = map_sz / sizeof(struct service_to_pipe);
948 		for (i = 0; i < map_len; i++) {
949 			if ((svc_map[i].pipenum == ce_state->id) &&
950 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
951 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
952 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
953 				/* HTT CEs are unidirectional */
954 				if (svc_map[i].pipedir == PIPEDIR_IN)
955 					ce_state->htt_rx_data = true;
956 				else
957 					ce_state->htt_tx_data = true;
958 				rc = true;
959 			}
960 		}
961 	}
962 	return rc;
963 }
964 
965 /**
966  * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
967  * @ce_id: ce in question
968  * @ring: ring state being examined
969  * @type: "src_ring" or "dest_ring" string for identifying the ring
970  *
971  * Warns on non-zero index values.
972  * Causes a kernel panic if the ring is not empty durring initialization.
973  */
974 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
975 					 char *type)
976 {
977 	if (ring->write_index != 0 || ring->sw_index != 0)
978 		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
979 			  ce_id, type, ring->sw_index, ring->write_index);
980 	if (ring->write_index != ring->sw_index)
981 		QDF_BUG(0);
982 }
983 
984 #ifdef IPA_OFFLOAD
985 /**
986  * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
987  * @scn: softc instance
988  * @ce_id: ce in question
989  * @base_addr: pointer to copyengine ring base address
990  * @ce_ring: copyengine instance
991  * @nentries: number of entries should be allocated
992  * @desc_size: ce desc size
993  *
994  * Return: QDF_STATUS_SUCCESS - for success
995  */
996 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
997 				     qdf_dma_addr_t *base_addr,
998 				     struct CE_ring_state *ce_ring,
999 				     unsigned int nentries, uint32_t desc_size)
1000 {
1001 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
1002 	    !ce_srng_based(scn)) {
1003 		if (!scn->ipa_ce_ring) {
1004 			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
1005 				scn->qdf_dev,
1006 				nentries * desc_size + CE_DESC_RING_ALIGN);
1007 			if (!scn->ipa_ce_ring) {
1008 				HIF_ERROR(
1009 				"%s: Failed to allocate memory for IPA ce ring",
1010 				__func__);
1011 				return QDF_STATUS_E_NOMEM;
1012 			}
1013 		}
1014 		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
1015 						&scn->ipa_ce_ring->mem_info);
1016 		ce_ring->base_addr_owner_space_unaligned =
1017 						scn->ipa_ce_ring->vaddr;
1018 	} else {
1019 		ce_ring->base_addr_owner_space_unaligned =
1020 			qdf_mem_alloc_consistent(scn->qdf_dev,
1021 						 scn->qdf_dev->dev,
1022 						 (nentries * desc_size +
1023 						 CE_DESC_RING_ALIGN),
1024 						 base_addr);
1025 		if (!ce_ring->base_addr_owner_space_unaligned) {
1026 			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
1027 				  __func__, CE_id);
1028 			return QDF_STATUS_E_NOMEM;
1029 		}
1030 	}
1031 	return QDF_STATUS_SUCCESS;
1032 }
1033 
1034 /**
1035  * ce_free_desc_ring() - Frees copyengine descriptor ring
1036  * @scn: softc instance
1037  * @ce_id: ce in question
1038  * @ce_ring: copyengine instance
1039  * @desc_size: ce desc size
1040  *
1041  * Return: None
1042  */
1043 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1044 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
1045 {
1046 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
1047 	    !ce_srng_based(scn)) {
1048 		if (scn->ipa_ce_ring) {
1049 			qdf_mem_shared_mem_free(scn->qdf_dev,
1050 						scn->ipa_ce_ring);
1051 			scn->ipa_ce_ring = NULL;
1052 		}
1053 		ce_ring->base_addr_owner_space_unaligned = NULL;
1054 	} else {
1055 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1056 			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
1057 			ce_ring->base_addr_owner_space_unaligned,
1058 			ce_ring->base_addr_CE_space, 0);
1059 		ce_ring->base_addr_owner_space_unaligned = NULL;
1060 	}
1061 }
1062 #else
1063 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1064 				     qdf_dma_addr_t *base_addr,
1065 				     struct CE_ring_state *ce_ring,
1066 				     unsigned int nentries, uint32_t desc_size)
1067 {
1068 	ce_ring->base_addr_owner_space_unaligned =
1069 		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1070 					 (nentries * desc_size +
1071 					 CE_DESC_RING_ALIGN), base_addr);
1072 	if (!ce_ring->base_addr_owner_space_unaligned) {
1073 		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
1074 			  __func__, CE_id);
1075 		return QDF_STATUS_E_NOMEM;
1076 	}
1077 	return QDF_STATUS_SUCCESS;
1078 }
1079 
1080 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1081 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
1082 {
1083 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1084 		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
1085 		ce_ring->base_addr_owner_space_unaligned,
1086 		ce_ring->base_addr_CE_space, 0);
1087 	ce_ring->base_addr_owner_space_unaligned = NULL;
1088 }
1089 #endif /* IPA_OFFLOAD */
1090 
1091 /*
1092  * TODO: Need to explore the possibility of having this as part of a
1093  * target context instead of a global array.
1094  */
1095 static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);
1096 
1097 void ce_service_register_module(enum ce_target_type target_type,
1098 				struct ce_ops* (*ce_attach)(void))
1099 {
1100 	if (target_type < CE_MAX_TARGET_TYPE)
1101 		ce_attach_register[target_type] = ce_attach;
1102 }
1103 
1104 qdf_export_symbol(ce_service_register_module);
1105 
1106 /**
1107  * ce_srng_based() - Does this target use srng
1108  * @ce_state : pointer to the state context of the CE
1109  *
1110  * Description:
1111  *   returns true if the target is SRNG based
1112  *
1113  * Return:
1114  *  false (attribute set to false)
1115  *  true  (attribute set to true);
1116  */
1117 bool ce_srng_based(struct hif_softc *scn)
1118 {
1119 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1120 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1121 
1122 	switch (tgt_info->target_type) {
1123 	case TARGET_TYPE_QCA8074:
1124 	case TARGET_TYPE_QCA8074V2:
1125 	case TARGET_TYPE_QCA6290:
1126 	case TARGET_TYPE_QCA6390:
1127 	case TARGET_TYPE_QCA6490:
1128 	case TARGET_TYPE_QCA6750:
1129 	case TARGET_TYPE_QCA6018:
1130 	case TARGET_TYPE_QCN9000:
1131 	case TARGET_TYPE_QCA5018:
1132 		return true;
1133 	default:
1134 		return false;
1135 	}
1136 	return false;
1137 }
1138 qdf_export_symbol(ce_srng_based);
1139 
1140 #ifdef QCA_WIFI_SUPPORT_SRNG
1141 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1142 {
1143 	struct ce_ops *ops = NULL;
1144 
1145 	if (ce_srng_based(scn)) {
1146 		if (ce_attach_register[CE_SVC_SRNG])
1147 			ops = ce_attach_register[CE_SVC_SRNG]();
1148 	} else if (ce_attach_register[CE_SVC_LEGACY]) {
1149 		ops = ce_attach_register[CE_SVC_LEGACY]();
1150 	}
1151 
1152 	return ops;
1153 }
1154 
1155 
1156 #else	/* QCA_LITHIUM */
1157 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1158 {
1159 	if (ce_attach_register[CE_SVC_LEGACY])
1160 		return ce_attach_register[CE_SVC_LEGACY]();
1161 
1162 	return NULL;
1163 }
1164 #endif /* QCA_LITHIUM */
1165 
1166 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
1167 		struct pld_shadow_reg_v2_cfg **shadow_config,
1168 		int *num_shadow_registers_configured) {
1169 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1170 
1171 	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
1172 			scn, shadow_config, num_shadow_registers_configured);
1173 }
1174 
1175 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
1176 						uint8_t ring_type)
1177 {
1178 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1179 
1180 	return hif_state->ce_services->ce_get_desc_size(ring_type);
1181 }
1182 
1183 
1184 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
1185 		uint8_t ring_type, uint32_t nentries)
1186 {
1187 	uint32_t ce_nbytes;
1188 	char *ptr;
1189 	qdf_dma_addr_t base_addr;
1190 	struct CE_ring_state *ce_ring;
1191 	uint32_t desc_size;
1192 	struct hif_softc *scn = CE_state->scn;
1193 
1194 	ce_nbytes = sizeof(struct CE_ring_state)
1195 		+ (nentries * sizeof(void *));
1196 	ptr = qdf_mem_malloc(ce_nbytes);
1197 	if (!ptr)
1198 		return NULL;
1199 
1200 	ce_ring = (struct CE_ring_state *)ptr;
1201 	ptr += sizeof(struct CE_ring_state);
1202 	ce_ring->nentries = nentries;
1203 	ce_ring->nentries_mask = nentries - 1;
1204 
1205 	ce_ring->low_water_mark_nentries = 0;
1206 	ce_ring->high_water_mark_nentries = nentries;
1207 	ce_ring->per_transfer_context = (void **)ptr;
1208 
1209 	desc_size = ce_get_desc_size(scn, ring_type);
1210 
1211 	/* Legacy platforms that do not support cache
1212 	 * coherent DMA are unsupported
1213 	 */
1214 	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
1215 			       ce_ring, nentries,
1216 			       desc_size) !=
1217 	    QDF_STATUS_SUCCESS) {
1218 		HIF_ERROR("%s: ring has no DMA mem",
1219 				__func__);
1220 		qdf_mem_free(ce_ring);
1221 		return NULL;
1222 	}
1223 	ce_ring->base_addr_CE_space_unaligned = base_addr;
1224 
1225 	/* Correctly initialize memory to 0 to
1226 	 * prevent garbage data crashing system
1227 	 * when download firmware
1228 	 */
1229 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
1230 			nentries * desc_size +
1231 			CE_DESC_RING_ALIGN);
1232 
1233 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
1234 
1235 		ce_ring->base_addr_CE_space =
1236 			(ce_ring->base_addr_CE_space_unaligned +
1237 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
1238 
1239 		ce_ring->base_addr_owner_space = (void *)
1240 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
1241 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
1242 	} else {
1243 		ce_ring->base_addr_CE_space =
1244 				ce_ring->base_addr_CE_space_unaligned;
1245 		ce_ring->base_addr_owner_space =
1246 				ce_ring->base_addr_owner_space_unaligned;
1247 	}
1248 
1249 	return ce_ring;
1250 }
1251 
1252 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
1253 			uint32_t ce_id, struct CE_ring_state *ring,
1254 			struct CE_attr *attr)
1255 {
1256 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1257 
1258 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
1259 					      ring, attr);
1260 }
1261 
1262 int hif_ce_bus_early_suspend(struct hif_softc *scn)
1263 {
1264 	uint8_t ul_pipe, dl_pipe;
1265 	int ce_id, status, ul_is_polled, dl_is_polled;
1266 	struct CE_state *ce_state;
1267 
1268 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
1269 					 &ul_pipe, &dl_pipe,
1270 					 &ul_is_polled, &dl_is_polled);
1271 	if (status) {
1272 		HIF_ERROR("%s: pipe_mapping failure", __func__);
1273 		return status;
1274 	}
1275 
1276 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1277 		if (ce_id == ul_pipe)
1278 			continue;
1279 		if (ce_id == dl_pipe)
1280 			continue;
1281 
1282 		ce_state = scn->ce_id_to_state[ce_id];
1283 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1284 		if (ce_state->state == CE_RUNNING)
1285 			ce_state->state = CE_PAUSED;
1286 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1287 	}
1288 
1289 	return status;
1290 }
1291 
1292 int hif_ce_bus_late_resume(struct hif_softc *scn)
1293 {
1294 	int ce_id;
1295 	struct CE_state *ce_state;
1296 	int write_index = 0;
1297 	bool index_updated;
1298 
1299 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1300 		ce_state = scn->ce_id_to_state[ce_id];
1301 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1302 		if (ce_state->state == CE_PENDING) {
1303 			write_index = ce_state->src_ring->write_index;
1304 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
1305 					write_index);
1306 			ce_state->state = CE_RUNNING;
1307 			index_updated = true;
1308 		} else {
1309 			index_updated = false;
1310 		}
1311 
1312 		if (ce_state->state == CE_PAUSED)
1313 			ce_state->state = CE_RUNNING;
1314 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1315 
1316 		if (index_updated)
1317 			hif_record_ce_desc_event(scn, ce_id,
1318 				RESUME_WRITE_INDEX_UPDATE,
1319 				NULL, NULL, write_index, 0);
1320 	}
1321 
1322 	return 0;
1323 }
1324 
1325 /**
1326  * ce_oom_recovery() - try to recover rx ce from oom condition
1327  * @context: CE_state of the CE with oom rx ring
1328  *
1329  * the executing work Will continue to be rescheduled until
1330  * at least 1 descriptor is successfully posted to the rx ring.
1331  *
1332  * return: none
1333  */
1334 static void ce_oom_recovery(void *context)
1335 {
1336 	struct CE_state *ce_state = context;
1337 	struct hif_softc *scn = ce_state->scn;
1338 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
1339 	struct HIF_CE_pipe_info *pipe_info =
1340 		&ce_softc->pipe_info[ce_state->id];
1341 
1342 	hif_post_recv_buffers_for_pipe(pipe_info);
1343 }
1344 
1345 #ifdef HIF_CE_DEBUG_DATA_BUF
1346 /**
1347  * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
1348  * the CE descriptors.
1349  * Allocate HIF_CE_HISTORY_MAX records by CE_DEBUG_MAX_DATA_BUF_SIZE
1350  * @scn: hif scn handle
1351  * ce_id: Copy Engine Id
1352  *
1353  * Return: QDF_STATUS
1354  */
1355 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1356 {
1357 	struct hif_ce_desc_event *event = NULL;
1358 	struct hif_ce_desc_event *hist_ev = NULL;
1359 	uint32_t index = 0;
1360 
1361 	hist_ev =
1362 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1363 
1364 	if (!hist_ev)
1365 		return QDF_STATUS_E_NOMEM;
1366 
1367 	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
1368 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1369 		event = &hist_ev[index];
1370 		event->data =
1371 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
1372 		if (!event->data) {
1373 			hif_err_rl("ce debug data alloc failed");
1374 			return QDF_STATUS_E_NOMEM;
1375 		}
1376 	}
1377 	return QDF_STATUS_SUCCESS;
1378 }
1379 
1380 /**
1381  * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
1382  * the CE descriptors.
1383  * @scn: hif scn handle
1384  * ce_id: Copy Engine Id
1385  *
1386  * Return:
1387  */
1388 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1389 {
1390 	struct hif_ce_desc_event *event = NULL;
1391 	struct hif_ce_desc_event *hist_ev = NULL;
1392 	uint32_t index = 0;
1393 
1394 	hist_ev =
1395 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1396 
1397 	if (!hist_ev)
1398 		return;
1399 
1400 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1401 		event = &hist_ev[index];
1402 		if (event->data)
1403 			qdf_mem_free(event->data);
1404 		event->data = NULL;
1405 		event = NULL;
1406 	}
1407 
1408 }
1409 #endif /* HIF_CE_DEBUG_DATA_BUF */
1410 
1411 #ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
1412 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1413 struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];
1414 
1415 /**
1416  * alloc_mem_ce_debug_history() - Allocate CE descriptor history
1417  * @scn: hif scn handle
1418  * @ce_id: Copy Engine Id
1419  * @src_nentries: source ce ring entries
1420  * Return: QDF_STATUS
1421  */
1422 static QDF_STATUS
1423 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
1424 			   uint32_t src_nentries)
1425 {
1426 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1427 
1428 	ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
1429 	ce_hist->enable[ce_id] = 1;
1430 
1431 	if (src_nentries)
1432 		alloc_mem_ce_debug_hist_data(scn, ce_id);
1433 	else
1434 		ce_hist->data_enable[ce_id] = false;
1435 
1436 	return QDF_STATUS_SUCCESS;
1437 }
1438 
1439 /**
1440  * free_mem_ce_debug_history() - Free CE descriptor history
1441  * @scn: hif scn handle
1442  * @ce_id: Copy Engine Id
1443  *
1444  * Return: None
1445  */
1446 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
1447 {
1448 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1449 
1450 	ce_hist->enable[ce_id] = 0;
1451 	if (ce_hist->data_enable[ce_id]) {
1452 		ce_hist->data_enable[ce_id] = false;
1453 		free_mem_ce_debug_hist_data(scn, ce_id);
1454 	}
1455 	ce_hist->hist_ev[ce_id] = NULL;
1456 }
1457 #else
1458 static inline QDF_STATUS
1459 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1460 			   uint32_t src_nentries)
1461 {
1462 	return QDF_STATUS_SUCCESS;
1463 }
1464 
1465 static inline void
1466 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
1467 #endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
1468 #else
1469 #if defined(HIF_CE_DEBUG_DATA_BUF)
1470 
1471 static QDF_STATUS
1472 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1473 			   uint32_t src_nentries)
1474 {
1475 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
1476 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
1477 
1478 	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
1479 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
1480 		return QDF_STATUS_E_NOMEM;
1481 	} else {
1482 		scn->hif_ce_desc_hist.enable[CE_id] = 1;
1483 		return QDF_STATUS_SUCCESS;
1484 	}
1485 }
1486 
1487 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
1488 {
1489 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1490 	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];
1491 
1492 	if (!hist_ev)
1493 		return;
1494 
1495 	if (ce_hist->data_enable[CE_id]) {
1496 		ce_hist->data_enable[CE_id] = false;
1497 		free_mem_ce_debug_hist_data(scn, CE_id);
1498 	}
1499 
1500 	ce_hist->enable[CE_id] = 0;
1501 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
1502 	ce_hist->hist_ev[CE_id] = NULL;
1503 }
1504 
1505 #else
1506 
1507 static inline QDF_STATUS
1508 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1509 			   uint32_t src_nentries)
1510 {
1511 	return QDF_STATUS_SUCCESS;
1512 }
1513 
1514 static inline void
1515 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
1516 #endif /* HIF_CE_DEBUG_DATA_BUF */
1517 #endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */
1518 
1519 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1520 /**
1521  * reset_ce_debug_history() - reset the index and ce id used for dumping the
1522  * CE records on the console using sysfs.
1523  * @scn: hif scn handle
1524  *
1525  * Return:
1526  */
1527 static inline void reset_ce_debug_history(struct hif_softc *scn)
1528 {
1529 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1530 	/* Initialise the CE debug history sysfs interface inputs ce_id and
1531 	 * index. Disable data storing
1532 	 */
1533 	ce_hist->hist_index = 0;
1534 	ce_hist->hist_id = 0;
1535 }
1536 #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1537 static inline void reset_ce_debug_history(struct hif_softc *scn) { }
1538 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1539 
1540 void ce_enable_polling(void *cestate)
1541 {
1542 	struct CE_state *CE_state = (struct CE_state *)cestate;
1543 
1544 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1545 		CE_state->timer_inited = true;
1546 }
1547 
1548 void ce_disable_polling(void *cestate)
1549 {
1550 	struct CE_state *CE_state = (struct CE_state *)cestate;
1551 
1552 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1553 		CE_state->timer_inited = false;
1554 }
1555 
1556 /*
1557  * Initialize a Copy Engine based on caller-supplied attributes.
1558  * This may be called once to initialize both source and destination
1559  * rings or it may be called twice for separate source and destination
1560  * initialization. It may be that only one side or the other is
1561  * initialized by software/firmware.
1562  *
1563  * This should be called durring the initialization sequence before
1564  * interupts are enabled, so we don't have to worry about thread safety.
1565  */
1566 struct CE_handle *ce_init(struct hif_softc *scn,
1567 			  unsigned int CE_id, struct CE_attr *attr)
1568 {
1569 	struct CE_state *CE_state;
1570 	uint32_t ctrl_addr;
1571 	unsigned int nentries;
1572 	bool malloc_CE_state = false;
1573 	bool malloc_src_ring = false;
1574 	int status;
1575 
1576 	QDF_ASSERT(CE_id < scn->ce_count);
1577 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
1578 	CE_state = scn->ce_id_to_state[CE_id];
1579 
1580 	if (!CE_state) {
1581 		CE_state =
1582 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
1583 		if (!CE_state)
1584 			return NULL;
1585 
1586 		malloc_CE_state = true;
1587 		qdf_spinlock_create(&CE_state->ce_index_lock);
1588 
1589 		CE_state->id = CE_id;
1590 		CE_state->ctrl_addr = ctrl_addr;
1591 		CE_state->state = CE_RUNNING;
1592 		CE_state->attr_flags = attr->flags;
1593 	}
1594 	CE_state->scn = scn;
1595 	CE_state->service = ce_engine_service_reg;
1596 
1597 	qdf_atomic_init(&CE_state->rx_pending);
1598 	if (!attr) {
1599 		/* Already initialized; caller wants the handle */
1600 		return (struct CE_handle *)CE_state;
1601 	}
1602 
1603 	if (CE_state->src_sz_max)
1604 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
1605 	else
1606 		CE_state->src_sz_max = attr->src_sz_max;
1607 
1608 	ce_init_ce_desc_event_log(scn, CE_id,
1609 				  attr->src_nentries + attr->dest_nentries);
1610 
1611 	/* source ring setup */
1612 	nentries = attr->src_nentries;
1613 	if (nentries) {
1614 		struct CE_ring_state *src_ring;
1615 
1616 		nentries = roundup_pwr2(nentries);
1617 		if (CE_state->src_ring) {
1618 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
1619 		} else {
1620 			src_ring = CE_state->src_ring =
1621 				ce_alloc_ring_state(CE_state,
1622 						CE_RING_SRC,
1623 						nentries);
1624 			if (!src_ring) {
1625 				/* cannot allocate src ring. If the
1626 				 * CE_state is allocated locally free
1627 				 * CE_State and return error.
1628 				 */
1629 				HIF_ERROR("%s: src ring has no mem", __func__);
1630 				if (malloc_CE_state) {
1631 					/* allocated CE_state locally */
1632 					qdf_mem_free(CE_state);
1633 					malloc_CE_state = false;
1634 				}
1635 				return NULL;
1636 			}
1637 			/* we can allocate src ring. Mark that the src ring is
1638 			 * allocated locally
1639 			 */
1640 			malloc_src_ring = true;
1641 
1642 			/*
1643 			 * Also allocate a shadow src ring in
1644 			 * regular mem to use for faster access.
1645 			 */
1646 			src_ring->shadow_base_unaligned =
1647 				qdf_mem_malloc(nentries *
1648 					       sizeof(struct CE_src_desc) +
1649 					       CE_DESC_RING_ALIGN);
1650 			if (!src_ring->shadow_base_unaligned)
1651 				goto error_no_dma_mem;
1652 
1653 			src_ring->shadow_base = (struct CE_src_desc *)
1654 				(((size_t) src_ring->shadow_base_unaligned +
1655 				CE_DESC_RING_ALIGN - 1) &
1656 				 ~(CE_DESC_RING_ALIGN - 1));
1657 
1658 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
1659 					       src_ring, attr);
1660 			if (status < 0)
1661 				goto error_target_access;
1662 
1663 			ce_ring_test_initial_indexes(CE_id, src_ring,
1664 						     "src_ring");
1665 		}
1666 	}
1667 
1668 	/* destination ring setup */
1669 	nentries = attr->dest_nentries;
1670 	if (nentries) {
1671 		struct CE_ring_state *dest_ring;
1672 
1673 		nentries = roundup_pwr2(nentries);
1674 		if (CE_state->dest_ring) {
1675 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
1676 		} else {
1677 			dest_ring = CE_state->dest_ring =
1678 				ce_alloc_ring_state(CE_state,
1679 						CE_RING_DEST,
1680 						nentries);
1681 			if (!dest_ring) {
1682 				/* cannot allocate dst ring. If the CE_state
1683 				 * or src ring is allocated locally free
1684 				 * CE_State and src ring and return error.
1685 				 */
1686 				HIF_ERROR("%s: dest ring has no mem",
1687 					  __func__);
1688 				goto error_no_dma_mem;
1689 			}
1690 
1691 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
1692 				      dest_ring, attr);
1693 			if (status < 0)
1694 				goto error_target_access;
1695 
1696 			ce_ring_test_initial_indexes(CE_id, dest_ring,
1697 						     "dest_ring");
1698 
1699 			/* For srng based target, init status ring here */
1700 			if (ce_srng_based(CE_state->scn)) {
1701 				CE_state->status_ring =
1702 					ce_alloc_ring_state(CE_state,
1703 							CE_RING_STATUS,
1704 							nentries);
1705 				if (!CE_state->status_ring) {
1706 					/*Allocation failed. Cleanup*/
1707 					qdf_mem_free(CE_state->dest_ring);
1708 					if (malloc_src_ring) {
1709 						qdf_mem_free
1710 							(CE_state->src_ring);
1711 						CE_state->src_ring = NULL;
1712 						malloc_src_ring = false;
1713 					}
1714 					if (malloc_CE_state) {
1715 						/* allocated CE_state locally */
1716 						scn->ce_id_to_state[CE_id] =
1717 							NULL;
1718 						qdf_mem_free(CE_state);
1719 						malloc_CE_state = false;
1720 					}
1721 
1722 					return NULL;
1723 				}
1724 
1725 				status = ce_ring_setup(scn, CE_RING_STATUS,
1726 					       CE_id, CE_state->status_ring,
1727 					       attr);
1728 				if (status < 0)
1729 					goto error_target_access;
1730 
1731 			}
1732 
1733 			/* epping */
1734 			/* poll timer */
1735 			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
1736 				qdf_timer_init(scn->qdf_dev,
1737 						&CE_state->poll_timer,
1738 						ce_poll_timeout,
1739 						CE_state,
1740 						QDF_TIMER_TYPE_WAKE_APPS);
1741 				ce_enable_polling(CE_state);
1742 				qdf_timer_mod(&CE_state->poll_timer,
1743 						      CE_POLL_TIMEOUT);
1744 			}
1745 		}
1746 	}
1747 
1748 	if (!ce_srng_based(scn)) {
1749 		/* Enable CE error interrupts */
1750 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1751 			goto error_target_access;
1752 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1753 		if (Q_TARGET_ACCESS_END(scn) < 0)
1754 			goto error_target_access;
1755 	}
1756 
1757 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1758 			ce_oom_recovery, CE_state);
1759 
1760 	/* update the htt_data attribute */
1761 	ce_mark_datapath(CE_state);
1762 	scn->ce_id_to_state[CE_id] = CE_state;
1763 
1764 	alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries);
1765 
1766 	return (struct CE_handle *)CE_state;
1767 
1768 error_target_access:
1769 error_no_dma_mem:
1770 	ce_fini((struct CE_handle *)CE_state);
1771 	return NULL;
1772 }
1773 
1774 /**
1775  * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
1776  * @hif_ctx: HIF Context
1777  *
1778  * API to check if polling is enabled on all CEs. Returns true when polling
1779  * is enabled on all CEs.
1780  *
1781  * Return: bool
1782  */
1783 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1784 {
1785 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1786 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1787 	struct CE_attr *attr;
1788 	int id;
1789 
1790 	for (id = 0; id < scn->ce_count; id++) {
1791 		attr = &hif_state->host_ce_config[id];
1792 		if (attr && (attr->dest_nentries) &&
1793 		    !(attr->flags & CE_ATTR_ENABLE_POLL))
1794 			return false;
1795 	}
1796 	return true;
1797 }
1798 qdf_export_symbol(hif_is_polled_mode_enabled);
1799 
1800 #ifdef WLAN_FEATURE_FASTPATH
1801 /**
1802  * hif_enable_fastpath() Update that we have enabled fastpath mode
1803  * @hif_ctx: HIF context
1804  *
1805  * For use in data path
1806  *
1807  * Retrun: void
1808  */
1809 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1810 {
1811 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1812 
1813 	if (ce_srng_based(scn)) {
1814 		HIF_INFO("%s, srng rings do not support fastpath", __func__);
1815 		return;
1816 	}
1817 	HIF_DBG("%s, Enabling fastpath mode", __func__);
1818 	scn->fastpath_mode_on = true;
1819 }
1820 
1821 /**
1822  * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled
1823  * @hif_ctx: HIF Context
1824  *
1825  * For use in data path to skip HTC
1826  *
1827  * Return: bool
1828  */
1829 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1830 {
1831 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1832 
1833 	return scn->fastpath_mode_on;
1834 }
1835 
1836 /**
1837  * hif_get_ce_handle - API to get CE handle for FastPath mode
1838  * @hif_ctx: HIF Context
1839  * @id: CopyEngine Id
1840  *
1841  * API to return CE handle for fastpath mode
1842  *
1843  * Return: void
1844  */
1845 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1846 {
1847 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1848 
1849 	return scn->ce_id_to_state[id];
1850 }
1851 qdf_export_symbol(hif_get_ce_handle);
1852 
1853 /**
1854  * ce_h2t_tx_ce_cleanup() Place holder function for H2T CE cleanup.
1855  * No processing is required inside this function.
1856  * @ce_hdl: Cope engine handle
1857  * Using an assert, this function makes sure that,
1858  * the TX CE has been processed completely.
1859  *
1860  * This is called while dismantling CE structures. No other thread
1861  * should be using these structures while dismantling is occurring
1862  * therfore no locking is needed.
1863  *
1864  * Return: none
1865  */
1866 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1867 {
1868 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1869 	struct CE_ring_state *src_ring = ce_state->src_ring;
1870 	struct hif_softc *sc = ce_state->scn;
1871 	uint32_t sw_index, write_index;
1872 
1873 	if (hif_is_nss_wifi_enabled(sc))
1874 		return;
1875 
1876 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
1877 		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
1878 			 __func__, __LINE__);
1879 		sw_index = src_ring->sw_index;
1880 		write_index = src_ring->sw_index;
1881 
1882 		/* At this point Tx CE should be clean */
1883 		qdf_assert_always(sw_index == write_index);
1884 	}
1885 }
1886 
1887 /**
1888  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1889  * @ce_hdl: Handle to CE
1890  *
1891  * These buffers are never allocated on the fly, but
1892  * are allocated only once during HIF start and freed
1893  * only once during HIF stop.
1894  * NOTE:
1895  * The assumption here is there is no in-flight DMA in progress
1896  * currently, so that buffers can be freed up safely.
1897  *
1898  * Return: NONE
1899  */
1900 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1901 {
1902 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1903 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
1904 	qdf_nbuf_t nbuf;
1905 	int i;
1906 
1907 	if (ce_state->scn->fastpath_mode_on == false)
1908 		return;
1909 
1910 	if (!ce_state->htt_rx_data)
1911 		return;
1912 
1913 	/*
1914 	 * when fastpath_mode is on and for datapath CEs. Unlike other CE's,
1915 	 * this CE is completely full: does not leave one blank space, to
1916 	 * distinguish between empty queue & full queue. So free all the
1917 	 * entries.
1918 	 */
1919 	for (i = 0; i < dst_ring->nentries; i++) {
1920 		nbuf = dst_ring->per_transfer_context[i];
1921 
1922 		/*
1923 		 * The reasons for doing this check are:
1924 		 * 1) Protect against calling cleanup before allocating buffers
1925 		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1926 		 *    could have a partially filled ring, because of a memory
1927 		 *    allocation failure in the middle of allocating ring.
1928 		 *    This check accounts for that case, checking
1929 		 *    fastpath_mode_on flag or started flag would not have
1930 		 *    covered that case. This is not in performance path,
1931 		 *    so OK to do this.
1932 		 */
1933 		if (nbuf) {
1934 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1935 					      QDF_DMA_FROM_DEVICE);
1936 			qdf_nbuf_free(nbuf);
1937 		}
1938 	}
1939 }
1940 
1941 /**
1942  * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1943  * @scn: HIF handle
1944  *
1945  * Datapath Rx CEs are special case, where we reuse all the message buffers.
1946  * Hence we have to post all the entries in the pipe, even, in the beginning
1947  * unlike for other CE pipes where one less than dest_nentries are filled in
1948  * the beginning.
1949  *
1950  * Return: None
1951  */
1952 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1953 {
1954 	int pipe_num;
1955 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1956 
1957 	if (scn->fastpath_mode_on == false)
1958 		return;
1959 
1960 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1961 		struct HIF_CE_pipe_info *pipe_info =
1962 			&hif_state->pipe_info[pipe_num];
1963 		struct CE_state *ce_state =
1964 			scn->ce_id_to_state[pipe_info->pipe_num];
1965 
1966 		if (ce_state->htt_rx_data)
1967 			atomic_inc(&pipe_info->recv_bufs_needed);
1968 	}
1969 }
1970 #else
1971 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1972 {
1973 }
1974 
1975 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
1976 {
1977 	return false;
1978 }
1979 #endif /* WLAN_FEATURE_FASTPATH */
1980 
1981 void ce_fini(struct CE_handle *copyeng)
1982 {
1983 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1984 	unsigned int CE_id = CE_state->id;
1985 	struct hif_softc *scn = CE_state->scn;
1986 	uint32_t desc_size;
1987 
1988 	bool inited = CE_state->timer_inited;
1989 	CE_state->state = CE_UNUSED;
1990 	scn->ce_id_to_state[CE_id] = NULL;
1991 	/* Set the flag to false first to stop processing in ce_poll_timeout */
1992 	ce_disable_polling(CE_state);
1993 
1994 	qdf_lro_deinit(CE_state->lro_data);
1995 
1996 	if (CE_state->src_ring) {
1997 		/* Cleanup the datapath Tx ring */
1998 		ce_h2t_tx_ce_cleanup(copyeng);
1999 
2000 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
2001 		if (CE_state->src_ring->shadow_base_unaligned)
2002 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
2003 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
2004 			ce_free_desc_ring(scn, CE_state->id,
2005 					  CE_state->src_ring,
2006 					  desc_size);
2007 		qdf_mem_free(CE_state->src_ring);
2008 	}
2009 	if (CE_state->dest_ring) {
2010 		/* Cleanup the datapath Rx ring */
2011 		ce_t2h_msg_ce_cleanup(copyeng);
2012 
2013 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
2014 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
2015 			ce_free_desc_ring(scn, CE_state->id,
2016 					  CE_state->dest_ring,
2017 					  desc_size);
2018 		qdf_mem_free(CE_state->dest_ring);
2019 
2020 		/* epping */
2021 		if (inited) {
2022 			qdf_timer_free(&CE_state->poll_timer);
2023 		}
2024 	}
2025 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
2026 		/* Cleanup the datapath Tx ring */
2027 		ce_h2t_tx_ce_cleanup(copyeng);
2028 
2029 		if (CE_state->status_ring->shadow_base_unaligned)
2030 			qdf_mem_free(
2031 				CE_state->status_ring->shadow_base_unaligned);
2032 
2033 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
2034 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
2035 			ce_free_desc_ring(scn, CE_state->id,
2036 					  CE_state->status_ring,
2037 					  desc_size);
2038 		qdf_mem_free(CE_state->status_ring);
2039 	}
2040 
2041 	free_mem_ce_debug_history(scn, CE_id);
2042 	reset_ce_debug_history(scn);
2043 	ce_deinit_ce_desc_event_log(scn, CE_id);
2044 
2045 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
2046 	qdf_mem_free(CE_state);
2047 }
2048 
2049 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
2050 {
2051 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2052 
2053 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
2054 		  sizeof(hif_state->msg_callbacks_pending));
2055 	qdf_mem_zero(&hif_state->msg_callbacks_current,
2056 		  sizeof(hif_state->msg_callbacks_current));
2057 }
2058 
2059 /* Send the first nbytes bytes of the buffer */
2060 QDF_STATUS
2061 hif_send_head(struct hif_opaque_softc *hif_ctx,
2062 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
2063 	      qdf_nbuf_t nbuf, unsigned int data_attr)
2064 {
2065 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2066 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2067 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2068 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2069 	int bytes = nbytes, nfrags = 0;
2070 	struct ce_sendlist sendlist;
2071 	int status, i = 0;
2072 	unsigned int mux_id = 0;
2073 
2074 	if (nbytes > qdf_nbuf_len(nbuf)) {
2075 		HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
2076 			  (uint32_t)qdf_nbuf_len(nbuf));
2077 		QDF_ASSERT(0);
2078 	}
2079 
2080 	transfer_id =
2081 		(mux_id & MUX_ID_MASK) |
2082 		(transfer_id & TRANSACTION_ID_MASK);
2083 	data_attr &= DESC_DATA_FLAG_MASK;
2084 	/*
2085 	 * The common case involves sending multiple fragments within a
2086 	 * single download (the tx descriptor and the tx frame header).
2087 	 * So, optimize for the case of multiple fragments by not even
2088 	 * checking whether it's necessary to use a sendlist.
2089 	 * The overhead of using a sendlist for a single buffer download
2090 	 * is not a big deal, since it happens rarely (for WMI messages).
2091 	 */
2092 	ce_sendlist_init(&sendlist);
2093 	do {
2094 		qdf_dma_addr_t frag_paddr;
2095 		int frag_bytes;
2096 
2097 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
2098 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
2099 		/*
2100 		 * Clear the packet offset for all but the first CE desc.
2101 		 */
2102 		if (i++ > 0)
2103 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
2104 
2105 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
2106 				    frag_bytes >
2107 				    bytes ? bytes : frag_bytes,
2108 				    qdf_nbuf_get_frag_is_wordstream
2109 				    (nbuf,
2110 				    nfrags) ? 0 :
2111 				    CE_SEND_FLAG_SWAP_DISABLE,
2112 				    data_attr);
2113 		if (status != QDF_STATUS_SUCCESS) {
2114 			HIF_ERROR("%s: error, frag_num %d larger than limit",
2115 				__func__, nfrags);
2116 			return status;
2117 		}
2118 		bytes -= frag_bytes;
2119 		nfrags++;
2120 	} while (bytes > 0);
2121 
2122 	/* Make sure we have resources to handle this request */
2123 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2124 	if (pipe_info->num_sends_allowed < nfrags) {
2125 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2126 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
2127 		return QDF_STATUS_E_RESOURCES;
2128 	}
2129 	pipe_info->num_sends_allowed -= nfrags;
2130 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2131 
2132 	if (qdf_unlikely(!ce_hdl)) {
2133 		HIF_ERROR("%s: error CE handle is null", __func__);
2134 		return A_ERROR;
2135 	}
2136 
2137 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
2138 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
2139 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
2140 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
2141 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
2142 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
2143 
2144 	return status;
2145 }
2146 
2147 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
2148 								int force)
2149 {
2150 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2151 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2152 
2153 	if (!force) {
2154 		int resources;
2155 		/*
2156 		 * Decide whether to actually poll for completions, or just
2157 		 * wait for a later chance. If there seem to be plenty of
2158 		 * resources left, then just wait, since checking involves
2159 		 * reading a CE register, which is a relatively expensive
2160 		 * operation.
2161 		 */
2162 		resources = hif_get_free_queue_number(hif_ctx, pipe);
2163 		/*
2164 		 * If at least 50% of the total resources are still available,
2165 		 * don't bother checking again yet.
2166 		 */
2167 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
2168 									 1))
2169 			return;
2170 	}
2171 #if ATH_11AC_TXCOMPACT
2172 	ce_per_engine_servicereap(scn, pipe);
2173 #else
2174 	ce_per_engine_service(scn, pipe);
2175 #endif
2176 }
2177 
2178 uint16_t
2179 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2180 {
2181 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2182 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2183 	uint16_t rv;
2184 
2185 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2186 	rv = pipe_info->num_sends_allowed;
2187 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2188 	return rv;
2189 }
2190 
2191 /* Called by lower (CE) layer when a send to Target completes. */
2192 static void
2193 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
2194 		     void *transfer_context, qdf_dma_addr_t CE_data,
2195 		     unsigned int nbytes, unsigned int transfer_id,
2196 		     unsigned int sw_index, unsigned int hw_index,
2197 		     unsigned int toeplitz_hash_result)
2198 {
2199 	struct HIF_CE_pipe_info *pipe_info =
2200 		(struct HIF_CE_pipe_info *)ce_context;
2201 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
2202 	struct hif_msg_callbacks *msg_callbacks =
2203 		&pipe_info->pipe_callbacks;
2204 
2205 	do {
2206 		/*
2207 		 * The upper layer callback will be triggered
2208 		 * when last fragment is complteted.
2209 		 */
2210 		if (transfer_context != CE_SENDLIST_ITEM_CTXT)
2211 			msg_callbacks->txCompletionHandler(
2212 				msg_callbacks->Context,
2213 				transfer_context, transfer_id,
2214 				toeplitz_hash_result);
2215 
2216 		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2217 		pipe_info->num_sends_allowed++;
2218 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2219 	} while (ce_completed_send_next(copyeng,
2220 			&ce_context, &transfer_context,
2221 			&CE_data, &nbytes, &transfer_id,
2222 			&sw_idx, &hw_idx,
2223 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
2224 }
2225 
2226 /**
2227  * hif_ce_do_recv(): send message from copy engine to upper layers
2228  * @msg_callbacks: structure containing callback and callback context
2229  * @netbuff: skb containing message
2230  * @nbytes: number of bytes in the message
2231  * @pipe_info: used for the pipe_number info
2232  *
2233  * Checks the packet length, configures the length in the netbuff,
2234  * and calls the upper layer callback.
2235  *
2236  * return: None
2237  */
2238 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
2239 		qdf_nbuf_t netbuf, int nbytes,
2240 		struct HIF_CE_pipe_info *pipe_info) {
2241 	if (nbytes <= pipe_info->buf_sz) {
2242 		qdf_nbuf_set_pktlen(netbuf, nbytes);
2243 		msg_callbacks->
2244 			rxCompletionHandler(msg_callbacks->Context,
2245 					netbuf, pipe_info->pipe_num);
2246 	} else {
2247 		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
2248 				__func__, netbuf, nbytes);
2249 
2250 		qdf_nbuf_free(netbuf);
2251 	}
2252 }
2253 
2254 /* Called by lower (CE) layer when data is received from the Target. */
2255 static void
2256 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
2257 		     void *transfer_context, qdf_dma_addr_t CE_data,
2258 		     unsigned int nbytes, unsigned int transfer_id,
2259 		     unsigned int flags)
2260 {
2261 	struct HIF_CE_pipe_info *pipe_info =
2262 		(struct HIF_CE_pipe_info *)ce_context;
2263 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
2264 	struct CE_state *ce_state = (struct CE_state *) copyeng;
2265 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2266 	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
2267 	struct hif_msg_callbacks *msg_callbacks =
2268 		 &pipe_info->pipe_callbacks;
2269 
2270 	do {
2271 		hif_pm_runtime_mark_last_busy(hif_ctx);
2272 		qdf_nbuf_unmap_single(scn->qdf_dev,
2273 				      (qdf_nbuf_t) transfer_context,
2274 				      QDF_DMA_FROM_DEVICE);
2275 
2276 		atomic_inc(&pipe_info->recv_bufs_needed);
2277 		hif_post_recv_buffers_for_pipe(pipe_info);
2278 		if (scn->target_status == TARGET_STATUS_RESET)
2279 			qdf_nbuf_free(transfer_context);
2280 		else
2281 			hif_ce_do_recv(msg_callbacks, transfer_context,
2282 				nbytes, pipe_info);
2283 
2284 		/* Set up force_break flag if num of receices reaches
2285 		 * MAX_NUM_OF_RECEIVES
2286 		 */
2287 		ce_state->receive_count++;
2288 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
2289 			ce_state->force_break = 1;
2290 			break;
2291 		}
2292 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
2293 					&CE_data, &nbytes, &transfer_id,
2294 					&flags) == QDF_STATUS_SUCCESS);
2295 
2296 }
2297 
2298 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
2299 
2300 void
2301 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
2302 	      struct hif_msg_callbacks *callbacks)
2303 {
2304 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2305 
2306 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2307 	spin_lock_init(&pcie_access_log_lock);
2308 #endif
2309 	/* Save callbacks for later installation */
2310 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
2311 		 sizeof(hif_state->msg_callbacks_pending));
2312 
2313 }
2314 
2315 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
2316 {
2317 	struct CE_handle *ce_diag = hif_state->ce_diag;
2318 	int pipe_num;
2319 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2320 	struct hif_msg_callbacks *hif_msg_callbacks =
2321 		&hif_state->msg_callbacks_current;
2322 
2323 	/* daemonize("hif_compl_thread"); */
2324 
2325 	if (scn->ce_count == 0) {
2326 		HIF_ERROR("%s: Invalid ce_count", __func__);
2327 		return -EINVAL;
2328 	}
2329 
2330 	if (!hif_msg_callbacks ||
2331 			!hif_msg_callbacks->rxCompletionHandler ||
2332 			!hif_msg_callbacks->txCompletionHandler) {
2333 		HIF_ERROR("%s: no completion handler registered", __func__);
2334 		return -EFAULT;
2335 	}
2336 
2337 	A_TARGET_ACCESS_LIKELY(scn);
2338 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2339 		struct CE_attr attr;
2340 		struct HIF_CE_pipe_info *pipe_info;
2341 
2342 		pipe_info = &hif_state->pipe_info[pipe_num];
2343 		if (pipe_info->ce_hdl == ce_diag)
2344 			continue;       /* Handle Diagnostic CE specially */
2345 		attr = hif_state->host_ce_config[pipe_num];
2346 		if (attr.src_nentries) {
2347 			/* pipe used to send to target */
2348 			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
2349 					 __func__, pipe_num, pipe_info);
2350 			ce_send_cb_register(pipe_info->ce_hdl,
2351 					    hif_pci_ce_send_done, pipe_info,
2352 					    attr.flags & CE_ATTR_DISABLE_INTR);
2353 			pipe_info->num_sends_allowed = attr.src_nentries - 1;
2354 		}
2355 		if (attr.dest_nentries) {
2356 			/* pipe used to receive from target */
2357 			ce_recv_cb_register(pipe_info->ce_hdl,
2358 					    hif_pci_ce_recv_data, pipe_info,
2359 					    attr.flags & CE_ATTR_DISABLE_INTR);
2360 		}
2361 
2362 		if (attr.src_nentries)
2363 			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
2364 
2365 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
2366 					sizeof(pipe_info->pipe_callbacks));
2367 	}
2368 
2369 	A_TARGET_ACCESS_UNLIKELY(scn);
2370 	return 0;
2371 }
2372 
2373 /*
2374  * Install pending msg callbacks.
2375  *
2376  * TBDXXX: This hack is needed because upper layers install msg callbacks
2377  * for use with HTC before BMI is done; yet this HIF implementation
2378  * needs to continue to use BMI msg callbacks. Really, upper layers
2379  * should not register HTC callbacks until AFTER BMI phase.
2380  */
2381 static void hif_msg_callbacks_install(struct hif_softc *scn)
2382 {
2383 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2384 
2385 	qdf_mem_copy(&hif_state->msg_callbacks_current,
2386 		 &hif_state->msg_callbacks_pending,
2387 		 sizeof(hif_state->msg_callbacks_pending));
2388 }
2389 
2390 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2391 							uint8_t *DLPipe)
2392 {
2393 	int ul_is_polled, dl_is_polled;
2394 
2395 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
2396 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2397 }
2398 
2399 /**
2400  * hif_dump_pipe_debug_count() - Log error count
2401  * @scn: hif_softc pointer.
2402  *
2403  * Output the pipe error counts of each pipe to log file
2404  *
2405  * Return: N/A
2406  */
2407 void hif_dump_pipe_debug_count(struct hif_softc *scn)
2408 {
2409 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2410 	int pipe_num;
2411 
2412 	if (!hif_state) {
2413 		HIF_ERROR("%s hif_state is NULL", __func__);
2414 		return;
2415 	}
2416 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2417 		struct HIF_CE_pipe_info *pipe_info;
2418 
2419 	pipe_info = &hif_state->pipe_info[pipe_num];
2420 
2421 	if (pipe_info->nbuf_alloc_err_count > 0 ||
2422 			pipe_info->nbuf_dma_err_count > 0 ||
2423 			pipe_info->nbuf_ce_enqueue_err_count)
2424 		HIF_ERROR(
2425 			"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
2426 			__func__, pipe_info->pipe_num,
2427 			atomic_read(&pipe_info->recv_bufs_needed),
2428 			pipe_info->nbuf_alloc_err_count,
2429 			pipe_info->nbuf_dma_err_count,
2430 			pipe_info->nbuf_ce_enqueue_err_count);
2431 	}
2432 }
2433 
2434 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2435 					  void *nbuf, uint32_t *error_cnt,
2436 					  enum hif_ce_event_type failure_type,
2437 					  const char *failure_type_string)
2438 {
2439 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2440 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2441 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2442 	int ce_id = CE_state->id;
2443 	uint32_t error_cnt_tmp;
2444 
2445 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2446 	error_cnt_tmp = ++(*error_cnt);
2447 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2448 	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
2449 		  __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2450 		  failure_type_string);
2451 	hif_record_ce_desc_event(scn, ce_id, failure_type,
2452 				 NULL, nbuf, bufs_needed_tmp, 0);
2453 	/* if we fail to allocate the last buffer for an rx pipe,
2454 	 *	there is no trigger to refill the ce and we will
2455 	 *	eventually crash
2456 	 */
2457 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1 ||
2458 	    (ce_srng_based(scn) &&
2459 	     bufs_needed_tmp == CE_state->dest_ring->nentries - 2))
2460 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
2461 
2462 }
2463 
2464 
2465 
2466 
2467 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
2468 {
2469 	struct CE_handle *ce_hdl;
2470 	qdf_size_t buf_sz;
2471 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2472 	QDF_STATUS status;
2473 	uint32_t bufs_posted = 0;
2474 	unsigned int ce_id;
2475 
2476 	buf_sz = pipe_info->buf_sz;
2477 	if (buf_sz == 0) {
2478 		/* Unused Copy Engine */
2479 		return QDF_STATUS_SUCCESS;
2480 	}
2481 
2482 	ce_hdl = pipe_info->ce_hdl;
2483 	ce_id = ((struct CE_state *)ce_hdl)->id;
2484 
2485 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2486 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
2487 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
2488 		qdf_nbuf_t nbuf;
2489 
2490 		atomic_dec(&pipe_info->recv_bufs_needed);
2491 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2492 
2493 		hif_record_ce_desc_event(scn, ce_id,
2494 					 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL,
2495 					 0, 0);
2496 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
2497 		if (!nbuf) {
2498 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2499 					&pipe_info->nbuf_alloc_err_count,
2500 					 HIF_RX_NBUF_ALLOC_FAILURE,
2501 					"HIF_RX_NBUF_ALLOC_FAILURE");
2502 			return QDF_STATUS_E_NOMEM;
2503 		}
2504 
2505 		hif_record_ce_desc_event(scn, ce_id,
2506 					 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf,
2507 					 0, 0);
2508 		/*
2509 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
2510 		 * CE_data = dma_map_single(dev, data, buf_sz, );
2511 		 * DMA_FROM_DEVICE);
2512 		 */
2513 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
2514 					    QDF_DMA_FROM_DEVICE);
2515 
2516 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2517 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2518 					&pipe_info->nbuf_dma_err_count,
2519 					 HIF_RX_NBUF_MAP_FAILURE,
2520 					"HIF_RX_NBUF_MAP_FAILURE");
2521 			qdf_nbuf_free(nbuf);
2522 			return status;
2523 		}
2524 
2525 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
2526 		hif_record_ce_desc_event(scn, ce_id,
2527 					 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf,
2528 					 0, 0);
2529 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
2530 					       buf_sz, DMA_FROM_DEVICE);
2531 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
2532 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2533 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2534 					&pipe_info->nbuf_ce_enqueue_err_count,
2535 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
2536 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
2537 
2538 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2539 						QDF_DMA_FROM_DEVICE);
2540 			qdf_nbuf_free(nbuf);
2541 			return status;
2542 		}
2543 
2544 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2545 		bufs_posted++;
2546 	}
2547 	pipe_info->nbuf_alloc_err_count =
2548 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
2549 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2550 	pipe_info->nbuf_dma_err_count =
2551 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
2552 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2553 	pipe_info->nbuf_ce_enqueue_err_count =
2554 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
2555 	pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
2556 
2557 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2558 
2559 	return QDF_STATUS_SUCCESS;
2560 }
2561 
2562 /*
2563  * Try to post all desired receive buffers for all pipes.
2564  * Returns 0 for non fastpath rx copy engine as
2565  * oom_allocation_work will be scheduled to recover any
2566  * failures, non-zero if unable to completely replenish
2567  * receive buffers for fastpath rx Copy engine.
2568  */
2569 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
2570 {
2571 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2572 	int pipe_num;
2573 	struct CE_state *ce_state = NULL;
2574 	QDF_STATUS qdf_status;
2575 
2576 	A_TARGET_ACCESS_LIKELY(scn);
2577 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2578 		struct HIF_CE_pipe_info *pipe_info;
2579 
2580 		ce_state = scn->ce_id_to_state[pipe_num];
2581 		pipe_info = &hif_state->pipe_info[pipe_num];
2582 
2583 		if (hif_is_nss_wifi_enabled(scn) &&
2584 		    ce_state && (ce_state->htt_rx_data))
2585 			continue;
2586 
2587 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
2588 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
2589 			ce_state->htt_rx_data &&
2590 			scn->fastpath_mode_on) {
2591 			A_TARGET_ACCESS_UNLIKELY(scn);
2592 			return qdf_status;
2593 		}
2594 	}
2595 
2596 	A_TARGET_ACCESS_UNLIKELY(scn);
2597 
2598 	return QDF_STATUS_SUCCESS;
2599 }
2600 
2601 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
2602 {
2603 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2604 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2605 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
2606 
2607 	hif_update_fastpath_recv_bufs_cnt(scn);
2608 
2609 	hif_msg_callbacks_install(scn);
2610 
2611 	if (hif_completion_thread_startup(hif_state))
2612 		return QDF_STATUS_E_FAILURE;
2613 
2614 	/* enable buffer cleanup */
2615 	hif_state->started = true;
2616 
2617 	/* Post buffers once to start things off. */
2618 	qdf_status = hif_post_recv_buffers(scn);
2619 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2620 		/* cleanup is done in hif_ce_disable */
2621 		HIF_ERROR("%s:failed to post buffers", __func__);
2622 		return qdf_status;
2623 	}
2624 
2625 	return qdf_status;
2626 }
2627 
2628 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2629 {
2630 	struct hif_softc *scn;
2631 	struct CE_handle *ce_hdl;
2632 	uint32_t buf_sz;
2633 	struct HIF_CE_state *hif_state;
2634 	qdf_nbuf_t netbuf;
2635 	qdf_dma_addr_t CE_data;
2636 	void *per_CE_context;
2637 
2638 	buf_sz = pipe_info->buf_sz;
2639 	/* Unused Copy Engine */
2640 	if (buf_sz == 0)
2641 		return;
2642 
2643 
2644 	hif_state = pipe_info->HIF_CE_state;
2645 	if (!hif_state->started)
2646 		return;
2647 
2648 	scn = HIF_GET_SOFTC(hif_state);
2649 	ce_hdl = pipe_info->ce_hdl;
2650 
2651 	if (!scn->qdf_dev)
2652 		return;
2653 	while (ce_revoke_recv_next
2654 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
2655 			&CE_data) == QDF_STATUS_SUCCESS) {
2656 		if (netbuf) {
2657 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2658 					      QDF_DMA_FROM_DEVICE);
2659 			qdf_nbuf_free(netbuf);
2660 		}
2661 	}
2662 }
2663 
2664 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2665 {
2666 	struct CE_handle *ce_hdl;
2667 	struct HIF_CE_state *hif_state;
2668 	struct hif_softc *scn;
2669 	qdf_nbuf_t netbuf;
2670 	void *per_CE_context;
2671 	qdf_dma_addr_t CE_data;
2672 	unsigned int nbytes;
2673 	unsigned int id;
2674 	uint32_t buf_sz;
2675 	uint32_t toeplitz_hash_result;
2676 
2677 	buf_sz = pipe_info->buf_sz;
2678 	if (buf_sz == 0) {
2679 		/* Unused Copy Engine */
2680 		return;
2681 	}
2682 
2683 	hif_state = pipe_info->HIF_CE_state;
2684 	if (!hif_state->started) {
2685 		return;
2686 	}
2687 
2688 	scn = HIF_GET_SOFTC(hif_state);
2689 
2690 	ce_hdl = pipe_info->ce_hdl;
2691 
2692 	while (ce_cancel_send_next
2693 		       (ce_hdl, &per_CE_context,
2694 		       (void **)&netbuf, &CE_data, &nbytes,
2695 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2696 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2697 			/*
2698 			 * Packets enqueued by htt_h2t_ver_req_msg() and
2699 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2700 			 * freed in htt_htc_misc_pkt_pool_free() in
2701 			 * wlantl_close(), so do not free them here again
2702 			 * by checking whether it's the endpoint
2703 			 * which they are queued in.
2704 			 */
2705 			if (id == scn->htc_htt_tx_endpoint)
2706 				return;
2707 			/* Indicate the completion to higher
2708 			 * layer to free the buffer
2709 			 */
2710 			if (pipe_info->pipe_callbacks.txCompletionHandler)
2711 				pipe_info->pipe_callbacks.
2712 				    txCompletionHandler(pipe_info->
2713 					    pipe_callbacks.Context,
2714 					    netbuf, id, toeplitz_hash_result);
2715 		}
2716 	}
2717 }
2718 
2719 /*
2720  * Cleanup residual buffers for device shutdown:
2721  *    buffers that were enqueued for receive
2722  *    buffers that were to be sent
2723  * Note: Buffers that had completed but which were
2724  * not yet processed are on a completion queue. They
2725  * are handled when the completion thread shuts down.
2726  */
2727 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
2728 {
2729 	int pipe_num;
2730 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2731 	struct CE_state *ce_state;
2732 
2733 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2734 		struct HIF_CE_pipe_info *pipe_info;
2735 
2736 		ce_state = scn->ce_id_to_state[pipe_num];
2737 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2738 				((ce_state->htt_tx_data) ||
2739 				 (ce_state->htt_rx_data))) {
2740 			continue;
2741 		}
2742 
2743 		pipe_info = &hif_state->pipe_info[pipe_num];
2744 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
2745 		hif_send_buffer_cleanup_on_pipe(pipe_info);
2746 	}
2747 }
2748 
2749 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
2750 {
2751 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2752 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2753 
2754 	hif_buffer_cleanup(hif_state);
2755 }
2756 
2757 static void hif_destroy_oom_work(struct hif_softc *scn)
2758 {
2759 	struct CE_state *ce_state;
2760 	int ce_id;
2761 
2762 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2763 		ce_state = scn->ce_id_to_state[ce_id];
2764 		if (ce_state)
2765 			qdf_destroy_work(scn->qdf_dev,
2766 					 &ce_state->oom_allocation_work);
2767 	}
2768 }
2769 
2770 void hif_ce_stop(struct hif_softc *scn)
2771 {
2772 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2773 	int pipe_num;
2774 
2775 	/*
2776 	 * before cleaning up any memory, ensure irq &
2777 	 * bottom half contexts will not be re-entered
2778 	 */
2779 	hif_disable_isr(&scn->osc);
2780 	hif_destroy_oom_work(scn);
2781 	scn->hif_init_done = false;
2782 
2783 	/*
2784 	 * At this point, asynchronous threads are stopped,
2785 	 * The Target should not DMA nor interrupt, Host code may
2786 	 * not initiate anything more.  So we just need to clean
2787 	 * up Host-side state.
2788 	 */
2789 
2790 	if (scn->athdiag_procfs_inited) {
2791 		athdiag_procfs_remove();
2792 		scn->athdiag_procfs_inited = false;
2793 	}
2794 
2795 	hif_buffer_cleanup(hif_state);
2796 
2797 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2798 		struct HIF_CE_pipe_info *pipe_info;
2799 		struct CE_attr attr;
2800 		struct CE_handle *ce_diag = hif_state->ce_diag;
2801 
2802 		pipe_info = &hif_state->pipe_info[pipe_num];
2803 		if (pipe_info->ce_hdl) {
2804 			if (pipe_info->ce_hdl != ce_diag &&
2805 			    hif_state->started) {
2806 				attr = hif_state->host_ce_config[pipe_num];
2807 				if (attr.src_nentries)
2808 					qdf_spinlock_destroy(&pipe_info->
2809 							completion_freeq_lock);
2810 			}
2811 			ce_fini(pipe_info->ce_hdl);
2812 			pipe_info->ce_hdl = NULL;
2813 			pipe_info->buf_sz = 0;
2814 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2815 		}
2816 	}
2817 
2818 	if (hif_state->sleep_timer_init) {
2819 		qdf_timer_stop(&hif_state->sleep_timer);
2820 		qdf_timer_free(&hif_state->sleep_timer);
2821 		hif_state->sleep_timer_init = false;
2822 	}
2823 
2824 	hif_state->started = false;
2825 }
2826 
2827 static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
2828 				   struct shadow_reg_cfg
2829 				   **target_shadow_reg_cfg_ret,
2830 				   uint32_t *shadow_cfg_sz_ret)
2831 {
2832 	if (target_shadow_reg_cfg_ret)
2833 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2834 	if (shadow_cfg_sz_ret)
2835 		*shadow_cfg_sz_ret = shadow_cfg_sz;
2836 }
2837 
2838 /**
2839  * hif_get_target_ce_config() - get copy engine configuration
2840  * @target_ce_config_ret: basic copy engine configuration
2841  * @target_ce_config_sz_ret: size of the basic configuration in bytes
2842  * @target_service_to_ce_map_ret: service mapping for the copy engines
2843  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2844  * @target_shadow_reg_cfg_ret: shadow register configuration
2845  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2846  *
2847  * providing accessor to these values outside of this file.
2848  * currently these are stored in static pointers to const sections.
2849  * there are multiple configurations that are selected from at compile time.
2850  * Runtime selection would need to consider mode, target type and bus type.
2851  *
2852  * Return: return by parameter.
2853  */
2854 void hif_get_target_ce_config(struct hif_softc *scn,
2855 		struct CE_pipe_config **target_ce_config_ret,
2856 		uint32_t *target_ce_config_sz_ret,
2857 		struct service_to_pipe **target_service_to_ce_map_ret,
2858 		uint32_t *target_service_to_ce_map_sz_ret,
2859 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
2860 		uint32_t *shadow_cfg_sz_ret)
2861 {
2862 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2863 
2864 	*target_ce_config_ret = hif_state->target_ce_config;
2865 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
2866 
2867 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2868 				       target_service_to_ce_map_sz_ret);
2869 	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
2870 			       shadow_cfg_sz_ret);
2871 }
2872 
2873 #ifdef CONFIG_SHADOW_V2
2874 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2875 {
2876 	int i;
2877 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2878 		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
2879 
2880 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2881 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2882 		     "%s: i %d, val %x", __func__, i,
2883 		     cfg->shadow_reg_v2_cfg[i].addr);
2884 	}
2885 }
2886 
2887 #else
2888 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2889 {
2890 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2891 		  "%s: CONFIG_SHADOW_V2 not defined", __func__);
2892 }
2893 #endif
2894 
2895 #ifdef ADRASTEA_RRI_ON_DDR
2896 /**
2897  * hif_get_src_ring_read_index(): Called to get the SRRI
2898  *
2899  * @scn: hif_softc pointer
2900  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2901  *
2902  * This function returns the SRRI to the caller. For CEs that
2903  * dont have interrupts enabled, we look at the DDR based SRRI
2904  *
2905  * Return: SRRI
2906  */
2907 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
2908 		uint32_t CE_ctrl_addr)
2909 {
2910 	struct CE_attr attr;
2911 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2912 
2913 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2914 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
2915 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2916 	} else {
2917 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2918 			return A_TARGET_READ(scn,
2919 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2920 		else
2921 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
2922 					CE_ctrl_addr);
2923 	}
2924 }
2925 
2926 /**
2927  * hif_get_dst_ring_read_index(): Called to get the DRRI
2928  *
2929  * @scn: hif_softc pointer
2930  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2931  *
2932  * This function returns the DRRI to the caller. For CEs that
2933  * dont have interrupts enabled, we look at the DDR based DRRI
2934  *
2935  * Return: DRRI
2936  */
2937 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
2938 		uint32_t CE_ctrl_addr)
2939 {
2940 	struct CE_attr attr;
2941 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2942 
2943 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2944 
2945 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
2946 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2947 	} else {
2948 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2949 			return A_TARGET_READ(scn,
2950 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2951 		else
2952 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
2953 					CE_ctrl_addr);
2954 	}
2955 }
2956 
2957 /**
2958  * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
2959  * @scn: hif_softc pointer
2960  *
2961  * Return: qdf status
2962  */
2963 static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
2964 {
2965 	qdf_dma_addr_t paddr_rri_on_ddr = 0;
2966 
2967 	scn->vaddr_rri_on_ddr =
2968 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
2969 		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
2970 		&paddr_rri_on_ddr);
2971 
2972 	if (!scn->vaddr_rri_on_ddr) {
2973 		hif_err("dmaable page alloc fail");
2974 		return QDF_STATUS_E_NOMEM;
2975 	}
2976 
2977 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
2978 
2979 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
2980 
2981 	return QDF_STATUS_SUCCESS;
2982 }
2983 #endif
2984 
2985 #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
2986 /**
2987  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2988  *
2989  * @scn: hif_softc pointer
2990  *
2991  * This function allocates non cached memory on ddr and sends
2992  * the physical address of this memory to the CE hardware. The
2993  * hardware updates the RRI on this particular location.
2994  *
2995  * Return: None
2996  */
2997 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
2998 {
2999 	unsigned int i;
3000 	uint32_t high_paddr, low_paddr;
3001 
3002 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3003 		return;
3004 
3005 	low_paddr  = BITS0_TO_31(scn->paddr_rri_on_ddr);
3006 	high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);
3007 
3008 	HIF_DBG("%s using srri and drri from DDR", __func__);
3009 
3010 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
3011 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
3012 
3013 	for (i = 0; i < CE_COUNT; i++)
3014 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
3015 }
3016 #else
3017 /**
3018  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3019  *
3020  * @scn: hif_softc pointer
3021  *
3022  * This is a dummy implementation for platforms that don't
3023  * support this functionality.
3024  *
3025  * Return: None
3026  */
3027 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3028 {
3029 }
3030 #endif
3031 
3032 /**
3033  * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
3034  *                                    QMI command
3035  * @scn: hif context
3036  * @cfg: wlan enable config
3037  *
3038  * In case of Genoa, rri_over_ddr memory configuration is passed
3039  * to firmware through QMI configure command.
3040  */
3041 #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
3042 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3043 					   struct pld_wlan_enable_cfg *cfg)
3044 {
3045 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3046 		return;
3047 
3048 	cfg->rri_over_ddr_cfg_valid = true;
3049 	cfg->rri_over_ddr_cfg.base_addr_low =
3050 		 BITS0_TO_31(scn->paddr_rri_on_ddr);
3051 	cfg->rri_over_ddr_cfg.base_addr_high =
3052 		 BITS32_TO_35(scn->paddr_rri_on_ddr);
3053 }
3054 #else
3055 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3056 					   struct pld_wlan_enable_cfg *cfg)
3057 {
3058 }
3059 #endif
3060 
3061 /**
3062  * hif_wlan_enable(): call the platform driver to enable wlan
3063  * @scn: HIF Context
3064  *
3065  * This function passes the con_mode and CE configuration to
3066  * platform driver to enable wlan.
3067  *
3068  * Return: linux error code
3069  */
3070 int hif_wlan_enable(struct hif_softc *scn)
3071 {
3072 	struct pld_wlan_enable_cfg cfg;
3073 	enum pld_driver_mode mode;
3074 	uint32_t con_mode = hif_get_conparam(scn);
3075 
3076 	hif_get_target_ce_config(scn,
3077 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
3078 			&cfg.num_ce_tgt_cfg,
3079 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
3080 			&cfg.num_ce_svc_pipe_cfg,
3081 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
3082 			&cfg.num_shadow_reg_cfg);
3083 
3084 	/* translate from structure size to array size */
3085 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
3086 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
3087 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
3088 
3089 	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
3090 			      &cfg.num_shadow_reg_v2_cfg);
3091 
3092 	hif_print_hal_shadow_register_cfg(&cfg);
3093 
3094 	hif_update_rri_over_ddr_config(scn, &cfg);
3095 
3096 	if (QDF_GLOBAL_FTM_MODE == con_mode)
3097 		mode = PLD_FTM;
3098 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
3099 		mode = PLD_COLDBOOT_CALIBRATION;
3100 	else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode)
3101 		mode = PLD_FTM_COLDBOOT_CALIBRATION;
3102 	else if (QDF_IS_EPPING_ENABLED(con_mode))
3103 		mode = PLD_EPPING;
3104 	else
3105 		mode = PLD_MISSION;
3106 
3107 	if (BYPASS_QMI)
3108 		return 0;
3109 	else
3110 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode);
3111 }
3112 
3113 #ifdef WLAN_FEATURE_EPPING
3114 
3115 #define CE_EPPING_USES_IRQ true
3116 
3117 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
3118 {
3119 	if (CE_EPPING_USES_IRQ)
3120 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
3121 	else
3122 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
3123 	hif_state->target_ce_config = target_ce_config_wlan_epping;
3124 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
3125 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
3126 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
3127 }
3128 #endif
3129 
3130 #ifdef QCN7605_SUPPORT
3131 static inline
3132 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3133 			       struct HIF_CE_state *hif_state)
3134 {
3135 	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
3136 	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
3137 	hif_state->target_ce_config_sz =
3138 				 sizeof(target_ce_config_wlan_qcn7605);
3139 	target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
3140 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
3141 	scn->ce_count = QCN7605_CE_COUNT;
3142 }
3143 #else
3144 static inline
3145 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3146 			       struct HIF_CE_state *hif_state)
3147 {
3148 	HIF_ERROR("QCN7605 not supported");
3149 }
3150 #endif
3151 
3152 #ifdef CE_SVC_CMN_INIT
3153 #ifdef QCA_WIFI_SUPPORT_SRNG
3154 static inline void hif_ce_service_init(void)
3155 {
3156 	ce_service_srng_init();
3157 }
3158 #else
3159 static inline void hif_ce_service_init(void)
3160 {
3161 	ce_service_legacy_init();
3162 }
3163 #endif
3164 #else
3165 static inline void hif_ce_service_init(void)
3166 {
3167 }
3168 #endif
3169 
3170 
3171 /**
3172  * hif_ce_prepare_config() - load the correct static tables.
3173  * @scn: hif context
3174  *
3175  * Epping uses different static attribute tables than mission mode.
3176  */
3177 void hif_ce_prepare_config(struct hif_softc *scn)
3178 {
3179 	uint32_t mode = hif_get_conparam(scn);
3180 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3181 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
3182 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3183 
3184 	hif_ce_service_init();
3185 	hif_state->ce_services = ce_services_attach(scn);
3186 
3187 	scn->ce_count = HOST_CE_COUNT;
3188 	/* if epping is enabled we need to use the epping configuration. */
3189 	if (QDF_IS_EPPING_ENABLED(mode)) {
3190 		hif_ce_prepare_epping_config(hif_state);
3191 		return;
3192 	}
3193 
3194 	switch (tgt_info->target_type) {
3195 	default:
3196 		hif_state->host_ce_config = host_ce_config_wlan;
3197 		hif_state->target_ce_config = target_ce_config_wlan;
3198 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
3199 		break;
3200 	case TARGET_TYPE_QCN7605:
3201 		hif_set_ce_config_qcn7605(scn, hif_state);
3202 		break;
3203 	case TARGET_TYPE_AR900B:
3204 	case TARGET_TYPE_QCA9984:
3205 	case TARGET_TYPE_IPQ4019:
3206 	case TARGET_TYPE_QCA9888:
3207 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3208 			hif_state->host_ce_config =
3209 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
3210 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3211 			hif_state->host_ce_config =
3212 				host_lowdesc_ce_cfg_wlan_ar900b;
3213 		} else {
3214 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
3215 		}
3216 
3217 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
3218 		hif_state->target_ce_config_sz =
3219 				sizeof(target_ce_config_wlan_ar900b);
3220 
3221 		break;
3222 
3223 	case TARGET_TYPE_AR9888:
3224 	case TARGET_TYPE_AR9888V2:
3225 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3226 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
3227 		} else {
3228 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
3229 		}
3230 
3231 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
3232 		hif_state->target_ce_config_sz =
3233 					sizeof(target_ce_config_wlan_ar9888);
3234 
3235 		break;
3236 
3237 	case TARGET_TYPE_QCA8074:
3238 	case TARGET_TYPE_QCA8074V2:
3239 	case TARGET_TYPE_QCA6018:
3240 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
3241 			hif_state->host_ce_config =
3242 					host_ce_config_wlan_qca8074_pci;
3243 			hif_state->target_ce_config =
3244 				target_ce_config_wlan_qca8074_pci;
3245 			hif_state->target_ce_config_sz =
3246 				sizeof(target_ce_config_wlan_qca8074_pci);
3247 		} else {
3248 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
3249 			hif_state->target_ce_config =
3250 					target_ce_config_wlan_qca8074;
3251 			hif_state->target_ce_config_sz =
3252 				sizeof(target_ce_config_wlan_qca8074);
3253 		}
3254 		break;
3255 	case TARGET_TYPE_QCA6290:
3256 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
3257 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
3258 		hif_state->target_ce_config_sz =
3259 					sizeof(target_ce_config_wlan_qca6290);
3260 
3261 		scn->ce_count = QCA_6290_CE_COUNT;
3262 		break;
3263 	case TARGET_TYPE_QCN9000:
3264 		hif_state->host_ce_config = host_ce_config_wlan_qcn9000;
3265 		hif_state->target_ce_config = target_ce_config_wlan_qcn9000;
3266 		hif_state->target_ce_config_sz =
3267 					sizeof(target_ce_config_wlan_qcn9000);
3268 		scn->ce_count = QCN_9000_CE_COUNT;
3269 		scn->disable_wake_irq = 1;
3270 		break;
3271 	case TARGET_TYPE_QCA5018:
3272 		hif_state->host_ce_config = host_ce_config_wlan_qca5018;
3273 		hif_state->target_ce_config = target_ce_config_wlan_qca5018;
3274 		hif_state->target_ce_config_sz =
3275 					sizeof(target_ce_config_wlan_qca5018);
3276 		scn->ce_count = QCA_5018_CE_COUNT;
3277 		break;
3278 	case TARGET_TYPE_QCA6390:
3279 		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
3280 		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
3281 		hif_state->target_ce_config_sz =
3282 					sizeof(target_ce_config_wlan_qca6390);
3283 
3284 		scn->ce_count = QCA_6390_CE_COUNT;
3285 		break;
3286 	case TARGET_TYPE_QCA6490:
3287 		hif_state->host_ce_config = host_ce_config_wlan_qca6490;
3288 		hif_state->target_ce_config = target_ce_config_wlan_qca6490;
3289 		hif_state->target_ce_config_sz =
3290 					sizeof(target_ce_config_wlan_qca6490);
3291 
3292 		scn->ce_count = QCA_6490_CE_COUNT;
3293 		break;
3294 	case TARGET_TYPE_QCA6750:
3295 		hif_state->host_ce_config = host_ce_config_wlan_qca6750;
3296 		hif_state->target_ce_config = target_ce_config_wlan_qca6750;
3297 		hif_state->target_ce_config_sz =
3298 					sizeof(target_ce_config_wlan_qca6750);
3299 
3300 		scn->ce_count = QCA_6750_CE_COUNT;
3301 		break;
3302 	case TARGET_TYPE_ADRASTEA:
3303 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3304 			hif_state->host_ce_config =
3305 				host_lowdesc_ce_config_wlan_adrastea_nopktlog;
3306 			hif_state->target_ce_config =
3307 			       target_lowdesc_ce_config_wlan_adrastea_nopktlog;
3308 			hif_state->target_ce_config_sz =
3309 			sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog);
3310 		} else {
3311 			hif_state->host_ce_config =
3312 				host_ce_config_wlan_adrastea;
3313 			hif_state->target_ce_config =
3314 					target_ce_config_wlan_adrastea;
3315 			hif_state->target_ce_config_sz =
3316 					sizeof(target_ce_config_wlan_adrastea);
3317 		}
3318 		break;
3319 
3320 	}
3321 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
3322 }
3323 
3324 /**
3325  * hif_ce_open() - do ce specific allocations
3326  * @hif_sc: pointer to hif context
3327  *
3328  * return: 0 for success or QDF_STATUS_E_NOMEM
3329  */
3330 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
3331 {
3332 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3333 
3334 	qdf_spinlock_create(&hif_state->irq_reg_lock);
3335 	qdf_spinlock_create(&hif_state->keep_awake_lock);
3336 	return QDF_STATUS_SUCCESS;
3337 }
3338 
3339 /**
3340  * hif_ce_close() - do ce specific free
3341  * @hif_sc: pointer to hif context
3342  */
3343 void hif_ce_close(struct hif_softc *hif_sc)
3344 {
3345 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3346 
3347 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
3348 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
3349 }
3350 
3351 /**
3352  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
3353  * @hif_sc: hif context
3354  *
3355  * uses state variables to support cleaning up when hif_config_ce fails.
3356  */
3357 void hif_unconfig_ce(struct hif_softc *hif_sc)
3358 {
3359 	int pipe_num;
3360 	struct HIF_CE_pipe_info *pipe_info;
3361 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3362 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
3363 
3364 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3365 		pipe_info = &hif_state->pipe_info[pipe_num];
3366 		if (pipe_info->ce_hdl) {
3367 			ce_unregister_irq(hif_state, (1 << pipe_num));
3368 		}
3369 	}
3370 	deinit_tasklet_workers(hif_hdl);
3371 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3372 		pipe_info = &hif_state->pipe_info[pipe_num];
3373 		if (pipe_info->ce_hdl) {
3374 			ce_fini(pipe_info->ce_hdl);
3375 			pipe_info->ce_hdl = NULL;
3376 			pipe_info->buf_sz = 0;
3377 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
3378 		}
3379 	}
3380 	if (hif_sc->athdiag_procfs_inited) {
3381 		athdiag_procfs_remove();
3382 		hif_sc->athdiag_procfs_inited = false;
3383 	}
3384 }
3385 
3386 #ifdef CONFIG_BYPASS_QMI
3387 #ifdef QCN7605_SUPPORT
3388 /**
3389  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3390  * @scn: pointer to HIF structure
3391  *
3392  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3393  *
3394  * Return: void
3395  */
3396 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3397 {
3398 	void *target_va;
3399 	phys_addr_t target_pa;
3400 	struct ce_info *ce_info_ptr;
3401 	uint32_t msi_data_start;
3402 	uint32_t msi_data_count;
3403 	uint32_t msi_irq_start;
3404 	uint32_t i = 0;
3405 	int ret;
3406 
3407 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev,
3408 					     scn->qdf_dev->dev,
3409 					     FW_SHARED_MEM +
3410 					     sizeof(struct ce_info),
3411 					     &target_pa);
3412 	if (!target_va)
3413 		return;
3414 
3415 	ce_info_ptr = (struct ce_info *)target_va;
3416 
3417 	if (scn->vaddr_rri_on_ddr) {
3418 		ce_info_ptr->rri_over_ddr_low_paddr  =
3419 			 BITS0_TO_31(scn->paddr_rri_on_ddr);
3420 		ce_info_ptr->rri_over_ddr_high_paddr =
3421 			 BITS32_TO_35(scn->paddr_rri_on_ddr);
3422 	}
3423 
3424 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3425 					  &msi_data_count, &msi_data_start,
3426 					  &msi_irq_start);
3427 	if (ret) {
3428 		hif_err("Failed to get CE msi config");
3429 		return;
3430 	}
3431 
3432 	for (i = 0; i < CE_COUNT_MAX; i++) {
3433 		ce_info_ptr->cfg[i].ce_id = i;
3434 		ce_info_ptr->cfg[i].msi_vector =
3435 			 (i % msi_data_count) + msi_irq_start;
3436 	}
3437 
3438 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3439 	hif_info("target va %pK target pa %pa", target_va, &target_pa);
3440 }
3441 #else
3442 /**
3443  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3444  * @scn: pointer to HIF structure
3445  *
3446  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3447  *
3448  * Return: void
3449  */
3450 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3451 {
3452 	void *target_va;
3453 	phys_addr_t target_pa;
3454 
3455 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
3456 				FW_SHARED_MEM, &target_pa);
3457 	if (!target_va) {
3458 		HIF_TRACE("Memory allocation failed could not post target buf");
3459 		return;
3460 	}
3461 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3462 	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
3463 }
3464 #endif
3465 
3466 #else
3467 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
3468 {
3469 }
3470 #endif
3471 
3472 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
3473 				bool wait_for_it)
3474 {
3475 	/* todo */
3476 	return 0;
3477 }
3478 
3479 /**
3480  * hif_config_ce() - configure copy engines
3481  * @scn: hif context
3482  *
3483  * Prepares fw, copy engine hardware and host sw according
3484  * to the attributes selected by hif_ce_prepare_config.
3485  *
3486  * also calls athdiag_procfs_init
3487  *
3488  * return: 0 for success nonzero for failure.
3489  */
3490 int hif_config_ce(struct hif_softc *scn)
3491 {
3492 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3493 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3494 	struct HIF_CE_pipe_info *pipe_info;
3495 	int pipe_num;
3496 	struct CE_state *ce_state = NULL;
3497 
3498 #ifdef ADRASTEA_SHADOW_REGISTERS
3499 	int i;
3500 #endif
3501 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
3502 
3503 	scn->notice_send = true;
3504 	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
3505 
3506 	hif_post_static_buf_to_target(scn);
3507 
3508 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
3509 
3510 	hif_config_rri_on_ddr(scn);
3511 
3512 	if (ce_srng_based(scn))
3513 		scn->bus_ops.hif_target_sleep_state_adjust =
3514 			&hif_srng_sleep_state_adjust;
3515 
3516 	/* Initialise the CE debug history sysfs interface inputs ce_id and
3517 	 * index. Disable data storing
3518 	 */
3519 	reset_ce_debug_history(scn);
3520 
3521 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3522 		struct CE_attr *attr;
3523 
3524 		pipe_info = &hif_state->pipe_info[pipe_num];
3525 		pipe_info->pipe_num = pipe_num;
3526 		pipe_info->HIF_CE_state = hif_state;
3527 		attr = &hif_state->host_ce_config[pipe_num];
3528 
3529 		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
3530 		ce_state = scn->ce_id_to_state[pipe_num];
3531 		if (!ce_state) {
3532 			A_TARGET_ACCESS_UNLIKELY(scn);
3533 			goto err;
3534 		}
3535 		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
3536 		QDF_ASSERT(pipe_info->ce_hdl);
3537 		if (!pipe_info->ce_hdl) {
3538 			rv = QDF_STATUS_E_FAILURE;
3539 			A_TARGET_ACCESS_UNLIKELY(scn);
3540 			goto err;
3541 		}
3542 
3543 		ce_state->lro_data = qdf_lro_init();
3544 
3545 		if (attr->flags & CE_ATTR_DIAG) {
3546 			/* Reserve the ultimate CE for
3547 			 * Diagnostic Window support
3548 			 */
3549 			hif_state->ce_diag = pipe_info->ce_hdl;
3550 			continue;
3551 		}
3552 
3553 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3554 				(ce_state->htt_rx_data))
3555 			continue;
3556 
3557 		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
3558 		if (attr->dest_nentries > 0) {
3559 			atomic_set(&pipe_info->recv_bufs_needed,
3560 				   init_buffer_count(attr->dest_nentries - 1));
3561 			/*SRNG based CE has one entry less */
3562 			if (ce_srng_based(scn))
3563 				atomic_dec(&pipe_info->recv_bufs_needed);
3564 		} else {
3565 			atomic_set(&pipe_info->recv_bufs_needed, 0);
3566 		}
3567 		ce_tasklet_init(hif_state, (1 << pipe_num));
3568 		ce_register_irq(hif_state, (1 << pipe_num));
3569 	}
3570 
3571 	if (athdiag_procfs_init(scn) != 0) {
3572 		A_TARGET_ACCESS_UNLIKELY(scn);
3573 		goto err;
3574 	}
3575 	scn->athdiag_procfs_inited = true;
3576 
3577 	HIF_DBG("%s: ce_init done", __func__);
3578 
3579 	init_tasklet_workers(hif_hdl);
3580 
3581 	HIF_DBG("%s: X, ret = %d", __func__, rv);
3582 
3583 #ifdef ADRASTEA_SHADOW_REGISTERS
3584 	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
3585 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
3586 		HIF_DBG("%s Shadow Register%d is mapped to address %x",
3587 			  __func__, i,
3588 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
3589 	}
3590 #endif
3591 
3592 	return rv != QDF_STATUS_SUCCESS;
3593 
3594 err:
3595 	/* Failure, so clean up */
3596 	hif_unconfig_ce(scn);
3597 	HIF_TRACE("%s: X, ret = %d", __func__, rv);
3598 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
3599 }
3600 
3601 #ifdef IPA_OFFLOAD
3602 /**
3603  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
3604  * @scn: bus context
3605  * @ce_sr_base_paddr: copyengine source ring base physical address
3606  * @ce_sr_ring_size: copyengine source ring size
3607  * @ce_reg_paddr: copyengine register physical address
3608  *
3609  * IPA micro controller data path offload feature enabled,
3610  * HIF should release copy engine related resource information to IPA UC
3611  * IPA UC will access hardware resource with released information
3612  *
3613  * Return: None
3614  */
3615 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
3616 			     qdf_shared_mem_t **ce_sr,
3617 			     uint32_t *ce_sr_ring_size,
3618 			     qdf_dma_addr_t *ce_reg_paddr)
3619 {
3620 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3621 	struct HIF_CE_pipe_info *pipe_info =
3622 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
3623 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3624 
3625 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
3626 			    ce_reg_paddr);
3627 }
3628 #endif /* IPA_OFFLOAD */
3629 
3630 
3631 #ifdef ADRASTEA_SHADOW_REGISTERS
3632 
3633 /*
3634  * Current shadow register config
3635  *
3636  * -----------------------------------------------------------
3637  * Shadow Register      |     CE   |    src/dst write index
3638  * -----------------------------------------------------------
3639  *         0            |     0    |           src
3640  *         1     No Config - Doesn't point to anything
3641  *         2     No Config - Doesn't point to anything
3642  *         3            |     3    |           src
3643  *         4            |     4    |           src
3644  *         5            |     5    |           src
3645  *         6     No Config - Doesn't point to anything
3646  *         7            |     7    |           src
3647  *         8     No Config - Doesn't point to anything
3648  *         9     No Config - Doesn't point to anything
3649  *         10    No Config - Doesn't point to anything
3650  *         11    No Config - Doesn't point to anything
3651  * -----------------------------------------------------------
3652  *         12    No Config - Doesn't point to anything
3653  *         13           |     1    |           dst
3654  *         14           |     2    |           dst
3655  *         15    No Config - Doesn't point to anything
3656  *         16    No Config - Doesn't point to anything
3657  *         17    No Config - Doesn't point to anything
3658  *         18    No Config - Doesn't point to anything
3659  *         19           |     7    |           dst
3660  *         20           |     8    |           dst
3661  *         21    No Config - Doesn't point to anything
3662  *         22    No Config - Doesn't point to anything
3663  *         23    No Config - Doesn't point to anything
3664  * -----------------------------------------------------------
3665  *
3666  *
3667  * ToDo - Move shadow register config to following in the future
3668  * This helps free up a block of shadow registers towards the end.
3669  * Can be used for other purposes
3670  *
3671  * -----------------------------------------------------------
3672  * Shadow Register      |     CE   |    src/dst write index
3673  * -----------------------------------------------------------
3674  *      0            |     0    |           src
3675  *      1            |     3    |           src
3676  *      2            |     4    |           src
3677  *      3            |     5    |           src
3678  *      4            |     7    |           src
3679  * -----------------------------------------------------------
3680  *      5            |     1    |           dst
3681  *      6            |     2    |           dst
3682  *      7            |     7    |           dst
3683  *      8            |     8    |           dst
3684  * -----------------------------------------------------------
3685  *      9     No Config - Doesn't point to anything
3686  *      12    No Config - Doesn't point to anything
3687  *      13    No Config - Doesn't point to anything
3688  *      14    No Config - Doesn't point to anything
3689  *      15    No Config - Doesn't point to anything
3690  *      16    No Config - Doesn't point to anything
3691  *      17    No Config - Doesn't point to anything
3692  *      18    No Config - Doesn't point to anything
3693  *      19    No Config - Doesn't point to anything
3694  *      20    No Config - Doesn't point to anything
3695  *      21    No Config - Doesn't point to anything
3696  *      22    No Config - Doesn't point to anything
3697  *      23    No Config - Doesn't point to anything
3698  * -----------------------------------------------------------
3699 */
3700 #ifndef QCN7605_SUPPORT
3701 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3702 {
3703 	u32 addr = 0;
3704 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3705 
3706 	switch (ce) {
3707 	case 0:
3708 		addr = SHADOW_VALUE0;
3709 		break;
3710 	case 3:
3711 		addr = SHADOW_VALUE3;
3712 		break;
3713 	case 4:
3714 		addr = SHADOW_VALUE4;
3715 		break;
3716 	case 5:
3717 		addr = SHADOW_VALUE5;
3718 		break;
3719 	case 7:
3720 		addr = SHADOW_VALUE7;
3721 		break;
3722 	default:
3723 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3724 		QDF_ASSERT(0);
3725 	}
3726 	return addr;
3727 
3728 }
3729 
3730 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3731 {
3732 	u32 addr = 0;
3733 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3734 
3735 	switch (ce) {
3736 	case 1:
3737 		addr = SHADOW_VALUE13;
3738 		break;
3739 	case 2:
3740 		addr = SHADOW_VALUE14;
3741 		break;
3742 	case 5:
3743 		addr = SHADOW_VALUE17;
3744 		break;
3745 	case 7:
3746 		addr = SHADOW_VALUE19;
3747 		break;
3748 	case 8:
3749 		addr = SHADOW_VALUE20;
3750 		break;
3751 	case 9:
3752 		addr = SHADOW_VALUE21;
3753 		break;
3754 	case 10:
3755 		addr = SHADOW_VALUE22;
3756 		break;
3757 	case 11:
3758 		addr = SHADOW_VALUE23;
3759 		break;
3760 	default:
3761 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3762 		QDF_ASSERT(0);
3763 	}
3764 
3765 	return addr;
3766 
3767 }
3768 #else
3769 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3770 {
3771 	u32 addr = 0;
3772 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3773 
3774 	switch (ce) {
3775 	case 0:
3776 		addr = SHADOW_VALUE0;
3777 		break;
3778 	case 4:
3779 		addr = SHADOW_VALUE4;
3780 		break;
3781 	case 5:
3782 		addr = SHADOW_VALUE5;
3783 		break;
3784 	default:
3785 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3786 		QDF_ASSERT(0);
3787 	}
3788 	return addr;
3789 }
3790 
3791 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3792 {
3793 	u32 addr = 0;
3794 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3795 
3796 	switch (ce) {
3797 	case 1:
3798 		addr = SHADOW_VALUE13;
3799 		break;
3800 	case 2:
3801 		addr = SHADOW_VALUE14;
3802 		break;
3803 	case 3:
3804 		addr = SHADOW_VALUE15;
3805 		break;
3806 	case 5:
3807 		addr = SHADOW_VALUE17;
3808 		break;
3809 	case 7:
3810 		addr = SHADOW_VALUE19;
3811 		break;
3812 	case 8:
3813 		addr = SHADOW_VALUE20;
3814 		break;
3815 	case 9:
3816 		addr = SHADOW_VALUE21;
3817 		break;
3818 	case 10:
3819 		addr = SHADOW_VALUE22;
3820 		break;
3821 	case 11:
3822 		addr = SHADOW_VALUE23;
3823 		break;
3824 	default:
3825 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3826 		QDF_ASSERT(0);
3827 	}
3828 
3829 	return addr;
3830 }
3831 #endif
3832 #endif
3833 
3834 #if defined(FEATURE_LRO)
3835 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
3836 {
3837 	struct CE_state *ce_state;
3838 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3839 
3840 	ce_state = scn->ce_id_to_state[ctx_id];
3841 
3842 	return ce_state->lro_data;
3843 }
3844 #endif
3845 
3846 /**
3847  * hif_map_service_to_pipe() - returns  the ce ids pertaining to
3848  * this service
3849  * @scn: hif_softc pointer.
3850  * @svc_id: Service ID for which the mapping is needed.
3851  * @ul_pipe: address of the container in which ul pipe is returned.
3852  * @dl_pipe: address of the container in which dl pipe is returned.
3853  * @ul_is_polled: address of the container in which a bool
3854  *			indicating if the UL CE for this service
3855  *			is polled is returned.
3856  * @dl_is_polled: address of the container in which a bool
3857  *			indicating if the DL CE for this service
3858  *			is polled is returned.
3859  *
3860  * Return: Indicates whether the service has been found in the table.
3861  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
3862  *         There will be warning logs if either leg has not been updated
3863  *         because it missed the entry in the table (but this is not an err).
3864  */
3865 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
3866 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
3867 			int *dl_is_polled)
3868 {
3869 	int status = QDF_STATUS_E_INVAL;
3870 	unsigned int i;
3871 	struct service_to_pipe element;
3872 	struct service_to_pipe *tgt_svc_map_to_use;
3873 	uint32_t sz_tgt_svc_map_to_use;
3874 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3875 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3876 	bool dl_updated = false;
3877 	bool ul_updated = false;
3878 
3879 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
3880 				       &sz_tgt_svc_map_to_use);
3881 
3882 	*dl_is_polled = 0;  /* polling for received messages not supported */
3883 
3884 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
3885 
3886 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
3887 		if (element.service_id == svc_id) {
3888 			if (element.pipedir == PIPEDIR_OUT) {
3889 				*ul_pipe = element.pipenum;
3890 				*ul_is_polled =
3891 					(hif_state->host_ce_config[*ul_pipe].flags &
3892 					 CE_ATTR_DISABLE_INTR) != 0;
3893 				ul_updated = true;
3894 			} else if (element.pipedir == PIPEDIR_IN) {
3895 				*dl_pipe = element.pipenum;
3896 				dl_updated = true;
3897 			}
3898 			status = QDF_STATUS_SUCCESS;
3899 		}
3900 	}
3901 	if (ul_updated == false)
3902 		HIF_DBG("ul pipe is NOT updated for service %d", svc_id);
3903 	if (dl_updated == false)
3904 		HIF_DBG("dl pipe is NOT updated for service %d", svc_id);
3905 
3906 	return status;
3907 }
3908 
3909 #ifdef SHADOW_REG_DEBUG
3910 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
3911 		uint32_t CE_ctrl_addr)
3912 {
3913 	uint32_t read_from_hw, srri_from_ddr = 0;
3914 
3915 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
3916 
3917 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3918 
3919 	if (read_from_hw != srri_from_ddr) {
3920 		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3921 		       __func__, srri_from_ddr, read_from_hw,
3922 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3923 		QDF_ASSERT(0);
3924 	}
3925 	return srri_from_ddr;
3926 }
3927 
3928 
3929 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
3930 		uint32_t CE_ctrl_addr)
3931 {
3932 	uint32_t read_from_hw, drri_from_ddr = 0;
3933 
3934 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
3935 
3936 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3937 
3938 	if (read_from_hw != drri_from_ddr) {
3939 		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3940 		       drri_from_ddr, read_from_hw,
3941 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3942 		QDF_ASSERT(0);
3943 	}
3944 	return drri_from_ddr;
3945 }
3946 
3947 #endif
3948 
3949 /**
3950  * hif_dump_ce_registers() - dump ce registers
3951  * @scn: hif_opaque_softc pointer.
3952  *
3953  * Output the copy engine registers
3954  *
3955  * Return: 0 for success or error code
3956  */
3957 int hif_dump_ce_registers(struct hif_softc *scn)
3958 {
3959 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3960 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
3961 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
3962 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
3963 	uint16_t i;
3964 	QDF_STATUS status;
3965 
3966 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
3967 		if (!scn->ce_id_to_state[i]) {
3968 			HIF_DBG("CE%d not used.", i);
3969 			continue;
3970 		}
3971 
3972 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
3973 					   (uint8_t *) &ce_reg_values[0],
3974 					   ce_reg_word_size * sizeof(uint32_t));
3975 
3976 		if (status != QDF_STATUS_SUCCESS) {
3977 			HIF_ERROR("Dumping CE register failed!");
3978 			return -EACCES;
3979 		}
3980 		HIF_ERROR("CE%d=>\n", i);
3981 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
3982 				   (uint8_t *) &ce_reg_values[0],
3983 				   ce_reg_word_size * sizeof(uint32_t));
3984 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
3985 				+ SR_WR_INDEX_ADDRESS),
3986 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
3987 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
3988 				+ CURRENT_SRRI_ADDRESS),
3989 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
3990 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
3991 				+ DST_WR_INDEX_ADDRESS),
3992 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
3993 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
3994 				+ CURRENT_DRRI_ADDRESS),
3995 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
3996 		qdf_print("---");
3997 	}
3998 	return 0;
3999 }
4000 qdf_export_symbol(hif_dump_ce_registers);
4001 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
4002 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
4003 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
4004 {
4005 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4006 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
4007 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
4008 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
4009 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
4010 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
4011 	struct CE_ring_state *src_ring = ce_state->src_ring;
4012 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
4013 
4014 	if (src_ring) {
4015 		hif_info->ul_pipe.nentries = src_ring->nentries;
4016 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
4017 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
4018 		hif_info->ul_pipe.write_index = src_ring->write_index;
4019 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
4020 		hif_info->ul_pipe.base_addr_CE_space =
4021 			src_ring->base_addr_CE_space;
4022 		hif_info->ul_pipe.base_addr_owner_space =
4023 			src_ring->base_addr_owner_space;
4024 	}
4025 
4026 
4027 	if (dest_ring) {
4028 		hif_info->dl_pipe.nentries = dest_ring->nentries;
4029 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
4030 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
4031 		hif_info->dl_pipe.write_index = dest_ring->write_index;
4032 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
4033 		hif_info->dl_pipe.base_addr_CE_space =
4034 			dest_ring->base_addr_CE_space;
4035 		hif_info->dl_pipe.base_addr_owner_space =
4036 			dest_ring->base_addr_owner_space;
4037 	}
4038 
4039 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
4040 	hif_info->ctrl_addr = ce_state->ctrl_addr;
4041 
4042 	return hif_info;
4043 }
4044 qdf_export_symbol(hif_get_addl_pipe_info);
4045 
4046 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
4047 {
4048 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4049 
4050 	scn->nss_wifi_ol_mode = mode;
4051 	return 0;
4052 }
4053 qdf_export_symbol(hif_set_nss_wifiol_mode);
4054 #endif
4055 
4056 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
4057 {
4058 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4059 	scn->hif_attribute = hif_attrib;
4060 }
4061 
4062 
4063 /* disable interrupts (only applicable for legacy copy engine currently */
4064 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
4065 {
4066 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4067 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
4068 	uint32_t ctrl_addr = CE_state->ctrl_addr;
4069 
4070 	Q_TARGET_ACCESS_BEGIN(scn);
4071 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
4072 	Q_TARGET_ACCESS_END(scn);
4073 }
4074 qdf_export_symbol(hif_disable_interrupt);
4075 
4076 /**
4077  * hif_fw_event_handler() - hif fw event handler
4078  * @hif_state: pointer to hif ce state structure
4079  *
4080  * Process fw events and raise HTC callback to process fw events.
4081  *
4082  * Return: none
4083  */
4084 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
4085 {
4086 	struct hif_msg_callbacks *msg_callbacks =
4087 		&hif_state->msg_callbacks_current;
4088 
4089 	if (!msg_callbacks->fwEventHandler)
4090 		return;
4091 
4092 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
4093 			QDF_STATUS_E_FAILURE);
4094 }
4095 
4096 #ifndef QCA_WIFI_3_0
4097 /**
4098  * hif_fw_interrupt_handler() - FW interrupt handler
4099  * @irq: irq number
4100  * @arg: the user pointer
4101  *
4102  * Called from the PCI interrupt handler when a
4103  * firmware-generated interrupt to the Host.
4104  *
4105  * only registered for legacy ce devices
4106  *
4107  * Return: status of handled irq
4108  */
4109 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
4110 {
4111 	struct hif_softc *scn = arg;
4112 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4113 	uint32_t fw_indicator_address, fw_indicator;
4114 
4115 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
4116 		return ATH_ISR_NOSCHED;
4117 
4118 	fw_indicator_address = hif_state->fw_indicator_address;
4119 	/* For sudden unplug this will return ~0 */
4120 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
4121 
4122 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
4123 		/* ACK: clear Target-side pending event */
4124 		A_TARGET_WRITE(scn, fw_indicator_address,
4125 			       fw_indicator & ~FW_IND_EVENT_PENDING);
4126 		if (Q_TARGET_ACCESS_END(scn) < 0)
4127 			return ATH_ISR_SCHED;
4128 
4129 		if (hif_state->started) {
4130 			hif_fw_event_handler(hif_state);
4131 		} else {
4132 			/*
4133 			 * Probable Target failure before we're prepared
4134 			 * to handle it.  Generally unexpected.
4135 			 * fw_indicator used as bitmap, and defined as below:
4136 			 *     FW_IND_EVENT_PENDING    0x1
4137 			 *     FW_IND_INITIALIZED      0x2
4138 			 *     FW_IND_NEEDRECOVER      0x4
4139 			 */
4140 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
4141 				("%s: Early firmware event indicated 0x%x\n",
4142 				 __func__, fw_indicator));
4143 		}
4144 	} else {
4145 		if (Q_TARGET_ACCESS_END(scn) < 0)
4146 			return ATH_ISR_SCHED;
4147 	}
4148 
4149 	return ATH_ISR_SCHED;
4150 }
4151 #else
4152 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
4153 {
4154 	return ATH_ISR_SCHED;
4155 }
4156 #endif /* #ifdef QCA_WIFI_3_0 */
4157 
4158 
4159 /**
4160  * hif_wlan_disable(): call the platform driver to disable wlan
4161  * @scn: HIF Context
4162  *
4163  * This function passes the con_mode to platform driver to disable
4164  * wlan.
4165  *
4166  * Return: void
4167  */
4168 void hif_wlan_disable(struct hif_softc *scn)
4169 {
4170 	enum pld_driver_mode mode;
4171 	uint32_t con_mode = hif_get_conparam(scn);
4172 
4173 	if (scn->target_status == TARGET_STATUS_RESET)
4174 		return;
4175 
4176 	if (QDF_GLOBAL_FTM_MODE == con_mode)
4177 		mode = PLD_FTM;
4178 	else if (QDF_IS_EPPING_ENABLED(con_mode))
4179 		mode = PLD_EPPING;
4180 	else
4181 		mode = PLD_MISSION;
4182 
4183 	pld_wlan_disable(scn->qdf_dev->dev, mode);
4184 }
4185 
4186 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
4187 {
4188 	QDF_STATUS status;
4189 	uint8_t ul_pipe, dl_pipe;
4190 	int ul_is_polled, dl_is_polled;
4191 
4192 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
4193 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
4194 					 HTC_CTRL_RSVD_SVC,
4195 					 &ul_pipe, &dl_pipe,
4196 					 &ul_is_polled, &dl_is_polled);
4197 	if (status) {
4198 		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
4199 		return qdf_status_to_os_return(status);
4200 	}
4201 
4202 	*ce_id = dl_pipe;
4203 
4204 	return 0;
4205 }
4206 
4207 #ifdef HIF_CE_LOG_INFO
4208 /**
4209  * ce_get_index_info(): Get CE index info
4210  * @scn: HIF Context
4211  * @ce_state: CE opaque handle
4212  * @info: CE info
4213  *
4214  * Return: 0 for success and non zero for failure
4215  */
4216 static
4217 int ce_get_index_info(struct hif_softc *scn, void *ce_state,
4218 		      struct ce_index *info)
4219 {
4220 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4221 
4222 	return hif_state->ce_services->ce_get_index_info(scn, ce_state, info);
4223 }
4224 
4225 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
4226 		     unsigned int *offset)
4227 {
4228 	struct hang_event_info info = {0};
4229 	static uint32_t tracked_ce = BIT(CE_ID_1) | BIT(CE_ID_2) |
4230 		BIT(CE_ID_3) | BIT(CE_ID_4) | BIT(CE_ID_9) | BIT(CE_ID_10);
4231 	uint8_t curr_index = 0;
4232 	uint8_t i;
4233 	uint16_t size;
4234 
4235 	info.active_tasklet_count = qdf_atomic_read(&scn->active_tasklet_cnt);
4236 	info.active_grp_tasklet_cnt =
4237 				qdf_atomic_read(&scn->active_grp_tasklet_cnt);
4238 
4239 	for (i = 0; i < scn->ce_count; i++) {
4240 		if (!(tracked_ce & BIT(i)) || !scn->ce_id_to_state[i])
4241 			continue;
4242 
4243 		if (ce_get_index_info(scn, scn->ce_id_to_state[i],
4244 				      &info.ce_info[curr_index]))
4245 			continue;
4246 
4247 		curr_index++;
4248 	}
4249 
4250 	info.ce_count = curr_index;
4251 	size = sizeof(info) -
4252 		(CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index);
4253 
4254 	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_CE_INFO,
4255 			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);
4256 
4257 	qdf_mem_copy(data + *offset, &info, size);
4258 	*offset = *offset + size;
4259 }
4260 #endif
4261