xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision 45a38684b07295822dc8eba39e293408f203eec8)
/*
 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "ce_api.h"
#include "qdf_trace.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_assignment.h"
#include "ce_tasklet.h"
#include "qdf_module.h"

#define CE_POLL_TIMEOUT 10      /* ms */

#define AGC_DUMP         1
#define CHANINFO_DUMP    2
#define BB_WATCHDOG_DUMP 3
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
#define PCIE_ACCESS_DUMP 4
#endif
#include "mp_dev.h"
#ifdef HIF_CE_LOG_INFO
#include "qdf_hang_event_notifier.h"
#endif

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
	defined(QCA_WIFI_QCA6018) || defined(QCA_WIFI_QCA5018)) && \
	!defined(QCA_WIFI_SUPPORT_SRNG)
#define QCA_WIFI_SUPPORT_SRNG
#endif

/* Forward references */
QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);

/*
 * Fix EV118783: poll to check whether a BMI response has arrived,
 * rather than waiting for an interrupt that may be lost.
 */
/* #define BMI_RSP_POLLING */
#define BMI_RSP_TO_MILLISEC  1000

#ifdef CONFIG_BYPASS_QMI
#define BYPASS_QMI 1
#else
#define BYPASS_QMI 0
#endif

#ifdef ENABLE_10_4_FW_HDR
#if (ENABLE_10_4_FW_HDR == 1)
#define WDI_IPA_SERVICE_GROUP 5
#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
#define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif /* ENABLE_10_4_FW_HDR == 1 */
#endif /* ENABLE_10_4_FW_HDR */

QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);

/**
 * hif_target_access_log_dump() - dump the target access log
 *
 * Dump the recorded history of target register accesses; only
 * available when CONFIG_ATH_PCIE_ACCESS_DEBUG is defined.
 *
 * Return: n/a
 */
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
static void hif_target_access_log_dump(void)
{
	hif_target_dump_access_log();
}
#endif

void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
		      uint8_t cmd_id, bool start)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	switch (cmd_id) {
	case AGC_DUMP:
		if (start)
			priv_start_agc(scn);
		else
			priv_dump_agc(scn);
		break;
	case CHANINFO_DUMP:
		if (start)
			priv_start_cap_chaninfo(scn);
		else
			priv_dump_chaninfo(scn);
		break;
	case BB_WATCHDOG_DUMP:
		priv_dump_bbwatchdog(scn);
		break;
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
	case PCIE_ACCESS_DUMP:
		hif_target_access_log_dump();
		break;
#endif
	default:
		HIF_ERROR("%s: Invalid htc dump command", __func__);
		break;
	}
}

static void ce_poll_timeout(void *arg)
{
	struct CE_state *CE_state = (struct CE_state *)arg;

	if (CE_state->timer_inited) {
		ce_per_engine_service(CE_state->scn, CE_state->id);
		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
	}
}

static unsigned int roundup_pwr2(unsigned int n)
{
	int i;
	unsigned int test_pwr2;

	if (!(n & (n - 1)))
		return n; /* already a power of 2 */

	test_pwr2 = 4;
	for (i = 0; i < 29; i++) {
		if (test_pwr2 > n)
			return test_pwr2;
		test_pwr2 = test_pwr2 << 1;
	}

	QDF_ASSERT(0); /* n too large */
	return 0;
}
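
/*
 * Example (illustrative note, not from the original source): roundup_pwr2()
 * returns the smallest power of two >= n, starting the search at 4:
 *
 *	roundup_pwr2(32)  == 32		already a power of 2
 *	roundup_pwr2(100) == 128	4 -> 8 -> 16 -> 32 -> 64 -> 128
 *
 * CE ring sizes are rounded up this way so that "nentries - 1" can serve
 * as an index mask (see nentries_mask in ce_alloc_ring_state() below).
 */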

#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40

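/*
 * Added note (an assumption based on the names above): each entry in the
 * tables below pairs a CE number with the offset of the shadowed
 * write-index register for that CE -- the source-ring write index for
 * host->target CEs and the destination-ring write index for target->host
 * CEs.
 */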
static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
#ifdef QCA_WIFI_3_0_ADRASTEA
	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
#endif
};

#ifdef QCN7605_SUPPORT
static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

#ifdef WLAN_FEATURE_EPPING
static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
};
#endif

/* CE_PCI TABLE */
/*
 * NOTE: the table below is out of date, though still a useful reference.
 * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
 * mapping of HTC services to HIF pipes.
 */
/*
 * This authoritative table defines Copy Engine configuration and the mapping
 * of services/endpoints to CEs.  A subset of this information is passed to
 * the Target during startup as a prerequisite to entering BMI phase.
 * See:
 *    target_service_to_ce_map - Target-side mapping
 *    hif_map_service_to_pipe  - Host-side mapping
 *    target_ce_config         - Target-side configuration
 *    host_ce_config           - Host-side configuration
   ============================================================================
   Purpose    | Service / Endpoint   | CE   | Dir  | Xfer     | Xfer
              |                      |      |      | Size     | Frequency
   ============================================================================
   tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
   descriptor |                      |      |      | O(100B)  | and regular
   download   |                      |      |      |          |
   ----------------------------------------------------------------------------
   rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
   indication |                      |      |      | O(10B)   | regular
   upload     |                      |      |      |          |
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
   upload     |                      |      |      | O(1000B) | (frequent
   e.g. noise |                      |      |      |          | during IP1.0
   packets    |                      |      |      |          | testing)
   ----------------------------------------------------------------------------
   MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
   download   |                      |      |      | O(1000B) | (frequent
   e.g.       |                      |      |      |          | during IP1.0
   misdirected|                      |      |      |          | testing)
   EAPOL      |                      |      |      |          |
   packets    |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
              | DATA_VO (uplink)     |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
              | DATA_VO (downlink)   |      |      |          |
   ----------------------------------------------------------------------------
   WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
              |                      |      |      | O(100B)  |
   ----------------------------------------------------------------------------
   WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
   messages   | (downlink)           |      |      | O(100B)  |
              |                      |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (uplink)             |      |      |          |
   ----------------------------------------------------------------------------
   n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
              | HTC_RAW_STREAMS      |      |      |          |
              | (downlink)           |      |      |          |
   ----------------------------------------------------------------------------
   diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
              |                      |      |      |          | infrequent
   ============================================================================
 */

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		WDI_IPA_TX_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		5,
	},
#if defined(QCA_WIFI_3_0_ADRASTEA)
	{
		HTT_DATA2_MSG_SVC,
		PIPEDIR_IN,    /* in = DL = target -> host */
		9,
	},
	{
		HTT_DATA3_MSG_SVC,
		PIPEDIR_IN,    /* in = DL = target -> host */
		10,
	},
	{
		PACKET_LOG_SVC,
		PIPEDIR_IN,    /* in = DL = target -> host */
		11,
	},
#endif
	/* (Additions here) */

	{                       /* Must be last */
		0,
		0,
		0,
	},
};

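/*
 * Added note (illustrative): each entry in these maps is read as
 * { service id, direction, CE/pipe number }, e.g.
 *
 *	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, }
 *
 * means host->target traffic for WMI_DATA_VO_SVC is carried on pipe 3.
 */
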
/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#if (defined(QCA_WIFI_QCA8074))
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
};
#endif

#if (defined(QCA_WIFI_QCA8074V2))
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6018))
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
};
#endif

#if (defined(QCA_WIFI_QCN9000))
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
};
#endif

#if (defined(QCA_WIFI_QCA5018))
static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca5018[] = {
};
#endif

/* PIPEDIR_OUT = HOST to Target */
/* PIPEDIR_IN  = TARGET to HOST */
#ifdef QCN7605_SUPPORT
static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
#ifdef IPA_OFFLOAD
	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
#else
	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
#endif
	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif

#if (defined(QCA_WIFI_QCA6290))
#ifdef QCA_6290_AP_MODE
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#endif
#else
static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
};
#endif

#if (defined(QCA_WIFI_QCA6390))
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};

#if (defined(QCA_WIFI_QCA6750))
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
	/* (Additions here) */
	{ 0, 0, 0, },
};
#else
static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
};
#endif

static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VO_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BK_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_BE_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_DATA_VI_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		3,
	},
	{
		WMI_CONTROL_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		2,
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,              /* could be moved to 3 (share with WMI) */
	},
	{
		HTC_CTRL_RSVD_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_OUT,    /* out = UL = host -> target */
		0,
	},
	{
		HTC_RAW_STREAMS_SVC, /* not currently used */
		PIPEDIR_IN,     /* in = DL = target -> host */
		1,
	},
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_OUT,    /* out = UL = host -> target */
		4,
	},
#ifdef WLAN_FEATURE_FASTPATH
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,     /* in = DL = target -> host */
		5,
	},
#else /* WLAN_FEATURE_FASTPATH */
	{
		HTT_DATA_MSG_SVC,
		PIPEDIR_IN,  /* in = DL = target -> host */
		1,
	},
#endif /* WLAN_FEATURE_FASTPATH */

	/* (Additions here) */

	{                       /* Must be last */
		0,
		0,
		0,
	},
};

static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);

#ifdef WLAN_FEATURE_EPPING
static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
	{0, 0, 0,},             /* Must be last */
};

void hif_select_epping_service_to_pipe_map(struct service_to_pipe
					   **tgt_svc_map_to_use,
					   uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
	*sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
}
#endif

#ifdef QCN7605_SUPPORT
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
}
#else
static inline
void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
			       uint32_t *sz_tgt_svc_map_to_use)
{
	HIF_ERROR("%s: QCN7605 not supported", __func__);
}
#endif

static void hif_select_service_to_pipe_map(struct hif_softc *scn,
				    struct service_to_pipe **tgt_svc_map_to_use,
				    uint32_t *sz_tgt_svc_map_to_use)
{
	uint32_t mode = hif_get_conparam(scn);
	struct hif_target_info *tgt_info = &scn->target_info;

	if (QDF_IS_EPPING_ENABLED(mode)) {
		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
						      sz_tgt_svc_map_to_use);
	} else {
		switch (tgt_info->target_type) {
		default:
			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_wlan);
			break;
		case TARGET_TYPE_QCN7605:
			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
						  sz_tgt_svc_map_to_use);
			break;
		case TARGET_TYPE_AR900B:
		case TARGET_TYPE_QCA9984:
		case TARGET_TYPE_IPQ4019:
		case TARGET_TYPE_QCA9888:
		case TARGET_TYPE_AR9888:
		case TARGET_TYPE_AR9888V2:
			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_ar900b);
			break;
		case TARGET_TYPE_QCA6290:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6290);
			break;
		case TARGET_TYPE_QCA6390:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6390);
			break;
		case TARGET_TYPE_QCA6490:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6490);
			break;
		case TARGET_TYPE_QCA6750:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca6750;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6750);
			break;
		case TARGET_TYPE_QCA8074:
			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074);
			break;
		case TARGET_TYPE_QCA8074V2:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca8074_v2;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca8074_v2);
			break;
		case TARGET_TYPE_QCA6018:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca6018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca6018);
			break;
		case TARGET_TYPE_QCN9000:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qcn9000;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qcn9000);
			break;
		case TARGET_TYPE_QCA5018:
			*tgt_svc_map_to_use =
				target_service_to_ce_map_qca5018;
			*sz_tgt_svc_map_to_use =
				sizeof(target_service_to_ce_map_qca5018);
			break;
		}
	}
}

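/*
 * Added note: the size returned through sz_tgt_svc_map_to_use above is in
 * bytes; callers such as ce_mark_datapath() below divide it by
 * sizeof(struct service_to_pipe) to get the number of table entries.
 */
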
/**
 * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
 * @ce_state: pointer to the state context of the CE
 *
 * Description:
 *   Sets the htt_rx_data or htt_tx_data attribute of the state structure
 *   if the CE serves one of the HTT DATA services.
 *
 * Return: true if the CE serves an HTT DATA service, false otherwise
 */
static bool ce_mark_datapath(struct CE_state *ce_state)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len;
	int    i;
	bool   rc = false;

	if (ce_state) {
		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
					       &map_sz);

		map_len = map_sz / sizeof(struct service_to_pipe);
		for (i = 0; i < map_len; i++) {
			if ((svc_map[i].pipenum == ce_state->id) &&
			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
				/* HTT CEs are unidirectional */
				if (svc_map[i].pipedir == PIPEDIR_IN)
					ce_state->htt_rx_data = true;
				else
					ce_state->htt_tx_data = true;
				rc = true;
			}
		}
	}
	return rc;
}

/**
 * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
 * @ce_id: ce in question
 * @ring: ring state being examined
 * @type: "src_ring" or "dest_ring" string for identifying the ring
 *
 * Warns on non-zero index values.
 * Causes a kernel panic if the ring is not empty during initialization.
 */
static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
					 char *type)
{
	if (ring->write_index != 0 || ring->sw_index != 0)
		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index = %d",
			  ce_id, type, ring->sw_index, ring->write_index);
	if (ring->write_index != ring->sw_index)
		QDF_BUG(0);
}

#ifdef IPA_OFFLOAD
/**
 * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @base_addr: pointer to copyengine ring base address
 * @ce_ring: copyengine instance
 * @nentries: number of entries to allocate
 * @desc_size: ce desc size
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (!scn->ipa_ce_ring) {
			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
				scn->qdf_dev,
				nentries * desc_size + CE_DESC_RING_ALIGN);
			if (!scn->ipa_ce_ring) {
				HIF_ERROR(
				"%s: Failed to allocate memory for IPA ce ring",
				__func__);
				return QDF_STATUS_E_NOMEM;
			}
		}
		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
						&scn->ipa_ce_ring->mem_info);
		ce_ring->base_addr_owner_space_unaligned =
						scn->ipa_ce_ring->vaddr;
	} else {
		ce_ring->base_addr_owner_space_unaligned =
			qdf_mem_alloc_consistent(scn->qdf_dev,
						 scn->qdf_dev->dev,
						 (nentries * desc_size +
						 CE_DESC_RING_ALIGN),
						 base_addr);
		if (!ce_ring->base_addr_owner_space_unaligned) {
			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
				  __func__, CE_id);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * ce_free_desc_ring() - Frees copyengine descriptor ring
 * @scn: softc instance
 * @CE_id: ce in question
 * @ce_ring: copyengine instance
 * @desc_size: ce desc size
 *
 * Return: None
 */
static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
	    !ce_srng_based(scn)) {
		if (scn->ipa_ce_ring) {
			qdf_mem_shared_mem_free(scn->qdf_dev,
						scn->ipa_ce_ring);
			scn->ipa_ce_ring = NULL;
		}
		ce_ring->base_addr_owner_space_unaligned = NULL;
	} else {
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
			ce_ring->base_addr_owner_space_unaligned,
			ce_ring->base_addr_CE_space, 0);
		ce_ring->base_addr_owner_space_unaligned = NULL;
	}
}
#else
static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
				     qdf_dma_addr_t *base_addr,
				     struct CE_ring_state *ce_ring,
				     unsigned int nentries, uint32_t desc_size)
{
	ce_ring->base_addr_owner_space_unaligned =
		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					 (nentries * desc_size +
					 CE_DESC_RING_ALIGN), base_addr);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
			  __func__, CE_id);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
			      struct CE_ring_state *ce_ring, uint32_t desc_size)
{
	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
		ce_ring->base_addr_owner_space_unaligned,
		ce_ring->base_addr_CE_space, 0);
	ce_ring->base_addr_owner_space_unaligned = NULL;
}
#endif /* IPA_OFFLOAD */

/*
 * TODO: Need to explore the possibility of having this as part of a
 * target context instead of a global array.
 */
static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);

void ce_service_register_module(enum ce_target_type target_type,
				struct ce_ops* (*ce_attach)(void))
{
	if (target_type < CE_MAX_TARGET_TYPE)
		ce_attach_register[target_type] = ce_attach;
}

qdf_export_symbol(ce_service_register_module);

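/*
 * Example (illustrative; names are hypothetical, not from this driver):
 * a CE service implementation registers its attach routine, typically at
 * module init time, e.g.
 *
 *	static struct ce_ops *my_ce_attach(void)
 *	{
 *		return &my_ce_ops;
 *	}
 *
 *	ce_service_register_module(CE_SVC_LEGACY, my_ce_attach);
 *
 * ce_services_attach() below then selects the CE_SVC_SRNG or CE_SVC_LEGACY
 * entry based on ce_srng_based().
 */
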
/**
 * ce_srng_based() - Does this target use srng
 * @scn: pointer to the hif context
 *
 * Description:
 *   returns true if the target is SRNG based
 *
 * Return: true for SRNG based targets, false otherwise
 */
bool ce_srng_based(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);

	switch (tgt_info->target_type) {
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6290:
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCN9000:
	case TARGET_TYPE_QCA5018:
		return true;
	default:
		return false;
	}
	return false;
}
qdf_export_symbol(ce_srng_based);

#ifdef QCA_WIFI_SUPPORT_SRNG
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	struct ce_ops *ops = NULL;

	if (ce_srng_based(scn)) {
		if (ce_attach_register[CE_SVC_SRNG])
			ops = ce_attach_register[CE_SVC_SRNG]();
	} else if (ce_attach_register[CE_SVC_LEGACY]) {
		ops = ce_attach_register[CE_SVC_LEGACY]();
	}

	return ops;
}

#else /* !QCA_WIFI_SUPPORT_SRNG */
static struct ce_ops *ce_services_attach(struct hif_softc *scn)
{
	if (ce_attach_register[CE_SVC_LEGACY])
		return ce_attach_register[CE_SVC_LEGACY]();

	return NULL;
}
#endif /* QCA_WIFI_SUPPORT_SRNG */

static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
		struct pld_shadow_reg_v2_cfg **shadow_config,
		int *num_shadow_registers_configured)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
			scn, shadow_config, num_shadow_registers_configured);
}

static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
						uint8_t ring_type)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_get_desc_size(ring_type);
}

static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
		uint8_t ring_type, uint32_t nentries)
{
	uint32_t ce_nbytes;
	char *ptr;
	qdf_dma_addr_t base_addr;
	struct CE_ring_state *ce_ring;
	uint32_t desc_size;
	struct hif_softc *scn = CE_state->scn;

	ce_nbytes = sizeof(struct CE_ring_state)
		+ (nentries * sizeof(void *));
	ptr = qdf_mem_malloc(ce_nbytes);
	if (!ptr)
		return NULL;

	ce_ring = (struct CE_ring_state *)ptr;
	ptr += sizeof(struct CE_ring_state);
	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	ce_ring->low_water_mark_nentries = 0;
	ce_ring->high_water_mark_nentries = nentries;
	ce_ring->per_transfer_context = (void **)ptr;

	desc_size = ce_get_desc_size(scn, ring_type);

	/* Legacy platforms that do not support cache-coherent
	 * DMA are not supported
	 */
	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
			       ce_ring, nentries,
			       desc_size) !=
	    QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: ring has no DMA mem",
				__func__);
		qdf_mem_free(ce_ring);
		return NULL;
	}
	ce_ring->base_addr_CE_space_unaligned = base_addr;

	/* Initialize the memory to 0 to prevent stale/garbage
	 * data from crashing the system during firmware download
	 */
	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
			nentries * desc_size +
			CE_DESC_RING_ALIGN);

	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {

		ce_ring->base_addr_CE_space =
			(ce_ring->base_addr_CE_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);

		ce_ring->base_addr_owner_space = (void *)
			(((size_t) ce_ring->base_addr_owner_space_unaligned +
			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
	} else {
		ce_ring->base_addr_CE_space =
				ce_ring->base_addr_CE_space_unaligned;
		ce_ring->base_addr_owner_space =
				ce_ring->base_addr_owner_space_unaligned;
	}

	return ce_ring;
}

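/*
 * Added note (illustrative arithmetic): the fix-up in ce_alloc_ring_state()
 * above rounds the ring base up to the next CE_DESC_RING_ALIGN boundary:
 *
 *	aligned = (unaligned + ALIGN - 1) & ~(ALIGN - 1);
 *
 * e.g. assuming ALIGN == 8, an unaligned address of 0x1006 becomes 0x1008.
 * The extra CE_DESC_RING_ALIGN bytes included in the allocation guarantee
 * that the aligned base still leaves room for nentries descriptors.
 */
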
static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
			uint32_t ce_id, struct CE_ring_state *ring,
			struct CE_attr *attr)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
					      ring, attr);
}

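/*
 * Added summary comment: hif_ce_bus_early_suspend() pauses every CE except
 * the WMI control pipes; hif_ce_bus_late_resume() restores them and, for
 * CEs left in CE_PENDING, writes the cached source-ring write index to
 * hardware so that work queued while suspended gets flushed.
 */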
int hif_ce_bus_early_suspend(struct hif_softc *scn)
{
	uint8_t ul_pipe, dl_pipe;
	int ce_id, status, ul_is_polled, dl_is_polled;
	struct CE_state *ce_state;

	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
					 &ul_pipe, &dl_pipe,
					 &ul_is_polled, &dl_is_polled);
	if (status) {
		HIF_ERROR("%s: pipe_mapping failure", __func__);
		return status;
	}

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (ce_id == ul_pipe)
			continue;
		if (ce_id == dl_pipe)
			continue;

		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_RUNNING)
			ce_state->state = CE_PAUSED;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
	}

	return status;
}

int hif_ce_bus_late_resume(struct hif_softc *scn)
{
	int ce_id;
	struct CE_state *ce_state;
	int write_index = 0;
	bool index_updated;

	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		ce_state = scn->ce_id_to_state[ce_id];
		qdf_spin_lock_bh(&ce_state->ce_index_lock);
		if (ce_state->state == CE_PENDING) {
			write_index = ce_state->src_ring->write_index;
			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
					write_index);
			ce_state->state = CE_RUNNING;
			index_updated = true;
		} else {
			index_updated = false;
		}

		if (ce_state->state == CE_PAUSED)
			ce_state->state = CE_RUNNING;
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		if (index_updated)
			hif_record_ce_desc_event(scn, ce_id,
				RESUME_WRITE_INDEX_UPDATE,
				NULL, NULL, write_index, 0);
	}

	return 0;
}

/**
 * ce_oom_recovery() - try to recover rx ce from oom condition
 * @context: CE_state of the CE with oom rx ring
 *
 * The executing work will continue to be rescheduled until
 * at least 1 descriptor is successfully posted to the rx ring.
 *
 * Return: none
 */
static void ce_oom_recovery(void *context)
{
	struct CE_state *ce_state = context;
	struct hif_softc *scn = ce_state->scn;
	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&ce_softc->pipe_info[ce_state->id];

	hif_post_recv_buffers_for_pipe(pipe_info);
}

#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed by
 * the CE descriptors.
 * Allocates HIF_CE_HISTORY_MAX buffers of CE_DEBUG_MAX_DATA_BUF_SIZE each.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return QDF_STATUS_E_NOMEM;

	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		event->data =
			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
		if (!event->data) {
			hif_err_rl("ce debug data alloc failed");
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_hist_data() - Free mem of the data pointed by
 * the CE descriptors.
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: none
 */
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	struct hif_ce_desc_event *event = NULL;
	struct hif_ce_desc_event *hist_ev = NULL;
	uint32_t index = 0;

	hist_ev =
	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];

	if (!hist_ev)
		return;

	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
		event = &hist_ev[index];
		if (event->data)
			qdf_mem_free(event->data);
		event->data = NULL;
		event = NULL;
	}
}
#endif /* HIF_CE_DEBUG_DATA_BUF */

#ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];

/**
 * alloc_mem_ce_debug_history() - Allocate CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 * @src_nentries: source ce ring entries
 * Return: QDF_STATUS
 */
static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
			   uint32_t src_nentries)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
	ce_hist->enable[ce_id] = 1;

	if (src_nentries)
		alloc_mem_ce_debug_hist_data(scn, ce_id);
	else
		ce_hist->data_enable[ce_id] = false;

	return QDF_STATUS_SUCCESS;
}

/**
 * free_mem_ce_debug_history() - Free CE descriptor history
 * @scn: hif scn handle
 * @ce_id: Copy Engine Id
 *
 * Return: None
 */
static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	ce_hist->enable[ce_id] = 0;
	if (ce_hist->data_enable[ce_id]) {
		ce_hist->data_enable[ce_id] = false;
		free_mem_ce_debug_hist_data(scn, ce_id);
	}
	ce_hist->hist_ev[ce_id] = NULL;
}
#else
static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
			   uint32_t src_nentries)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
#else
#if defined(HIF_CE_DEBUG_DATA_BUF)

static QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
			   uint32_t src_nentries)
{
	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));

	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
		scn->hif_ce_desc_hist.enable[CE_id] = 0;
		return QDF_STATUS_E_NOMEM;
	} else {
		scn->hif_ce_desc_hist.enable[CE_id] = 1;
		return QDF_STATUS_SUCCESS;
	}
}

static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];

	if (!hist_ev)
		return;

	if (ce_hist->data_enable[CE_id]) {
		ce_hist->data_enable[CE_id] = false;
		free_mem_ce_debug_hist_data(scn, CE_id);
	}

	ce_hist->enable[CE_id] = 0;
	qdf_mem_free(ce_hist->hist_ev[CE_id]);
	ce_hist->hist_ev[CE_id] = NULL;
}

#else

static inline QDF_STATUS
alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
			   uint32_t src_nentries)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
#endif /* HIF_CE_DEBUG_DATA_BUF */
#endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/**
 * reset_ce_debug_history() - reset the index and ce id used for dumping the
 * CE records on the console using sysfs.
 * @scn: hif scn handle
 *
 * Return: none
 */
static inline void reset_ce_debug_history(struct hif_softc *scn)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	/* Initialise the CE debug history sysfs interface inputs ce_id and
	 * index. Disable data storing
	 */
	ce_hist->hist_index = 0;
	ce_hist->hist_id = 0;
}
#else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
static inline void reset_ce_debug_history(struct hif_softc *scn) { }
#endif /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */

void ce_enable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = true;
}

void ce_disable_polling(void *cestate)
{
	struct CE_state *CE_state = (struct CE_state *)cestate;

	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
		CE_state->timer_inited = false;
}

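/*
 * Added note: timer_inited is the gate for the poll loop. ce_poll_timeout()
 * (near the top of this file) only services the engine and re-arms
 * poll_timer while timer_inited is true, so ce_disable_polling() lets a
 * running timer lapse rather than cancelling it synchronously.
 */
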
/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 *
 * This should be called during the initialization sequence before
 * interrupts are enabled, so we don't have to worry about thread safety.
 */
1568 struct CE_handle *ce_init(struct hif_softc *scn,
1569 			  unsigned int CE_id, struct CE_attr *attr)
1570 {
1571 	struct CE_state *CE_state;
1572 	uint32_t ctrl_addr;
1573 	unsigned int nentries;
1574 	bool malloc_CE_state = false;
1575 	bool malloc_src_ring = false;
1576 	int status;
1577 
1578 	QDF_ASSERT(CE_id < scn->ce_count);
1579 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
1580 	CE_state = scn->ce_id_to_state[CE_id];
1581 
1582 	if (!CE_state) {
1583 		CE_state =
1584 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
1585 		if (!CE_state)
1586 			return NULL;
1587 
1588 		malloc_CE_state = true;
1589 		qdf_spinlock_create(&CE_state->ce_index_lock);
1590 
1591 		CE_state->id = CE_id;
1592 		CE_state->ctrl_addr = ctrl_addr;
1593 		CE_state->state = CE_RUNNING;
1594 		CE_state->attr_flags = attr->flags;
1595 	}
1596 	CE_state->scn = scn;
1597 	CE_state->service = ce_engine_service_reg;
1598 
1599 	qdf_atomic_init(&CE_state->rx_pending);
1600 	if (!attr) {
1601 		/* Already initialized; caller wants the handle */
1602 		return (struct CE_handle *)CE_state;
1603 	}
1604 
1605 	if (CE_state->src_sz_max)
1606 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
1607 	else
1608 		CE_state->src_sz_max = attr->src_sz_max;
1609 
1610 	ce_init_ce_desc_event_log(scn, CE_id,
1611 				  attr->src_nentries + attr->dest_nentries);
1612 
1613 	/* source ring setup */
1614 	nentries = attr->src_nentries;
1615 	if (nentries) {
1616 		struct CE_ring_state *src_ring;
1617 
1618 		nentries = roundup_pwr2(nentries);
1619 		if (CE_state->src_ring) {
1620 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
1621 		} else {
1622 			src_ring = CE_state->src_ring =
1623 				ce_alloc_ring_state(CE_state,
1624 						CE_RING_SRC,
1625 						nentries);
1626 			if (!src_ring) {
1627 				/* cannot allocate src ring. If the
1628 				 * CE_state is allocated locally free
1629 				 * CE_State and return error.
1630 				 */
1631 				HIF_ERROR("%s: src ring has no mem", __func__);
1632 				if (malloc_CE_state) {
1633 					/* allocated CE_state locally */
1634 					qdf_mem_free(CE_state);
1635 					malloc_CE_state = false;
1636 				}
1637 				return NULL;
1638 			}
1639 			/* we can allocate src ring. Mark that the src ring is
1640 			 * allocated locally
1641 			 */
1642 			malloc_src_ring = true;
1643 
1644 			/*
1645 			 * Also allocate a shadow src ring in
1646 			 * regular mem to use for faster access.
1647 			 */
1648 			src_ring->shadow_base_unaligned =
1649 				qdf_mem_malloc(nentries *
1650 					       sizeof(struct CE_src_desc) +
1651 					       CE_DESC_RING_ALIGN);
1652 			if (!src_ring->shadow_base_unaligned)
1653 				goto error_no_dma_mem;
1654 
1655 			src_ring->shadow_base = (struct CE_src_desc *)
1656 				(((size_t) src_ring->shadow_base_unaligned +
1657 				CE_DESC_RING_ALIGN - 1) &
1658 				 ~(CE_DESC_RING_ALIGN - 1));
1659 
1660 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
1661 					       src_ring, attr);
1662 			if (status < 0)
1663 				goto error_target_access;
1664 
1665 			ce_ring_test_initial_indexes(CE_id, src_ring,
1666 						     "src_ring");
1667 		}
1668 	}
1669 
1670 	/* destination ring setup */
1671 	nentries = attr->dest_nentries;
1672 	if (nentries) {
1673 		struct CE_ring_state *dest_ring;
1674 
1675 		nentries = roundup_pwr2(nentries);
1676 		if (CE_state->dest_ring) {
1677 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
1678 		} else {
1679 			dest_ring = CE_state->dest_ring =
1680 				ce_alloc_ring_state(CE_state,
1681 						CE_RING_DEST,
1682 						nentries);
1683 			if (!dest_ring) {
1684 				/* cannot allocate dst ring. If the CE_state
1685 				 * or src ring is allocated locally free
1686 				 * CE_State and src ring and return error.
1687 				 */
1688 				HIF_ERROR("%s: dest ring has no mem",
1689 					  __func__);
1690 				goto error_no_dma_mem;
1691 			}
1692 
1693 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
1694 				      dest_ring, attr);
1695 			if (status < 0)
1696 				goto error_target_access;
1697 
1698 			ce_ring_test_initial_indexes(CE_id, dest_ring,
1699 						     "dest_ring");
1700 
1701 			/* For srng based target, init status ring here */
1702 			if (ce_srng_based(CE_state->scn)) {
1703 				CE_state->status_ring =
1704 					ce_alloc_ring_state(CE_state,
1705 							CE_RING_STATUS,
1706 							nentries);
1707 				if (!CE_state->status_ring) {
1708 					/* Allocation failed. Clean up. */
1709 					qdf_mem_free(CE_state->dest_ring);
1710 					if (malloc_src_ring) {
1711 						qdf_mem_free
1712 							(CE_state->src_ring);
1713 						CE_state->src_ring = NULL;
1714 						malloc_src_ring = false;
1715 					}
1716 					if (malloc_CE_state) {
1717 						/* allocated CE_state locally */
1718 						scn->ce_id_to_state[CE_id] =
1719 							NULL;
1720 						qdf_mem_free(CE_state);
1721 						malloc_CE_state = false;
1722 					}
1723 
1724 					return NULL;
1725 				}
1726 
1727 				status = ce_ring_setup(scn, CE_RING_STATUS,
1728 					       CE_id, CE_state->status_ring,
1729 					       attr);
1730 				if (status < 0)
1731 					goto error_target_access;
1732 
1733 			}
1734 
1735 			/* epping/polled pipes: arm the poll timer so the
1736 			 * CE is serviced even without interrupts */
1737 			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
1738 				qdf_timer_init(scn->qdf_dev,
1739 						&CE_state->poll_timer,
1740 						ce_poll_timeout,
1741 						CE_state,
1742 						QDF_TIMER_TYPE_WAKE_APPS);
1743 				ce_enable_polling(CE_state);
1744 				qdf_timer_mod(&CE_state->poll_timer,
1745 						      CE_POLL_TIMEOUT);
1746 			}
1747 		}
1748 	}
1749 
1750 	if (!ce_srng_based(scn)) {
1751 		/* Enable CE error interrupts */
1752 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1753 			goto error_target_access;
1754 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1755 		if (Q_TARGET_ACCESS_END(scn) < 0)
1756 			goto error_target_access;
1757 	}
1758 
1759 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1760 			ce_oom_recovery, CE_state);
1761 
1762 	/* update the htt_data attribute */
1763 	ce_mark_datapath(CE_state);
1764 	scn->ce_id_to_state[CE_id] = CE_state;
1765 
1766 	alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries);
1767 
1768 	return (struct CE_handle *)CE_state;
1769 
1770 error_target_access:
1771 error_no_dma_mem:
1772 	ce_fini((struct CE_handle *)CE_state);
1773 	return NULL;
1774 }
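
/*
 * Usage sketch for the function above (illustrative only; the values
 * are hypothetical and not taken from any host_ce_config table). A
 * caller hands ce_init() a filled-in struct CE_attr; passing a NULL
 * attr for an already-initialized CE just returns the existing handle,
 * as the early return above shows.
 *
 *	struct CE_attr attr = {
 *		.flags         = CE_ATTR_FLAGS,
 *		.src_nentries  = 16,    -- rounded up to a power of 2
 *		.src_sz_max    = 2048,
 *		.dest_nentries = 0,     -- host->target pipe, no dest ring
 *	};
 *	struct CE_handle *ce_hdl = ce_init(scn, CE_id, &attr);
 *
 *	if (!ce_hdl)
 *		... ring or CE_state allocation failed ...
 */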
1775 
1776 /**
1777  * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
1778  * @hif_ctx: HIF Context
1779  *
1780  * API to check if polling is enabled on all CEs. Returns true when polling
1781  * is enabled on all CEs.
1782  *
1783  * Return: bool
1784  */
1785 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1786 {
1787 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1788 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1789 	struct CE_attr *attr;
1790 	int id;
1791 
1792 	for (id = 0; id < scn->ce_count; id++) {
1793 		attr = &hif_state->host_ce_config[id];
1794 		if (attr && (attr->dest_nentries) &&
1795 		    !(attr->flags & CE_ATTR_ENABLE_POLL))
1796 			return false;
1797 	}
1798 	return true;
1799 }
1800 qdf_export_symbol(hif_is_polled_mode_enabled);
1801 
1802 #ifdef WLAN_FEATURE_FASTPATH
1803 /**
1804  * hif_enable_fastpath() - flag that fastpath mode is enabled
1805  * @hif_ctx: HIF context
1806  *
1807  * For use in the data path.
1808  *
1809  * Return: void
1810  */
1811 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1812 {
1813 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1814 
1815 	if (ce_srng_based(scn)) {
1816 		HIF_INFO("%s, srng rings do not support fastpath", __func__);
1817 		return;
1818 	}
1819 	HIF_DBG("%s, Enabling fastpath mode", __func__);
1820 	scn->fastpath_mode_on = true;
1821 }
1822 
1823 /**
1824  * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
1825  * @hif_ctx: HIF Context
1826  *
1827  * For use in data path to skip HTC
1828  *
1829  * Return: bool
1830  */
1831 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1832 {
1833 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1834 
1835 	return scn->fastpath_mode_on;
1836 }
1837 
1838 /**
1839  * hif_get_ce_handle - API to get CE handle for FastPath mode
1840  * @hif_ctx: HIF Context
1841  * @id: CopyEngine Id
1842  *
1843  * API to return CE handle for fastpath mode
1844  *
1845  * Return: CE handle for the given id, as an opaque void pointer
1846  */
1847 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1848 {
1849 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1850 
1851 	return scn->ce_id_to_state[id];
1852 }
1853 qdf_export_symbol(hif_get_ce_handle);
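
/*
 * Fastpath usage sketch (illustrative; HTT_TX_CE_ID is a hypothetical
 * placeholder for whichever CE id the client knows carries HTT tx
 * traffic). Once hif_enable_fastpath() has been called before CE
 * setup, a datapath client can bypass HTC and drive the CE directly:
 *
 *	if (hif_is_fastpath_mode_enabled(hif_ctx)) {
 *		struct CE_handle *ce_tx_hdl =
 *			hif_get_ce_handle(hif_ctx, HTT_TX_CE_ID);
 *		... use ce_tx_hdl with the fastpath send path ...
 *	}
 *
 * hif_get_ce_handle() does no bounds or NULL checking, so the id must
 * refer to a valid, initialized copy engine.
 */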
1854 
1855 /**
1856  * ce_h2t_tx_ce_cleanup() - placeholder for H2T CE cleanup
1857  * @ce_hdl: copy engine handle
1858  *
1859  * No processing is required inside this function; using an assert,
1860  * it only verifies that the TX CE has been drained completely.
1861  *
1862  * This is called while dismantling CE structures. No other thread
1863  * should be using these structures while dismantling is occurring,
1864  * therefore no locking is needed.
1865  *
1866  * Return: none
1867  */
1868 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1869 {
1870 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1871 	struct CE_ring_state *src_ring = ce_state->src_ring;
1872 	struct hif_softc *sc = ce_state->scn;
1873 	uint32_t sw_index, write_index;
1874 
1875 	if (hif_is_nss_wifi_enabled(sc))
1876 		return;
1877 
1878 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
1879 		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
1880 			 __func__, __LINE__);
1881 		sw_index = src_ring->sw_index;
1882 		write_index = src_ring->write_index;
1883 
1884 		/* At this point Tx CE should be clean */
1885 		qdf_assert_always(sw_index == write_index);
1886 	}
1887 }
1888 
1889 /**
1890  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1891  * @ce_hdl: Handle to CE
1892  *
1893  * These buffers are never allocated on the fly, but
1894  * are allocated only once during HIF start and freed
1895  * only once during HIF stop.
1896  * NOTE:
1897  * The assumption here is there is no in-flight DMA in progress
1898  * currently, so that buffers can be freed up safely.
1899  *
1900  * Return: NONE
1901  */
1902 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1903 {
1904 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1905 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
1906 	qdf_nbuf_t nbuf;
1907 	int i;
1908 
1909 	if (ce_state->scn->fastpath_mode_on == false)
1910 		return;
1911 
1912 	if (!ce_state->htt_rx_data)
1913 		return;
1914 
1915 	/*
1916 	 * We only get here when fastpath mode is on for a datapath CE.
1917 	 * Unlike other CEs, this CE can be completely full: it does not
1918 	 * leave one blank space to distinguish an empty queue from a
1919 	 * full one, so free all the entries.
1920 	 */
1921 	for (i = 0; i < dst_ring->nentries; i++) {
1922 		nbuf = dst_ring->per_transfer_context[i];
1923 
1924 		/*
1925 		 * The reasons for doing this check are:
1926 		 * 1) Protect against calling cleanup before allocating buffers
1927 		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1928 		 *    could have a partially filled ring, because of a memory
1929 		 *    allocation failure in the middle of allocating ring.
1930 		 *    This check accounts for that case, checking
1931 		 *    fastpath_mode_on flag or started flag would not have
1932 		 *    covered that case. This is not in performance path,
1933 		 *    so OK to do this.
1934 		 */
1935 		if (nbuf) {
1936 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1937 					      QDF_DMA_FROM_DEVICE);
1938 			qdf_nbuf_free(nbuf);
1939 		}
1940 	}
1941 }
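
/*
 * Ring-occupancy note (worked example): a regular CE ring with N
 * entries keeps one slot empty, so that sw_index == write_index means
 * "empty" and (write_index + 1) % N == sw_index means "full"; at most
 * N - 1 buffers are therefore posted. The fastpath t2h ring cleaned up
 * above is the exception: all N per_transfer_context slots may hold a
 * buffer, which is why the loop walks every one of the nentries slots
 * instead of N - 1.
 */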
1942 
1943 /**
1944  * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1945  * @scn: HIF handle
1946  *
1947  * Datapath Rx CEs are a special case, where we reuse all the message
1948  * buffers. Hence we have to post all the entries in the pipe at the
1949  * beginning, unlike other CE pipes where one less than dest_nentries
1950  * are filled at the beginning.
1951  *
1952  * Return: None
1953  */
1954 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1955 {
1956 	int pipe_num;
1957 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1958 
1959 	if (scn->fastpath_mode_on == false)
1960 		return;
1961 
1962 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1963 		struct HIF_CE_pipe_info *pipe_info =
1964 			&hif_state->pipe_info[pipe_num];
1965 		struct CE_state *ce_state =
1966 			scn->ce_id_to_state[pipe_info->pipe_num];
1967 
1968 		if (ce_state->htt_rx_data)
1969 			atomic_inc(&pipe_info->recv_bufs_needed);
1970 	}
1971 }
1972 #else
1973 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1974 {
1975 }
1976 
1977 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
1978 {
1979 	return false;
1980 }
1981 #endif /* WLAN_FEATURE_FASTPATH */
1982 
1983 void ce_fini(struct CE_handle *copyeng)
1984 {
1985 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1986 	unsigned int CE_id = CE_state->id;
1987 	struct hif_softc *scn = CE_state->scn;
1988 	uint32_t desc_size;
1989 	bool inited = CE_state->timer_inited;
1990 
1991 	CE_state->state = CE_UNUSED;
1992 	scn->ce_id_to_state[CE_id] = NULL;
1993 	/* Set the flag to false first to stop processing in ce_poll_timeout */
1994 	ce_disable_polling(CE_state);
1995 
1996 	qdf_lro_deinit(CE_state->lro_data);
1997 
1998 	if (CE_state->src_ring) {
1999 		/* Cleanup the datapath Tx ring */
2000 		ce_h2t_tx_ce_cleanup(copyeng);
2001 
2002 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
2003 		if (CE_state->src_ring->shadow_base_unaligned)
2004 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
2005 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
2006 			ce_free_desc_ring(scn, CE_state->id,
2007 					  CE_state->src_ring,
2008 					  desc_size);
2009 		qdf_mem_free(CE_state->src_ring);
2010 	}
2011 	if (CE_state->dest_ring) {
2012 		/* Cleanup the datapath Rx ring */
2013 		ce_t2h_msg_ce_cleanup(copyeng);
2014 
2015 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
2016 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
2017 			ce_free_desc_ring(scn, CE_state->id,
2018 					  CE_state->dest_ring,
2019 					  desc_size);
2020 		qdf_mem_free(CE_state->dest_ring);
2021 
2022 		/* epping/polled pipes: free the poll timer */
2023 		if (inited) {
2024 			qdf_timer_free(&CE_state->poll_timer);
2025 		}
2026 	}
2027 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
2028 		/* Cleanup the datapath Tx ring */
2029 		ce_h2t_tx_ce_cleanup(copyeng);
2030 
2031 		if (CE_state->status_ring->shadow_base_unaligned)
2032 			qdf_mem_free(
2033 				CE_state->status_ring->shadow_base_unaligned);
2034 
2035 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
2036 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
2037 			ce_free_desc_ring(scn, CE_state->id,
2038 					  CE_state->status_ring,
2039 					  desc_size);
2040 		qdf_mem_free(CE_state->status_ring);
2041 	}
2042 
2043 	free_mem_ce_debug_history(scn, CE_id);
2044 	reset_ce_debug_history(scn);
2045 	ce_deinit_ce_desc_event_log(scn, CE_id);
2046 
2047 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
2048 	qdf_mem_free(CE_state);
2049 }
2050 
2051 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
2052 {
2053 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2054 
2055 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
2056 		  sizeof(hif_state->msg_callbacks_pending));
2057 	qdf_mem_zero(&hif_state->msg_callbacks_current,
2058 		  sizeof(hif_state->msg_callbacks_current));
2059 }
2060 
2061 /* Send the first nbytes bytes of the buffer */
2062 QDF_STATUS
2063 hif_send_head(struct hif_opaque_softc *hif_ctx,
2064 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
2065 	      qdf_nbuf_t nbuf, unsigned int data_attr)
2066 {
2067 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2068 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2069 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2070 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2071 	int bytes = nbytes, nfrags = 0;
2072 	struct ce_sendlist sendlist;
2073 	int i = 0;
2074 	QDF_STATUS status;
2075 	unsigned int mux_id = 0;
2076 
2077 	if (nbytes > qdf_nbuf_len(nbuf)) {
2078 		HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
2079 			  (uint32_t)qdf_nbuf_len(nbuf));
2080 		QDF_ASSERT(0);
2081 	}
2082 
2083 	transfer_id =
2084 		(mux_id & MUX_ID_MASK) |
2085 		(transfer_id & TRANSACTION_ID_MASK);
2086 	data_attr &= DESC_DATA_FLAG_MASK;
2087 	/*
2088 	 * The common case involves sending multiple fragments within a
2089 	 * single download (the tx descriptor and the tx frame header).
2090 	 * So, optimize for the case of multiple fragments by not even
2091 	 * checking whether it's necessary to use a sendlist.
2092 	 * The overhead of using a sendlist for a single buffer download
2093 	 * is not a big deal, since it happens rarely (for WMI messages).
2094 	 */
2095 	ce_sendlist_init(&sendlist);
2096 	do {
2097 		qdf_dma_addr_t frag_paddr;
2098 		int frag_bytes;
2099 
2100 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
2101 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
2102 		/*
2103 		 * Clear the packet offset for all but the first CE desc.
2104 		 */
2105 		if (i++ > 0)
2106 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
2107 
2108 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
2109 					     frag_bytes > bytes ?
2110 					     bytes : frag_bytes,
2111 					     qdf_nbuf_get_frag_is_wordstream(
2112 						     nbuf,
2113 						     nfrags) ?
2114 					     0 : CE_SEND_FLAG_SWAP_DISABLE,
2115 					     data_attr);
2116 		if (status != QDF_STATUS_SUCCESS) {
2117 			HIF_ERROR("%s: error, frag_num %d larger than limit",
2118 				__func__, nfrags);
2119 			return status;
2120 		}
2121 		bytes -= frag_bytes;
2122 		nfrags++;
2123 	} while (bytes > 0);
2124 
2125 	/* Make sure we have resources to handle this request */
2126 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2127 	if (pipe_info->num_sends_allowed < nfrags) {
2128 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2129 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
2130 		return QDF_STATUS_E_RESOURCES;
2131 	}
2132 	pipe_info->num_sends_allowed -= nfrags;
2133 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2134 
2135 	if (qdf_unlikely(!ce_hdl)) {
2136 		HIF_ERROR("%s: error CE handle is null", __func__);
2137 		return QDF_STATUS_E_FAILURE;
2138 	}
2139 
2140 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
2141 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
2142 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
2143 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
2144 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
2145 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
2146 
2147 	return status;
2148 }
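
/*
 * Send sketch (illustrative; the buffer layout and ids are
 * hypothetical, and qdf_nbuf_frag_push_head() is assumed to be the
 * usual QDF helper for prepending a mapped fragment). A typical tx
 * download carries two fragments in one nbuf - the tx descriptor and
 * the frame itself - and each fragment becomes one sendlist entry in
 * the loop above:
 *
 *	qdf_nbuf_frag_push_head(nbuf, desc_len, desc_vaddr, desc_paddr);
 *	status = hif_send_head(hif_ctx, htt_tx_pipe, transfer_id,
 *			       qdf_nbuf_len(nbuf), nbuf, data_attr);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		... handle the error; the frame was not queued ...
 */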
2149 
2150 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
2151 								int force)
2152 {
2153 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2154 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2155 
2156 	if (!force) {
2157 		int resources;
2158 		/*
2159 		 * Decide whether to actually poll for completions, or just
2160 		 * wait for a later chance. If there seem to be plenty of
2161 		 * resources left, then just wait, since checking involves
2162 		 * reading a CE register, which is a relatively expensive
2163 		 * operation.
2164 		 */
2165 		resources = hif_get_free_queue_number(hif_ctx, pipe);
2166 		/*
2167 		 * If at least 50% of the total resources are still available,
2168 		 * don't bother checking again yet.
2169 		 */
2170 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
2171 									 1))
2172 			return;
2173 	}
2174 #if ATH_11AC_TXCOMPACT
2175 	ce_per_engine_servicereap(scn, pipe);
2176 #else
2177 	ce_per_engine_service(scn, pipe);
2178 #endif
2179 }
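
/*
 * Worked example for the 50% heuristic above: for a pipe with a
 * hypothetical src_nentries of 32, src_nentries >> 1 is 16, so while
 * hif_get_free_queue_number() reports more than 16 free send slots the
 * comparatively expensive CE register read is skipped; only once the
 * free count drops to 16 or fewer is the engine actually serviced.
 */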
2180 
2181 uint16_t
2182 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2183 {
2184 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2185 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2186 	uint16_t rv;
2187 
2188 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2189 	rv = pipe_info->num_sends_allowed;
2190 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2191 	return rv;
2192 }
2193 
2194 /* Called by lower (CE) layer when a send to Target completes. */
2195 static void
2196 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
2197 		     void *transfer_context, qdf_dma_addr_t CE_data,
2198 		     unsigned int nbytes, unsigned int transfer_id,
2199 		     unsigned int sw_index, unsigned int hw_index,
2200 		     unsigned int toeplitz_hash_result)
2201 {
2202 	struct HIF_CE_pipe_info *pipe_info =
2203 		(struct HIF_CE_pipe_info *)ce_context;
2204 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
2205 	struct hif_msg_callbacks *msg_callbacks =
2206 		&pipe_info->pipe_callbacks;
2207 
2208 	do {
2209 		/*
2210 		 * The upper layer callback will be triggered
2211 		 * when the last fragment is completed.
2212 		 */
2213 		if (transfer_context != CE_SENDLIST_ITEM_CTXT)
2214 			msg_callbacks->txCompletionHandler(
2215 				msg_callbacks->Context,
2216 				transfer_context, transfer_id,
2217 				toeplitz_hash_result);
2218 
2219 		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2220 		pipe_info->num_sends_allowed++;
2221 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2222 	} while (ce_completed_send_next(copyeng,
2223 			&ce_context, &transfer_context,
2224 			&CE_data, &nbytes, &transfer_id,
2225 			&sw_idx, &hw_idx,
2226 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
2227 }
2228 
2229 /**
2230  * hif_ce_do_recv(): send message from copy engine to upper layers
2231  * @msg_callbacks: structure containing callback and callback context
2232  * @netbuf: skb containing message
2233  * @nbytes: number of bytes in the message
2234  * @pipe_info: used for the pipe_number info
2235  *
2236  * Checks the packet length, configures the length in the netbuff,
2237  * and calls the upper layer callback.
2238  *
2239  * Return: None
2240  */
2241 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
2242 		qdf_nbuf_t netbuf, int nbytes,
2243 		struct HIF_CE_pipe_info *pipe_info) {
2244 	if (nbytes <= pipe_info->buf_sz) {
2245 		qdf_nbuf_set_pktlen(netbuf, nbytes);
2246 		msg_callbacks->
2247 			rxCompletionHandler(msg_callbacks->Context,
2248 					netbuf, pipe_info->pipe_num);
2249 	} else {
2250 		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
2251 				__func__, netbuf, nbytes);
2252 
2253 		qdf_nbuf_free(netbuf);
2254 	}
2255 }
2256 
2257 /* Called by lower (CE) layer when data is received from the Target. */
2258 static void
2259 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
2260 		     void *transfer_context, qdf_dma_addr_t CE_data,
2261 		     unsigned int nbytes, unsigned int transfer_id,
2262 		     unsigned int flags)
2263 {
2264 	struct HIF_CE_pipe_info *pipe_info =
2265 		(struct HIF_CE_pipe_info *)ce_context;
2266 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
2267 	struct CE_state *ce_state = (struct CE_state *) copyeng;
2268 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2269 	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
2270 	struct hif_msg_callbacks *msg_callbacks =
2271 		 &pipe_info->pipe_callbacks;
2272 
2273 	do {
2274 		hif_pm_runtime_mark_last_busy(hif_ctx);
2275 		qdf_nbuf_unmap_single(scn->qdf_dev,
2276 				      (qdf_nbuf_t) transfer_context,
2277 				      QDF_DMA_FROM_DEVICE);
2278 
2279 		atomic_inc(&pipe_info->recv_bufs_needed);
2280 		hif_post_recv_buffers_for_pipe(pipe_info);
2281 		if (scn->target_status == TARGET_STATUS_RESET)
2282 			qdf_nbuf_free(transfer_context);
2283 		else
2284 			hif_ce_do_recv(msg_callbacks, transfer_context,
2285 				nbytes, pipe_info);
2286 
2287 		/* Set the force_break flag if the number of receives reaches
2288 		 * MAX_NUM_OF_RECEIVES
2289 		 */
2290 		ce_state->receive_count++;
2291 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
2292 			ce_state->force_break = 1;
2293 			break;
2294 		}
2295 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
2296 					&CE_data, &nbytes, &transfer_id,
2297 					&flags) == QDF_STATUS_SUCCESS);
2298 
2299 }
2300 
2301 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
2302 
2303 void
2304 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
2305 	      struct hif_msg_callbacks *callbacks)
2306 {
2307 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2308 
2309 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2310 	spin_lock_init(&pcie_access_log_lock);
2311 #endif
2312 	/* Save callbacks for later installation */
2313 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
2314 		 sizeof(hif_state->msg_callbacks_pending));
2315 
2316 }
2317 
2318 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
2319 {
2320 	struct CE_handle *ce_diag = hif_state->ce_diag;
2321 	int pipe_num;
2322 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2323 	struct hif_msg_callbacks *hif_msg_callbacks =
2324 		&hif_state->msg_callbacks_current;
2325 
2326 	/* daemonize("hif_compl_thread"); */
2327 
2328 	if (scn->ce_count == 0) {
2329 		HIF_ERROR("%s: Invalid ce_count", __func__);
2330 		return -EINVAL;
2331 	}
2332 
2333 	if (!hif_msg_callbacks ||
2334 			!hif_msg_callbacks->rxCompletionHandler ||
2335 			!hif_msg_callbacks->txCompletionHandler) {
2336 		HIF_ERROR("%s: no completion handler registered", __func__);
2337 		return -EFAULT;
2338 	}
2339 
2340 	A_TARGET_ACCESS_LIKELY(scn);
2341 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2342 		struct CE_attr attr;
2343 		struct HIF_CE_pipe_info *pipe_info;
2344 
2345 		pipe_info = &hif_state->pipe_info[pipe_num];
2346 		if (pipe_info->ce_hdl == ce_diag)
2347 			continue;       /* Handle Diagnostic CE specially */
2348 		attr = hif_state->host_ce_config[pipe_num];
2349 		if (attr.src_nentries) {
2350 			/* pipe used to send to target */
2351 			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
2352 					 __func__, pipe_num, pipe_info);
2353 			ce_send_cb_register(pipe_info->ce_hdl,
2354 					    hif_pci_ce_send_done, pipe_info,
2355 					    attr.flags & CE_ATTR_DISABLE_INTR);
2356 			pipe_info->num_sends_allowed = attr.src_nentries - 1;
2357 		}
2358 		if (attr.dest_nentries) {
2359 			/* pipe used to receive from target */
2360 			ce_recv_cb_register(pipe_info->ce_hdl,
2361 					    hif_pci_ce_recv_data, pipe_info,
2362 					    attr.flags & CE_ATTR_DISABLE_INTR);
2363 		}
2364 
2365 		if (attr.src_nentries)
2366 			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
2367 
2368 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
2369 					sizeof(pipe_info->pipe_callbacks));
2370 	}
2371 
2372 	A_TARGET_ACCESS_UNLIKELY(scn);
2373 	return 0;
2374 }
2375 
2376 /*
2377  * Install pending msg callbacks.
2378  *
2379  * TBDXXX: This hack is needed because upper layers install msg callbacks
2380  * for use with HTC before BMI is done; yet this HIF implementation
2381  * needs to continue to use BMI msg callbacks. Really, upper layers
2382  * should not register HTC callbacks until AFTER BMI phase.
2383  */
2384 static void hif_msg_callbacks_install(struct hif_softc *scn)
2385 {
2386 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2387 
2388 	qdf_mem_copy(&hif_state->msg_callbacks_current,
2389 		 &hif_state->msg_callbacks_pending,
2390 		 sizeof(hif_state->msg_callbacks_pending));
2391 }
2392 
2393 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2394 							uint8_t *DLPipe)
2395 {
2396 	int ul_is_polled, dl_is_polled;
2397 
2398 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
2399 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2400 }
2401 
2402 /**
2403  * hif_dump_pipe_debug_count() - Log error count
2404  * @scn: hif_softc pointer.
2405  *
2406  * Output the pipe error counts of each pipe to log file
2407  *
2408  * Return: N/A
2409  */
2410 void hif_dump_pipe_debug_count(struct hif_softc *scn)
2411 {
2412 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2413 	int pipe_num;
2414 
2415 	if (!hif_state) {
2416 		HIF_ERROR("%s: hif_state is NULL", __func__);
2417 		return;
2418 	}
2419 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2420 		struct HIF_CE_pipe_info *pipe_info;
2421 
2422 		pipe_info = &hif_state->pipe_info[pipe_num];
2423 
2424 		if (pipe_info->nbuf_alloc_err_count > 0 ||
2425 		    pipe_info->nbuf_dma_err_count > 0 ||
2426 		    pipe_info->nbuf_ce_enqueue_err_count > 0)
2427 			HIF_ERROR(
2428 				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
2429 				__func__, pipe_info->pipe_num,
2430 				atomic_read(&pipe_info->recv_bufs_needed),
2431 				pipe_info->nbuf_alloc_err_count,
2432 				pipe_info->nbuf_dma_err_count,
2433 				pipe_info->nbuf_ce_enqueue_err_count);
2434 	}
2435 }
2436 
2437 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2438 					  void *nbuf, uint32_t *error_cnt,
2439 					  enum hif_ce_event_type failure_type,
2440 					  const char *failure_type_string)
2441 {
2442 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2443 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2444 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2445 	int ce_id = CE_state->id;
2446 	uint32_t error_cnt_tmp;
2447 
2448 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2449 	error_cnt_tmp = ++(*error_cnt);
2450 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2451 	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
2452 		  __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2453 		  failure_type_string);
2454 	hif_record_ce_desc_event(scn, ce_id, failure_type,
2455 				 NULL, nbuf, bufs_needed_tmp, 0);
2456 	/* if we fail to allocate the last buffer for an rx pipe,
2457 	 * there is no trigger to refill the CE and we will
2458 	 * eventually crash
2459 	 */
2460 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1 ||
2461 	    (ce_srng_based(scn) &&
2462 	     bufs_needed_tmp == CE_state->dest_ring->nentries - 2))
2463 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
2464 
2465 }
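
/*
 * Worked example for the refill trigger above: with a hypothetical
 * legacy dest ring of 512 entries, at most 511 (nentries - 1) buffers
 * are ever posted, so recv_bufs_needed reaching 511 means no buffer is
 * left on the ring; no rx completion will ever fire to trigger a
 * refill and the OOM work item is the only recovery path. SRNG-based
 * targets post at most nentries - 2 buffers, hence the matching
 * threshold of 510.
 */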
2466 
2470 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
2471 {
2472 	struct CE_handle *ce_hdl;
2473 	qdf_size_t buf_sz;
2474 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2475 	QDF_STATUS status;
2476 	uint32_t bufs_posted = 0;
2477 	unsigned int ce_id;
2478 
2479 	buf_sz = pipe_info->buf_sz;
2480 	if (buf_sz == 0) {
2481 		/* Unused Copy Engine */
2482 		return QDF_STATUS_SUCCESS;
2483 	}
2484 
2485 	ce_hdl = pipe_info->ce_hdl;
2486 	ce_id = ((struct CE_state *)ce_hdl)->id;
2487 
2488 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2489 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
2490 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
2491 		qdf_nbuf_t nbuf;
2492 
2493 		atomic_dec(&pipe_info->recv_bufs_needed);
2494 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2495 
2496 		hif_record_ce_desc_event(scn, ce_id,
2497 					 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL,
2498 					 0, 0);
2499 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
2500 		if (!nbuf) {
2501 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2502 					&pipe_info->nbuf_alloc_err_count,
2503 					 HIF_RX_NBUF_ALLOC_FAILURE,
2504 					"HIF_RX_NBUF_ALLOC_FAILURE");
2505 			return QDF_STATUS_E_NOMEM;
2506 		}
2507 
2508 		hif_record_ce_desc_event(scn, ce_id,
2509 					 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf,
2510 					 0, 0);
2511 		/*
2512 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
2513 		 * CE_data = dma_map_single(dev, data, buf_sz, );
2514 		 * DMA_FROM_DEVICE);
2515 		 */
2516 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
2517 					    QDF_DMA_FROM_DEVICE);
2518 
2519 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2520 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2521 					&pipe_info->nbuf_dma_err_count,
2522 					 HIF_RX_NBUF_MAP_FAILURE,
2523 					"HIF_RX_NBUF_MAP_FAILURE");
2524 			qdf_nbuf_free(nbuf);
2525 			return status;
2526 		}
2527 
2528 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
2529 		hif_record_ce_desc_event(scn, ce_id,
2530 					 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf,
2531 					 0, 0);
2532 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
2533 					       buf_sz, DMA_FROM_DEVICE);
2534 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
2535 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2536 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2537 					&pipe_info->nbuf_ce_enqueue_err_count,
2538 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
2539 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
2540 
2541 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2542 						QDF_DMA_FROM_DEVICE);
2543 			qdf_nbuf_free(nbuf);
2544 			return status;
2545 		}
2546 
2547 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2548 		bufs_posted++;
2549 	}
2550 	pipe_info->nbuf_alloc_err_count =
2551 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
2552 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2553 	pipe_info->nbuf_dma_err_count =
2554 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
2555 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2556 	pipe_info->nbuf_ce_enqueue_err_count =
2557 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
2558 		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
2559 
2560 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2561 
2562 	return QDF_STATUS_SUCCESS;
2563 }
2564 
2565 /*
2566  * Returns QDF_STATUS_SUCCESS for non-fastpath rx copy engines, since
2567  * oom_allocation_work will be scheduled to recover any failures;
2568  * returns an error status if unable to completely replenish receive
2569  * buffers for a fastpath rx copy engine.
2570  * receive buffers for fastpath rx Copy engine.
2571  */
2572 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
2573 {
2574 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2575 	int pipe_num;
2576 	struct CE_state *ce_state = NULL;
2577 	QDF_STATUS qdf_status;
2578 
2579 	A_TARGET_ACCESS_LIKELY(scn);
2580 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2581 		struct HIF_CE_pipe_info *pipe_info;
2582 
2583 		ce_state = scn->ce_id_to_state[pipe_num];
2584 		pipe_info = &hif_state->pipe_info[pipe_num];
2585 
2586 		if (hif_is_nss_wifi_enabled(scn) &&
2587 		    ce_state && (ce_state->htt_rx_data))
2588 			continue;
2589 
2590 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
2591 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
2592 			ce_state->htt_rx_data &&
2593 			scn->fastpath_mode_on) {
2594 			A_TARGET_ACCESS_UNLIKELY(scn);
2595 			return qdf_status;
2596 		}
2597 	}
2598 
2599 	A_TARGET_ACCESS_UNLIKELY(scn);
2600 
2601 	return QDF_STATUS_SUCCESS;
2602 }
2603 
2604 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
2605 {
2606 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2607 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2608 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
2609 
2610 	hif_update_fastpath_recv_bufs_cnt(scn);
2611 
2612 	hif_msg_callbacks_install(scn);
2613 
2614 	if (hif_completion_thread_startup(hif_state))
2615 		return QDF_STATUS_E_FAILURE;
2616 
2617 	/* enable buffer cleanup */
2618 	hif_state->started = true;
2619 
2620 	/* Post buffers once to start things off. */
2621 	qdf_status = hif_post_recv_buffers(scn);
2622 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2623 		/* cleanup is done in hif_ce_disable */
2624 		HIF_ERROR("%s: failed to post buffers", __func__);
2625 		return qdf_status;
2626 	}
2627 
2628 	return qdf_status;
2629 }
2630 
2631 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2632 {
2633 	struct hif_softc *scn;
2634 	struct CE_handle *ce_hdl;
2635 	uint32_t buf_sz;
2636 	struct HIF_CE_state *hif_state;
2637 	qdf_nbuf_t netbuf;
2638 	qdf_dma_addr_t CE_data;
2639 	void *per_CE_context;
2640 
2641 	buf_sz = pipe_info->buf_sz;
2642 	/* Unused Copy Engine */
2643 	if (buf_sz == 0)
2644 		return;
2645 
2647 	hif_state = pipe_info->HIF_CE_state;
2648 	if (!hif_state->started)
2649 		return;
2650 
2651 	scn = HIF_GET_SOFTC(hif_state);
2652 	ce_hdl = pipe_info->ce_hdl;
2653 
2654 	if (!scn->qdf_dev)
2655 		return;
2656 	while (ce_revoke_recv_next
2657 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
2658 			&CE_data) == QDF_STATUS_SUCCESS) {
2659 		if (netbuf) {
2660 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2661 					      QDF_DMA_FROM_DEVICE);
2662 			qdf_nbuf_free(netbuf);
2663 		}
2664 	}
2665 }
2666 
2667 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2668 {
2669 	struct CE_handle *ce_hdl;
2670 	struct HIF_CE_state *hif_state;
2671 	struct hif_softc *scn;
2672 	qdf_nbuf_t netbuf;
2673 	void *per_CE_context;
2674 	qdf_dma_addr_t CE_data;
2675 	unsigned int nbytes;
2676 	unsigned int id;
2677 	uint32_t buf_sz;
2678 	uint32_t toeplitz_hash_result;
2679 
2680 	buf_sz = pipe_info->buf_sz;
2681 	if (buf_sz == 0) {
2682 		/* Unused Copy Engine */
2683 		return;
2684 	}
2685 
2686 	hif_state = pipe_info->HIF_CE_state;
2687 	if (!hif_state->started) {
2688 		return;
2689 	}
2690 
2691 	scn = HIF_GET_SOFTC(hif_state);
2692 
2693 	ce_hdl = pipe_info->ce_hdl;
2694 
2695 	while (ce_cancel_send_next
2696 		       (ce_hdl, &per_CE_context,
2697 		       (void **)&netbuf, &CE_data, &nbytes,
2698 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2699 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2700 			/*
2701 			 * Packets enqueued by htt_h2t_ver_req_msg() and
2702 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2703 			 * freed in htt_htc_misc_pkt_pool_free() in
2704 			 * wlantl_close(). Check which endpoint they were
2705 			 * queued on, so that they are not freed here
2706 			 * again.
2707 			 */
2708 			if (id == scn->htc_htt_tx_endpoint)
2709 				return;
2710 			/* Indicate the completion to higher
2711 			 * layer to free the buffer
2712 			 */
2713 			if (pipe_info->pipe_callbacks.txCompletionHandler)
2714 				pipe_info->pipe_callbacks.
2715 				    txCompletionHandler(pipe_info->
2716 					    pipe_callbacks.Context,
2717 					    netbuf, id, toeplitz_hash_result);
2718 		}
2719 	}
2720 }
2721 
2722 /*
2723  * Cleanup residual buffers for device shutdown:
2724  *    buffers that were enqueued for receive
2725  *    buffers that were to be sent
2726  * Note: Buffers that had completed but which were
2727  * not yet processed are on a completion queue. They
2728  * are handled when the completion thread shuts down.
2729  */
2730 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
2731 {
2732 	int pipe_num;
2733 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2734 	struct CE_state *ce_state;
2735 
2736 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2737 		struct HIF_CE_pipe_info *pipe_info;
2738 
2739 		ce_state = scn->ce_id_to_state[pipe_num];
2740 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2741 				((ce_state->htt_tx_data) ||
2742 				 (ce_state->htt_rx_data))) {
2743 			continue;
2744 		}
2745 
2746 		pipe_info = &hif_state->pipe_info[pipe_num];
2747 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
2748 		hif_send_buffer_cleanup_on_pipe(pipe_info);
2749 	}
2750 }
2751 
2752 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
2753 {
2754 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2755 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2756 
2757 	hif_buffer_cleanup(hif_state);
2758 }
2759 
2760 static void hif_destroy_oom_work(struct hif_softc *scn)
2761 {
2762 	struct CE_state *ce_state;
2763 	int ce_id;
2764 
2765 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2766 		ce_state = scn->ce_id_to_state[ce_id];
2767 		if (ce_state)
2768 			qdf_destroy_work(scn->qdf_dev,
2769 					 &ce_state->oom_allocation_work);
2770 	}
2771 }
2772 
2773 void hif_ce_stop(struct hif_softc *scn)
2774 {
2775 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2776 	int pipe_num;
2777 
2778 	/*
2779 	 * before cleaning up any memory, ensure irq &
2780 	 * bottom half contexts will not be re-entered
2781 	 */
2782 	hif_disable_isr(&scn->osc);
2783 	hif_destroy_oom_work(scn);
2784 	scn->hif_init_done = false;
2785 
2786 	/*
2787 	 * At this point, asynchronous threads are stopped,
2788 	 * The Target should not DMA nor interrupt, Host code may
2789 	 * not initiate anything more.  So we just need to clean
2790 	 * up Host-side state.
2791 	 */
2792 
2793 	if (scn->athdiag_procfs_inited) {
2794 		athdiag_procfs_remove();
2795 		scn->athdiag_procfs_inited = false;
2796 	}
2797 
2798 	hif_buffer_cleanup(hif_state);
2799 
2800 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2801 		struct HIF_CE_pipe_info *pipe_info;
2802 		struct CE_attr attr;
2803 		struct CE_handle *ce_diag = hif_state->ce_diag;
2804 
2805 		pipe_info = &hif_state->pipe_info[pipe_num];
2806 		if (pipe_info->ce_hdl) {
2807 			if (pipe_info->ce_hdl != ce_diag &&
2808 			    hif_state->started) {
2809 				attr = hif_state->host_ce_config[pipe_num];
2810 				if (attr.src_nentries)
2811 					qdf_spinlock_destroy(&pipe_info->
2812 							completion_freeq_lock);
2813 			}
2814 			ce_fini(pipe_info->ce_hdl);
2815 			pipe_info->ce_hdl = NULL;
2816 			pipe_info->buf_sz = 0;
2817 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2818 		}
2819 	}
2820 
2821 	if (hif_state->sleep_timer_init) {
2822 		qdf_timer_stop(&hif_state->sleep_timer);
2823 		qdf_timer_free(&hif_state->sleep_timer);
2824 		hif_state->sleep_timer_init = false;
2825 	}
2826 
2827 	hif_state->started = false;
2828 }
2829 
2830 static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
2831 				   struct shadow_reg_cfg
2832 				   **target_shadow_reg_cfg_ret,
2833 				   uint32_t *shadow_cfg_sz_ret)
2834 {
2835 	if (target_shadow_reg_cfg_ret)
2836 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2837 	if (shadow_cfg_sz_ret)
2838 		*shadow_cfg_sz_ret = shadow_cfg_sz;
2839 }
2840 
2841 /**
2842  * hif_get_target_ce_config() - get copy engine configuration
2843  * @target_ce_config_ret: basic copy engine configuration
2844  * @target_ce_config_sz_ret: size of the basic configuration in bytes
2845  * @target_service_to_ce_map_ret: service mapping for the copy engines
2846  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2847  * @target_shadow_reg_cfg_ret: shadow register configuration
2848  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2849  *
2850  * Provides access to these values outside of this file. Currently
2851  * these are stored in static pointers to const sections. There are
2852  * multiple configurations that are selected from at compile time.
2853  * Runtime selection would need to consider mode, target type and bus type.
2854  *
2855  * Return: return by parameter.
2856  */
2857 void hif_get_target_ce_config(struct hif_softc *scn,
2858 		struct CE_pipe_config **target_ce_config_ret,
2859 		uint32_t *target_ce_config_sz_ret,
2860 		struct service_to_pipe **target_service_to_ce_map_ret,
2861 		uint32_t *target_service_to_ce_map_sz_ret,
2862 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
2863 		uint32_t *shadow_cfg_sz_ret)
2864 {
2865 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2866 
2867 	*target_ce_config_ret = hif_state->target_ce_config;
2868 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
2869 
2870 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2871 				       target_service_to_ce_map_sz_ret);
2872 	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
2873 			       shadow_cfg_sz_ret);
2874 }
2875 
2876 #ifdef CONFIG_SHADOW_V2
2877 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2878 {
2879 	int i;
2880 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2881 		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
2882 
2883 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2884 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2885 		     "%s: i %d, val %x", __func__, i,
2886 		     cfg->shadow_reg_v2_cfg[i].addr);
2887 	}
2888 }
2889 
2890 #else
2891 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2892 {
2893 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2894 		  "%s: CONFIG_SHADOW_V2 not defined", __func__);
2895 }
2896 #endif
2897 
2898 #ifdef ADRASTEA_RRI_ON_DDR
2899 /**
2900  * hif_get_src_ring_read_index(): Called to get the SRRI
2901  *
2902  * @scn: hif_softc pointer
2903  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2904  *
2905  * This function returns the SRRI to the caller. For CEs that
2906  * don't have interrupts enabled, we look at the DDR based SRRI
2907  *
2908  * Return: SRRI
2909  */
2910 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
2911 		uint32_t CE_ctrl_addr)
2912 {
2913 	struct CE_attr attr;
2914 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2915 
2916 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2917 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
2918 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2919 	} else {
2920 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2921 			return A_TARGET_READ(scn,
2922 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2923 		else
2924 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
2925 					CE_ctrl_addr);
2926 	}
2927 }
2928 
2929 /**
2930  * hif_get_dst_ring_read_index(): Called to get the DRRI
2931  *
2932  * @scn: hif_softc pointer
2933  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2934  *
2935  * This function returns the DRRI to the caller. For CEs that
2936  * don't have interrupts enabled, we look at the DDR based DRRI
2937  *
2938  * Return: DRRI
2939  */
2940 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
2941 		uint32_t CE_ctrl_addr)
2942 {
2943 	struct CE_attr attr;
2944 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2945 
2946 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2947 
2948 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
2949 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2950 	} else {
2951 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2952 			return A_TARGET_READ(scn,
2953 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2954 		else
2955 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
2956 					CE_ctrl_addr);
2957 	}
2958 }
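
/*
 * Sketch of the DDR-based read path (assuming the Adrastea layout,
 * where hif_alloc_rri_on_ddr() below reserves one uint32_t per copy
 * engine): the *_GET_FROM_DDR macros amount to reading that shared
 * word instead of a hardware register, roughly
 *
 *	rri = scn->vaddr_rri_on_ddr[COPY_ENGINE_ID(CE_ctrl_addr)];
 *
 * with SRRI and DRRI packed into separate bit fields of the word. The
 * exact packing is owned by the CE register headers, not by this file.
 */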
2959 
2960 /**
2961  * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
2962  * @scn: hif_softc pointer
2963  *
2964  * Return: qdf status
2965  */
2966 static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
2967 {
2968 	qdf_dma_addr_t paddr_rri_on_ddr = 0;
2969 
2970 	scn->vaddr_rri_on_ddr =
2971 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
2972 		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
2973 		&paddr_rri_on_ddr);
2974 
2975 	if (!scn->vaddr_rri_on_ddr) {
2976 		hif_err("dmaable page alloc fail");
2977 		return QDF_STATUS_E_NOMEM;
2978 	}
2979 
2980 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
2981 
2982 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
2983 
2984 	return QDF_STATUS_SUCCESS;
2985 }
2986 #endif
2987 
2988 #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
2989 /**
2990  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2991  *
2992  * @scn: hif_softc pointer
2993  *
2994  * This function allocates non-cached memory on DDR and sends
2995  * the physical address of this memory to the CE hardware. The
2996  * hardware updates the RRI on this particular location.
2997  *
2998  * Return: None
2999  */
3000 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3001 {
3002 	unsigned int i;
3003 	uint32_t high_paddr, low_paddr;
3004 
3005 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3006 		return;
3007 
3008 	low_paddr  = BITS0_TO_31(scn->paddr_rri_on_ddr);
3009 	high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);
3010 
3011 	HIF_DBG("%s using srri and drri from DDR", __func__);
3012 
3013 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
3014 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
3015 
3016 	for (i = 0; i < CE_COUNT; i++)
3017 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
3018 }
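
/*
 * Address-split example: paddr_rri_on_ddr is up to a 36-bit bus
 * address, so for a hypothetical allocation at 0x840001000 the macros
 * above yield low_paddr = 0x40001000 (bits 0..31) and high_paddr = 0x8
 * (bits 32..35); together the two register writes hand the CE hardware
 * the full DMA address at which to publish its read indices.
 */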
3019 #else
3020 /**
3021  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3022  *
3023  * @scn: hif_softc pointer
3024  *
3025  * This is a dummy implementation for platforms that don't
3026  * support this functionality.
3027  *
3028  * Return: None
3029  */
3030 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3031 {
3032 }
3033 #endif
3034 
3035 /**
3036  * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
3037  *                                    QMI command
3038  * @scn: hif context
3039  * @cfg: wlan enable config
3040  *
3041  * In case of Genoa, rri_over_ddr memory configuration is passed
3042  * to firmware through QMI configure command.
3043  */
3044 #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
3045 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3046 					   struct pld_wlan_enable_cfg *cfg)
3047 {
3048 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3049 		return;
3050 
3051 	cfg->rri_over_ddr_cfg_valid = true;
3052 	cfg->rri_over_ddr_cfg.base_addr_low =
3053 		 BITS0_TO_31(scn->paddr_rri_on_ddr);
3054 	cfg->rri_over_ddr_cfg.base_addr_high =
3055 		 BITS32_TO_35(scn->paddr_rri_on_ddr);
3056 }
3057 #else
3058 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3059 					   struct pld_wlan_enable_cfg *cfg)
3060 {
3061 }
3062 #endif
3063 
3064 /**
3065  * hif_wlan_enable(): call the platform driver to enable wlan
3066  * @scn: HIF Context
3067  *
3068  * This function passes the con_mode and CE configuration to
3069  * platform driver to enable wlan.
3070  *
3071  * Return: linux error code
3072  */
3073 int hif_wlan_enable(struct hif_softc *scn)
3074 {
3075 	struct pld_wlan_enable_cfg cfg;
3076 	enum pld_driver_mode mode;
3077 	uint32_t con_mode = hif_get_conparam(scn);
3078 
3079 	hif_get_target_ce_config(scn,
3080 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
3081 			&cfg.num_ce_tgt_cfg,
3082 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
3083 			&cfg.num_ce_svc_pipe_cfg,
3084 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
3085 			&cfg.num_shadow_reg_cfg);
3086 
3087 	/* translate from structure size to array size */
3088 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
3089 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
3090 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
3091 
3092 	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
3093 			      &cfg.num_shadow_reg_v2_cfg);
3094 
3095 	hif_print_hal_shadow_register_cfg(&cfg);
3096 
3097 	hif_update_rri_over_ddr_config(scn, &cfg);
3098 
3099 	if (QDF_GLOBAL_FTM_MODE == con_mode)
3100 		mode = PLD_FTM;
3101 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
3102 		mode = PLD_COLDBOOT_CALIBRATION;
3103 	else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode)
3104 		mode = PLD_FTM_COLDBOOT_CALIBRATION;
3105 	else if (QDF_IS_EPPING_ENABLED(con_mode))
3106 		mode = PLD_EPPING;
3107 	else
3108 		mode = PLD_MISSION;
3109 
3110 	if (BYPASS_QMI)
3111 		return 0;
3112 	else
3113 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode);
3114 }
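
/*
 * Note on the size-to-count translation above:
 * hif_get_target_ce_config() reports the table sizes in bytes, while
 * pld_wlan_enable() expects entry counts. For a hypothetical 12-entry
 * pipe table, num_ce_tgt_cfg starts out as
 * 12 * sizeof(struct CE_pipe_config) and the division recovers 12.
 */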
3115 
3116 #ifdef WLAN_FEATURE_EPPING
3117 
3118 #define CE_EPPING_USES_IRQ true
3119 
3120 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
3121 {
3122 	if (CE_EPPING_USES_IRQ)
3123 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
3124 	else
3125 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
3126 	hif_state->target_ce_config = target_ce_config_wlan_epping;
3127 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
3128 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
3129 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
3130 }
3131 #endif
3132 
3133 #ifdef QCN7605_SUPPORT
3134 static inline
3135 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3136 			       struct HIF_CE_state *hif_state)
3137 {
3138 	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
3139 	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
3140 	hif_state->target_ce_config_sz =
3141 				 sizeof(target_ce_config_wlan_qcn7605);
3142 	target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
3143 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
3144 	scn->ce_count = QCN7605_CE_COUNT;
3145 }
3146 #else
3147 static inline
3148 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3149 			       struct HIF_CE_state *hif_state)
3150 {
3151 	HIF_ERROR("QCN7605 not supported");
3152 }
3153 #endif
3154 
3155 #ifdef CE_SVC_CMN_INIT
3156 #ifdef QCA_WIFI_SUPPORT_SRNG
3157 static inline void hif_ce_service_init(void)
3158 {
3159 	ce_service_srng_init();
3160 }
3161 #else
3162 static inline void hif_ce_service_init(void)
3163 {
3164 	ce_service_legacy_init();
3165 }
3166 #endif
3167 #else
3168 static inline void hif_ce_service_init(void)
3169 {
3170 }
3171 #endif
3172 
3174 /**
3175  * hif_ce_prepare_config() - load the correct static tables.
3176  * @scn: hif context
3177  *
3178  * Epping uses different static attribute tables than mission mode.
3179  */
3180 void hif_ce_prepare_config(struct hif_softc *scn)
3181 {
3182 	uint32_t mode = hif_get_conparam(scn);
3183 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3184 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
3185 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3186 
3187 	hif_ce_service_init();
3188 	hif_state->ce_services = ce_services_attach(scn);
3189 
3190 	scn->ce_count = HOST_CE_COUNT;
3191 	/* if epping is enabled we need to use the epping configuration. */
3192 	if (QDF_IS_EPPING_ENABLED(mode)) {
3193 		hif_ce_prepare_epping_config(hif_state);
3194 		return;
3195 	}
3196 
3197 	switch (tgt_info->target_type) {
3198 	default:
3199 		hif_state->host_ce_config = host_ce_config_wlan;
3200 		hif_state->target_ce_config = target_ce_config_wlan;
3201 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
3202 		break;
3203 	case TARGET_TYPE_QCN7605:
3204 		hif_set_ce_config_qcn7605(scn, hif_state);
3205 		break;
3206 	case TARGET_TYPE_AR900B:
3207 	case TARGET_TYPE_QCA9984:
3208 	case TARGET_TYPE_IPQ4019:
3209 	case TARGET_TYPE_QCA9888:
3210 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3211 			hif_state->host_ce_config =
3212 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
3213 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3214 			hif_state->host_ce_config =
3215 				host_lowdesc_ce_cfg_wlan_ar900b;
3216 		} else {
3217 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
3218 		}
3219 
3220 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
3221 		hif_state->target_ce_config_sz =
3222 				sizeof(target_ce_config_wlan_ar900b);
3223 
3224 		break;
3225 
3226 	case TARGET_TYPE_AR9888:
3227 	case TARGET_TYPE_AR9888V2:
3228 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG))
3229 			hif_state->host_ce_config =
3230 				host_lowdesc_ce_cfg_wlan_ar9888;
3231 		else
3232 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
3233 
3234 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
3235 		hif_state->target_ce_config_sz =
3236 					sizeof(target_ce_config_wlan_ar9888);
3237 
3238 		break;
3239 
3240 	case TARGET_TYPE_QCA8074:
3241 	case TARGET_TYPE_QCA8074V2:
3242 	case TARGET_TYPE_QCA6018:
3243 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
3244 			hif_state->host_ce_config =
3245 					host_ce_config_wlan_qca8074_pci;
3246 			hif_state->target_ce_config =
3247 				target_ce_config_wlan_qca8074_pci;
3248 			hif_state->target_ce_config_sz =
3249 				sizeof(target_ce_config_wlan_qca8074_pci);
3250 		} else {
3251 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
3252 			hif_state->target_ce_config =
3253 					target_ce_config_wlan_qca8074;
3254 			hif_state->target_ce_config_sz =
3255 				sizeof(target_ce_config_wlan_qca8074);
3256 		}
3257 		break;
3258 	case TARGET_TYPE_QCA6290:
3259 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
3260 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
3261 		hif_state->target_ce_config_sz =
3262 					sizeof(target_ce_config_wlan_qca6290);
3263 
3264 		scn->ce_count = QCA_6290_CE_COUNT;
3265 		break;
3266 	case TARGET_TYPE_QCN9000:
3267 		hif_state->host_ce_config = host_ce_config_wlan_qcn9000;
3268 		hif_state->target_ce_config = target_ce_config_wlan_qcn9000;
3269 		hif_state->target_ce_config_sz =
3270 					sizeof(target_ce_config_wlan_qcn9000);
3271 		scn->ce_count = QCN_9000_CE_COUNT;
3272 		scn->disable_wake_irq = 1;
3273 		break;
3274 	case TARGET_TYPE_QCA5018:
3275 		hif_state->host_ce_config = host_ce_config_wlan_qca5018;
3276 		hif_state->target_ce_config = target_ce_config_wlan_qca5018;
3277 		hif_state->target_ce_config_sz =
3278 					sizeof(target_ce_config_wlan_qca5018);
3279 		scn->ce_count = QCA_5018_CE_COUNT;
3280 		break;
3281 	case TARGET_TYPE_QCA6390:
3282 		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
3283 		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
3284 		hif_state->target_ce_config_sz =
3285 					sizeof(target_ce_config_wlan_qca6390);
3286 
3287 		scn->ce_count = QCA_6390_CE_COUNT;
3288 		break;
3289 	case TARGET_TYPE_QCA6490:
3290 		hif_state->host_ce_config = host_ce_config_wlan_qca6490;
3291 		hif_state->target_ce_config = target_ce_config_wlan_qca6490;
3292 		hif_state->target_ce_config_sz =
3293 					sizeof(target_ce_config_wlan_qca6490);
3294 
3295 		scn->ce_count = QCA_6490_CE_COUNT;
3296 		break;
3297 	case TARGET_TYPE_QCA6750:
3298 		hif_state->host_ce_config = host_ce_config_wlan_qca6750;
3299 		hif_state->target_ce_config = target_ce_config_wlan_qca6750;
3300 		hif_state->target_ce_config_sz =
3301 					sizeof(target_ce_config_wlan_qca6750);
3302 
3303 		scn->ce_count = QCA_6750_CE_COUNT;
3304 		break;
3305 	case TARGET_TYPE_ADRASTEA:
3306 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3307 			hif_state->host_ce_config =
3308 				host_lowdesc_ce_config_wlan_adrastea_nopktlog;
3309 			hif_state->target_ce_config =
3310 			       target_lowdesc_ce_config_wlan_adrastea_nopktlog;
3311 			hif_state->target_ce_config_sz =
3312 			sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog);
3313 		} else {
3314 			hif_state->host_ce_config =
3315 				host_ce_config_wlan_adrastea;
3316 			hif_state->target_ce_config =
3317 					target_ce_config_wlan_adrastea;
3318 			hif_state->target_ce_config_sz =
3319 					sizeof(target_ce_config_wlan_adrastea);
3320 		}
3321 		break;
3322 
3323 	}
3324 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
3325 }
3326 
3327 /**
3328  * hif_ce_open() - do ce specific allocations
3329  * @hif_sc: pointer to hif context
3330  *
3331  * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_NOMEM
3332  */
3333 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
3334 {
3335 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3336 
3337 	qdf_spinlock_create(&hif_state->irq_reg_lock);
3338 	qdf_spinlock_create(&hif_state->keep_awake_lock);
3339 	return QDF_STATUS_SUCCESS;
3340 }
3341 
3342 /**
3343  * hif_ce_close() - do ce specific free
3344  * @hif_sc: pointer to hif context
3345  */
3346 void hif_ce_close(struct hif_softc *hif_sc)
3347 {
3348 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3349 
3350 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
3351 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
3352 }
3353 
3354 /**
3355  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
3356  * @hif_sc: hif context
3357  *
3358  * uses state variables to support cleaning up when hif_config_ce fails.
3359  */
3360 void hif_unconfig_ce(struct hif_softc *hif_sc)
3361 {
3362 	int pipe_num;
3363 	struct HIF_CE_pipe_info *pipe_info;
3364 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3365 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
3366 
3367 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3368 		pipe_info = &hif_state->pipe_info[pipe_num];
3369 		if (pipe_info->ce_hdl) {
3370 			ce_unregister_irq(hif_state, (1 << pipe_num));
3371 		}
3372 	}
3373 	deinit_tasklet_workers(hif_hdl);
3374 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3375 		pipe_info = &hif_state->pipe_info[pipe_num];
3376 		if (pipe_info->ce_hdl) {
3377 			ce_fini(pipe_info->ce_hdl);
3378 			pipe_info->ce_hdl = NULL;
3379 			pipe_info->buf_sz = 0;
3380 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
3381 		}
3382 	}
3383 	if (hif_sc->athdiag_procfs_inited) {
3384 		athdiag_procfs_remove();
3385 		hif_sc->athdiag_procfs_inited = false;
3386 	}
3387 }
3388 
3389 #ifdef CONFIG_BYPASS_QMI
3390 #ifdef QCN7605_SUPPORT
3391 /**
3392  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3393  * @scn: pointer to HIF structure
3394  *
3395  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3396  *
3397  * Return: void
3398  */
3399 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3400 {
3401 	void *target_va;
3402 	phys_addr_t target_pa;
3403 	struct ce_info *ce_info_ptr;
3404 	uint32_t msi_data_start;
3405 	uint32_t msi_data_count;
3406 	uint32_t msi_irq_start;
3407 	uint32_t i = 0;
3408 	int ret;
3409 
3410 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev,
3411 					     scn->qdf_dev->dev,
3412 					     FW_SHARED_MEM +
3413 					     sizeof(struct ce_info),
3414 					     &target_pa);
3415 	if (!target_va)
3416 		return;
3417 
3418 	ce_info_ptr = (struct ce_info *)target_va;
3419 
3420 	if (scn->vaddr_rri_on_ddr) {
3421 		ce_info_ptr->rri_over_ddr_low_paddr  =
3422 			 BITS0_TO_31(scn->paddr_rri_on_ddr);
3423 		ce_info_ptr->rri_over_ddr_high_paddr =
3424 			 BITS32_TO_35(scn->paddr_rri_on_ddr);
3425 	}
3426 
3427 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3428 					  &msi_data_count, &msi_data_start,
3429 					  &msi_irq_start);
3430 	if (ret) {
3431 		hif_err("Failed to get CE msi config");
3432 		return;
3433 	}
3434 
3435 	for (i = 0; i < CE_COUNT_MAX; i++) {
3436 		ce_info_ptr->cfg[i].ce_id = i;
3437 		ce_info_ptr->cfg[i].msi_vector =
3438 			 (i % msi_data_count) + msi_irq_start;
3439 	}
3440 
3441 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3442 	hif_info("target va %pK target pa %pa", target_va, &target_pa);
3443 }
3444 #else
3445 /**
3446  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3447  * @scn: pointer to HIF structure
3448  *
3449  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3450  *
3451  * Return: void
3452  */
3453 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3454 {
3455 	void *target_va;
3456 	phys_addr_t target_pa;
3457 
3458 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
3459 				FW_SHARED_MEM, &target_pa);
3460 	if (!target_va) {
3461 		HIF_TRACE("Memory allocation failed could not post target buf");
3462 		return;
3463 	}
3464 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3465 	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
3466 }
3467 #endif
3468 
3469 #else
3470 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
3471 {
3472 }
3473 #endif
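
/*
 * Worked example of the bypass-QMI CE/MSI setup above (all values
 * illustrative): with msi_data_count = 3 and msi_irq_start = 40, the
 * (i % msi_data_count) + msi_irq_start assignment maps CE ids onto MSI
 * vectors round-robin:
 *
 *	CE0 -> 40, CE1 -> 41, CE2 -> 42, CE3 -> 40, ...
 *
 * Similarly, a 36-bit RRI physical address such as 0x8_4000_0000 is
 * split for firmware as BITS0_TO_31() -> 0x40000000 (low 32 bits) and
 * BITS32_TO_35() -> 0x8 (upper 4 bits).
 */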
3474 
3475 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
3476 				bool wait_for_it)
3477 {
3478 	/* todo: implement sleep state handling for SRNG based targets */
3479 	return 0;
3480 }
3481 
3482 /**
3483  * hif_config_ce() - configure copy engines
3484  * @scn: hif context
3485  *
3486  * Prepares the firmware, copy engine hardware and host software
3487  * according to the attributes selected by hif_ce_prepare_config.
3488  *
3489  * Also calls athdiag_procfs_init().
3490  *
3491  * Return: 0 for success, nonzero for failure.
3492  */
3493 int hif_config_ce(struct hif_softc *scn)
3494 {
3495 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3496 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3497 	struct HIF_CE_pipe_info *pipe_info;
3498 	int pipe_num;
3499 	struct CE_state *ce_state = NULL;
3500 
3501 #ifdef ADRASTEA_SHADOW_REGISTERS
3502 	int i;
3503 #endif
3504 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
3505 
3506 	scn->notice_send = true;
3507 	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
3508 
3509 	hif_post_static_buf_to_target(scn);
3510 
3511 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
3512 
3513 	hif_config_rri_on_ddr(scn);
3514 
3515 	if (ce_srng_based(scn))
3516 		scn->bus_ops.hif_target_sleep_state_adjust =
3517 			&hif_srng_sleep_state_adjust;
3518 
3519 	/* Initialise the CE debug history sysfs interface inputs, ce_id and
3520 	 * index, and disable data storing.
3521 	 */
3522 	reset_ce_debug_history(scn);
3523 
3524 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3525 		struct CE_attr *attr;
3526 
3527 		pipe_info = &hif_state->pipe_info[pipe_num];
3528 		pipe_info->pipe_num = pipe_num;
3529 		pipe_info->HIF_CE_state = hif_state;
3530 		attr = &hif_state->host_ce_config[pipe_num];
3531 
3532 		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
3533 		ce_state = scn->ce_id_to_state[pipe_num];
3534 		if (!ce_state) {
3535 			A_TARGET_ACCESS_UNLIKELY(scn);
3536 			goto err;
3537 		}
3538 		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
3539 		QDF_ASSERT(pipe_info->ce_hdl);
3540 		if (!pipe_info->ce_hdl) {
3541 			rv = QDF_STATUS_E_FAILURE;
3542 			A_TARGET_ACCESS_UNLIKELY(scn);
3543 			goto err;
3544 		}
3545 
3546 		ce_state->lro_data = qdf_lro_init();
3547 
3548 		if (attr->flags & CE_ATTR_DIAG) {
3549 			/* Reserve the last CE for
3550 			 * Diagnostic Window support
3551 			 */
3552 			hif_state->ce_diag = pipe_info->ce_hdl;
3553 			continue;
3554 		}
3555 
3556 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3557 				(ce_state->htt_rx_data))
3558 			continue;
3559 
3560 		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
3561 		if (attr->dest_nentries > 0) {
3562 			atomic_set(&pipe_info->recv_bufs_needed,
3563 				   init_buffer_count(attr->dest_nentries - 1));
3564 			/* SRNG based CE has one entry less */
3565 			if (ce_srng_based(scn))
3566 				atomic_dec(&pipe_info->recv_bufs_needed);
3567 		} else {
3568 			atomic_set(&pipe_info->recv_bufs_needed, 0);
3569 		}
3570 		ce_tasklet_init(hif_state, (1 << pipe_num));
3571 		ce_register_irq(hif_state, (1 << pipe_num));
3572 	}
3573 
3574 	if (athdiag_procfs_init(scn) != 0) {
3575 		A_TARGET_ACCESS_UNLIKELY(scn);
3576 		goto err;
3577 	}
3578 	scn->athdiag_procfs_inited = true;
3579 
3580 	HIF_DBG("%s: ce_init done", __func__);
3581 
3582 	init_tasklet_workers(hif_hdl);
3583 
3584 	HIF_DBG("%s: X, ret = %d", __func__, rv);
3585 
3586 #ifdef ADRASTEA_SHADOW_REGISTERS
3587 	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
3588 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
3589 		HIF_DBG("%s Shadow Register%d is mapped to address %x",
3590 			  __func__, i,
3591 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
3592 	}
3593 #endif
3594 
3595 	return rv != QDF_STATUS_SUCCESS; /* 0 on success, nonzero on failure */
3596 
3597 err:
3598 	/* Failure, so clean up */
3599 	hif_unconfig_ce(scn);
3600 	HIF_TRACE("%s: X, ret = %d", __func__, rv);
3601 	return 1; /* nonzero return indicates failure */
3602 }
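
/*
 * Worked example of the receive-buffer accounting in hif_config_ce()
 * (illustrative; init_buffer_count() may scale the value in some build
 * configurations): for a pipe with attr->dest_nentries = 512, at most
 * 511 buffers are posted so that a full ring stays distinguishable from
 * an empty one; SRNG based CEs reserve one further entry, leaving 510.
 */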
3603 
3604 #ifdef IPA_OFFLOAD
3605 /**
3606  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
3607  * @scn: bus context
3608  * @ce_sr: copyengine source ring shared memory info
3609  * @ce_sr_ring_size: copyengine source ring size
3610  * @ce_reg_paddr: copyengine register physical address
3611  *
3612  * When the IPA micro controller data path offload feature is enabled,
3613  * HIF releases copy engine resource information to the IPA UC, which
3614  * then accesses the hardware resources using that information.
3615  *
3616  * Return: None
3617  */
3618 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
3619 			     qdf_shared_mem_t **ce_sr,
3620 			     uint32_t *ce_sr_ring_size,
3621 			     qdf_dma_addr_t *ce_reg_paddr)
3622 {
3623 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3624 	struct HIF_CE_pipe_info *pipe_info =
3625 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
3626 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3627 
3628 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
3629 			    ce_reg_paddr);
3630 }
3631 #endif /* IPA_OFFLOAD */
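
/*
 * Usage sketch (illustrative only, meaningful when IPA_OFFLOAD is
 * defined): how an IPA client could pull the CE resources exported
 * above. The function name is hypothetical.
 */
#if 0
static void example_ipa_fetch_ce_resource(struct hif_softc *scn)
{
	qdf_shared_mem_t *ce_sr;
	uint32_t ce_sr_ring_size;
	qdf_dma_addr_t ce_reg_paddr;

	hif_ce_ipa_get_ce_resource(scn, &ce_sr, &ce_sr_ring_size,
				   &ce_reg_paddr);
	/* ce_sr and ce_reg_paddr are then handed to the IPA UC driver */
}
#endif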
3632 
3633 
3634 #ifdef ADRASTEA_SHADOW_REGISTERS
3635 
3636 /*
3637  * Current shadow register config
3638  *
3639  * -----------------------------------------------------------
3640  * Shadow Register      |     CE   |    src/dst write index
3641  * -----------------------------------------------------------
3642  *         0            |     0    |           src
3643  *         1     No Config - Doesn't point to anything
3644  *         2     No Config - Doesn't point to anything
3645  *         3            |     3    |           src
3646  *         4            |     4    |           src
3647  *         5            |     5    |           src
3648  *         6     No Config - Doesn't point to anything
3649  *         7            |     7    |           src
3650  *         8     No Config - Doesn't point to anything
3651  *         9     No Config - Doesn't point to anything
3652  *         10    No Config - Doesn't point to anything
3653  *         11    No Config - Doesn't point to anything
3654  * -----------------------------------------------------------
3655  *         12    No Config - Doesn't point to anything
3656  *         13           |     1    |           dst
3657  *         14           |     2    |           dst
3658  *         15    No Config - Doesn't point to anything
3659  *         16    No Config - Doesn't point to anything
3660  *         17    No Config - Doesn't point to anything
3661  *         18    No Config - Doesn't point to anything
3662  *         19           |     7    |           dst
3663  *         20           |     8    |           dst
3664  *         21    No Config - Doesn't point to anything
3665  *         22    No Config - Doesn't point to anything
3666  *         23    No Config - Doesn't point to anything
3667  * -----------------------------------------------------------
3668  *
3669  *
3670  * ToDo - Move shadow register config to following in the future
3671  * This helps free up a block of shadow registers towards the end.
3672  * Can be used for other purposes
3673  *
3674  * -----------------------------------------------------------
3675  * Shadow Register      |     CE   |    src/dst write index
3676  * -----------------------------------------------------------
3677  *      0            |     0    |           src
3678  *      1            |     3    |           src
3679  *      2            |     4    |           src
3680  *      3            |     5    |           src
3681  *      4            |     7    |           src
3682  * -----------------------------------------------------------
3683  *      5            |     1    |           dst
3684  *      6            |     2    |           dst
3685  *      7            |     7    |           dst
3686  *      8            |     8    |           dst
3687  * -----------------------------------------------------------
3688  *      9     No Config - Doesn't point to anything
3689  *      12    No Config - Doesn't point to anything
3690  *      13    No Config - Doesn't point to anything
3691  *      14    No Config - Doesn't point to anything
3692  *      15    No Config - Doesn't point to anything
3693  *      16    No Config - Doesn't point to anything
3694  *      17    No Config - Doesn't point to anything
3695  *      18    No Config - Doesn't point to anything
3696  *      19    No Config - Doesn't point to anything
3697  *      20    No Config - Doesn't point to anything
3698  *      21    No Config - Doesn't point to anything
3699  *      22    No Config - Doesn't point to anything
3700  *      23    No Config - Doesn't point to anything
3701  * -----------------------------------------------------------
3702 */
3703 #ifndef QCN7605_SUPPORT
3704 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3705 {
3706 	u32 addr = 0;
3707 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3708 
3709 	switch (ce) {
3710 	case 0:
3711 		addr = SHADOW_VALUE0;
3712 		break;
3713 	case 3:
3714 		addr = SHADOW_VALUE3;
3715 		break;
3716 	case 4:
3717 		addr = SHADOW_VALUE4;
3718 		break;
3719 	case 5:
3720 		addr = SHADOW_VALUE5;
3721 		break;
3722 	case 7:
3723 		addr = SHADOW_VALUE7;
3724 		break;
3725 	default:
3726 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3727 		QDF_ASSERT(0);
3728 	}
3729 	return addr;
3730 
3731 }
3732 
3733 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3734 {
3735 	u32 addr = 0;
3736 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3737 
3738 	switch (ce) {
3739 	case 1:
3740 		addr = SHADOW_VALUE13;
3741 		break;
3742 	case 2:
3743 		addr = SHADOW_VALUE14;
3744 		break;
3745 	case 5:
3746 		addr = SHADOW_VALUE17;
3747 		break;
3748 	case 7:
3749 		addr = SHADOW_VALUE19;
3750 		break;
3751 	case 8:
3752 		addr = SHADOW_VALUE20;
3753 		break;
3754 	case 9:
3755 		addr = SHADOW_VALUE21;
3756 		break;
3757 	case 10:
3758 		addr = SHADOW_VALUE22;
3759 		break;
3760 	case 11:
3761 		addr = SHADOW_VALUE23;
3762 		break;
3763 	default:
3764 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3765 		QDF_ASSERT(0);
3766 	}
3767 
3768 	return addr;
3769 
3770 }
3771 #else
3772 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3773 {
3774 	u32 addr = 0;
3775 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3776 
3777 	switch (ce) {
3778 	case 0:
3779 		addr = SHADOW_VALUE0;
3780 		break;
3781 	case 4:
3782 		addr = SHADOW_VALUE4;
3783 		break;
3784 	case 5:
3785 		addr = SHADOW_VALUE5;
3786 		break;
3787 	default:
3788 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3789 		QDF_ASSERT(0);
3790 	}
3791 	return addr;
3792 }
3793 
3794 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3795 {
3796 	u32 addr = 0;
3797 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3798 
3799 	switch (ce) {
3800 	case 1:
3801 		addr = SHADOW_VALUE13;
3802 		break;
3803 	case 2:
3804 		addr = SHADOW_VALUE14;
3805 		break;
3806 	case 3:
3807 		addr = SHADOW_VALUE15;
3808 		break;
3809 	case 5:
3810 		addr = SHADOW_VALUE17;
3811 		break;
3812 	case 7:
3813 		addr = SHADOW_VALUE19;
3814 		break;
3815 	case 8:
3816 		addr = SHADOW_VALUE20;
3817 		break;
3818 	case 9:
3819 		addr = SHADOW_VALUE21;
3820 		break;
3821 	case 10:
3822 		addr = SHADOW_VALUE22;
3823 		break;
3824 	case 11:
3825 		addr = SHADOW_VALUE23;
3826 		break;
3827 	default:
3828 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3829 		QDF_ASSERT(0);
3830 	}
3831 
3832 	return addr;
3833 }
3834 #endif
3835 #endif
3836 
3837 #if defined(FEATURE_LRO)
3838 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
3839 {
3840 	struct CE_state *ce_state;
3841 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3842 
3843 	ce_state = scn->ce_id_to_state[ctx_id];
3844 
3845 	return ce_state->lro_data;
3846 }
3847 #endif
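
/*
 * Usage sketch (illustrative only): the per-CE LRO context registered in
 * hif_config_ce() can be fetched by the rx path using the CE id, e.g.:
 *
 *	void *lro_ctx = hif_ce_get_lro_ctx(hif_hdl, ce_id);
 */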
3848 
3849 /**
3850  * hif_map_service_to_pipe() - returns the CE ids pertaining to
3851  * this service
3852  * @hif_hdl: hif_opaque_softc pointer.
3853  * @svc_id: Service ID for which the mapping is needed.
3854  * @ul_pipe: address of the container in which ul pipe is returned.
3855  * @dl_pipe: address of the container in which dl pipe is returned.
3856  * @ul_is_polled: address of the container in which a bool
3857  *			indicating if the UL CE for this service
3858  *			is polled is returned.
3859  * @dl_is_polled: address of the container in which a bool
3860  *			indicating if the DL CE for this service
3861  *			is polled is returned.
3862  *
3863  * Return: 0 if the service has been found in the table, else -EINVAL.
3864  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
3865  *         There will be debug logs if either leg has not been updated
3866  *         because it missed the entry in the table (but this is not an error).
3867  */
3868 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
3869 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
3870 			int *dl_is_polled)
3871 {
3872 	int status = -EINVAL;
3873 	unsigned int i;
3874 	struct service_to_pipe element;
3875 	struct service_to_pipe *tgt_svc_map_to_use;
3876 	uint32_t sz_tgt_svc_map_to_use;
3877 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3878 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3879 	bool dl_updated = false;
3880 	bool ul_updated = false;
3881 
3882 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
3883 				       &sz_tgt_svc_map_to_use);
3884 
3885 	*dl_is_polled = 0;  /* polling for received messages not supported */
3886 
3887 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
3888 
3889 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
3890 		if (element.service_id == svc_id) {
3891 			if (element.pipedir == PIPEDIR_OUT) {
3892 				*ul_pipe = element.pipenum;
3893 				*ul_is_polled =
3894 					(hif_state->host_ce_config[*ul_pipe].flags &
3895 					 CE_ATTR_DISABLE_INTR) != 0;
3896 				ul_updated = true;
3897 			} else if (element.pipedir == PIPEDIR_IN) {
3898 				*dl_pipe = element.pipenum;
3899 				dl_updated = true;
3900 			}
3901 			status = 0;
3902 		}
3903 	}
3904 	if (!ul_updated)
3905 		HIF_DBG("ul pipe is NOT updated for service %d", svc_id);
3906 	if (!dl_updated)
3907 		HIF_DBG("dl pipe is NOT updated for service %d", svc_id);
3908 
3909 	return status;
3910 }
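
/*
 * Usage sketch (illustrative only): resolving the pipe pair for a
 * service. WMI_CONTROL_SVC is assumed to be visible in this translation
 * unit; any service id from the target service map works the same way.
 */
#if 0
static void example_map_wmi_pipes(struct hif_opaque_softc *hif_hdl)
{
	uint8_t ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	if (hif_map_service_to_pipe(hif_hdl, WMI_CONTROL_SVC,
				    &ul_pipe, &dl_pipe,
				    &ul_is_polled, &dl_is_polled))
		HIF_ERROR("no pipe mapping for WMI_CONTROL_SVC");
	else
		HIF_DBG("WMI ul pipe %d, dl pipe %d", ul_pipe, dl_pipe);
}
#endif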
3911 
3912 #ifdef SHADOW_REG_DEBUG
3913 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
3914 		uint32_t CE_ctrl_addr)
3915 {
3916 	uint32_t read_from_hw, srri_from_ddr = 0;
3917 
3918 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
3919 
3920 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3921 
3922 	if (read_from_hw != srri_from_ddr) {
3923 		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3924 		       __func__, srri_from_ddr, read_from_hw,
3925 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3926 		QDF_ASSERT(0);
3927 	}
3928 	return srri_from_ddr;
3929 }
3930 
3931 
3932 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
3933 		uint32_t CE_ctrl_addr)
3934 {
3935 	uint32_t read_from_hw, drri_from_ddr = 0;
3936 
3937 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
3938 
3939 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3940 
3941 	if (read_from_hw != drri_from_ddr) {
3942 		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3943 		       drri_from_ddr, read_from_hw,
3944 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3945 		QDF_ASSERT(0);
3946 	}
3947 	return drri_from_ddr;
3948 }
3949 
3950 #endif
3951 
3952 /**
3953  * hif_dump_ce_registers() - dump ce registers
3954  * @scn: hif_opaque_softc pointer.
3955  *
3956  * Output the copy engine registers
3957  *
3958  * Return: 0 for success or error code
3959  */
3960 int hif_dump_ce_registers(struct hif_softc *scn)
3961 {
3962 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3963 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
3964 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
3965 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
3966 	uint16_t i;
3967 	QDF_STATUS status;
3968 
3969 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
3970 		if (!scn->ce_id_to_state[i]) {
3971 			HIF_DBG("CE%d not used.", i);
3972 			continue;
3973 		}
3974 
3975 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
3976 					   (uint8_t *) &ce_reg_values[0],
3977 					   ce_reg_word_size * sizeof(uint32_t));
3978 
3979 		if (status != QDF_STATUS_SUCCESS) {
3980 			HIF_ERROR("Dumping CE register failed!");
3981 			return -EACCES;
3982 		}
3983 		HIF_ERROR("CE%d=>\n", i);
3984 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
3985 				   (uint8_t *) &ce_reg_values[0],
3986 				   ce_reg_word_size * sizeof(uint32_t));
3987 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
3988 				+ SR_WR_INDEX_ADDRESS),
3989 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
3990 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
3991 				+ CURRENT_SRRI_ADDRESS),
3992 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
3993 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
3994 				+ DST_WR_INDEX_ADDRESS),
3995 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
3996 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
3997 				+ CURRENT_DRRI_ADDRESS),
3998 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
3999 		qdf_print("---");
4000 	}
4001 	return 0;
4002 }
4003 qdf_export_symbol(hif_dump_ce_registers);
4004 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
4005 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
4006 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
4007 {
4008 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4009 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
4010 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
4011 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
4012 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
4013 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
4014 	struct CE_ring_state *src_ring = ce_state->src_ring;
4015 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
4016 
4017 	if (src_ring) {
4018 		hif_info->ul_pipe.nentries = src_ring->nentries;
4019 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
4020 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
4021 		hif_info->ul_pipe.write_index = src_ring->write_index;
4022 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
4023 		hif_info->ul_pipe.base_addr_CE_space =
4024 			src_ring->base_addr_CE_space;
4025 		hif_info->ul_pipe.base_addr_owner_space =
4026 			src_ring->base_addr_owner_space;
4027 	}
4028 
4029 
4030 	if (dest_ring) {
4031 		hif_info->dl_pipe.nentries = dest_ring->nentries;
4032 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
4033 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
4034 		hif_info->dl_pipe.write_index = dest_ring->write_index;
4035 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
4036 		hif_info->dl_pipe.base_addr_CE_space =
4037 			dest_ring->base_addr_CE_space;
4038 		hif_info->dl_pipe.base_addr_owner_space =
4039 			dest_ring->base_addr_owner_space;
4040 	}
4041 
4042 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
4043 	hif_info->ctrl_addr = ce_state->ctrl_addr;
4044 
4045 	return hif_info;
4046 }
4047 qdf_export_symbol(hif_get_addl_pipe_info);
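
/*
 * Usage sketch (illustrative only): deriving source ring occupancy from
 * the snapshot returned above. The masked subtraction mirrors how CE
 * ring indices wrap; the helper name is hypothetical.
 */
#if 0
static uint32_t example_ul_ring_in_use(struct hif_opaque_softc *osc,
				       uint32_t pipe)
{
	struct hif_pipe_addl_info info = {0};

	hif_get_addl_pipe_info(osc, &info, pipe);
	return (info.ul_pipe.write_index - info.ul_pipe.sw_index) &
		info.ul_pipe.nentries_mask;
}
#endif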
4048 
4049 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
4050 {
4051 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4052 
4053 	scn->nss_wifi_ol_mode = mode;
4054 	return 0;
4055 }
4056 qdf_export_symbol(hif_set_nss_wifiol_mode);
4057 #endif
4058 
4059 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
4060 {
4061 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4062 	scn->hif_attribute = hif_attrib;
4063 }
4064 
4065 
4066 /* disable interrupts (only applicable to the legacy copy engine currently) */
4067 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
4068 {
4069 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4070 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
4071 	uint32_t ctrl_addr = CE_state->ctrl_addr;
4072 
4073 	Q_TARGET_ACCESS_BEGIN(scn);
4074 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
4075 	Q_TARGET_ACCESS_END(scn);
4076 }
4077 qdf_export_symbol(hif_disable_interrupt);
4078 
4079 /**
4080  * hif_fw_event_handler() - hif fw event handler
4081  * @hif_state: pointer to hif ce state structure
4082  *
4083  * Process fw events and raise HTC callback to process fw events.
4084  *
4085  * Return: none
4086  */
4087 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
4088 {
4089 	struct hif_msg_callbacks *msg_callbacks =
4090 		&hif_state->msg_callbacks_current;
4091 
4092 	if (!msg_callbacks->fwEventHandler)
4093 		return;
4094 
4095 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
4096 			QDF_STATUS_E_FAILURE);
4097 }
4098 
4099 #ifndef QCA_WIFI_3_0
4100 /**
4101  * hif_fw_interrupt_handler() - FW interrupt handler
4102  * @irq: irq number
4103  * @arg: the user pointer
4104  *
4105  * Called from the PCI interrupt handler when the Target raises a
4106  * firmware-generated interrupt to the Host.
4107  *
4108  * Only registered for legacy CE devices.
4109  *
4110  * Return: status of handled irq
4111  */
4112 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
4113 {
4114 	struct hif_softc *scn = arg;
4115 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4116 	uint32_t fw_indicator_address, fw_indicator;
4117 
4118 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
4119 		return ATH_ISR_NOSCHED;
4120 
4121 	fw_indicator_address = hif_state->fw_indicator_address;
4122 	/* For sudden unplug this will return ~0 */
4123 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
4124 
4125 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
4126 		/* ACK: clear Target-side pending event */
4127 		A_TARGET_WRITE(scn, fw_indicator_address,
4128 			       fw_indicator & ~FW_IND_EVENT_PENDING);
4129 		if (Q_TARGET_ACCESS_END(scn) < 0)
4130 			return ATH_ISR_SCHED;
4131 
4132 		if (hif_state->started) {
4133 			hif_fw_event_handler(hif_state);
4134 		} else {
4135 			/*
4136 			 * Probable Target failure before we're prepared
4137 			 * to handle it.  Generally unexpected.
4138 			 * fw_indicator is used as a bitmap, defined as below:
4139 			 *     FW_IND_EVENT_PENDING    0x1
4140 			 *     FW_IND_INITIALIZED      0x2
4141 			 *     FW_IND_NEEDRECOVER      0x4
4142 			 */
4143 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
4144 				("%s: Early firmware event indicated 0x%x\n",
4145 				 __func__, fw_indicator));
4146 		}
4147 	} else {
4148 		if (Q_TARGET_ACCESS_END(scn) < 0)
4149 			return ATH_ISR_SCHED;
4150 	}
4151 
4152 	return ATH_ISR_SCHED;
4153 }
4154 #else
4155 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
4156 {
4157 	return ATH_ISR_SCHED;
4158 }
4159 #endif /* #ifndef QCA_WIFI_3_0 */
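
/*
 * Worked example of the fw_indicator handling above (illustrative): a
 * read of 0x3 means FW_IND_EVENT_PENDING | FW_IND_INITIALIZED, i.e. an
 * initialized firmware has an event for the host; the legacy handler
 * ACKs it by writing back 0x3 & ~FW_IND_EVENT_PENDING = 0x2.
 */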
4160 
4161 
4162 /**
4163  * hif_wlan_disable(): call the platform driver to disable wlan
4164  * @scn: HIF Context
4165  *
4166  * This function passes the con_mode to platform driver to disable
4167  * wlan.
4168  *
4169  * Return: void
4170  */
4171 void hif_wlan_disable(struct hif_softc *scn)
4172 {
4173 	enum pld_driver_mode mode;
4174 	uint32_t con_mode = hif_get_conparam(scn);
4175 
4176 	if (scn->target_status == TARGET_STATUS_RESET)
4177 		return;
4178 
4179 	if (QDF_GLOBAL_FTM_MODE == con_mode)
4180 		mode = PLD_FTM;
4181 	else if (QDF_IS_EPPING_ENABLED(con_mode))
4182 		mode = PLD_EPPING;
4183 	else
4184 		mode = PLD_MISSION;
4185 
4186 	pld_wlan_disable(scn->qdf_dev->dev, mode);
4187 }
4188 
4189 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
4190 {
4191 	int status;
4192 	uint8_t ul_pipe, dl_pipe;
4193 	int ul_is_polled, dl_is_polled;
4194 
4195 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
4196 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
4197 					 HTC_CTRL_RSVD_SVC,
4198 					 &ul_pipe, &dl_pipe,
4199 					 &ul_is_polled, &dl_is_polled);
4200 	if (status) {
4201 		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
4202 		return status;
4203 	}
4204 
4205 	*ce_id = dl_pipe;
4206 
4207 	return 0;
4208 }
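
/*
 * Usage sketch (illustrative only): a bus layer looking up the wake CE
 * before arming wake interrupts. The function name is hypothetical.
 */
#if 0
static void example_show_wake_ce(struct hif_softc *scn)
{
	uint8_t wake_ce_id;

	if (hif_get_wake_ce_id(scn, &wake_ce_id))
		return;

	HIF_DBG("wake CE is %d", wake_ce_id);
}
#endif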
4209 
4210 #ifdef HIF_CE_LOG_INFO
4211 /**
4212  * ce_get_index_info(): Get CE index info
4213  * @scn: HIF Context
4214  * @ce_state: CE opaque handle
4215  * @info: CE info
4216  *
4217  * Return: 0 for success and non-zero for failure
4218  */
4219 static
4220 int ce_get_index_info(struct hif_softc *scn, void *ce_state,
4221 		      struct ce_index *info)
4222 {
4223 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4224 
4225 	return hif_state->ce_services->ce_get_index_info(scn, ce_state, info);
4226 }
4227 
4228 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
4229 		     unsigned int *offset)
4230 {
4231 	struct hang_event_info info = {0};
4232 	static uint32_t tracked_ce = BIT(CE_ID_1) | BIT(CE_ID_2) |
4233 		BIT(CE_ID_3) | BIT(CE_ID_4) | BIT(CE_ID_9) | BIT(CE_ID_10);
4234 	uint8_t curr_index = 0;
4235 	uint8_t i;
4236 	uint16_t size;
4237 
4238 	info.active_tasklet_count = qdf_atomic_read(&scn->active_tasklet_cnt);
4239 	info.active_grp_tasklet_cnt =
4240 				qdf_atomic_read(&scn->active_grp_tasklet_cnt);
4241 
4242 	for (i = 0; i < scn->ce_count; i++) {
4243 		if (!(tracked_ce & BIT(i)) || !scn->ce_id_to_state[i])
4244 			continue;
4245 
4246 		if (ce_get_index_info(scn, scn->ce_id_to_state[i],
4247 				      &info.ce_info[curr_index]))
4248 			continue;
4249 
4250 		curr_index++;
4251 	}
4252 
4253 	info.ce_count = curr_index;
4254 	size = sizeof(info) -
4255 		(CE_COUNT_MAX - info.ce_count) * sizeof(struct ce_index);
4256 
4257 	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_CE_INFO,
4258 			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);
4259 
4260 	qdf_mem_copy(data + *offset, &info, size);
4261 	*offset = *offset + size;
4262 }
4263 #endif
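
/*
 * Worked example of the TLV sizing in hif_log_ce_info() (illustrative,
 * assuming CE_COUNT_MAX = 12): if only 6 tracked CEs report an index,
 * size = sizeof(info) - (12 - 6) * sizeof(struct ce_index), so the
 * unpopulated tail of ce_info[] is dropped from the hang event record.
 */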
4264