xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision f28396d060cff5c6519f883cb28ae0116ce479f1)
1 /*
2  * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "ce_api.h"
33 #include "qdf_trace.h"
34 #include "pld_common.h"
35 #include "hif_debug.h"
36 #include "ce_internal.h"
37 #include "ce_reg.h"
38 #include "ce_assignment.h"
39 #include "ce_tasklet.h"
40 #include "qdf_module.h"
41 
42 #define CE_POLL_TIMEOUT 10      /* ms */
43 
44 #define AGC_DUMP         1
45 #define CHANINFO_DUMP    2
46 #define BB_WATCHDOG_DUMP 3
47 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
48 #define PCIE_ACCESS_DUMP 4
49 #endif
50 #include "mp_dev.h"
51 
52 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
53 	defined(QCA_WIFI_QCA6018)) && !defined(QCA_WIFI_SUPPORT_SRNG)
54 #define QCA_WIFI_SUPPORT_SRNG
55 #endif
56 
57 /* Forward references */
58 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
59 
60 /*
61  * Fix EV118783: poll to check whether a BMI response has arrived,
62  * rather than only waiting for the interrupt, which may be lost.
63  */
64 /* #define BMI_RSP_POLLING */
65 #define BMI_RSP_TO_MILLISEC  1000
66 
67 #ifdef CONFIG_BYPASS_QMI
68 #define BYPASS_QMI 1
69 #else
70 #define BYPASS_QMI 0
71 #endif
72 
73 #ifdef ENABLE_10_4_FW_HDR
74 #if (ENABLE_10_4_FW_HDR == 1)
75 #define WDI_IPA_SERVICE_GROUP 5
76 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
77 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
78 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
79 #endif /* ENABLE_10_4_FW_HDR == 1 */
80 #endif /* ENABLE_10_4_FW_HDR */
81 
82 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
83 static void hif_config_rri_on_ddr(struct hif_softc *scn);
84 
85 /**
86  * hif_target_access_log_dump() - dump access log
87  *
88  * Dump the target access log.
89  *
90  * Return: n/a
91  */
92 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
93 static void hif_target_access_log_dump(void)
94 {
95 	hif_target_dump_access_log();
96 }
97 #endif
98 
99 
100 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
101 		      uint8_t cmd_id, bool start)
102 {
103 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
104 
105 	switch (cmd_id) {
106 	case AGC_DUMP:
107 		if (start)
108 			priv_start_agc(scn);
109 		else
110 			priv_dump_agc(scn);
111 		break;
112 	case CHANINFO_DUMP:
113 		if (start)
114 			priv_start_cap_chaninfo(scn);
115 		else
116 			priv_dump_chaninfo(scn);
117 		break;
118 	case BB_WATCHDOG_DUMP:
119 		priv_dump_bbwatchdog(scn);
120 		break;
121 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
122 	case PCIE_ACCESS_DUMP:
123 		hif_target_access_log_dump();
124 		break;
125 #endif
126 	default:
127 		HIF_ERROR("%s: Invalid htc dump command", __func__);
128 		break;
129 	}
130 }
131 
132 static void ce_poll_timeout(void *arg)
133 {
134 	struct CE_state *CE_state = (struct CE_state *)arg;
135 
136 	if (CE_state->timer_inited) {
137 		ce_per_engine_service(CE_state->scn, CE_state->id);
138 		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
139 	}
140 }
141 
142 static unsigned int roundup_pwr2(unsigned int n)
143 {
144 	int i;
145 	unsigned int test_pwr2;
146 
147 	if (!(n & (n - 1)))
148 		return n; /* already a power of 2 */
149 
150 	test_pwr2 = 4;
151 	for (i = 0; i < 29; i++) {
152 		if (test_pwr2 > n)
153 			return test_pwr2;
154 		test_pwr2 = test_pwr2 << 1;
155 	}
156 
157 	QDF_ASSERT(0); /* n too large */
158 	return 0;
159 }
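/*
 * Worked examples (illustrative, derived from the function above):
 *   roundup_pwr2(100) == 128   - rounded up to the next power of two
 *   roundup_pwr2(512) == 512   - already a power of two, returned as-is
 * The power-of-two result is what allows nentries_mask to be computed
 * as (nentries - 1) in ce_alloc_ring_state() later in this file.
 */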
160 
161 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
162 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
163 
164 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
165 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
166 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
167 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
168 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
169 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
170 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
171 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
172 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
173 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
174 #ifdef QCA_WIFI_3_0_ADRASTEA
175 	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
176 	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
177 	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
178 #endif
179 };
180 
181 #ifdef QCN7605_SUPPORT
182 static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
183 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
184 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
185 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
186 	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
187 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
188 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
189 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
190 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
191 };
192 #endif
193 
194 #ifdef WLAN_FEATURE_EPPING
195 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
196 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
197 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
198 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
199 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
200 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
201 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
202 	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
203 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
204 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
205 };
206 #endif
207 
208 /* CE_PCI TABLE */
209 /*
210  * NOTE: the table below is out of date, though still a useful reference.
211  * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
212  * mapping of HTC services to HIF pipes.
213  */
214 /*
215  * This authoritative table defines Copy Engine configuration and the mapping
216  * of services/endpoints to CEs.  A subset of this information is passed to
217  * the Target during startup as a prerequisite to entering BMI phase.
218  * See:
219  *    target_service_to_ce_map - Target-side mapping
220  *    hif_map_service_to_pipe      - Host-side mapping
221  *    target_ce_config         - Target-side configuration
222  *    host_ce_config           - Host-side configuration
223    ============================================================================
224    Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
225  |                      |      | ctio | Size     | Frequency
226  |                      |      | n    |          |
227    ============================================================================
228    tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
229    descriptor |                      |      |      | O(100B)  | and regular
230    download   |                      |      |      |          |
231    ----------------------------------------------------------------------------
232    rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
233    indication |                      |      |      | O(10B)   | regular
234    upload     |                      |      |      |          |
235    ----------------------------------------------------------------------------
236    MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
237    upload     |                      |      |      | O(1000B) | (frequent
238    e.g. noise |                      |      |      |          | during IP1.0
239    packets    |                      |      |      |          | testing)
240    ----------------------------------------------------------------------------
241    MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
242    download   |                      |      |      | O(1000B) | (frequent
243    e.g.       |                      |      |      |          | during IP1.0
244    misdirecte |                      |      |      |          | testing)
245    d EAPOL    |                      |      |      |          |
246    packets    |                      |      |      |          |
247    ----------------------------------------------------------------------------
248    n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
249  | DATA_VO (uplink)     |      |      |          |
250    ----------------------------------------------------------------------------
251    n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
252  | DATA_VO (downlink)   |      |      |          |
253    ----------------------------------------------------------------------------
254    WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
255  |                      |      |      | O(100B)  |
256    ----------------------------------------------------------------------------
257    WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
258    messages   | (downlink)           |      |      | O(100B)  |
259  |                      |      |      |          |
260    ----------------------------------------------------------------------------
261    n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
262  | HTC_RAW_STREAMS      |      |      |          |
263  | (uplink)             |      |      |          |
264    ----------------------------------------------------------------------------
265    n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
266  | HTC_RAW_STREAMS      |      |      |          |
267  | (downlink)           |      |      |          |
268    ----------------------------------------------------------------------------
269    diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
270  |                      |      |      |          | infrequent
271    ============================================================================
272  */
273 
274 /*
275  * Map from service/endpoint to Copy Engine.
276  * This table is derived from the CE_PCI TABLE, above.
277  * It is passed to the Target at startup for use by firmware.
278  */
279 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
280 	{
281 		WMI_DATA_VO_SVC,
282 		PIPEDIR_OUT,    /* out = UL = host -> target */
283 		3,
284 	},
285 	{
286 		WMI_DATA_VO_SVC,
287 		PIPEDIR_IN,     /* in = DL = target -> host */
288 		2,
289 	},
290 	{
291 		WMI_DATA_BK_SVC,
292 		PIPEDIR_OUT,    /* out = UL = host -> target */
293 		3,
294 	},
295 	{
296 		WMI_DATA_BK_SVC,
297 		PIPEDIR_IN,     /* in = DL = target -> host */
298 		2,
299 	},
300 	{
301 		WMI_DATA_BE_SVC,
302 		PIPEDIR_OUT,    /* out = UL = host -> target */
303 		3,
304 	},
305 	{
306 		WMI_DATA_BE_SVC,
307 		PIPEDIR_IN,     /* in = DL = target -> host */
308 		2,
309 	},
310 	{
311 		WMI_DATA_VI_SVC,
312 		PIPEDIR_OUT,    /* out = UL = host -> target */
313 		3,
314 	},
315 	{
316 		WMI_DATA_VI_SVC,
317 		PIPEDIR_IN,     /* in = DL = target -> host */
318 		2,
319 	},
320 	{
321 		WMI_CONTROL_SVC,
322 		PIPEDIR_OUT,    /* out = UL = host -> target */
323 		3,
324 	},
325 	{
326 		WMI_CONTROL_SVC,
327 		PIPEDIR_IN,     /* in = DL = target -> host */
328 		2,
329 	},
330 	{
331 		HTC_CTRL_RSVD_SVC,
332 		PIPEDIR_OUT,    /* out = UL = host -> target */
333 		0,              /* could be moved to 3 (share with WMI) */
334 	},
335 	{
336 		HTC_CTRL_RSVD_SVC,
337 		PIPEDIR_IN,     /* in = DL = target -> host */
338 		2,
339 	},
340 	{
341 		HTC_RAW_STREAMS_SVC, /* not currently used */
342 		PIPEDIR_OUT,    /* out = UL = host -> target */
343 		0,
344 	},
345 	{
346 		HTC_RAW_STREAMS_SVC, /* not currently used */
347 		PIPEDIR_IN,     /* in = DL = target -> host */
348 		2,
349 	},
350 	{
351 		HTT_DATA_MSG_SVC,
352 		PIPEDIR_OUT,    /* out = UL = host -> target */
353 		4,
354 	},
355 	{
356 		HTT_DATA_MSG_SVC,
357 		PIPEDIR_IN,     /* in = DL = target -> host */
358 		1,
359 	},
360 	{
361 		WDI_IPA_TX_SVC,
362 		PIPEDIR_OUT,    /* out = UL = host -> target */
363 		5,
364 	},
365 #if defined(QCA_WIFI_3_0_ADRASTEA)
366 	{
367 		HTT_DATA2_MSG_SVC,
368 		PIPEDIR_IN,    /* in = DL = target -> host */
369 		9,
370 	},
371 	{
372 		HTT_DATA3_MSG_SVC,
373 		PIPEDIR_IN,    /* in = DL = target -> host */
374 		10,
375 	},
376 	{
377 		PACKET_LOG_SVC,
378 		PIPEDIR_IN,    /* in = DL = target -> host */
379 		11,
380 	},
381 #endif
382 	/* (Additions here) */
383 
384 	{                       /* Must be last */
385 		0,
386 		0,
387 		0,
388 	},
389 };
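/*
 * Reading the map above (illustrative): WMI_CONTROL_SVC is carried on
 * CE 3 for host->target traffic and CE 2 for target->host traffic,
 * while HTT_DATA_MSG_SVC uses CE 4 for the TX descriptor download path
 * and CE 1 for RX indications.
 */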
390 
391 /* PIPEDIR_OUT = HOST to Target */
392 /* PIPEDIR_IN  = TARGET to HOST */
393 #if (defined(QCA_WIFI_QCA8074))
394 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
395 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
396 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
397 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
398 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
399 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
400 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
401 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
402 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
403 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
404 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
405 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
406 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
407 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
408 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
409 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
410 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
411 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
412 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
413 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
414 	/* (Additions here) */
415 	{ 0, 0, 0, },
416 };
417 #else
418 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
419 };
420 #endif
421 
422 #if (defined(QCA_WIFI_QCA8074V2))
423 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
424 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
425 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
426 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
427 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
428 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
429 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
430 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
431 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
432 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
433 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
434 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
435 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
436 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
437 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
438 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
439 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
440 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
441 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
442 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
443 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
444 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
445 	/* (Additions here) */
446 	{ 0, 0, 0, },
447 };
448 #else
449 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
450 };
451 #endif
452 
453 #if (defined(QCA_WIFI_QCA6018))
454 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
455 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
456 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
457 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
458 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
459 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
460 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
461 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
462 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
463 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
464 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
465 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
466 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
467 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
468 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
469 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
470 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
471 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
472 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
473 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
474 	/* (Additions here) */
475 	{ 0, 0, 0, },
476 };
477 #else
478 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
479 };
480 #endif
481 
482 #if (defined(QCA_WIFI_QCN9000))
483 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
484 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
485 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
486 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
487 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
488 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
489 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
490 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
491 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
492 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
493 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
494 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
495 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
496 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
497 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
498 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
499 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
500 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
501 	/* (Additions here) */
502 	{ 0, 0, 0, },
503 };
504 #else
505 static struct service_to_pipe target_service_to_ce_map_qcn9000[] = {
506 };
507 #endif
508 
509 /* PIPEDIR_OUT = HOST to Target */
510 /* PIPEDIR_IN  = TARGET to HOST */
511 #ifdef QCN7605_SUPPORT
512 static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
513 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
514 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
515 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
516 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
517 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
518 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
519 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
520 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
521 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
522 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
523 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
524 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
525 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
526 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
527 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
528 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
529 	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
530 #ifdef IPA_OFFLOAD
531 	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
532 #else
533 	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
534 #endif
535 	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
536 	/* (Additions here) */
537 	{ 0, 0, 0, },
538 };
539 #endif
540 
541 #if (defined(QCA_WIFI_QCA6290))
542 #ifdef QCA_6290_AP_MODE
543 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
544 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
545 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
546 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
547 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
548 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
549 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
550 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
551 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
552 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
553 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
554 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
555 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
556 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
557 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
558 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
559 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
560 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
561 	/* (Additions here) */
562 	{ 0, 0, 0, },
563 };
564 #else
565 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
566 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
567 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
568 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
569 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
570 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
571 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
572 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
573 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
574 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
575 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
576 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
577 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
578 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
579 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
580 	/* (Additions here) */
581 	{ 0, 0, 0, },
582 };
583 #endif
584 #else
585 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
586 };
587 #endif
588 
589 #if (defined(QCA_WIFI_QCA6390))
590 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
591 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
592 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
593 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
594 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
595 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
596 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
597 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
598 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
599 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
600 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
601 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
602 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
603 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
604 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
605 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
606 	/* (Additions here) */
607 	{ 0, 0, 0, },
608 };
609 #else
610 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
611 };
612 #endif
613 
614 static struct service_to_pipe target_service_to_ce_map_qca6490[] = {
615 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
616 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
617 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
618 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
619 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
620 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
621 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
622 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
623 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
624 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
625 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
626 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
627 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
628 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
629 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
630 	/* (Additions here) */
631 	{ 0, 0, 0, },
632 };
633 
634 #if (defined(QCA_WIFI_QCA6750))
635 static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
636 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
637 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
638 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
639 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
640 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
641 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
642 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
643 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
644 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
645 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
646 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
647 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
648 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
649 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
650 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
651 	/* (Additions here) */
652 	{ 0, 0, 0, },
653 };
654 #else
655 static struct service_to_pipe target_service_to_ce_map_qca6750[] = {
656 };
657 #endif
658 
659 static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
660 	{
661 		WMI_DATA_VO_SVC,
662 		PIPEDIR_OUT,    /* out = UL = host -> target */
663 		3,
664 	},
665 	{
666 		WMI_DATA_VO_SVC,
667 		PIPEDIR_IN,     /* in = DL = target -> host */
668 		2,
669 	},
670 	{
671 		WMI_DATA_BK_SVC,
672 		PIPEDIR_OUT,    /* out = UL = host -> target */
673 		3,
674 	},
675 	{
676 		WMI_DATA_BK_SVC,
677 		PIPEDIR_IN,     /* in = DL = target -> host */
678 		2,
679 	},
680 	{
681 		WMI_DATA_BE_SVC,
682 		PIPEDIR_OUT,    /* out = UL = host -> target */
683 		3,
684 	},
685 	{
686 		WMI_DATA_BE_SVC,
687 		PIPEDIR_IN,     /* in = DL = target -> host */
688 		2,
689 	},
690 	{
691 		WMI_DATA_VI_SVC,
692 		PIPEDIR_OUT,    /* out = UL = host -> target */
693 		3,
694 	},
695 	{
696 		WMI_DATA_VI_SVC,
697 		PIPEDIR_IN,     /* in = DL = target -> host */
698 		2,
699 	},
700 	{
701 		WMI_CONTROL_SVC,
702 		PIPEDIR_OUT,    /* out = UL = host -> target */
703 		3,
704 	},
705 	{
706 		WMI_CONTROL_SVC,
707 		PIPEDIR_IN,     /* in = DL = target -> host */
708 		2,
709 	},
710 	{
711 		HTC_CTRL_RSVD_SVC,
712 		PIPEDIR_OUT,    /* out = UL = host -> target */
713 		0,              /* could be moved to 3 (share with WMI) */
714 	},
715 	{
716 		HTC_CTRL_RSVD_SVC,
717 		PIPEDIR_IN,     /* in = DL = target -> host */
718 		1,
719 	},
720 	{
721 		HTC_RAW_STREAMS_SVC, /* not currently used */
722 		PIPEDIR_OUT,    /* out = UL = host -> target */
723 		0,
724 	},
725 	{
726 		HTC_RAW_STREAMS_SVC, /* not currently used */
727 		PIPEDIR_IN,     /* in = DL = target -> host */
728 		1,
729 	},
730 	{
731 		HTT_DATA_MSG_SVC,
732 		PIPEDIR_OUT,    /* out = UL = host -> target */
733 		4,
734 	},
735 #ifdef WLAN_FEATURE_FASTPATH
736 	{
737 		HTT_DATA_MSG_SVC,
738 		PIPEDIR_IN,     /* in = DL = target -> host */
739 		5,
740 	},
741 #else /* WLAN_FEATURE_FASTPATH */
742 	{
743 		HTT_DATA_MSG_SVC,
744 		PIPEDIR_IN,  /* in = DL = target -> host */
745 		1,
746 	},
747 #endif /* WLAN_FEATURE_FASTPATH */
748 
749 	/* (Additions here) */
750 
751 	{                       /* Must be last */
752 		0,
753 		0,
754 		0,
755 	},
756 };
757 
758 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
759 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
760 
761 #ifdef WLAN_FEATURE_EPPING
762 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
763 	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
764 	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
765 	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
766 	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
767 	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
768 	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
769 	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
770 	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
771 	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
772 	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
773 	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
774 	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
775 	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
776 	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
777 	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
778 	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
779 	{0, 0, 0,},             /* Must be last */
780 };
781 
782 void hif_select_epping_service_to_pipe_map(struct service_to_pipe
783 					   **tgt_svc_map_to_use,
784 					   uint32_t *sz_tgt_svc_map_to_use)
785 {
786 	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
787 	*sz_tgt_svc_map_to_use =
788 			sizeof(target_service_to_ce_map_wlan_epping);
789 }
790 #endif
791 
792 #ifdef QCN7605_SUPPORT
793 static inline
794 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
795 			       uint32_t *sz_tgt_svc_map_to_use)
796 {
797 	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
798 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
799 }
800 #else
801 static inline
802 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
803 			       uint32_t *sz_tgt_svc_map_to_use)
804 {
805 	HIF_ERROR("%s: QCN7605 not supported", __func__);
806 }
807 #endif
808 
809 static void hif_select_service_to_pipe_map(struct hif_softc *scn,
810 				    struct service_to_pipe **tgt_svc_map_to_use,
811 				    uint32_t *sz_tgt_svc_map_to_use)
812 {
813 	uint32_t mode = hif_get_conparam(scn);
814 	struct hif_target_info *tgt_info = &scn->target_info;
815 
816 	if (QDF_IS_EPPING_ENABLED(mode)) {
817 		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
818 						      sz_tgt_svc_map_to_use);
819 	} else {
820 		switch (tgt_info->target_type) {
821 		default:
822 			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
823 			*sz_tgt_svc_map_to_use =
824 				sizeof(target_service_to_ce_map_wlan);
825 			break;
826 		case TARGET_TYPE_QCN7605:
827 			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
828 						  sz_tgt_svc_map_to_use);
829 			break;
830 		case TARGET_TYPE_AR900B:
831 		case TARGET_TYPE_QCA9984:
832 		case TARGET_TYPE_IPQ4019:
833 		case TARGET_TYPE_QCA9888:
834 		case TARGET_TYPE_AR9888:
835 		case TARGET_TYPE_AR9888V2:
836 			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
837 			*sz_tgt_svc_map_to_use =
838 				sizeof(target_service_to_ce_map_ar900b);
839 			break;
840 		case TARGET_TYPE_QCA6290:
841 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
842 			*sz_tgt_svc_map_to_use =
843 				sizeof(target_service_to_ce_map_qca6290);
844 			break;
845 		case TARGET_TYPE_QCA6390:
846 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
847 			*sz_tgt_svc_map_to_use =
848 				sizeof(target_service_to_ce_map_qca6390);
849 			break;
850 		case TARGET_TYPE_QCA6490:
851 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6490;
852 			*sz_tgt_svc_map_to_use =
853 				sizeof(target_service_to_ce_map_qca6490);
854 			break;
855 		case TARGET_TYPE_QCA6750:
856 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6750;
857 			*sz_tgt_svc_map_to_use =
858 				sizeof(target_service_to_ce_map_qca6750);
859 			break;
860 		case TARGET_TYPE_QCA8074:
861 			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
862 			*sz_tgt_svc_map_to_use =
863 				sizeof(target_service_to_ce_map_qca8074);
864 			break;
865 		case TARGET_TYPE_QCA8074V2:
866 			*tgt_svc_map_to_use =
867 				target_service_to_ce_map_qca8074_v2;
868 			*sz_tgt_svc_map_to_use =
869 				sizeof(target_service_to_ce_map_qca8074_v2);
870 			break;
871 		case TARGET_TYPE_QCA6018:
872 			*tgt_svc_map_to_use =
873 				target_service_to_ce_map_qca6018;
874 			*sz_tgt_svc_map_to_use =
875 				sizeof(target_service_to_ce_map_qca6018);
876 			break;
877 		case TARGET_TYPE_QCN9000:
878 			*tgt_svc_map_to_use =
879 				target_service_to_ce_map_qcn9000;
880 			*sz_tgt_svc_map_to_use =
881 				sizeof(target_service_to_ce_map_qcn9000);
882 			break;
883 		}
884 	}
885 }
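/*
 * Example (derived from the tables above): for TARGET_TYPE_QCA8074 this
 * selects target_service_to_ce_map_qca8074 and returns its size in
 * bytes; callers such as ce_mark_datapath() below derive the number of
 * entries by dividing by sizeof(struct service_to_pipe).
 */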
886 
887 /**
888  * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
889  * @ce_state: pointer to the state context of the CE
890  *
891  * Description:
892  *   Sets the htt_rx_data or htt_tx_data attribute of the state
893  *   structure if the CE serves one of the HTT DATA services.
894  *
895  * Return:
896  *  true if the CE serves an HTT DATA service
897  *  false otherwise
898  */
899 static bool ce_mark_datapath(struct CE_state *ce_state)
900 {
901 	struct service_to_pipe *svc_map;
902 	uint32_t map_sz, map_len;
903 	int    i;
904 	bool   rc = false;
905 
906 	if (ce_state) {
907 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
908 					       &map_sz);
909 
910 		map_len = map_sz / sizeof(struct service_to_pipe);
911 		for (i = 0; i < map_len; i++) {
912 			if ((svc_map[i].pipenum == ce_state->id) &&
913 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
914 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
915 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
916 				/* HTT CEs are unidirectional */
917 				if (svc_map[i].pipedir == PIPEDIR_IN)
918 					ce_state->htt_rx_data = true;
919 				else
920 					ce_state->htt_tx_data = true;
921 				rc = true;
922 			}
923 		}
924 	}
925 	return rc;
926 }
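/*
 * Example (based on target_service_to_ce_map_wlan above): CE 1 serves
 * HTT_DATA_MSG_SVC with PIPEDIR_IN, so ce_mark_datapath() sets
 * htt_rx_data = true for it; CE 4 serves the same service with
 * PIPEDIR_OUT, so htt_tx_data = true is set instead. CEs that serve no
 * HTT DATA service are left untouched and the function returns false.
 */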
927 
928 /**
929  * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
930  * @ce_id: ce in question
931  * @ring: ring state being examined
932  * @type: "src_ring" or "dest_ring" string for identifying the ring
933  *
934  * Warns on non-zero index values.
935  * Causes a kernel panic if the ring is not empty during initialization.
936  */
937 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
938 					 char *type)
939 {
940 	if (ring->write_index != 0 || ring->sw_index != 0)
941 		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
942 			  ce_id, type, ring->sw_index, ring->write_index);
943 	if (ring->write_index != ring->sw_index)
944 		QDF_BUG(0);
945 }
946 
947 #ifdef IPA_OFFLOAD
948 /**
949  * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
950  * @scn: softc instance
951  * @CE_id: ce in question
952  * @base_addr: pointer to copyengine ring base address
953  * @ce_ring: copyengine instance
954  * @nentries: number of entries to be allocated
955  * @desc_size: ce desc size
956  *
957  * Return: QDF_STATUS_SUCCESS - for success
958  */
959 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
960 				     qdf_dma_addr_t *base_addr,
961 				     struct CE_ring_state *ce_ring,
962 				     unsigned int nentries, uint32_t desc_size)
963 {
964 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
965 	    !ce_srng_based(scn)) {
966 		if (!scn->ipa_ce_ring) {
967 			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
968 				scn->qdf_dev,
969 				nentries * desc_size + CE_DESC_RING_ALIGN);
970 			if (!scn->ipa_ce_ring) {
971 				HIF_ERROR(
972 				"%s: Failed to allocate memory for IPA ce ring",
973 				__func__);
974 				return QDF_STATUS_E_NOMEM;
975 			}
976 		}
977 		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
978 						&scn->ipa_ce_ring->mem_info);
979 		ce_ring->base_addr_owner_space_unaligned =
980 						scn->ipa_ce_ring->vaddr;
981 	} else {
982 		ce_ring->base_addr_owner_space_unaligned =
983 			qdf_mem_alloc_consistent(scn->qdf_dev,
984 						 scn->qdf_dev->dev,
985 						 (nentries * desc_size +
986 						 CE_DESC_RING_ALIGN),
987 						 base_addr);
988 		if (!ce_ring->base_addr_owner_space_unaligned) {
989 			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
990 				  __func__, CE_id);
991 			return QDF_STATUS_E_NOMEM;
992 		}
993 	}
994 	return QDF_STATUS_SUCCESS;
995 }
996 
997 /**
998  * ce_free_desc_ring() - Frees copyengine descriptor ring
999  * @scn: softc instance
1000  * @CE_id: ce in question
1001  * @ce_ring: copyengine instance
1002  * @desc_size: ce desc size
1003  *
1004  * Return: None
1005  */
1006 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1007 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
1008 {
1009 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
1010 	    !ce_srng_based(scn)) {
1011 		if (scn->ipa_ce_ring) {
1012 			qdf_mem_shared_mem_free(scn->qdf_dev,
1013 						scn->ipa_ce_ring);
1014 			scn->ipa_ce_ring = NULL;
1015 		}
1016 		ce_ring->base_addr_owner_space_unaligned = NULL;
1017 	} else {
1018 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1019 			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
1020 			ce_ring->base_addr_owner_space_unaligned,
1021 			ce_ring->base_addr_CE_space, 0);
1022 		ce_ring->base_addr_owner_space_unaligned = NULL;
1023 	}
1024 }
1025 #else
1026 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1027 				     qdf_dma_addr_t *base_addr,
1028 				     struct CE_ring_state *ce_ring,
1029 				     unsigned int nentries, uint32_t desc_size)
1030 {
1031 	ce_ring->base_addr_owner_space_unaligned =
1032 		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1033 					 (nentries * desc_size +
1034 					 CE_DESC_RING_ALIGN), base_addr);
1035 	if (!ce_ring->base_addr_owner_space_unaligned) {
1036 		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
1037 			  __func__, CE_id);
1038 		return QDF_STATUS_E_NOMEM;
1039 	}
1040 	return QDF_STATUS_SUCCESS;
1041 }
1042 
1043 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
1044 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
1045 {
1046 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1047 		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
1048 		ce_ring->base_addr_owner_space_unaligned,
1049 		ce_ring->base_addr_CE_space, 0);
1050 	ce_ring->base_addr_owner_space_unaligned = NULL;
1051 }
1052 #endif /* IPA_OFFLOAD */
1053 
1054 /*
1055  * TODO: Need to explore the possibility of having this as part of a
1056  * target context instead of a global array.
1057  */
1058 static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);
1059 
1060 void ce_service_register_module(enum ce_target_type target_type,
1061 				struct ce_ops* (*ce_attach)(void))
1062 {
1063 	if (target_type < CE_MAX_TARGET_TYPE)
1064 		ce_attach_register[target_type] = ce_attach;
1065 }
1066 
1067 qdf_export_symbol(ce_service_register_module);
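/*
 * Usage sketch (hypothetical callback names, not defined in this file):
 * a CE service implementation registers its attach callback once at
 * module init, for example
 *
 *   ce_service_register_module(CE_SVC_LEGACY, ce_services_legacy);
 *   ce_service_register_module(CE_SVC_SRNG, ce_services_srng);
 *
 * ce_services_attach() below then picks the SRNG or legacy entry based
 * on ce_srng_based().
 */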
1068 
1069 /**
1070  * ce_srng_based() - Does this target use srng
1071  * @scn: pointer to the hif context
1072  *
1073  * Description:
1074  *   Returns true if the target is SRNG based
1075  *
1076  * Return:
1077  *  true if the target is SRNG based
1078  *  false otherwise
1079  */
1080 bool ce_srng_based(struct hif_softc *scn)
1081 {
1082 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1083 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1084 
1085 	switch (tgt_info->target_type) {
1086 	case TARGET_TYPE_QCA8074:
1087 	case TARGET_TYPE_QCA8074V2:
1088 	case TARGET_TYPE_QCA6290:
1089 	case TARGET_TYPE_QCA6390:
1090 	case TARGET_TYPE_QCA6490:
1091 	case TARGET_TYPE_QCA6750:
1092 	case TARGET_TYPE_QCA6018:
1093 	case TARGET_TYPE_QCN9000:
1094 		return true;
1095 	default:
1096 		return false;
1097 	}
1098 	return false;
1099 }
1100 qdf_export_symbol(ce_srng_based);
1101 
1102 #ifdef QCA_WIFI_SUPPORT_SRNG
1103 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1104 {
1105 	struct ce_ops *ops = NULL;
1106 
1107 	if (ce_srng_based(scn)) {
1108 		if (ce_attach_register[CE_SVC_SRNG])
1109 			ops = ce_attach_register[CE_SVC_SRNG]();
1110 	} else if (ce_attach_register[CE_SVC_LEGACY]) {
1111 		ops = ce_attach_register[CE_SVC_LEGACY]();
1112 	}
1113 
1114 	return ops;
1115 }
1116 
1117 
1118 #else	/* QCA_WIFI_SUPPORT_SRNG */
1119 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1120 {
1121 	if (ce_attach_register[CE_SVC_LEGACY])
1122 		return ce_attach_register[CE_SVC_LEGACY]();
1123 
1124 	return NULL;
1125 }
1126 #endif /* QCA_WIFI_SUPPORT_SRNG */
1127 
1128 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
1129 		struct pld_shadow_reg_v2_cfg **shadow_config,
1130 		int *num_shadow_registers_configured) {
1131 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1132 
1133 	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
1134 			scn, shadow_config, num_shadow_registers_configured);
1135 }
1136 
1137 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
1138 						uint8_t ring_type)
1139 {
1140 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1141 
1142 	return hif_state->ce_services->ce_get_desc_size(ring_type);
1143 }
1144 
1145 
1146 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
1147 		uint8_t ring_type, uint32_t nentries)
1148 {
1149 	uint32_t ce_nbytes;
1150 	char *ptr;
1151 	qdf_dma_addr_t base_addr;
1152 	struct CE_ring_state *ce_ring;
1153 	uint32_t desc_size;
1154 	struct hif_softc *scn = CE_state->scn;
1155 
1156 	ce_nbytes = sizeof(struct CE_ring_state)
1157 		+ (nentries * sizeof(void *));
1158 	ptr = qdf_mem_malloc(ce_nbytes);
1159 	if (!ptr)
1160 		return NULL;
1161 
1162 	ce_ring = (struct CE_ring_state *)ptr;
1163 	ptr += sizeof(struct CE_ring_state);
1164 	ce_ring->nentries = nentries;
1165 	ce_ring->nentries_mask = nentries - 1;
1166 
1167 	ce_ring->low_water_mark_nentries = 0;
1168 	ce_ring->high_water_mark_nentries = nentries;
1169 	ce_ring->per_transfer_context = (void **)ptr;
1170 
1171 	desc_size = ce_get_desc_size(scn, ring_type);
1172 
1173 	/* Legacy platforms that lack cache-coherent
1174 	 * DMA are not supported
1175 	 */
1176 	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
1177 			       ce_ring, nentries,
1178 			       desc_size) !=
1179 	    QDF_STATUS_SUCCESS) {
1180 		HIF_ERROR("%s: ring has no DMA mem",
1181 				__func__);
1182 		qdf_mem_free(ce_ring);
1183 		return NULL;
1184 	}
1185 	ce_ring->base_addr_CE_space_unaligned = base_addr;
1186 
1187 	/* Zero-initialize the ring memory so stale data
1188 	 * cannot crash the system during firmware
1189 	 * download
1190 	 */
1191 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
1192 			nentries * desc_size +
1193 			CE_DESC_RING_ALIGN);
1194 
1195 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
1196 
1197 		ce_ring->base_addr_CE_space =
1198 			(ce_ring->base_addr_CE_space_unaligned +
1199 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
1200 
1201 		ce_ring->base_addr_owner_space = (void *)
1202 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
1203 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
1204 	} else {
1205 		ce_ring->base_addr_CE_space =
1206 				ce_ring->base_addr_CE_space_unaligned;
1207 		ce_ring->base_addr_owner_space =
1208 				ce_ring->base_addr_owner_space_unaligned;
1209 	}
1210 
1211 	return ce_ring;
1212 }
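/*
 * Alignment sketch (assuming, purely for illustration, that
 * CE_DESC_RING_ALIGN == 8): an unaligned CE-space base address of
 * 0x1004 is rounded up as
 *   (0x1004 + 8 - 1) & ~(8 - 1) == 0x1008
 * so that both the owner-space and CE-space views of the ring start on
 * a descriptor-aligned boundary.
 */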
1213 
1214 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
1215 			uint32_t ce_id, struct CE_ring_state *ring,
1216 			struct CE_attr *attr)
1217 {
1218 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1219 
1220 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
1221 					      ring, attr);
1222 }
1223 
1224 int hif_ce_bus_early_suspend(struct hif_softc *scn)
1225 {
1226 	uint8_t ul_pipe, dl_pipe;
1227 	int ce_id, status, ul_is_polled, dl_is_polled;
1228 	struct CE_state *ce_state;
1229 
1230 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
1231 					 &ul_pipe, &dl_pipe,
1232 					 &ul_is_polled, &dl_is_polled);
1233 	if (status) {
1234 		HIF_ERROR("%s: pipe_mapping failure", __func__);
1235 		return status;
1236 	}
1237 
1238 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1239 		if (ce_id == ul_pipe)
1240 			continue;
1241 		if (ce_id == dl_pipe)
1242 			continue;
1243 
1244 		ce_state = scn->ce_id_to_state[ce_id];
1245 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1246 		if (ce_state->state == CE_RUNNING)
1247 			ce_state->state = CE_PAUSED;
1248 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1249 	}
1250 
1251 	return status;
1252 }
1253 
1254 int hif_ce_bus_late_resume(struct hif_softc *scn)
1255 {
1256 	int ce_id;
1257 	struct CE_state *ce_state;
1258 	int write_index = 0;
1259 	bool index_updated;
1260 
1261 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1262 		ce_state = scn->ce_id_to_state[ce_id];
1263 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1264 		if (ce_state->state == CE_PENDING) {
1265 			write_index = ce_state->src_ring->write_index;
1266 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
1267 					write_index);
1268 			ce_state->state = CE_RUNNING;
1269 			index_updated = true;
1270 		} else {
1271 			index_updated = false;
1272 		}
1273 
1274 		if (ce_state->state == CE_PAUSED)
1275 			ce_state->state = CE_RUNNING;
1276 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1277 
1278 		if (index_updated)
1279 			hif_record_ce_desc_event(scn, ce_id,
1280 				RESUME_WRITE_INDEX_UPDATE,
1281 				NULL, NULL, write_index, 0);
1282 	}
1283 
1284 	return 0;
1285 }
1286 
1287 /**
1288  * ce_oom_recovery() - try to recover rx ce from oom condition
1289  * @context: CE_state of the CE with oom rx ring
1290  *
1291  * the executing work Will continue to be rescheduled until
1292  * The executing work will continue to be rescheduled until
1293  * at least 1 descriptor is successfully posted to the rx ring.
1294  *
1295  * Return: none
1296 static void ce_oom_recovery(void *context)
1297 {
1298 	struct CE_state *ce_state = context;
1299 	struct hif_softc *scn = ce_state->scn;
1300 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
1301 	struct HIF_CE_pipe_info *pipe_info =
1302 		&ce_softc->pipe_info[ce_state->id];
1303 
1304 	hif_post_recv_buffers_for_pipe(pipe_info);
1305 }
1306 
1307 #ifdef HIF_CE_DEBUG_DATA_BUF
1308 /**
1309  * alloc_mem_ce_debug_hist_data() - Allocate memory for the data pointed to
1310  * by the CE descriptors.
1311  * Allocates HIF_CE_HISTORY_MAX buffers of CE_DEBUG_MAX_DATA_BUF_SIZE bytes each
1312  * @scn: hif scn handle
1313  * @ce_id: Copy Engine Id
1314  *
1315  * Return: QDF_STATUS
1316  */
1317 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1318 {
1319 	struct hif_ce_desc_event *event = NULL;
1320 	struct hif_ce_desc_event *hist_ev = NULL;
1321 	uint32_t index = 0;
1322 
1323 	hist_ev =
1324 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1325 
1326 	if (!hist_ev)
1327 		return QDF_STATUS_E_NOMEM;
1328 
1329 	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
1330 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1331 		event = &hist_ev[index];
1332 		event->data =
1333 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
1334 		if (!event->data) {
1335 			hif_err_rl("ce debug data alloc failed");
1336 			return QDF_STATUS_E_NOMEM;
1337 		}
1338 	}
1339 	return QDF_STATUS_SUCCESS;
1340 }
1341 
1342 /**
1343  * free_mem_ce_debug_hist_data() - Free the data buffers pointed to by
1344  * the CE descriptors.
1345  * @scn: hif scn handle
1346  * @ce_id: Copy Engine Id
1347  *
1348  * Return: None
1349  */
1350 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1351 {
1352 	struct hif_ce_desc_event *event = NULL;
1353 	struct hif_ce_desc_event *hist_ev = NULL;
1354 	uint32_t index = 0;
1355 
1356 	hist_ev =
1357 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1358 
1359 	if (!hist_ev)
1360 		return;
1361 
1362 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1363 		event = &hist_ev[index];
1364 		if (event->data)
1365 			qdf_mem_free(event->data);
1366 		event->data = NULL;
1367 		event = NULL;
1368 	}
1369 
1370 }
1371 #endif /* HIF_CE_DEBUG_DATA_BUF */
1372 
1373 #ifndef HIF_CE_DEBUG_DATA_DYNAMIC_BUF
1374 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1375 struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];
1376 
1377 /**
1378  * alloc_mem_ce_debug_history() - Allocate CE descriptor history
1379  * @scn: hif scn handle
1380  * @ce_id: Copy Engine Id
1381  * @src_nentries: source ce ring entries
1382  * Return: QDF_STATUS
1383  */
1384 static QDF_STATUS
1385 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
1386 			   uint32_t src_nentries)
1387 {
1388 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1389 
1390 	ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
1391 	ce_hist->enable[ce_id] = 1;
1392 
1393 	if (src_nentries)
1394 		alloc_mem_ce_debug_hist_data(scn, ce_id);
1395 	else
1396 		ce_hist->data_enable[ce_id] = false;
1397 
1398 	return QDF_STATUS_SUCCESS;
1399 }
1400 
1401 /**
1402  * free_mem_ce_debug_history() - Free CE descriptor history
1403  * @scn: hif scn handle
1404  * @ce_id: Copy Engine Id
1405  *
1406  * Return: None
1407  */
1408 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
1409 {
1410 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1411 
1412 	ce_hist->enable[ce_id] = 0;
1413 	if (ce_hist->data_enable[ce_id]) {
1414 		ce_hist->data_enable[ce_id] = false;
1415 		free_mem_ce_debug_hist_data(scn, ce_id);
1416 	}
1417 	ce_hist->hist_ev[ce_id] = NULL;
1418 }
1419 #else
1420 static inline QDF_STATUS
1421 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1422 			   uint32_t src_nentries)
1423 {
1424 	return QDF_STATUS_SUCCESS;
1425 }
1426 
1427 static inline void
1428 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
1429 #endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
1430 #else
1431 #if defined(HIF_CE_DEBUG_DATA_BUF)
1432 
1433 static QDF_STATUS
1434 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1435 			   uint32_t src_nentries)
1436 {
1437 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
1438 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
1439 
1440 	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
1441 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
1442 		return QDF_STATUS_E_NOMEM;
1443 	} else {
1444 		scn->hif_ce_desc_hist.enable[CE_id] = 1;
1445 		return QDF_STATUS_SUCCESS;
1446 	}
1447 }
1448 
1449 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
1450 {
1451 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1452 	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];
1453 
1454 	if (!hist_ev)
1455 		return;
1456 
1457 	if (ce_hist->data_enable[CE_id]) {
1458 		ce_hist->data_enable[CE_id] = false;
1459 		free_mem_ce_debug_hist_data(scn, CE_id);
1460 	}
1461 
1462 	ce_hist->enable[CE_id] = 0;
1463 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
1464 	ce_hist->hist_ev[CE_id] = NULL;
1465 }
1466 
1467 #else
1468 
1469 static inline QDF_STATUS
1470 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1471 			   uint32_t src_nentries)
1472 {
1473 	return QDF_STATUS_SUCCESS;
1474 }
1475 
1476 static inline void
1477 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
1478 #endif /* HIF_CE_DEBUG_DATA_BUF */
1479 #endif /* HIF_CE_DEBUG_DATA_DYNAMIC_BUF */
1480 
1481 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1482 /**
1483  * reset_ce_debug_history() - reset the index and ce id used for dumping the
1484  * CE records on the console using sysfs.
1485  * @scn: hif scn handle
1486  *
1487  * Return: None
1488  */
1489 static inline void reset_ce_debug_history(struct hif_softc *scn)
1490 {
1491 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1492 	/* Initialise the CE debug history sysfs interface inputs ce_id and
1493 	 * index. Disable data storing
1494 	 */
1495 	ce_hist->hist_index = 0;
1496 	ce_hist->hist_id = 0;
1497 }
1498 #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1499 static inline void reset_ce_debug_history(struct hif_softc *scn) { }
1500 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1501 
1502 void ce_enable_polling(void *cestate)
1503 {
1504 	struct CE_state *CE_state = (struct CE_state *)cestate;
1505 
1506 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1507 		CE_state->timer_inited = true;
1508 }
1509 
1510 void ce_disable_polling(void *cestate)
1511 {
1512 	struct CE_state *CE_state = (struct CE_state *)cestate;
1513 
1514 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1515 		CE_state->timer_inited = false;
1516 }
1517 
1518 /*
1519  * Initialize a Copy Engine based on caller-supplied attributes.
1520  * This may be called once to initialize both source and destination
1521  * rings or it may be called twice for separate source and destination
1522  * initialization. It may be that only one side or the other is
1523  * initialized by software/firmware.
1524  *
1525  * This should be called during the initialization sequence before
1526  * interrupts are enabled, so we don't have to worry about thread safety.
1527  */
1528 struct CE_handle *ce_init(struct hif_softc *scn,
1529 			  unsigned int CE_id, struct CE_attr *attr)
1530 {
1531 	struct CE_state *CE_state;
1532 	uint32_t ctrl_addr;
1533 	unsigned int nentries;
1534 	bool malloc_CE_state = false;
1535 	bool malloc_src_ring = false;
1536 	int status;
1537 
1538 	QDF_ASSERT(CE_id < scn->ce_count);
1539 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
1540 	CE_state = scn->ce_id_to_state[CE_id];
1541 
1542 	if (!CE_state) {
1543 		CE_state =
1544 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
1545 		if (!CE_state)
1546 			return NULL;
1547 
1548 		malloc_CE_state = true;
1549 		qdf_spinlock_create(&CE_state->ce_index_lock);
1550 
1551 		CE_state->id = CE_id;
1552 		CE_state->ctrl_addr = ctrl_addr;
1553 		CE_state->state = CE_RUNNING;
1554 		CE_state->attr_flags = attr->flags;
1555 	}
1556 	CE_state->scn = scn;
1557 	CE_state->service = ce_engine_service_reg;
1558 
1559 	qdf_atomic_init(&CE_state->rx_pending);
1560 	if (!attr) {
1561 		/* Already initialized; caller wants the handle */
1562 		return (struct CE_handle *)CE_state;
1563 	}
1564 
1565 	if (CE_state->src_sz_max)
1566 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
1567 	else
1568 		CE_state->src_sz_max = attr->src_sz_max;
1569 
1570 	ce_init_ce_desc_event_log(scn, CE_id,
1571 				  attr->src_nentries + attr->dest_nentries);
1572 
1573 	/* source ring setup */
1574 	nentries = attr->src_nentries;
1575 	if (nentries) {
1576 		struct CE_ring_state *src_ring;
1577 
1578 		nentries = roundup_pwr2(nentries);
1579 		if (CE_state->src_ring) {
1580 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
1581 		} else {
1582 			src_ring = CE_state->src_ring =
1583 				ce_alloc_ring_state(CE_state,
1584 						CE_RING_SRC,
1585 						nentries);
1586 			if (!src_ring) {
1587 				/* cannot allocate src ring. If the
1588 				 * CE_state is allocated locally free
1589 				 * CE_State and return error.
1590 				 */
1591 				HIF_ERROR("%s: src ring has no mem", __func__);
1592 				if (malloc_CE_state) {
1593 					/* allocated CE_state locally */
1594 					qdf_mem_free(CE_state);
1595 					malloc_CE_state = false;
1596 				}
1597 				return NULL;
1598 			}
1599 			/* src ring allocated successfully; mark that it
1600 			 * was allocated locally
1601 			 */
1602 			malloc_src_ring = true;
1603 
1604 			/*
1605 			 * Also allocate a shadow src ring in
1606 			 * regular mem to use for faster access.
1607 			 */
1608 			src_ring->shadow_base_unaligned =
1609 				qdf_mem_malloc(nentries *
1610 					       sizeof(struct CE_src_desc) +
1611 					       CE_DESC_RING_ALIGN);
1612 			if (!src_ring->shadow_base_unaligned)
1613 				goto error_no_dma_mem;
1614 
1615 			src_ring->shadow_base = (struct CE_src_desc *)
1616 				(((size_t) src_ring->shadow_base_unaligned +
1617 				CE_DESC_RING_ALIGN - 1) &
1618 				 ~(CE_DESC_RING_ALIGN - 1));
1619 
1620 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
1621 					       src_ring, attr);
1622 			if (status < 0)
1623 				goto error_target_access;
1624 
1625 			ce_ring_test_initial_indexes(CE_id, src_ring,
1626 						     "src_ring");
1627 		}
1628 	}
1629 
1630 	/* destination ring setup */
1631 	nentries = attr->dest_nentries;
1632 	if (nentries) {
1633 		struct CE_ring_state *dest_ring;
1634 
1635 		nentries = roundup_pwr2(nentries);
1636 		if (CE_state->dest_ring) {
1637 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
1638 		} else {
1639 			dest_ring = CE_state->dest_ring =
1640 				ce_alloc_ring_state(CE_state,
1641 						CE_RING_DEST,
1642 						nentries);
1643 			if (!dest_ring) {
1644 				/* cannot allocate dst ring. If the CE_state
1645 				 * or src ring is allocated locally free
1646 				 * CE_State and src ring and return error.
1647 				 */
1648 				HIF_ERROR("%s: dest ring has no mem",
1649 					  __func__);
1650 				goto error_no_dma_mem;
1651 			}
1652 
1653 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
1654 				      dest_ring, attr);
1655 			if (status < 0)
1656 				goto error_target_access;
1657 
1658 			ce_ring_test_initial_indexes(CE_id, dest_ring,
1659 						     "dest_ring");
1660 
1661 			/* For srng based target, init status ring here */
1662 			if (ce_srng_based(CE_state->scn)) {
1663 				CE_state->status_ring =
1664 					ce_alloc_ring_state(CE_state,
1665 							CE_RING_STATUS,
1666 							nentries);
1667 				if (!CE_state->status_ring) {
1668 					/*Allocation failed. Cleanup*/
1669 					qdf_mem_free(CE_state->dest_ring);
1670 					if (malloc_src_ring) {
1671 						qdf_mem_free
1672 							(CE_state->src_ring);
1673 						CE_state->src_ring = NULL;
1674 						malloc_src_ring = false;
1675 					}
1676 					if (malloc_CE_state) {
1677 						/* allocated CE_state locally */
1678 						scn->ce_id_to_state[CE_id] =
1679 							NULL;
1680 						qdf_mem_free(CE_state);
1681 						malloc_CE_state = false;
1682 					}
1683 
1684 					return NULL;
1685 				}
1686 
1687 				status = ce_ring_setup(scn, CE_RING_STATUS,
1688 					       CE_id, CE_state->status_ring,
1689 					       attr);
1690 				if (status < 0)
1691 					goto error_target_access;
1692 
1693 			}
1694 
1695 			/* epping */
1696 			/* poll timer */
1697 			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
1698 				qdf_timer_init(scn->qdf_dev,
1699 						&CE_state->poll_timer,
1700 						ce_poll_timeout,
1701 						CE_state,
1702 						QDF_TIMER_TYPE_WAKE_APPS);
1703 				ce_enable_polling(CE_state);
1704 				qdf_timer_mod(&CE_state->poll_timer,
1705 						      CE_POLL_TIMEOUT);
1706 			}
1707 		}
1708 	}
1709 
1710 	if (!ce_srng_based(scn)) {
1711 		/* Enable CE error interrupts */
1712 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1713 			goto error_target_access;
1714 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1715 		if (Q_TARGET_ACCESS_END(scn) < 0)
1716 			goto error_target_access;
1717 	}
1718 
1719 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1720 			ce_oom_recovery, CE_state);
1721 
1722 	/* update the htt_data attribute */
1723 	ce_mark_datapath(CE_state);
1724 	scn->ce_id_to_state[CE_id] = CE_state;
1725 
1726 	alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries);
1727 
1728 	return (struct CE_handle *)CE_state;
1729 
1730 error_target_access:
1731 error_no_dma_mem:
1732 	ce_fini((struct CE_handle *)CE_state);
1733 	return NULL;
1734 }
1735 
1736 /**
1737  * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
1738  * @hif_ctx: HIF Context
1739  *
1740  * Checks whether polling is enabled on every CE that has a destination
1741  * ring. Returns true only when all such CEs have CE_ATTR_ENABLE_POLL set.
1742  *
1743  * Return: bool
1744  */
1745 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1746 {
1747 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1748 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1749 	struct CE_attr *attr;
1750 	int id;
1751 
1752 	for (id = 0; id < scn->ce_count; id++) {
1753 		attr = &hif_state->host_ce_config[id];
1754 		if (attr && (attr->dest_nentries) &&
1755 		    !(attr->flags & CE_ATTR_ENABLE_POLL))
1756 			return false;
1757 	}
1758 	return true;
1759 }
1760 qdf_export_symbol(hif_is_polled_mode_enabled);
1761 
1762 #ifdef WLAN_FEATURE_FASTPATH
1763 /**
1764  * hif_enable_fastpath() - Update that we have enabled fastpath mode
1765  * @hif_ctx: HIF context
1766  *
1767  * For use in data path
1768  *
1769  * Return: void
1770  */
1771 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1772 {
1773 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1774 
1775 	if (ce_srng_based(scn)) {
1776 		HIF_INFO("%s, srng rings do not support fastpath", __func__);
1777 		return;
1778 	}
1779 	HIF_DBG("%s, Enabling fastpath mode", __func__);
1780 	scn->fastpath_mode_on = true;
1781 }
1782 
1783 /**
1784  * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
1785  * @hif_ctx: HIF Context
1786  *
1787  * For use in data path to skip HTC
1788  *
1789  * Return: bool
1790  */
1791 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1792 {
1793 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1794 
1795 	return scn->fastpath_mode_on;
1796 }
1797 
1798 /**
1799  * hif_get_ce_handle - API to get CE handle for FastPath mode
1800  * @hif_ctx: HIF Context
1801  * @id: CopyEngine Id
1802  *
1803  * API to return CE handle for fastpath mode
1804  *
1805  * Return: CE handle (void pointer) for the given copy engine id
1806  */
1807 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1808 {
1809 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1810 
1811 	return scn->ce_id_to_state[id];
1812 }
1813 qdf_export_symbol(hif_get_ce_handle);
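
/*
 * Hypothetical usage sketch (not part of this driver): illustrates how a
 * datapath client could combine hif_is_fastpath_mode_enabled() and
 * hif_get_ce_handle() above. The helper name and the idea of passing a raw
 * CE id in directly are assumptions for illustration only.
 */
static inline struct CE_handle *example_get_fastpath_ce(
				struct hif_opaque_softc *hif_ctx, int ce_id)
{
	/* A CE handle is only useful to fastpath clients when the mode is on */
	if (!hif_is_fastpath_mode_enabled(hif_ctx))
		return NULL;

	return (struct CE_handle *)hif_get_ce_handle(hif_ctx, ce_id);
}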
1814 
1815 /**
1816  * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
1817  * No processing is required inside this function.
1818  * @ce_hdl: Copy engine handle
1819  * Using an assert, this function makes sure that
1820  * the TX CE has been processed completely.
1821  *
1822  * This is called while dismantling CE structures. No other thread
1823  * should be using these structures while dismantling is occurring,
1824  * therefore no locking is needed.
1825  *
1826  * Return: none
1827  */
1828 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1829 {
1830 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1831 	struct CE_ring_state *src_ring = ce_state->src_ring;
1832 	struct hif_softc *sc = ce_state->scn;
1833 	uint32_t sw_index, write_index;
1834 
1835 	if (hif_is_nss_wifi_enabled(sc))
1836 		return;
1837 
1838 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
1839 		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
1840 			 __func__, __LINE__);
1841 		sw_index = src_ring->sw_index;
1842 		write_index = src_ring->write_index;
1843 
1844 		/* At this point Tx CE should be clean */
1845 		qdf_assert_always(sw_index == write_index);
1846 	}
1847 }
1848 
1849 /**
1850  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1851  * @ce_hdl: Handle to CE
1852  *
1853  * These buffers are never allocated on the fly, but
1854  * are allocated only once during HIF start and freed
1855  * only once during HIF stop.
1856  * NOTE:
1857  * The assumption here is there is no in-flight DMA in progress
1858  * currently, so that buffers can be freed up safely.
1859  *
1860  * Return: NONE
1861  */
1862 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1863 {
1864 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1865 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
1866 	qdf_nbuf_t nbuf;
1867 	int i;
1868 
1869 	if (ce_state->scn->fastpath_mode_on == false)
1870 		return;
1871 
1872 	if (!ce_state->htt_rx_data)
1873 		return;
1874 
1875 	/*
1876 	 * When fastpath_mode is on, this datapath CE is kept completely
1877 	 * full: unlike other CEs, it does not leave one blank entry to
1878 	 * distinguish an empty queue from a full queue. So free all the
1879 	 * entries.
1880 	 */
1881 	for (i = 0; i < dst_ring->nentries; i++) {
1882 		nbuf = dst_ring->per_transfer_context[i];
1883 
1884 		/*
1885 		 * The reasons for doing this check are:
1886 		 * 1) Protect against calling cleanup before allocating buffers
1887 		 * 2) In a corner case, fastpath_mode_on may be set, but we
1888 		 *    could have a partially filled ring, because of a memory
1889 		 *    allocation failure in the middle of allocating the ring.
1890 		 *    This check accounts for that case; checking the
1891 		 *    fastpath_mode_on flag or started flag would not have
1892 		 *    covered it. This is not in the performance path,
1893 		 *    so it is OK to do this.
1894 		 */
1895 		if (nbuf) {
1896 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1897 					      QDF_DMA_FROM_DEVICE);
1898 			qdf_nbuf_free(nbuf);
1899 		}
1900 	}
1901 }
1902 
1903 /**
1904  * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1905  * @scn: HIF handle
1906  *
1907  * Datapath Rx CEs are a special case, where we reuse all the message buffers.
1908  * Hence we have to post all the entries in the pipe even at the start,
1909  * unlike other CE pipes where only dest_nentries - 1 buffers are filled
1910  * initially.
1911  *
1912  * Return: None
1913  */
1914 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1915 {
1916 	int pipe_num;
1917 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1918 
1919 	if (scn->fastpath_mode_on == false)
1920 		return;
1921 
1922 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1923 		struct HIF_CE_pipe_info *pipe_info =
1924 			&hif_state->pipe_info[pipe_num];
1925 		struct CE_state *ce_state =
1926 			scn->ce_id_to_state[pipe_info->pipe_num];
1927 
1928 		if (ce_state->htt_rx_data)
1929 			atomic_inc(&pipe_info->recv_bufs_needed);
1930 	}
1931 }
1932 #else
1933 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1934 {
1935 }
1936 
1937 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
1938 {
1939 	return false;
1940 }
1941 #endif /* WLAN_FEATURE_FASTPATH */
1942 
1943 void ce_fini(struct CE_handle *copyeng)
1944 {
1945 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1946 	unsigned int CE_id = CE_state->id;
1947 	struct hif_softc *scn = CE_state->scn;
1948 	uint32_t desc_size;
1949 
1950 	bool inited = CE_state->timer_inited;
1951 	CE_state->state = CE_UNUSED;
1952 	scn->ce_id_to_state[CE_id] = NULL;
1953 	/* Set the flag to false first to stop processing in ce_poll_timeout */
1954 	ce_disable_polling(CE_state);
1955 
1956 	qdf_lro_deinit(CE_state->lro_data);
1957 
1958 	if (CE_state->src_ring) {
1959 		/* Cleanup the datapath Tx ring */
1960 		ce_h2t_tx_ce_cleanup(copyeng);
1961 
1962 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
1963 		if (CE_state->src_ring->shadow_base_unaligned)
1964 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
1965 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
1966 			ce_free_desc_ring(scn, CE_state->id,
1967 					  CE_state->src_ring,
1968 					  desc_size);
1969 		qdf_mem_free(CE_state->src_ring);
1970 	}
1971 	if (CE_state->dest_ring) {
1972 		/* Cleanup the datapath Rx ring */
1973 		ce_t2h_msg_ce_cleanup(copyeng);
1974 
1975 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
1976 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
1977 			ce_free_desc_ring(scn, CE_state->id,
1978 					  CE_state->dest_ring,
1979 					  desc_size);
1980 		qdf_mem_free(CE_state->dest_ring);
1981 
1982 		/* epping */
1983 		if (inited) {
1984 			qdf_timer_free(&CE_state->poll_timer);
1985 		}
1986 	}
1987 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
1988 		/* Cleanup the datapath Tx ring */
1989 		ce_h2t_tx_ce_cleanup(copyeng);
1990 
1991 		if (CE_state->status_ring->shadow_base_unaligned)
1992 			qdf_mem_free(
1993 				CE_state->status_ring->shadow_base_unaligned);
1994 
1995 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
1996 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
1997 			ce_free_desc_ring(scn, CE_state->id,
1998 					  CE_state->status_ring,
1999 					  desc_size);
2000 		qdf_mem_free(CE_state->status_ring);
2001 	}
2002 
2003 	free_mem_ce_debug_history(scn, CE_id);
2004 	reset_ce_debug_history(scn);
2005 	ce_deinit_ce_desc_event_log(scn, CE_id);
2006 
2007 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
2008 	qdf_mem_free(CE_state);
2009 }
2010 
2011 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
2012 {
2013 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2014 
2015 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
2016 		  sizeof(hif_state->msg_callbacks_pending));
2017 	qdf_mem_zero(&hif_state->msg_callbacks_current,
2018 		  sizeof(hif_state->msg_callbacks_current));
2019 }
2020 
2021 /* Send the first nbytes bytes of the buffer */
2022 QDF_STATUS
2023 hif_send_head(struct hif_opaque_softc *hif_ctx,
2024 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
2025 	      qdf_nbuf_t nbuf, unsigned int data_attr)
2026 {
2027 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2028 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2029 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2030 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2031 	int bytes = nbytes, nfrags = 0;
2032 	struct ce_sendlist sendlist;
2033 	int status, i = 0;
2034 	unsigned int mux_id = 0;
2035 
2036 	if (nbytes > qdf_nbuf_len(nbuf)) {
2037 		HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
2038 			  (uint32_t)qdf_nbuf_len(nbuf));
2039 		QDF_ASSERT(0);
2040 	}
2041 
2042 	transfer_id =
2043 		(mux_id & MUX_ID_MASK) |
2044 		(transfer_id & TRANSACTION_ID_MASK);
2045 	data_attr &= DESC_DATA_FLAG_MASK;
2046 	/*
2047 	 * The common case involves sending multiple fragments within a
2048 	 * single download (the tx descriptor and the tx frame header).
2049 	 * So, optimize for the case of multiple fragments by not even
2050 	 * checking whether it's necessary to use a sendlist.
2051 	 * The overhead of using a sendlist for a single buffer download
2052 	 * is not a big deal, since it happens rarely (for WMI messages).
2053 	 */
2054 	ce_sendlist_init(&sendlist);
2055 	do {
2056 		qdf_dma_addr_t frag_paddr;
2057 		int frag_bytes;
2058 
2059 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
2060 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
2061 		/*
2062 		 * Clear the packet offset for all but the first CE desc.
2063 		 */
2064 		if (i++ > 0)
2065 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
2066 
2067 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
2068 				    frag_bytes >
2069 				    bytes ? bytes : frag_bytes,
2070 				    qdf_nbuf_get_frag_is_wordstream
2071 				    (nbuf,
2072 				    nfrags) ? 0 :
2073 				    CE_SEND_FLAG_SWAP_DISABLE,
2074 				    data_attr);
2075 		if (status != QDF_STATUS_SUCCESS) {
2076 			HIF_ERROR("%s: error, frag_num %d larger than limit",
2077 				__func__, nfrags);
2078 			return status;
2079 		}
2080 		bytes -= frag_bytes;
2081 		nfrags++;
2082 	} while (bytes > 0);
2083 
2084 	/* Make sure we have resources to handle this request */
2085 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2086 	if (pipe_info->num_sends_allowed < nfrags) {
2087 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2088 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
2089 		return QDF_STATUS_E_RESOURCES;
2090 	}
2091 	pipe_info->num_sends_allowed -= nfrags;
2092 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2093 
2094 	if (qdf_unlikely(!ce_hdl)) {
2095 		HIF_ERROR("%s: error CE handle is null", __func__);
2096 		return A_ERROR;
2097 	}
2098 
2099 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
2100 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
2101 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
2102 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
2103 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
2104 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
2105 
2106 	return status;
2107 }
2108 
2109 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
2110 								int force)
2111 {
2112 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2113 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2114 
2115 	if (!force) {
2116 		int resources;
2117 		/*
2118 		 * Decide whether to actually poll for completions, or just
2119 		 * wait for a later chance. If there seem to be plenty of
2120 		 * resources left, then just wait, since checking involves
2121 		 * reading a CE register, which is a relatively expensive
2122 		 * operation.
2123 		 */
2124 		resources = hif_get_free_queue_number(hif_ctx, pipe);
2125 		/*
2126 		 * If at least 50% of the total resources are still available,
2127 		 * don't bother checking again yet.
2128 		 */
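		/*
		 * Worked example (hypothetical size): with src_nentries = 32,
		 * the check below skips the register read while more than 16
		 * send slots are still free.
		 */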
2129 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
2130 									 1))
2131 			return;
2132 	}
2133 #if ATH_11AC_TXCOMPACT
2134 	ce_per_engine_servicereap(scn, pipe);
2135 #else
2136 	ce_per_engine_service(scn, pipe);
2137 #endif
2138 }
2139 
2140 uint16_t
2141 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2142 {
2143 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2144 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2145 	uint16_t rv;
2146 
2147 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2148 	rv = pipe_info->num_sends_allowed;
2149 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2150 	return rv;
2151 }
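
/*
 * Hypothetical caller sketch (not part of this driver): shows the intended
 * flow of checking for free send slots with hif_get_free_queue_number()
 * before handing a frame to hif_send_head(). The helper name is an
 * assumption; the nbuf is assumed to be already allocated and DMA-mapped
 * by the caller.
 */
static inline QDF_STATUS example_send_on_pipe(struct hif_opaque_softc *hif_ctx,
					      uint8_t pipe,
					      unsigned int transfer_id,
					      qdf_nbuf_t nbuf)
{
	/* Bail out early when the pipe has no send resources left */
	if (!hif_get_free_queue_number(hif_ctx, pipe))
		return QDF_STATUS_E_RESOURCES;

	/* Send the whole buffer; data_attr 0 requests no special flags */
	return hif_send_head(hif_ctx, pipe, transfer_id,
			     qdf_nbuf_len(nbuf), nbuf, 0);
}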
2152 
2153 /* Called by lower (CE) layer when a send to Target completes. */
2154 static void
2155 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
2156 		     void *transfer_context, qdf_dma_addr_t CE_data,
2157 		     unsigned int nbytes, unsigned int transfer_id,
2158 		     unsigned int sw_index, unsigned int hw_index,
2159 		     unsigned int toeplitz_hash_result)
2160 {
2161 	struct HIF_CE_pipe_info *pipe_info =
2162 		(struct HIF_CE_pipe_info *)ce_context;
2163 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
2164 	struct hif_msg_callbacks *msg_callbacks =
2165 		&pipe_info->pipe_callbacks;
2166 
2167 	do {
2168 		/*
2169 		 * The upper layer callback will be triggered
2170 		 * when the last fragment is completed.
2171 		 */
2172 		if (transfer_context != CE_SENDLIST_ITEM_CTXT)
2173 			msg_callbacks->txCompletionHandler(
2174 				msg_callbacks->Context,
2175 				transfer_context, transfer_id,
2176 				toeplitz_hash_result);
2177 
2178 		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2179 		pipe_info->num_sends_allowed++;
2180 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2181 	} while (ce_completed_send_next(copyeng,
2182 			&ce_context, &transfer_context,
2183 			&CE_data, &nbytes, &transfer_id,
2184 			&sw_idx, &hw_idx,
2185 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
2186 }
2187 
2188 /**
2189  * hif_ce_do_recv(): send message from copy engine to upper layers
2190  * @msg_callbacks: structure containing callback and callback context
2191  * @netbuf: skb containing message
2192  * @nbytes: number of bytes in the message
2193  * @pipe_info: used for the pipe_number info
2194  *
2195  * Checks the packet length, configures the length in the netbuff,
2196  * and calls the upper layer callback.
2197  *
2198  * Return: None
2199  */
2200 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
2201 		qdf_nbuf_t netbuf, int nbytes,
2202 		struct HIF_CE_pipe_info *pipe_info) {
2203 	if (nbytes <= pipe_info->buf_sz) {
2204 		qdf_nbuf_set_pktlen(netbuf, nbytes);
2205 		msg_callbacks->
2206 			rxCompletionHandler(msg_callbacks->Context,
2207 					netbuf, pipe_info->pipe_num);
2208 	} else {
2209 		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
2210 				__func__, netbuf, nbytes);
2211 
2212 		qdf_nbuf_free(netbuf);
2213 	}
2214 }
2215 
2216 /* Called by lower (CE) layer when data is received from the Target. */
2217 static void
2218 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
2219 		     void *transfer_context, qdf_dma_addr_t CE_data,
2220 		     unsigned int nbytes, unsigned int transfer_id,
2221 		     unsigned int flags)
2222 {
2223 	struct HIF_CE_pipe_info *pipe_info =
2224 		(struct HIF_CE_pipe_info *)ce_context;
2225 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
2226 	struct CE_state *ce_state = (struct CE_state *) copyeng;
2227 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2228 	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
2229 	struct hif_msg_callbacks *msg_callbacks =
2230 		 &pipe_info->pipe_callbacks;
2231 
2232 	do {
2233 		hif_pm_runtime_mark_last_busy(hif_ctx);
2234 		qdf_nbuf_unmap_single(scn->qdf_dev,
2235 				      (qdf_nbuf_t) transfer_context,
2236 				      QDF_DMA_FROM_DEVICE);
2237 
2238 		atomic_inc(&pipe_info->recv_bufs_needed);
2239 		hif_post_recv_buffers_for_pipe(pipe_info);
2240 		if (scn->target_status == TARGET_STATUS_RESET)
2241 			qdf_nbuf_free(transfer_context);
2242 		else
2243 			hif_ce_do_recv(msg_callbacks, transfer_context,
2244 				nbytes, pipe_info);
2245 
2246 		/* Set up force_break flag if num of receives reaches
2247 		 * MAX_NUM_OF_RECEIVES
2248 		 */
2249 		ce_state->receive_count++;
2250 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
2251 			ce_state->force_break = 1;
2252 			break;
2253 		}
2254 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
2255 					&CE_data, &nbytes, &transfer_id,
2256 					&flags) == QDF_STATUS_SUCCESS);
2257 
2258 }
2259 
2260 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
2261 
2262 void
2263 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
2264 	      struct hif_msg_callbacks *callbacks)
2265 {
2266 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2267 
2268 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2269 	spin_lock_init(&pcie_access_log_lock);
2270 #endif
2271 	/* Save callbacks for later installation */
2272 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
2273 		 sizeof(hif_state->msg_callbacks_pending));
2274 
2275 }
2276 
2277 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
2278 {
2279 	struct CE_handle *ce_diag = hif_state->ce_diag;
2280 	int pipe_num;
2281 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2282 	struct hif_msg_callbacks *hif_msg_callbacks =
2283 		&hif_state->msg_callbacks_current;
2284 
2285 	/* daemonize("hif_compl_thread"); */
2286 
2287 	if (scn->ce_count == 0) {
2288 		HIF_ERROR("%s: Invalid ce_count", __func__);
2289 		return -EINVAL;
2290 	}
2291 
2292 	if (!hif_msg_callbacks ||
2293 			!hif_msg_callbacks->rxCompletionHandler ||
2294 			!hif_msg_callbacks->txCompletionHandler) {
2295 		HIF_ERROR("%s: no completion handler registered", __func__);
2296 		return -EFAULT;
2297 	}
2298 
2299 	A_TARGET_ACCESS_LIKELY(scn);
2300 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2301 		struct CE_attr attr;
2302 		struct HIF_CE_pipe_info *pipe_info;
2303 
2304 		pipe_info = &hif_state->pipe_info[pipe_num];
2305 		if (pipe_info->ce_hdl == ce_diag)
2306 			continue;       /* Handle Diagnostic CE specially */
2307 		attr = hif_state->host_ce_config[pipe_num];
2308 		if (attr.src_nentries) {
2309 			/* pipe used to send to target */
2310 			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
2311 					 __func__, pipe_num, pipe_info);
2312 			ce_send_cb_register(pipe_info->ce_hdl,
2313 					    hif_pci_ce_send_done, pipe_info,
2314 					    attr.flags & CE_ATTR_DISABLE_INTR);
2315 			pipe_info->num_sends_allowed = attr.src_nentries - 1;
2316 		}
2317 		if (attr.dest_nentries) {
2318 			/* pipe used to receive from target */
2319 			ce_recv_cb_register(pipe_info->ce_hdl,
2320 					    hif_pci_ce_recv_data, pipe_info,
2321 					    attr.flags & CE_ATTR_DISABLE_INTR);
2322 		}
2323 
2324 		if (attr.src_nentries)
2325 			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
2326 
2327 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
2328 					sizeof(pipe_info->pipe_callbacks));
2329 	}
2330 
2331 	A_TARGET_ACCESS_UNLIKELY(scn);
2332 	return 0;
2333 }
2334 
2335 /*
2336  * Install pending msg callbacks.
2337  *
2338  * TBDXXX: This hack is needed because upper layers install msg callbacks
2339  * for use with HTC before BMI is done; yet this HIF implementation
2340  * needs to continue to use BMI msg callbacks. Really, upper layers
2341  * should not register HTC callbacks until AFTER BMI phase.
2342  */
2343 static void hif_msg_callbacks_install(struct hif_softc *scn)
2344 {
2345 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2346 
2347 	qdf_mem_copy(&hif_state->msg_callbacks_current,
2348 		 &hif_state->msg_callbacks_pending,
2349 		 sizeof(hif_state->msg_callbacks_pending));
2350 }
2351 
2352 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2353 							uint8_t *DLPipe)
2354 {
2355 	int ul_is_polled, dl_is_polled;
2356 
2357 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
2358 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2359 }
2360 
2361 /**
2362  * hif_dump_pipe_debug_count() - Log error count
2363  * @scn: hif_softc pointer.
2364  *
2365  * Output the pipe error counts of each pipe to log file
2366  *
2367  * Return: N/A
2368  */
2369 void hif_dump_pipe_debug_count(struct hif_softc *scn)
2370 {
2371 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2372 	int pipe_num;
2373 
2374 	if (!hif_state) {
2375 		HIF_ERROR("%s hif_state is NULL", __func__);
2376 		return;
2377 	}
2378 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2379 		struct HIF_CE_pipe_info *pipe_info;
2380 
2381 		pipe_info = &hif_state->pipe_info[pipe_num];
2382 
2383 		if (pipe_info->nbuf_alloc_err_count > 0 ||
2384 		    pipe_info->nbuf_dma_err_count > 0 ||
2385 		    pipe_info->nbuf_ce_enqueue_err_count)
2386 			HIF_ERROR(
2387 				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
2388 				__func__, pipe_info->pipe_num,
2389 				atomic_read(&pipe_info->recv_bufs_needed),
2390 				pipe_info->nbuf_alloc_err_count,
2391 				pipe_info->nbuf_dma_err_count,
2392 				pipe_info->nbuf_ce_enqueue_err_count);
2393 	}
2394 }
2395 
2396 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2397 					  void *nbuf, uint32_t *error_cnt,
2398 					  enum hif_ce_event_type failure_type,
2399 					  const char *failure_type_string)
2400 {
2401 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2402 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2403 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2404 	int ce_id = CE_state->id;
2405 	uint32_t error_cnt_tmp;
2406 
2407 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2408 	error_cnt_tmp = ++(*error_cnt);
2409 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2410 	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
2411 		  __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2412 		  failure_type_string);
2413 	hif_record_ce_desc_event(scn, ce_id, failure_type,
2414 				 NULL, nbuf, bufs_needed_tmp, 0);
2415 	/* if we fail to allocate the last buffer for an rx pipe,
2416 	 *	there is no trigger to refill the ce and we will
2417 	 *	eventually crash
2418 	 */
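	/*
	 * Example (hypothetical ring size): with a 512-entry dest ring,
	 * bufs_needed_tmp == 511 means every postable recv buffer is
	 * missing, so the OOM work scheduled below is the only remaining
	 * refill trigger.
	 */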
2419 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
2420 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
2421 
2422 }
2423 
2427 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
2428 {
2429 	struct CE_handle *ce_hdl;
2430 	qdf_size_t buf_sz;
2431 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2432 	QDF_STATUS status;
2433 	uint32_t bufs_posted = 0;
2434 	unsigned int ce_id;
2435 
2436 	buf_sz = pipe_info->buf_sz;
2437 	if (buf_sz == 0) {
2438 		/* Unused Copy Engine */
2439 		return QDF_STATUS_SUCCESS;
2440 	}
2441 
2442 	ce_hdl = pipe_info->ce_hdl;
2443 	ce_id = ((struct CE_state *)ce_hdl)->id;
2444 
2445 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2446 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
2447 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
2448 		qdf_nbuf_t nbuf;
2449 
2450 		atomic_dec(&pipe_info->recv_bufs_needed);
2451 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2452 
2453 		hif_record_ce_desc_event(scn, ce_id,
2454 					 HIF_RX_DESC_PRE_NBUF_ALLOC, NULL, NULL,
2455 					 0, 0);
2456 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
2457 		if (!nbuf) {
2458 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2459 					&pipe_info->nbuf_alloc_err_count,
2460 					 HIF_RX_NBUF_ALLOC_FAILURE,
2461 					"HIF_RX_NBUF_ALLOC_FAILURE");
2462 			return QDF_STATUS_E_NOMEM;
2463 		}
2464 
2465 		hif_record_ce_desc_event(scn, ce_id,
2466 					 HIF_RX_DESC_PRE_NBUF_MAP, NULL, nbuf,
2467 					 0, 0);
2468 		/*
2469 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
2470 		 * CE_data = dma_map_single(dev, data, buf_sz,
2471 		 *                          DMA_FROM_DEVICE);
2472 		 */
2473 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
2474 					    QDF_DMA_FROM_DEVICE);
2475 
2476 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2477 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2478 					&pipe_info->nbuf_dma_err_count,
2479 					 HIF_RX_NBUF_MAP_FAILURE,
2480 					"HIF_RX_NBUF_MAP_FAILURE");
2481 			qdf_nbuf_free(nbuf);
2482 			return status;
2483 		}
2484 
2485 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
2486 		hif_record_ce_desc_event(scn, ce_id,
2487 					 HIF_RX_DESC_POST_NBUF_MAP, NULL, nbuf,
2488 					 0, 0);
2489 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
2490 					       buf_sz, DMA_FROM_DEVICE);
2491 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
2492 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2493 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2494 					&pipe_info->nbuf_ce_enqueue_err_count,
2495 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
2496 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
2497 
2498 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2499 						QDF_DMA_FROM_DEVICE);
2500 			qdf_nbuf_free(nbuf);
2501 			return status;
2502 		}
2503 
2504 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2505 		bufs_posted++;
2506 	}
2507 	pipe_info->nbuf_alloc_err_count =
2508 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
2509 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2510 	pipe_info->nbuf_dma_err_count =
2511 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
2512 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2513 	pipe_info->nbuf_ce_enqueue_err_count =
2514 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
2515 		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
2516 
2517 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2518 
2519 	return QDF_STATUS_SUCCESS;
2520 }
2521 
2522 /*
2523  * Try to post all desired receive buffers for all pipes.
2524  * Returns QDF_STATUS_SUCCESS for a non-fastpath rx copy engine, since
2525  * oom_allocation_work will be scheduled to recover any failures, and an
2526  * error status if unable to completely replenish receive buffers for a
2527  * fastpath rx copy engine.
2528  */
2529 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
2530 {
2531 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2532 	int pipe_num;
2533 	struct CE_state *ce_state = NULL;
2534 	QDF_STATUS qdf_status;
2535 
2536 	A_TARGET_ACCESS_LIKELY(scn);
2537 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2538 		struct HIF_CE_pipe_info *pipe_info;
2539 
2540 		ce_state = scn->ce_id_to_state[pipe_num];
2541 		pipe_info = &hif_state->pipe_info[pipe_num];
2542 
2543 		if (hif_is_nss_wifi_enabled(scn) &&
2544 		    ce_state && (ce_state->htt_rx_data))
2545 			continue;
2546 
2547 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
2548 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
2549 			ce_state->htt_rx_data &&
2550 			scn->fastpath_mode_on) {
2551 			A_TARGET_ACCESS_UNLIKELY(scn);
2552 			return qdf_status;
2553 		}
2554 	}
2555 
2556 	A_TARGET_ACCESS_UNLIKELY(scn);
2557 
2558 	return QDF_STATUS_SUCCESS;
2559 }
2560 
2561 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
2562 {
2563 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2564 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2565 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
2566 
2567 	hif_update_fastpath_recv_bufs_cnt(scn);
2568 
2569 	hif_msg_callbacks_install(scn);
2570 
2571 	if (hif_completion_thread_startup(hif_state))
2572 		return QDF_STATUS_E_FAILURE;
2573 
2574 	/* enable buffer cleanup */
2575 	hif_state->started = true;
2576 
2577 	/* Post buffers once to start things off. */
2578 	qdf_status = hif_post_recv_buffers(scn);
2579 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2580 		/* cleanup is done in hif_ce_disable */
2581 		HIF_ERROR("%s: failed to post buffers", __func__);
2582 		return qdf_status;
2583 	}
2584 
2585 	return qdf_status;
2586 }
2587 
2588 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2589 {
2590 	struct hif_softc *scn;
2591 	struct CE_handle *ce_hdl;
2592 	uint32_t buf_sz;
2593 	struct HIF_CE_state *hif_state;
2594 	qdf_nbuf_t netbuf;
2595 	qdf_dma_addr_t CE_data;
2596 	void *per_CE_context;
2597 
2598 	buf_sz = pipe_info->buf_sz;
2599 	/* Unused Copy Engine */
2600 	if (buf_sz == 0)
2601 		return;
2602 
2604 	hif_state = pipe_info->HIF_CE_state;
2605 	if (!hif_state->started)
2606 		return;
2607 
2608 	scn = HIF_GET_SOFTC(hif_state);
2609 	ce_hdl = pipe_info->ce_hdl;
2610 
2611 	if (!scn->qdf_dev)
2612 		return;
2613 	while (ce_revoke_recv_next
2614 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
2615 			&CE_data) == QDF_STATUS_SUCCESS) {
2616 		if (netbuf) {
2617 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2618 					      QDF_DMA_FROM_DEVICE);
2619 			qdf_nbuf_free(netbuf);
2620 		}
2621 	}
2622 }
2623 
2624 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2625 {
2626 	struct CE_handle *ce_hdl;
2627 	struct HIF_CE_state *hif_state;
2628 	struct hif_softc *scn;
2629 	qdf_nbuf_t netbuf;
2630 	void *per_CE_context;
2631 	qdf_dma_addr_t CE_data;
2632 	unsigned int nbytes;
2633 	unsigned int id;
2634 	uint32_t buf_sz;
2635 	uint32_t toeplitz_hash_result;
2636 
2637 	buf_sz = pipe_info->buf_sz;
2638 	if (buf_sz == 0) {
2639 		/* Unused Copy Engine */
2640 		return;
2641 	}
2642 
2643 	hif_state = pipe_info->HIF_CE_state;
2644 	if (!hif_state->started) {
2645 		return;
2646 	}
2647 
2648 	scn = HIF_GET_SOFTC(hif_state);
2649 
2650 	ce_hdl = pipe_info->ce_hdl;
2651 
2652 	while (ce_cancel_send_next
2653 		       (ce_hdl, &per_CE_context,
2654 		       (void **)&netbuf, &CE_data, &nbytes,
2655 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2656 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2657 			/*
2658 			 * Packets enqueued by htt_h2t_ver_req_msg() and
2659 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2660 			 * freed in htt_htc_misc_pkt_pool_free() in
2661 			 * wlantl_close(), so do not free them here again
2662 			 * by checking whether it's the endpoint
2663 			 * which they are queued in.
2664 			 */
2665 			if (id == scn->htc_htt_tx_endpoint)
2666 				return;
2667 			/* Indicate the completion to higher
2668 			 * layer to free the buffer
2669 			 */
2670 			if (pipe_info->pipe_callbacks.txCompletionHandler)
2671 				pipe_info->pipe_callbacks.
2672 				    txCompletionHandler(pipe_info->
2673 					    pipe_callbacks.Context,
2674 					    netbuf, id, toeplitz_hash_result);
2675 		}
2676 	}
2677 }
2678 
2679 /*
2680  * Cleanup residual buffers for device shutdown:
2681  *    buffers that were enqueued for receive
2682  *    buffers that were to be sent
2683  * Note: Buffers that had completed but which were
2684  * not yet processed are on a completion queue. They
2685  * are handled when the completion thread shuts down.
2686  */
2687 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
2688 {
2689 	int pipe_num;
2690 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2691 	struct CE_state *ce_state;
2692 
2693 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2694 		struct HIF_CE_pipe_info *pipe_info;
2695 
2696 		ce_state = scn->ce_id_to_state[pipe_num];
2697 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2698 				((ce_state->htt_tx_data) ||
2699 				 (ce_state->htt_rx_data))) {
2700 			continue;
2701 		}
2702 
2703 		pipe_info = &hif_state->pipe_info[pipe_num];
2704 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
2705 		hif_send_buffer_cleanup_on_pipe(pipe_info);
2706 	}
2707 }
2708 
2709 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
2710 {
2711 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2712 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2713 
2714 	hif_buffer_cleanup(hif_state);
2715 }
2716 
2717 static void hif_destroy_oom_work(struct hif_softc *scn)
2718 {
2719 	struct CE_state *ce_state;
2720 	int ce_id;
2721 
2722 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2723 		ce_state = scn->ce_id_to_state[ce_id];
2724 		if (ce_state)
2725 			qdf_destroy_work(scn->qdf_dev,
2726 					 &ce_state->oom_allocation_work);
2727 	}
2728 }
2729 
2730 void hif_ce_stop(struct hif_softc *scn)
2731 {
2732 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2733 	int pipe_num;
2734 
2735 	/*
2736 	 * before cleaning up any memory, ensure irq &
2737 	 * bottom half contexts will not be re-entered
2738 	 */
2739 	hif_disable_isr(&scn->osc);
2740 	hif_destroy_oom_work(scn);
2741 	scn->hif_init_done = false;
2742 
2743 	/*
2744 	 * At this point, asynchronous threads are stopped,
2745 	 * The Target should not DMA nor interrupt, Host code may
2746 	 * not initiate anything more.  So we just need to clean
2747 	 * up Host-side state.
2748 	 */
2749 
2750 	if (scn->athdiag_procfs_inited) {
2751 		athdiag_procfs_remove();
2752 		scn->athdiag_procfs_inited = false;
2753 	}
2754 
2755 	hif_buffer_cleanup(hif_state);
2756 
2757 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2758 		struct HIF_CE_pipe_info *pipe_info;
2759 		struct CE_attr attr;
2760 		struct CE_handle *ce_diag = hif_state->ce_diag;
2761 
2762 		pipe_info = &hif_state->pipe_info[pipe_num];
2763 		if (pipe_info->ce_hdl) {
2764 			if (pipe_info->ce_hdl != ce_diag) {
2765 				attr = hif_state->host_ce_config[pipe_num];
2766 				if (attr.src_nentries)
2767 					qdf_spinlock_destroy(&pipe_info->
2768 							completion_freeq_lock);
2769 			}
2770 			ce_fini(pipe_info->ce_hdl);
2771 			pipe_info->ce_hdl = NULL;
2772 			pipe_info->buf_sz = 0;
2773 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2774 		}
2775 	}
2776 
2777 	if (hif_state->sleep_timer_init) {
2778 		qdf_timer_stop(&hif_state->sleep_timer);
2779 		qdf_timer_free(&hif_state->sleep_timer);
2780 		hif_state->sleep_timer_init = false;
2781 	}
2782 
2783 	hif_state->started = false;
2784 }
2785 
2786 static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
2787 				   struct shadow_reg_cfg
2788 				   **target_shadow_reg_cfg_ret,
2789 				   uint32_t *shadow_cfg_sz_ret)
2790 {
2791 	if (target_shadow_reg_cfg_ret)
2792 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2793 	if (shadow_cfg_sz_ret)
2794 		*shadow_cfg_sz_ret = shadow_cfg_sz;
2795 }
2796 
2797 /**
2798  * hif_get_target_ce_config() - get copy engine configuration
2799  * @target_ce_config_ret: basic copy engine configuration
2800  * @target_ce_config_sz_ret: size of the basic configuration in bytes
2801  * @target_service_to_ce_map_ret: service mapping for the copy engines
2802  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2803  * @target_shadow_reg_cfg_ret: shadow register configuration
2804  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2805  *
2806  * Provides accessors to these values outside of this file.
2807  * Currently these are stored in static pointers to const sections.
2808  * There are multiple configurations that are selected from at compile time.
2809  * Runtime selection would need to consider mode, target type and bus type.
2810  *
2811  * Return: return by parameter.
2812  */
2813 void hif_get_target_ce_config(struct hif_softc *scn,
2814 		struct CE_pipe_config **target_ce_config_ret,
2815 		uint32_t *target_ce_config_sz_ret,
2816 		struct service_to_pipe **target_service_to_ce_map_ret,
2817 		uint32_t *target_service_to_ce_map_sz_ret,
2818 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
2819 		uint32_t *shadow_cfg_sz_ret)
2820 {
2821 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2822 
2823 	*target_ce_config_ret = hif_state->target_ce_config;
2824 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
2825 
2826 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2827 				       target_service_to_ce_map_sz_ret);
2828 	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
2829 			       shadow_cfg_sz_ret);
2830 }
2831 
2832 #ifdef CONFIG_SHADOW_V2
2833 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2834 {
2835 	int i;
2836 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2837 		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
2838 
2839 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2840 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2841 		     "%s: i %d, val %x", __func__, i,
2842 		     cfg->shadow_reg_v2_cfg[i].addr);
2843 	}
2844 }
2845 
2846 #else
2847 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2848 {
2849 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2850 		  "%s: CONFIG_SHADOW_V2 not defined", __func__);
2851 }
2852 #endif
2853 
2854 #ifdef ADRASTEA_RRI_ON_DDR
2855 /**
2856  * hif_get_src_ring_read_index(): Called to get the SRRI
2857  *
2858  * @scn: hif_softc pointer
2859  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2860  *
2861  * This function returns the SRRI to the caller. For CEs that
2862  * don't have interrupts enabled, we look at the DDR based SRRI
2863  *
2864  * Return: SRRI
2865  */
2866 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
2867 		uint32_t CE_ctrl_addr)
2868 {
2869 	struct CE_attr attr;
2870 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2871 
2872 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2873 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
2874 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2875 	} else {
2876 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2877 			return A_TARGET_READ(scn,
2878 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2879 		else
2880 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
2881 					CE_ctrl_addr);
2882 	}
2883 }
2884 
2885 /**
2886  * hif_get_dst_ring_read_index(): Called to get the DRRI
2887  *
2888  * @scn: hif_softc pointer
2889  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2890  *
2891  * This function returns the DRRI to the caller. For CEs that
2892  * don't have interrupts enabled, we look at the DDR based DRRI
2893  *
2894  * Return: DRRI
2895  */
2896 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
2897 		uint32_t CE_ctrl_addr)
2898 {
2899 	struct CE_attr attr;
2900 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2901 
2902 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2903 
2904 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
2905 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2906 	} else {
2907 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2908 			return A_TARGET_READ(scn,
2909 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2910 		else
2911 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
2912 					CE_ctrl_addr);
2913 	}
2914 }
2915 
2916 /**
2917  * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
2918  * @scn: hif_softc pointer
2919  *
2920  * Return: qdf status
2921  */
2922 static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
2923 {
2924 	qdf_dma_addr_t paddr_rri_on_ddr = 0;
2925 
2926 	scn->vaddr_rri_on_ddr =
2927 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
2928 		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
2929 		&paddr_rri_on_ddr);
2930 
2931 	if (!scn->vaddr_rri_on_ddr) {
2932 		hif_err("dmaable page alloc fail");
2933 		return QDF_STATUS_E_NOMEM;
2934 	}
2935 
2936 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
2937 
2938 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
2939 
2940 	return QDF_STATUS_SUCCESS;
2941 }
2942 #endif
2943 
2944 #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
2945 /**
2946  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2947  *
2948  * @scn: hif_softc pointer
2949  *
2950  * This function allocates non-cached memory on DDR and sends
2951  * the physical address of this memory to the CE hardware. The
2952  * hardware updates the RRI on this particular location.
2953  *
2954  * Return: None
2955  */
2956 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
2957 {
2958 	unsigned int i;
2959 	uint32_t high_paddr, low_paddr;
2960 
2961 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
2962 		return;
2963 
2964 	low_paddr  = BITS0_TO_31(scn->paddr_rri_on_ddr);
2965 	high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);
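
	/*
	 * Worked example (hypothetical address): for a 36-bit physical
	 * address of 0x8_1234_5678, low_paddr would be 0x12345678 and
	 * high_paddr would be 0x8, assuming the BITS0_TO_31/BITS32_TO_35
	 * macros extract bits [31:0] and [35:32] as their names suggest.
	 */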
2966 
2967 	HIF_DBG("%s using srri and drri from DDR", __func__);
2968 
2969 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
2970 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
2971 
2972 	for (i = 0; i < CE_COUNT; i++)
2973 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
2974 }
2975 #else
2976 /**
2977  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2978  *
2979  * @scn: hif_softc pointer
2980  *
2981  * This is a dummy implementation for platforms that don't
2982  * support this functionality.
2983  *
2984  * Return: None
2985  */
2986 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
2987 {
2988 }
2989 #endif
2990 
2991 /**
2992  * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
2993  *                                    QMI command
2994  * @scn: hif context
2995  * @cfg: wlan enable config
2996  *
2997  * In case of Genoa, rri_over_ddr memory configuration is passed
2998  * to firmware through QMI configure command.
2999  */
3000 #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
3001 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3002 					   struct pld_wlan_enable_cfg *cfg)
3003 {
3004 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
3005 		return;
3006 
3007 	cfg->rri_over_ddr_cfg_valid = true;
3008 	cfg->rri_over_ddr_cfg.base_addr_low =
3009 		 BITS0_TO_31(scn->paddr_rri_on_ddr);
3010 	cfg->rri_over_ddr_cfg.base_addr_high =
3011 		 BITS32_TO_35(scn->paddr_rri_on_ddr);
3012 }
3013 #else
3014 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
3015 					   struct pld_wlan_enable_cfg *cfg)
3016 {
3017 }
3018 #endif
3019 
3020 /**
3021  * hif_wlan_enable(): call the platform driver to enable wlan
3022  * @scn: HIF Context
3023  *
3024  * This function passes the con_mode and CE configuration to
3025  * platform driver to enable wlan.
3026  *
3027  * Return: linux error code
3028  */
3029 int hif_wlan_enable(struct hif_softc *scn)
3030 {
3031 	struct pld_wlan_enable_cfg cfg;
3032 	enum pld_driver_mode mode;
3033 	uint32_t con_mode = hif_get_conparam(scn);
3034 
3035 	hif_get_target_ce_config(scn,
3036 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
3037 			&cfg.num_ce_tgt_cfg,
3038 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
3039 			&cfg.num_ce_svc_pipe_cfg,
3040 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
3041 			&cfg.num_shadow_reg_cfg);
3042 
3043 	/* translate from structure size to array size */
3044 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
3045 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
3046 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
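
	/*
	 * Example: the counts filled in above are byte sizes (e.g.
	 * sizeof(target_ce_config_wlan)), so dividing by the per-entry
	 * struct size yields the number of array elements the platform
	 * driver expects in each count field.
	 */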
3047 
3048 	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
3049 			      &cfg.num_shadow_reg_v2_cfg);
3050 
3051 	hif_print_hal_shadow_register_cfg(&cfg);
3052 
3053 	hif_update_rri_over_ddr_config(scn, &cfg);
3054 
3055 	if (QDF_GLOBAL_FTM_MODE == con_mode)
3056 		mode = PLD_FTM;
3057 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
3058 		mode = PLD_COLDBOOT_CALIBRATION;
3059 	else if (QDF_GLOBAL_FTM_COLDBOOT_CALIB_MODE == con_mode)
3060 		mode = PLD_FTM_COLDBOOT_CALIBRATION;
3061 	else if (QDF_IS_EPPING_ENABLED(con_mode))
3062 		mode = PLD_EPPING;
3063 	else
3064 		mode = PLD_MISSION;
3065 
3066 	if (BYPASS_QMI)
3067 		return 0;
3068 	else
3069 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg, mode);
3070 }
3071 
3072 #ifdef WLAN_FEATURE_EPPING
3073 
3074 #define CE_EPPING_USES_IRQ true
3075 
3076 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
3077 {
3078 	if (CE_EPPING_USES_IRQ)
3079 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
3080 	else
3081 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
3082 	hif_state->target_ce_config = target_ce_config_wlan_epping;
3083 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
3084 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
3085 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
3086 }
3087 #endif
3088 
3089 #ifdef QCN7605_SUPPORT
3090 static inline
3091 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3092 			       struct HIF_CE_state *hif_state)
3093 {
3094 	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
3095 	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
3096 	hif_state->target_ce_config_sz =
3097 				 sizeof(target_ce_config_wlan_qcn7605);
3098 	target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
3099 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
3100 	scn->ce_count = QCN7605_CE_COUNT;
3101 }
3102 #else
3103 static inline
3104 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3105 			       struct HIF_CE_state *hif_state)
3106 {
3107 	HIF_ERROR("QCN7605 not supported");
3108 }
3109 #endif
3110 
3111 #ifdef CE_SVC_CMN_INIT
3112 #ifdef QCA_WIFI_SUPPORT_SRNG
3113 static inline void hif_ce_service_init(void)
3114 {
3115 	ce_service_srng_init();
3116 }
3117 #else
3118 static inline void hif_ce_service_init(void)
3119 {
3120 	ce_service_legacy_init();
3121 }
3122 #endif
3123 #else
3124 static inline void hif_ce_service_init(void)
3125 {
3126 }
3127 #endif
3128 
3129 
3130 /**
3131  * hif_ce_prepare_config() - load the correct static tables.
3132  * @scn: hif context
3133  *
3134  * Epping uses different static attribute tables than mission mode.
3135  */
3136 void hif_ce_prepare_config(struct hif_softc *scn)
3137 {
3138 	uint32_t mode = hif_get_conparam(scn);
3139 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3140 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
3141 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3142 
3143 	hif_ce_service_init();
3144 	hif_state->ce_services = ce_services_attach(scn);
3145 
3146 	scn->ce_count = HOST_CE_COUNT;
3147 	/* if epping is enabled we need to use the epping configuration. */
3148 	if (QDF_IS_EPPING_ENABLED(mode)) {
3149 		hif_ce_prepare_epping_config(hif_state);
3150 		return;
3151 	}
3152 
3153 	switch (tgt_info->target_type) {
3154 	default:
3155 		hif_state->host_ce_config = host_ce_config_wlan;
3156 		hif_state->target_ce_config = target_ce_config_wlan;
3157 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
3158 		break;
3159 	case TARGET_TYPE_QCN7605:
3160 		hif_set_ce_config_qcn7605(scn, hif_state);
3161 		break;
3162 	case TARGET_TYPE_AR900B:
3163 	case TARGET_TYPE_QCA9984:
3164 	case TARGET_TYPE_IPQ4019:
3165 	case TARGET_TYPE_QCA9888:
3166 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3167 			hif_state->host_ce_config =
3168 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
3169 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3170 			hif_state->host_ce_config =
3171 				host_lowdesc_ce_cfg_wlan_ar900b;
3172 		} else {
3173 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
3174 		}
3175 
3176 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
3177 		hif_state->target_ce_config_sz =
3178 				sizeof(target_ce_config_wlan_ar900b);
3179 
3180 		break;
3181 
3182 	case TARGET_TYPE_AR9888:
3183 	case TARGET_TYPE_AR9888V2:
3184 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3185 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
3186 		} else {
3187 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
3188 		}
3189 
3190 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
3191 		hif_state->target_ce_config_sz =
3192 					sizeof(target_ce_config_wlan_ar9888);
3193 
3194 		break;
3195 
3196 	case TARGET_TYPE_QCA8074:
3197 	case TARGET_TYPE_QCA8074V2:
3198 	case TARGET_TYPE_QCA6018:
3199 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
3200 			hif_state->host_ce_config =
3201 					host_ce_config_wlan_qca8074_pci;
3202 			hif_state->target_ce_config =
3203 				target_ce_config_wlan_qca8074_pci;
3204 			hif_state->target_ce_config_sz =
3205 				sizeof(target_ce_config_wlan_qca8074_pci);
3206 		} else {
3207 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
3208 			hif_state->target_ce_config =
3209 					target_ce_config_wlan_qca8074;
3210 			hif_state->target_ce_config_sz =
3211 				sizeof(target_ce_config_wlan_qca8074);
3212 		}
3213 		break;
3214 	case TARGET_TYPE_QCA6290:
3215 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
3216 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
3217 		hif_state->target_ce_config_sz =
3218 					sizeof(target_ce_config_wlan_qca6290);
3219 
3220 		scn->ce_count = QCA_6290_CE_COUNT;
3221 		break;
3222 	case TARGET_TYPE_QCN9000:
3223 		hif_state->host_ce_config = host_ce_config_wlan_qcn9000;
3224 		hif_state->target_ce_config = target_ce_config_wlan_qcn9000;
3225 		hif_state->target_ce_config_sz =
3226 					sizeof(target_ce_config_wlan_qcn9000);
3227 		scn->ce_count = QCN_9000_CE_COUNT;
3228 		scn->disable_wake_irq = 1;
3229 		break;
3230 	case TARGET_TYPE_QCA6390:
3231 		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
3232 		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
3233 		hif_state->target_ce_config_sz =
3234 					sizeof(target_ce_config_wlan_qca6390);
3235 
3236 		scn->ce_count = QCA_6390_CE_COUNT;
3237 		break;
3238 	case TARGET_TYPE_QCA6490:
3239 		hif_state->host_ce_config = host_ce_config_wlan_qca6490;
3240 		hif_state->target_ce_config = target_ce_config_wlan_qca6490;
3241 		hif_state->target_ce_config_sz =
3242 					sizeof(target_ce_config_wlan_qca6490);
3243 
3244 		scn->ce_count = QCA_6490_CE_COUNT;
3245 		break;
3246 	case TARGET_TYPE_QCA6750:
3247 		hif_state->host_ce_config = host_ce_config_wlan_qca6750;
3248 		hif_state->target_ce_config = target_ce_config_wlan_qca6750;
3249 		hif_state->target_ce_config_sz =
3250 					sizeof(target_ce_config_wlan_qca6750);
3251 
3252 		scn->ce_count = QCA_6750_CE_COUNT;
3253 		break;
3254 	case TARGET_TYPE_ADRASTEA:
3255 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3256 			hif_state->host_ce_config =
3257 				host_lowdesc_ce_config_wlan_adrastea_nopktlog;
3258 			hif_state->target_ce_config =
3259 			       target_lowdesc_ce_config_wlan_adrastea_nopktlog;
3260 			hif_state->target_ce_config_sz =
3261 			sizeof(target_lowdesc_ce_config_wlan_adrastea_nopktlog);
3262 		} else {
3263 			hif_state->host_ce_config =
3264 				host_ce_config_wlan_adrastea;
3265 			hif_state->target_ce_config =
3266 					target_ce_config_wlan_adrastea;
3267 			hif_state->target_ce_config_sz =
3268 					sizeof(target_ce_config_wlan_adrastea);
3269 		}
3270 		break;
3271 
3272 	}
3273 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
3274 }
3275 
3276 /**
3277  * hif_ce_open() - do ce specific allocations
3278  * @hif_sc: pointer to hif context
3279  *
3280  * return: 0 for success or QDF_STATUS_E_NOMEM
3281  */
3282 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
3283 {
3284 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3285 
3286 	qdf_spinlock_create(&hif_state->irq_reg_lock);
3287 	qdf_spinlock_create(&hif_state->keep_awake_lock);
3288 	return QDF_STATUS_SUCCESS;
3289 }
3290 
3291 /**
3292  * hif_ce_close() - do ce specific free
3293  * @hif_sc: pointer to hif context
3294  */
3295 void hif_ce_close(struct hif_softc *hif_sc)
3296 {
3297 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3298 
3299 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
3300 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
3301 }
3302 
3303 /**
3304  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
3305  * @hif_sc: hif context
3306  *
3307  * uses state variables to support cleaning up when hif_config_ce fails.
3308  */
3309 void hif_unconfig_ce(struct hif_softc *hif_sc)
3310 {
3311 	int pipe_num;
3312 	struct HIF_CE_pipe_info *pipe_info;
3313 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3314 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
3315 
3316 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3317 		pipe_info = &hif_state->pipe_info[pipe_num];
3318 		if (pipe_info->ce_hdl) {
3319 			ce_unregister_irq(hif_state, (1 << pipe_num));
3320 		}
3321 	}
3322 	deinit_tasklet_workers(hif_hdl);
3323 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3324 		pipe_info = &hif_state->pipe_info[pipe_num];
3325 		if (pipe_info->ce_hdl) {
3326 			ce_fini(pipe_info->ce_hdl);
3327 			pipe_info->ce_hdl = NULL;
3328 			pipe_info->buf_sz = 0;
3329 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
3330 		}
3331 	}
3332 	if (hif_sc->athdiag_procfs_inited) {
3333 		athdiag_procfs_remove();
3334 		hif_sc->athdiag_procfs_inited = false;
3335 	}
3336 }
3337 
3338 #ifdef CONFIG_BYPASS_QMI
3339 #ifdef QCN7605_SUPPORT
3340 /**
3341  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3342  * @scn: pointer to HIF structure
3343  *
3344  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3345  *
3346  * Return: void
3347  */
3348 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3349 {
3350 	void *target_va;
3351 	phys_addr_t target_pa;
3352 	struct ce_info *ce_info_ptr;
3353 	uint32_t msi_data_start;
3354 	uint32_t msi_data_count;
3355 	uint32_t msi_irq_start;
3356 	uint32_t i = 0;
3357 	int ret;
3358 
3359 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev,
3360 					     scn->qdf_dev->dev,
3361 					     FW_SHARED_MEM +
3362 					     sizeof(struct ce_info),
3363 					     &target_pa);
3364 	if (!target_va)
3365 		return;
3366 
3367 	ce_info_ptr = (struct ce_info *)target_va;
3368 
3369 	if (scn->vaddr_rri_on_ddr) {
3370 		ce_info_ptr->rri_over_ddr_low_paddr  =
3371 			 BITS0_TO_31(scn->paddr_rri_on_ddr);
3372 		ce_info_ptr->rri_over_ddr_high_paddr =
3373 			 BITS32_TO_35(scn->paddr_rri_on_ddr);
3374 	}
3375 
3376 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3377 					  &msi_data_count, &msi_data_start,
3378 					  &msi_irq_start);
3379 	if (ret) {
3380 		hif_err("Failed to get CE msi config");
3381 		return;
3382 	}
3383 
3384 	for (i = 0; i < CE_COUNT_MAX; i++) {
3385 		ce_info_ptr->cfg[i].ce_id = i;
3386 		ce_info_ptr->cfg[i].msi_vector =
3387 			 (i % msi_data_count) + msi_irq_start;
3388 	}
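
	/*
	 * Example (hypothetical MSI grant): with msi_data_count = 8 and
	 * msi_irq_start = 1, CEs 0..7 map to vectors 1..8 and CE 8 wraps
	 * back to vector 1.
	 */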
3389 
3390 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3391 	hif_info("target va %pK target pa %pa", target_va, &target_pa);
3392 }
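/*
 * Note on the RRI address split above: the RRI-over-DDR physical address
 * can exceed 32 bits, so it is reported to firmware as a low 32-bit word
 * plus the upper bits. A minimal sketch of the assumed split, using a
 * hypothetical 64-bit value 'paddr' (the BITS0_TO_31()/BITS32_TO_35()
 * helpers are taken to extract these same fields):
 *
 *	uint32_t low  = (uint32_t)(paddr & 0xffffffffULL);
 *	uint32_t high = (uint32_t)((paddr >> 32) & 0xf);
 */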
3393 #else
3394 /**
3395  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3396  * @scn: pointer to HIF structure
3397  *
3398  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3399  *
3400  * Return: void
3401  */
3402 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3403 {
3404 	void *target_va;
3405 	phys_addr_t target_pa;
3406 
3407 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
3408 				FW_SHARED_MEM, &target_pa);
3409 	if (!target_va) {
3410 		HIF_TRACE("Memory allocation failed could not post target buf");
3411 		return;
3412 	}
3413 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3414 	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
3415 }
3416 #endif
3417 
3418 #else
3419 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
3420 {
3421 }
3422 #endif
3423 
3424 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
3425 				bool wait_for_it)
3426 {
3427 	/* todo */
3428 	return 0;
3429 }
3430 
3431 /**
3432  * hif_config_ce() - configure copy engines
3433  * @scn: hif context
3434  *
3435  * Prepares the firmware, copy engine hardware and host software
3436  * according to the attributes selected by hif_ce_prepare_config.
3437  *
3438  * Also calls athdiag_procfs_init.
3439  *
3440  * Return: 0 for success, nonzero for failure.
3441  */
3442 int hif_config_ce(struct hif_softc *scn)
3443 {
3444 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3445 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3446 	struct HIF_CE_pipe_info *pipe_info;
3447 	int pipe_num;
3448 	struct CE_state *ce_state = NULL;
3449 
3450 #ifdef ADRASTEA_SHADOW_REGISTERS
3451 	int i;
3452 #endif
3453 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
3454 
3455 	scn->notice_send = true;
3456 	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
3457 
3458 	hif_post_static_buf_to_target(scn);
3459 
3460 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
3461 
3462 	hif_config_rri_on_ddr(scn);
3463 
3464 	if (ce_srng_based(scn))
3465 		scn->bus_ops.hif_target_sleep_state_adjust =
3466 			&hif_srng_sleep_state_adjust;
3467 
3468 	/* Initialise the CE debug history sysfs interface inputs (ce_id and
3469 	 * index) and disable data storing.
3470 	 */
3471 	reset_ce_debug_history(scn);
3472 
3473 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3474 		struct CE_attr *attr;
3475 
3476 		pipe_info = &hif_state->pipe_info[pipe_num];
3477 		pipe_info->pipe_num = pipe_num;
3478 		pipe_info->HIF_CE_state = hif_state;
3479 		attr = &hif_state->host_ce_config[pipe_num];
3480 
3481 		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
3482 		ce_state = scn->ce_id_to_state[pipe_num];
3483 		if (!ce_state) {
3484 			A_TARGET_ACCESS_UNLIKELY(scn);
3485 			goto err;
3486 		}
3487 		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
3488 		QDF_ASSERT(pipe_info->ce_hdl);
3489 		if (!pipe_info->ce_hdl) {
3490 			rv = QDF_STATUS_E_FAILURE;
3491 			A_TARGET_ACCESS_UNLIKELY(scn);
3492 			goto err;
3493 		}
3494 
3495 		ce_state->lro_data = qdf_lro_init();
3496 
3497 		if (attr->flags & CE_ATTR_DIAG) {
3498 			/* Reserve the ultimate CE for
3499 			 * Diagnostic Window support
3500 			 */
3501 			hif_state->ce_diag = pipe_info->ce_hdl;
3502 			continue;
3503 		}
3504 
3505 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3506 				(ce_state->htt_rx_data))
3507 			continue;
3508 
3509 		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
3510 		if (attr->dest_nentries > 0) {
3511 			atomic_set(&pipe_info->recv_bufs_needed,
3512 				   init_buffer_count(attr->dest_nentries - 1));
3513 			/* SRNG based CE has one entry less */
3514 			if (ce_srng_based(scn))
3515 				atomic_dec(&pipe_info->recv_bufs_needed);
3516 		} else {
3517 			atomic_set(&pipe_info->recv_bufs_needed, 0);
3518 		}
3519 		ce_tasklet_init(hif_state, (1 << pipe_num));
3520 		ce_register_irq(hif_state, (1 << pipe_num));
3521 	}
3522 
3523 	if (athdiag_procfs_init(scn) != 0) {
3524 		A_TARGET_ACCESS_UNLIKELY(scn);
3525 		goto err;
3526 	}
3527 	scn->athdiag_procfs_inited = true;
3528 
3529 	HIF_DBG("%s: ce_init done", __func__);
3530 
3531 	init_tasklet_workers(hif_hdl);
3532 
3533 	HIF_DBG("%s: X, ret = %d", __func__, rv);
3534 
3535 #ifdef ADRASTEA_SHADOW_REGISTERS
3536 	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
3537 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
3538 		HIF_DBG("%s Shadow Register%d is mapped to address %x",
3539 			  __func__, i,
3540 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
3541 	}
3542 #endif
3543 
3544 	return rv != QDF_STATUS_SUCCESS;
3545 
3546 err:
3547 	/* Failure, so clean up */
3548 	hif_unconfig_ce(scn);
3549 	HIF_TRACE("%s: X, ret = %d", __func__, rv);
3550 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
3551 }
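/*
 * Illustrative caller sketch (hypothetical): hif_config_ce() returns 0 on
 * success and nonzero on failure, and it already performs its own cleanup
 * via hif_unconfig_ce() on the error path, so a caller only needs to
 * check the return value:
 *
 *	if (hif_config_ce(scn)) {
 *		HIF_ERROR("%s: CE configuration failed", __func__);
 *		return QDF_STATUS_E_FAILURE;
 *	}
 */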
3552 
3553 #ifdef IPA_OFFLOAD
3554 /**
3555  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
3556  * @scn: bus context
3557  * @ce_sr: copyengine source ring shared memory info
3558  * @ce_sr_ring_size: copyengine source ring size
3559  * @ce_reg_paddr: copyengine register physical address
3560  *
3561  * When the IPA micro controller data path offload feature is enabled,
3562  * HIF shares copy engine resource information with the IPA UC, which
3563  * then accesses the hardware resources using that information.
3564  *
3565  * Return: None
3566  */
3567 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
3568 			     qdf_shared_mem_t **ce_sr,
3569 			     uint32_t *ce_sr_ring_size,
3570 			     qdf_dma_addr_t *ce_reg_paddr)
3571 {
3572 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3573 	struct HIF_CE_pipe_info *pipe_info =
3574 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
3575 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3576 
3577 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
3578 			    ce_reg_paddr);
3579 }
3580 #endif /* IPA_OFFLOAD */
3581 
3582 
3583 #ifdef ADRASTEA_SHADOW_REGISTERS
3584 
3585 /*
3586  * Current shadow register config
3587  *
3588  * -----------------------------------------------------------
3589  * Shadow Register      |     CE   |    src/dst write index
3590  * -----------------------------------------------------------
3591  *         0            |     0    |           src
3592  *         1     No Config - Doesn't point to anything
3593  *         2     No Config - Doesn't point to anything
3594  *         3            |     3    |           src
3595  *         4            |     4    |           src
3596  *         5            |     5    |           src
3597  *         6     No Config - Doesn't point to anything
3598  *         7            |     7    |           src
3599  *         8     No Config - Doesn't point to anything
3600  *         9     No Config - Doesn't point to anything
3601  *         10    No Config - Doesn't point to anything
3602  *         11    No Config - Doesn't point to anything
3603  * -----------------------------------------------------------
3604  *         12    No Config - Doesn't point to anything
3605  *         13           |     1    |           dst
3606  *         14           |     2    |           dst
3607  *         15    No Config - Doesn't point to anything
3608  *         16    No Config - Doesn't point to anything
3609  *         17    No Config - Doesn't point to anything
3610  *         18    No Config - Doesn't point to anything
3611  *         19           |     7    |           dst
3612  *         20           |     8    |           dst
3613  *         21    No Config - Doesn't point to anything
3614  *         22    No Config - Doesn't point to anything
3615  *         23    No Config - Doesn't point to anything
3616  * -----------------------------------------------------------
3617  *
3618  *
3619  * ToDo - Move shadow register config to following in the future
3620  * This helps free up a block of shadow registers towards the end.
3621  * Can be used for other purposes
3622  *
3623  * -----------------------------------------------------------
3624  * Shadow Register      |     CE   |    src/dst write index
3625  * -----------------------------------------------------------
3626  *      0            |     0    |           src
3627  *      1            |     3    |           src
3628  *      2            |     4    |           src
3629  *      3            |     5    |           src
3630  *      4            |     7    |           src
3631  * -----------------------------------------------------------
3632  *      5            |     1    |           dst
3633  *      6            |     2    |           dst
3634  *      7            |     7    |           dst
3635  *      8            |     8    |           dst
3636  * -----------------------------------------------------------
3637  *      9     No Config - Doesn't point to anything
3638  *      12    No Config - Doesn't point to anything
3639  *      13    No Config - Doesn't point to anything
3640  *      14    No Config - Doesn't point to anything
3641  *      15    No Config - Doesn't point to anything
3642  *      16    No Config - Doesn't point to anything
3643  *      17    No Config - Doesn't point to anything
3644  *      18    No Config - Doesn't point to anything
3645  *      19    No Config - Doesn't point to anything
3646  *      20    No Config - Doesn't point to anything
3647  *      21    No Config - Doesn't point to anything
3648  *      22    No Config - Doesn't point to anything
3649  *      23    No Config - Doesn't point to anything
3650  * -----------------------------------------------------------
3651 */
3652 #ifndef QCN7605_SUPPORT
3653 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3654 {
3655 	u32 addr = 0;
3656 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3657 
3658 	switch (ce) {
3659 	case 0:
3660 		addr = SHADOW_VALUE0;
3661 		break;
3662 	case 3:
3663 		addr = SHADOW_VALUE3;
3664 		break;
3665 	case 4:
3666 		addr = SHADOW_VALUE4;
3667 		break;
3668 	case 5:
3669 		addr = SHADOW_VALUE5;
3670 		break;
3671 	case 7:
3672 		addr = SHADOW_VALUE7;
3673 		break;
3674 	default:
3675 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3676 		QDF_ASSERT(0);
3677 	}
3678 	return addr;
3679 
3680 }
3681 
3682 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3683 {
3684 	u32 addr = 0;
3685 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3686 
3687 	switch (ce) {
3688 	case 1:
3689 		addr = SHADOW_VALUE13;
3690 		break;
3691 	case 2:
3692 		addr = SHADOW_VALUE14;
3693 		break;
3694 	case 5:
3695 		addr = SHADOW_VALUE17;
3696 		break;
3697 	case 7:
3698 		addr = SHADOW_VALUE19;
3699 		break;
3700 	case 8:
3701 		addr = SHADOW_VALUE20;
3702 		break;
3703 	case 9:
3704 		addr = SHADOW_VALUE21;
3705 		break;
3706 	case 10:
3707 		addr = SHADOW_VALUE22;
3708 		break;
3709 	case 11:
3710 		addr = SHADOW_VALUE23;
3711 		break;
3712 	default:
3713 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3714 		QDF_ASSERT(0);
3715 	}
3716 
3717 	return addr;
3718 
3719 }
3720 #else
3721 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3722 {
3723 	u32 addr = 0;
3724 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3725 
3726 	switch (ce) {
3727 	case 0:
3728 		addr = SHADOW_VALUE0;
3729 		break;
3730 	case 4:
3731 		addr = SHADOW_VALUE4;
3732 		break;
3733 	case 5:
3734 		addr = SHADOW_VALUE5;
3735 		break;
3736 	default:
3737 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3738 		QDF_ASSERT(0);
3739 	}
3740 	return addr;
3741 }
3742 
3743 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3744 {
3745 	u32 addr = 0;
3746 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3747 
3748 	switch (ce) {
3749 	case 1:
3750 		addr = SHADOW_VALUE13;
3751 		break;
3752 	case 2:
3753 		addr = SHADOW_VALUE14;
3754 		break;
3755 	case 3:
3756 		addr = SHADOW_VALUE15;
3757 		break;
3758 	case 5:
3759 		addr = SHADOW_VALUE17;
3760 		break;
3761 	case 7:
3762 		addr = SHADOW_VALUE19;
3763 		break;
3764 	case 8:
3765 		addr = SHADOW_VALUE20;
3766 		break;
3767 	case 9:
3768 		addr = SHADOW_VALUE21;
3769 		break;
3770 	case 10:
3771 		addr = SHADOW_VALUE22;
3772 		break;
3773 	case 11:
3774 		addr = SHADOW_VALUE23;
3775 		break;
3776 	default:
3777 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3778 		QDF_ASSERT(0);
3779 	}
3780 
3781 	return addr;
3782 }
3783 #endif
3784 #endif
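/*
 * Illustrative sketch (hypothetical, assuming ADRASTEA_SHADOW_REGISTERS):
 * the shadow lookup helpers above translate a CE control address into a
 * shadow register offset, which a write-index update could then target
 * instead of the CE register itself. The variables 'ctrl_addr', 'shadow'
 * and 'write_index' are placeholders; the driver's real update macros in
 * the CE register layer may differ.
 *
 *	u32 shadow = shadow_sr_wr_ind_addr(scn, ctrl_addr);
 *
 *	hif_write32_mb(scn, scn->mem + shadow, write_index);
 */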
3785 
3786 #if defined(FEATURE_LRO)
3787 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
3788 {
3789 	struct CE_state *ce_state;
3790 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3791 
3792 	ce_state = scn->ce_id_to_state[ctx_id];
3793 
3794 	return ce_state->lro_data;
3795 }
3796 #endif
3797 
3798 /**
3799  * hif_map_service_to_pipe() - returns the ce ids pertaining to
3800  * this service
3801  * @hif_hdl: hif_opaque_softc pointer.
3802  * @svc_id: Service ID for which the mapping is needed.
3803  * @ul_pipe: address of the container in which ul pipe is returned.
3804  * @dl_pipe: address of the container in which dl pipe is returned.
3805  * @ul_is_polled: address of the container in which a bool
3806  *			indicating if the UL CE for this service
3807  *			is polled is returned.
3808  * @dl_is_polled: address of the container in which a bool
3809  *			indicating if the DL CE for this service
3810  *			is polled is returned.
3811  *
3812  * Return: Indicates whether the service has been found in the table.
3813  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
3814  *         A debug message is logged if either leg was not updated
3815  *         because the table has no entry for it (but this is not an error).
3816  */
3817 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
3818 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
3819 			int *dl_is_polled)
3820 {
3821 	int status = QDF_STATUS_E_INVAL;
3822 	unsigned int i;
3823 	struct service_to_pipe element;
3824 	struct service_to_pipe *tgt_svc_map_to_use;
3825 	uint32_t sz_tgt_svc_map_to_use;
3826 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3827 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3828 	bool dl_updated = false;
3829 	bool ul_updated = false;
3830 
3831 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
3832 				       &sz_tgt_svc_map_to_use);
3833 
3834 	*dl_is_polled = 0;  /* polling for received messages not supported */
3835 
3836 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
3837 
3838 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
3839 		if (element.service_id == svc_id) {
3840 			if (element.pipedir == PIPEDIR_OUT) {
3841 				*ul_pipe = element.pipenum;
3842 				*ul_is_polled =
3843 					(hif_state->host_ce_config[*ul_pipe].flags &
3844 					 CE_ATTR_DISABLE_INTR) != 0;
3845 				ul_updated = true;
3846 			} else if (element.pipedir == PIPEDIR_IN) {
3847 				*dl_pipe = element.pipenum;
3848 				dl_updated = true;
3849 			}
3850 			status = QDF_STATUS_SUCCESS;
3851 		}
3852 	}
3853 	if (ul_updated == false)
3854 		HIF_DBG("ul pipe is NOT updated for service %d", svc_id);
3855 	if (dl_updated == false)
3856 		HIF_DBG("dl pipe is NOT updated for service %d", svc_id);
3857 
3858 	return status;
3859 }
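/*
 * Illustrative usage (hypothetical caller): look up the UL/DL copy
 * engines for the WMI control service. WMI_CONTROL_SVC is assumed to be
 * defined in the HTC service headers; service IDs come from the
 * service-to-pipe table selected by hif_select_service_to_pipe_map().
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(hif_hdl, WMI_CONTROL_SVC,
 *				    &ul_pipe, &dl_pipe,
 *				    &ul_polled, &dl_polled))
 *		HIF_DBG("no pipe mapping for WMI_CONTROL_SVC");
 */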
3860 
3861 #ifdef SHADOW_REG_DEBUG
3862 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
3863 		uint32_t CE_ctrl_addr)
3864 {
3865 	uint32_t read_from_hw, srri_from_ddr = 0;
3866 
3867 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
3868 
3869 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3870 
3871 	if (read_from_hw != srri_from_ddr) {
3872 		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3873 		       __func__, srri_from_ddr, read_from_hw,
3874 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3875 		QDF_ASSERT(0);
3876 	}
3877 	return srri_from_ddr;
3878 }
3879 
3880 
3881 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
3882 		uint32_t CE_ctrl_addr)
3883 {
3884 	uint32_t read_from_hw, drri_from_ddr = 0;
3885 
3886 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
3887 
3888 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3889 
3890 	if (read_from_hw != drri_from_ddr) {
3891 		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3892 		       drri_from_ddr, read_from_hw,
3893 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3894 		QDF_ASSERT(0);
3895 	}
3896 	return drri_from_ddr;
3897 }
3898 
3899 #endif
3900 
3901 /**
3902  * hif_dump_ce_registers() - dump ce registers
3903  * @scn: hif_softc pointer.
3904  *
3905  * Output the copy engine registers
3906  *
3907  * Return: 0 for success or error code
3908  */
3909 int hif_dump_ce_registers(struct hif_softc *scn)
3910 {
3911 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3912 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
3913 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
3914 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
3915 	uint16_t i;
3916 	QDF_STATUS status;
3917 
3918 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
3919 		if (!scn->ce_id_to_state[i]) {
3920 			HIF_DBG("CE%d not used.", i);
3921 			continue;
3922 		}
3923 
3924 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
3925 					   (uint8_t *) &ce_reg_values[0],
3926 					   ce_reg_word_size * sizeof(uint32_t));
3927 
3928 		if (status != QDF_STATUS_SUCCESS) {
3929 			HIF_ERROR("Dumping CE register failed!");
3930 			return -EACCES;
3931 		}
3932 		HIF_ERROR("CE%d=>\n", i);
3933 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
3934 				   (uint8_t *) &ce_reg_values[0],
3935 				   ce_reg_word_size * sizeof(uint32_t));
3936 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
3937 				+ SR_WR_INDEX_ADDRESS),
3938 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
3939 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
3940 				+ CURRENT_SRRI_ADDRESS),
3941 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
3942 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
3943 				+ DST_WR_INDEX_ADDRESS),
3944 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
3945 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
3946 				+ CURRENT_DRRI_ADDRESS),
3947 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
3948 		qdf_print("---");
3949 	}
3950 	return 0;
3951 }
3952 qdf_export_symbol(hif_dump_ce_registers);
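/*
 * Illustrative usage (hypothetical debug hook, assuming a valid 'scn'):
 * the dump returns 0 on success or -EACCES when the diagnostic read
 * fails, so it can be used directly in an error check:
 *
 *	if (hif_dump_ce_registers(scn))
 *		HIF_ERROR("CE register dump failed");
 */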
3953 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
3954 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
3955 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
3956 {
3957 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3958 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3959 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
3960 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3961 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3962 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
3963 	struct CE_ring_state *src_ring = ce_state->src_ring;
3964 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
3965 
3966 	if (src_ring) {
3967 		hif_info->ul_pipe.nentries = src_ring->nentries;
3968 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
3969 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
3970 		hif_info->ul_pipe.write_index = src_ring->write_index;
3971 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
3972 		hif_info->ul_pipe.base_addr_CE_space =
3973 			src_ring->base_addr_CE_space;
3974 		hif_info->ul_pipe.base_addr_owner_space =
3975 			src_ring->base_addr_owner_space;
3976 	}
3977 
3978 
3979 	if (dest_ring) {
3980 		hif_info->dl_pipe.nentries = dest_ring->nentries;
3981 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
3982 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
3983 		hif_info->dl_pipe.write_index = dest_ring->write_index;
3984 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
3985 		hif_info->dl_pipe.base_addr_CE_space =
3986 			dest_ring->base_addr_CE_space;
3987 		hif_info->dl_pipe.base_addr_owner_space =
3988 			dest_ring->base_addr_owner_space;
3989 	}
3990 
3991 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
3992 	hif_info->ctrl_addr = ce_state->ctrl_addr;
3993 
3994 	return hif_info;
3995 }
3996 qdf_export_symbol(hif_get_addl_pipe_info);
3997 
3998 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
3999 {
4000 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4001 
4002 	scn->nss_wifi_ol_mode = mode;
4003 	return 0;
4004 }
4005 qdf_export_symbol(hif_set_nss_wifiol_mode);
4006 #endif
4007 
4008 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
4009 {
4010 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4011 	scn->hif_attribute = hif_attrib;
4012 }
4013 
4014 
4015 /* disable interrupts (only applicable to legacy copy engines currently) */
4016 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
4017 {
4018 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
4019 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
4020 	uint32_t ctrl_addr = CE_state->ctrl_addr;
4021 
4022 	Q_TARGET_ACCESS_BEGIN(scn);
4023 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
4024 	Q_TARGET_ACCESS_END(scn);
4025 }
4026 qdf_export_symbol(hif_disable_interrupt);
4027 
4028 /**
4029  * hif_fw_event_handler() - hif fw event handler
4030  * @hif_state: pointer to hif ce state structure
4031  *
4032  * Process fw events and raise HTC callback to process fw events.
4033  *
4034  * Return: none
4035  */
4036 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
4037 {
4038 	struct hif_msg_callbacks *msg_callbacks =
4039 		&hif_state->msg_callbacks_current;
4040 
4041 	if (!msg_callbacks->fwEventHandler)
4042 		return;
4043 
4044 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
4045 			QDF_STATUS_E_FAILURE);
4046 }
4047 
4048 #ifndef QCA_WIFI_3_0
4049 /**
4050  * hif_fw_interrupt_handler() - FW interrupt handler
4051  * @irq: irq number
4052  * @arg: the user pointer
4053  *
4054  * Called from the PCI interrupt handler when the target raises a
4055  * firmware-generated interrupt to the Host.
4056  *
4057  * Only registered for legacy CE devices.
4058  *
4059  * Return: status of handled irq
4060  */
4061 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
4062 {
4063 	struct hif_softc *scn = arg;
4064 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
4065 	uint32_t fw_indicator_address, fw_indicator;
4066 
4067 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
4068 		return ATH_ISR_NOSCHED;
4069 
4070 	fw_indicator_address = hif_state->fw_indicator_address;
4071 	/* For sudden unplug this will return ~0 */
4072 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
4073 
4074 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
4075 		/* ACK: clear Target-side pending event */
4076 		A_TARGET_WRITE(scn, fw_indicator_address,
4077 			       fw_indicator & ~FW_IND_EVENT_PENDING);
4078 		if (Q_TARGET_ACCESS_END(scn) < 0)
4079 			return ATH_ISR_SCHED;
4080 
4081 		if (hif_state->started) {
4082 			hif_fw_event_handler(hif_state);
4083 		} else {
4084 			/*
4085 			 * Probable Target failure before we're prepared
4086 			 * to handle it.  Generally unexpected.
4087 			 * fw_indicator used as bitmap, and defined as below:
4088 			 *     FW_IND_EVENT_PENDING    0x1
4089 			 *     FW_IND_INITIALIZED      0x2
4090 			 *     FW_IND_NEEDRECOVER      0x4
4091 			 */
4092 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
4093 				("%s: Early firmware event indicated 0x%x\n",
4094 				 __func__, fw_indicator));
4095 		}
4096 	} else {
4097 		if (Q_TARGET_ACCESS_END(scn) < 0)
4098 			return ATH_ISR_SCHED;
4099 	}
4100 
4101 	return ATH_ISR_SCHED;
4102 }
4103 #else
4104 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
4105 {
4106 	return ATH_ISR_SCHED;
4107 }
4108 #endif /* #ifndef QCA_WIFI_3_0 */
4109 
4110 
4111 /**
4112  * hif_wlan_disable(): call the platform driver to disable wlan
4113  * @scn: HIF Context
4114  *
4115  * This function passes the con_mode to platform driver to disable
4116  * wlan.
4117  *
4118  * Return: void
4119  */
4120 void hif_wlan_disable(struct hif_softc *scn)
4121 {
4122 	enum pld_driver_mode mode;
4123 	uint32_t con_mode = hif_get_conparam(scn);
4124 
4125 	if (scn->target_status == TARGET_STATUS_RESET)
4126 		return;
4127 
4128 	if (QDF_GLOBAL_FTM_MODE == con_mode)
4129 		mode = PLD_FTM;
4130 	else if (QDF_IS_EPPING_ENABLED(con_mode))
4131 		mode = PLD_EPPING;
4132 	else
4133 		mode = PLD_MISSION;
4134 
4135 	pld_wlan_disable(scn->qdf_dev->dev, mode);
4136 }
4137 
4138 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
4139 {
4140 	QDF_STATUS status;
4141 	uint8_t ul_pipe, dl_pipe;
4142 	int ul_is_polled, dl_is_polled;
4143 
4144 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
4145 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
4146 					 HTC_CTRL_RSVD_SVC,
4147 					 &ul_pipe, &dl_pipe,
4148 					 &ul_is_polled, &dl_is_polled);
4149 	if (status) {
4150 		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
4151 		return qdf_status_to_os_return(status);
4152 	}
4153 
4154 	*ce_id = dl_pipe;
4155 
4156 	return 0;
4157 }
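/*
 * Illustrative usage (hypothetical, assuming a valid 'scn'): resolve the
 * wake CE before arming wake interrupts. 'wake_ce' is a placeholder local.
 *
 *	uint8_t wake_ce;
 *
 *	if (hif_get_wake_ce_id(scn, &wake_ce))
 *		HIF_ERROR("no wake CE available");
 */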
4158