xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision 6ecd284e5a94a1c96e26d571dd47419ac305990d)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 #include "targcfg.h"
28 #include "qdf_lock.h"
29 #include "qdf_status.h"
30 #include "qdf_status.h"
31 #include <qdf_atomic.h>         /* qdf_atomic_read */
32 #include <targaddrs.h>
33 #include "hif_io32.h"
34 #include <hif.h>
35 #include <target_type.h>
36 #include "regtable.h"
37 #define ATH_MODULE_NAME hif
38 #include <a_debug.h>
39 #include "hif_main.h"
40 #include "ce_api.h"
41 #include "qdf_trace.h"
42 #include "pld_common.h"
43 #include "hif_debug.h"
44 #include "ce_internal.h"
45 #include "ce_reg.h"
46 #include "ce_assignment.h"
47 #include "ce_tasklet.h"
48 #ifndef CONFIG_WIN
49 #include "qwlan_version.h"
50 #endif
51 #include "qdf_module.h"
52 
53 #define CE_POLL_TIMEOUT 10      /* ms */
54 
55 #define AGC_DUMP         1
56 #define CHANINFO_DUMP    2
57 #define BB_WATCHDOG_DUMP 3
58 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
59 #define PCIE_ACCESS_DUMP 4
60 #endif
61 #include "mp_dev.h"
62 
63 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
64 	!defined(QCA_WIFI_SUPPORT_SRNG)
65 #define QCA_WIFI_SUPPORT_SRNG
66 #endif
67 
68 /* Forward references */
69 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
70 
71 /*
72  * Fix EV118783: poll to check whether a BMI response has arrived,
73  * rather than waiting for an interrupt, which may be lost.
74  */
75 /* #define BMI_RSP_POLLING */
76 #define BMI_RSP_TO_MILLISEC  1000
77 
78 #ifdef CONFIG_BYPASS_QMI
79 #define BYPASS_QMI 1
80 #else
81 #define BYPASS_QMI 0
82 #endif
83 
84 #ifdef CONFIG_WIN
85 #if ENABLE_10_4_FW_HDR
86 #define WDI_IPA_SERVICE_GROUP 5
87 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
88 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
89 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
90 #endif /* ENABLE_10_4_FW_HDR */
91 #endif
92 
93 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
94 static void hif_config_rri_on_ddr(struct hif_softc *scn);
95 
96 /**
97  * hif_target_access_log_dump() - dump access log
98  *
99  * dump access log
100  *
101  * Return: n/a
102  */
103 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
104 static void hif_target_access_log_dump(void)
105 {
106 	hif_target_dump_access_log();
107 }
108 #endif
109 
110 
111 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
112 		      uint8_t cmd_id, bool start)
113 {
114 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
115 
116 	switch (cmd_id) {
117 	case AGC_DUMP:
118 		if (start)
119 			priv_start_agc(scn);
120 		else
121 			priv_dump_agc(scn);
122 		break;
123 	case CHANINFO_DUMP:
124 		if (start)
125 			priv_start_cap_chaninfo(scn);
126 		else
127 			priv_dump_chaninfo(scn);
128 		break;
129 	case BB_WATCHDOG_DUMP:
130 		priv_dump_bbwatchdog(scn);
131 		break;
132 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
133 	case PCIE_ACCESS_DUMP:
134 		hif_target_access_log_dump();
135 		break;
136 #endif
137 	default:
138 		HIF_ERROR("%s: Invalid htc dump command", __func__);
139 		break;
140 	}
141 }
142 
143 static void ce_poll_timeout(void *arg)
144 {
145 	struct CE_state *CE_state = (struct CE_state *)arg;
146 
147 	if (CE_state->timer_inited) {
148 		ce_per_engine_service(CE_state->scn, CE_state->id);
149 		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
150 	}
151 }
152 
153 static unsigned int roundup_pwr2(unsigned int n)
154 {
155 	int i;
156 	unsigned int test_pwr2;
157 
158 	if (!(n & (n - 1)))
159 		return n; /* already a power of 2 */
160 
161 	test_pwr2 = 4;
162 	for (i = 0; i < 29; i++) {
163 		if (test_pwr2 > n)
164 			return test_pwr2;
165 		test_pwr2 = test_pwr2 << 1;
166 	}
167 
168 	QDF_ASSERT(0); /* n too large */
169 	return 0;
170 }
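/*
 * Illustrative examples (editor's note, not part of the original source):
 *   roundup_pwr2(64)  -> 64   (already a power of 2, returned unchanged)
 *   roundup_pwr2(5)   -> 8
 *   roundup_pwr2(100) -> 128
 * ce_init() uses this to round the caller-supplied src/dest ring entry
 * counts up to a power of 2, so that nentries_mask (nentries - 1) set up in
 * ce_alloc_ring_state() can be used as a cheap index wrap-around mask.
 */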
171 
172 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
173 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
174 
175 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
176 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
177 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
178 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
179 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
180 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
181 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
182 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
183 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
184 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
185 #ifdef QCA_WIFI_3_0_ADRASTEA
186 	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
187 	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
188 	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
189 #endif
190 };
191 
192 #ifdef WLAN_FEATURE_EPPING
193 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
194 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
195 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
196 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
197 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
198 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
199 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
200 	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
201 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
202 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
203 };
204 #endif
205 
206 /* CE_PCI TABLE */
207 /*
208  * NOTE: the table below is out of date, though still a useful reference.
209  * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
210  * mapping of HTC services to HIF pipes.
211  */
212 /*
213  * This table defines the Copy Engine configuration and the mapping
214  * of services/endpoints to CEs.  A subset of this information is passed to
215  * the Target during startup as a prerequisite to entering BMI phase.
216  * See:
217  *    target_service_to_ce_map - Target-side mapping
218  *    hif_map_service_to_pipe      - Host-side mapping
219  *    target_ce_config         - Target-side configuration
220  *    host_ce_config           - Host-side configuration
221    ============================================================================
222    Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
223  |                      |      | ctio | Size     | Frequency
224  |                      |      | n    |          |
225    ============================================================================
226    tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
227    descriptor |                      |      |      | O(100B)  | and regular
228    download   |                      |      |      |          |
229    ----------------------------------------------------------------------------
230    rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
231    indication |                      |      |      | O(10B)   | regular
232    upload     |                      |      |      |          |
233    ----------------------------------------------------------------------------
234    MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
235    upload     |                      |      |      | O(1000B) | (frequent
236    e.g. noise |                      |      |      |          | during IP1.0
237    packets    |                      |      |      |          | testing)
238    ----------------------------------------------------------------------------
239    MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
240    download   |                      |      |      | O(1000B) | (frequent
241    e.g.       |                      |      |      |          | during IP1.0
242    misdirecte |                      |      |      |          | testing)
243    d EAPOL    |                      |      |      |          |
244    packets    |                      |      |      |          |
245    ----------------------------------------------------------------------------
246    n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
247  | DATA_VO (uplink)     |      |      |          |
248    ----------------------------------------------------------------------------
249    n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
250  | DATA_VO (downlink)   |      |      |          |
251    ----------------------------------------------------------------------------
252    WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
253  |                      |      |      | O(100B)  |
254    ----------------------------------------------------------------------------
255    WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
256    messages   | (downlink)           |      |      | O(100B)  |
257  |                      |      |      |          |
258    ----------------------------------------------------------------------------
259    n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
260  | HTC_RAW_STREAMS      |      |      |          |
261  | (uplink)             |      |      |          |
262    ----------------------------------------------------------------------------
263    n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
264  | HTC_RAW_STREAMS      |      |      |          |
265  | (downlink)           |      |      |          |
266    ----------------------------------------------------------------------------
267    diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
268  |                      |      |      |          | infrequent
269    ============================================================================
270  */
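/*
 * Reading the table (editor's note, illustrative only): the
 * "rx indication upload | HTT_DATA (uplink) | CE 1 | t->h" row corresponds
 * to the { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1 } entry of
 * target_service_to_ce_map_wlan below, i.e. HTT rx indications from the
 * target are delivered to the host on copy engine 1. Where the two
 * disagree, the service-to-CE map arrays are authoritative (see the NOTE
 * above about the table being out of date).
 */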
271 
272 /*
273  * Map from service/endpoint to Copy Engine.
274  * This table is derived from the CE_PCI TABLE, above.
275  * It is passed to the Target at startup for use by firmware.
276  */
277 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
278 	{
279 		WMI_DATA_VO_SVC,
280 		PIPEDIR_OUT,    /* out = UL = host -> target */
281 		3,
282 	},
283 	{
284 		WMI_DATA_VO_SVC,
285 		PIPEDIR_IN,     /* in = DL = target -> host */
286 		2,
287 	},
288 	{
289 		WMI_DATA_BK_SVC,
290 		PIPEDIR_OUT,    /* out = UL = host -> target */
291 		3,
292 	},
293 	{
294 		WMI_DATA_BK_SVC,
295 		PIPEDIR_IN,     /* in = DL = target -> host */
296 		2,
297 	},
298 	{
299 		WMI_DATA_BE_SVC,
300 		PIPEDIR_OUT,    /* out = UL = host -> target */
301 		3,
302 	},
303 	{
304 		WMI_DATA_BE_SVC,
305 		PIPEDIR_IN,     /* in = DL = target -> host */
306 		2,
307 	},
308 	{
309 		WMI_DATA_VI_SVC,
310 		PIPEDIR_OUT,    /* out = UL = host -> target */
311 		3,
312 	},
313 	{
314 		WMI_DATA_VI_SVC,
315 		PIPEDIR_IN,     /* in = DL = target -> host */
316 		2,
317 	},
318 	{
319 		WMI_CONTROL_SVC,
320 		PIPEDIR_OUT,    /* out = UL = host -> target */
321 		3,
322 	},
323 	{
324 		WMI_CONTROL_SVC,
325 		PIPEDIR_IN,     /* in = DL = target -> host */
326 		2,
327 	},
328 	{
329 		HTC_CTRL_RSVD_SVC,
330 		PIPEDIR_OUT,    /* out = UL = host -> target */
331 		0,              /* could be moved to 3 (share with WMI) */
332 	},
333 	{
334 		HTC_CTRL_RSVD_SVC,
335 		PIPEDIR_IN,     /* in = DL = target -> host */
336 		2,
337 	},
338 	{
339 		HTC_RAW_STREAMS_SVC, /* not currently used */
340 		PIPEDIR_OUT,    /* out = UL = host -> target */
341 		0,
342 	},
343 	{
344 		HTC_RAW_STREAMS_SVC, /* not currently used */
345 		PIPEDIR_IN,     /* in = DL = target -> host */
346 		2,
347 	},
348 	{
349 		HTT_DATA_MSG_SVC,
350 		PIPEDIR_OUT,    /* out = UL = host -> target */
351 		4,
352 	},
353 	{
354 		HTT_DATA_MSG_SVC,
355 		PIPEDIR_IN,     /* in = DL = target -> host */
356 		1,
357 	},
358 	{
359 		WDI_IPA_TX_SVC,
360 		PIPEDIR_OUT,    /* out = UL = host -> target */
361 		5,
362 	},
363 #if defined(QCA_WIFI_3_0_ADRASTEA)
364 	{
365 		HTT_DATA2_MSG_SVC,
366 		PIPEDIR_IN,    /* in = DL = target -> host */
367 		9,
368 	},
369 	{
370 		HTT_DATA3_MSG_SVC,
371 		PIPEDIR_IN,    /* in = DL = target -> host */
372 		10,
373 	},
374 	{
375 		PACKET_LOG_SVC,
376 		PIPEDIR_IN,    /* in = DL = target -> host */
377 		11,
378 	},
379 #endif
380 	/* (Additions here) */
381 
382 	{                       /* Must be last */
383 		0,
384 		0,
385 		0,
386 	},
387 };
388 
389 /* PIPEDIR_OUT = HOST to Target */
390 /* PIPEDIR_IN  = TARGET to HOST */
391 #if (defined(QCA_WIFI_QCA8074))
392 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
393 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
394 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
395 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
396 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
397 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
398 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
399 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
400 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
401 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
402 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
403 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
404 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
405 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
406 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
407 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
408 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
409 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
410 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
411 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
412 	/* (Additions here) */
413 	{ 0, 0, 0, },
414 };
415 #else
416 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
417 };
418 #endif
419 
420 #if (defined(QCA_WIFI_QCA6290))
421 #ifdef CONFIG_WIN
422 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
423 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
424 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
425 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
426 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
427 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
428 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
429 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
430 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
431 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
432 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
433 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
434 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
435 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
436 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
437 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
438 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
439 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
440 	/* (Additions here) */
441 	{ 0, 0, 0, },
442 };
443 #else
444 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
445 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
446 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
447 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
448 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
449 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
450 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
451 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
452 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
453 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
454 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
455 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
456 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
457 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
458 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
459 	/* (Additions here) */
460 	{ 0, 0, 0, },
461 };
462 #endif
463 #else
464 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
465 };
466 #endif
467 
468 static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
469 	{
470 		WMI_DATA_VO_SVC,
471 		PIPEDIR_OUT,    /* out = UL = host -> target */
472 		3,
473 	},
474 	{
475 		WMI_DATA_VO_SVC,
476 		PIPEDIR_IN,     /* in = DL = target -> host */
477 		2,
478 	},
479 	{
480 		WMI_DATA_BK_SVC,
481 		PIPEDIR_OUT,    /* out = UL = host -> target */
482 		3,
483 	},
484 	{
485 		WMI_DATA_BK_SVC,
486 		PIPEDIR_IN,     /* in = DL = target -> host */
487 		2,
488 	},
489 	{
490 		WMI_DATA_BE_SVC,
491 		PIPEDIR_OUT,    /* out = UL = host -> target */
492 		3,
493 	},
494 	{
495 		WMI_DATA_BE_SVC,
496 		PIPEDIR_IN,     /* in = DL = target -> host */
497 		2,
498 	},
499 	{
500 		WMI_DATA_VI_SVC,
501 		PIPEDIR_OUT,    /* out = UL = host -> target */
502 		3,
503 	},
504 	{
505 		WMI_DATA_VI_SVC,
506 		PIPEDIR_IN,     /* in = DL = target -> host */
507 		2,
508 	},
509 	{
510 		WMI_CONTROL_SVC,
511 		PIPEDIR_OUT,    /* out = UL = host -> target */
512 		3,
513 	},
514 	{
515 		WMI_CONTROL_SVC,
516 		PIPEDIR_IN,     /* in = DL = target -> host */
517 		2,
518 	},
519 	{
520 		HTC_CTRL_RSVD_SVC,
521 		PIPEDIR_OUT,    /* out = UL = host -> target */
522 		0,              /* could be moved to 3 (share with WMI) */
523 	},
524 	{
525 		HTC_CTRL_RSVD_SVC,
526 		PIPEDIR_IN,     /* in = DL = target -> host */
527 		1,
528 	},
529 	{
530 		HTC_RAW_STREAMS_SVC, /* not currently used */
531 		PIPEDIR_OUT,    /* out = UL = host -> target */
532 		0,
533 	},
534 	{
535 		HTC_RAW_STREAMS_SVC, /* not currently used */
536 		PIPEDIR_IN,     /* in = DL = target -> host */
537 		1,
538 	},
539 	{
540 		HTT_DATA_MSG_SVC,
541 		PIPEDIR_OUT,    /* out = UL = host -> target */
542 		4,
543 	},
544 #if WLAN_FEATURE_FASTPATH
545 	{
546 		HTT_DATA_MSG_SVC,
547 		PIPEDIR_IN,     /* in = DL = target -> host */
548 		5,
549 	},
550 #else /* WLAN_FEATURE_FASTPATH */
551 	{
552 		HTT_DATA_MSG_SVC,
553 		PIPEDIR_IN,  /* in = DL = target -> host */
554 		1,
555 	},
556 #endif /* WLAN_FEATURE_FASTPATH */
557 
558 	/* (Additions here) */
559 
560 	{                       /* Must be last */
561 		0,
562 		0,
563 		0,
564 	},
565 };
566 
567 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
568 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
569 
570 #ifdef WLAN_FEATURE_EPPING
571 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
572 	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
573 	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
574 	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
575 	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
576 	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
577 	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
578 	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
579 	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
580 	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
581 	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
582 	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
583 	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
584 	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
585 	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
586 	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
587 	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
588 	{0, 0, 0,},             /* Must be last */
589 };
590 
591 void hif_select_epping_service_to_pipe_map(struct service_to_pipe
592 					   **tgt_svc_map_to_use,
593 					   uint32_t *sz_tgt_svc_map_to_use)
594 {
595 	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
596 	*sz_tgt_svc_map_to_use =
597 			sizeof(target_service_to_ce_map_wlan_epping);
598 }
599 #endif
600 
601 static void hif_select_service_to_pipe_map(struct hif_softc *scn,
602 				    struct service_to_pipe **tgt_svc_map_to_use,
603 				    uint32_t *sz_tgt_svc_map_to_use)
604 {
605 	uint32_t mode = hif_get_conparam(scn);
606 	struct hif_target_info *tgt_info = &scn->target_info;
607 
608 	if (QDF_IS_EPPING_ENABLED(mode)) {
609 		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
610 						      sz_tgt_svc_map_to_use);
611 	} else {
612 		switch (tgt_info->target_type) {
613 		default:
614 			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
615 			*sz_tgt_svc_map_to_use =
616 				sizeof(target_service_to_ce_map_wlan);
617 			break;
618 		case TARGET_TYPE_AR900B:
619 		case TARGET_TYPE_QCA9984:
620 		case TARGET_TYPE_IPQ4019:
621 		case TARGET_TYPE_QCA9888:
622 		case TARGET_TYPE_AR9888:
623 		case TARGET_TYPE_AR9888V2:
624 			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
625 			*sz_tgt_svc_map_to_use =
626 				sizeof(target_service_to_ce_map_ar900b);
627 			break;
628 		case TARGET_TYPE_QCA6290:
629 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
630 			*sz_tgt_svc_map_to_use =
631 				sizeof(target_service_to_ce_map_qca6290);
632 			break;
633 		case TARGET_TYPE_QCA8074:
634 			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
635 			*sz_tgt_svc_map_to_use =
636 				sizeof(target_service_to_ce_map_qca8074);
637 			break;
638 		}
639 	}
640 }
641 
642 /**
643  * ce_mark_datapath() - mark ce_state->htt_rx_data/htt_tx_data accordingly
644  * @ce_state: pointer to the state context of the CE
645  *
646  * Description:
647  *   Sets the htt_rx_data or htt_tx_data attribute of the state structure
648  *   if the CE serves one of the HTT DATA services.
649  *
650  * Return:
651  *  true if the CE serves an HTT DATA service,
652  *  false otherwise
653  */
654 static bool ce_mark_datapath(struct CE_state *ce_state)
655 {
656 	struct service_to_pipe *svc_map;
657 	uint32_t map_sz, map_len;
658 	int    i;
659 	bool   rc = false;
660 
661 	if (ce_state != NULL) {
662 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
663 					       &map_sz);
664 
665 		map_len = map_sz / sizeof(struct service_to_pipe);
666 		for (i = 0; i < map_len; i++) {
667 			if ((svc_map[i].pipenum == ce_state->id) &&
668 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
669 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
670 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
671 				/* HTT CEs are unidirectional */
672 				if (svc_map[i].pipedir == PIPEDIR_IN)
673 					ce_state->htt_rx_data = true;
674 				else
675 					ce_state->htt_tx_data = true;
676 				rc = true;
677 			}
678 		}
679 	}
680 	return rc;
681 }
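/*
 * Example (editor's note, illustrative only): with the default
 * target_service_to_ce_map_wlan, the HTT_DATA_MSG_SVC entries map
 * PIPEDIR_OUT to CE 4 and PIPEDIR_IN to CE 1, so ce_mark_datapath() sets
 * htt_tx_data on CE 4 and htt_rx_data on CE 1; all other CEs keep both
 * flags false and the function returns false for them.
 */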
682 
683 /**
684  * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
685  * @ce_id: ce in question
686  * @ring: ring state being examined
687  * @type: "src_ring" or "dest_ring" string for identifying the ring
688  *
689  * Warns on non-zero index values.
690  * Causes a kernel panic if the ring is not empty during initialization.
691  */
692 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
693 					 char *type)
694 {
695 	if (ring->write_index != 0 || ring->sw_index != 0)
696 		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
697 			  ce_id, type, ring->sw_index, ring->write_index);
698 	if (ring->write_index != ring->sw_index)
699 		QDF_BUG(0);
700 }
701 
702 #ifdef IPA_OFFLOAD
703 /**
704  * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
705  * @scn: softc instance
706  * @ce_id: ce in question
707  * @base_addr: pointer to copyengine ring base address
708  * @ce_ring: copyengine instance
709  * @nentries: number of entries to be allocated
710  * @desc_size: ce desc size
711  *
712  * Return: QDF_STATUS_SUCCESS - for success
713  */
714 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
715 				     qdf_dma_addr_t *base_addr,
716 				     struct CE_ring_state *ce_ring,
717 				     unsigned int nentries, uint32_t desc_size)
718 {
719 	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
720 		scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(scn->qdf_dev,
721 			nentries * desc_size + CE_DESC_RING_ALIGN);
722 		if (!scn->ipa_ce_ring) {
723 			HIF_ERROR("%s: Failed to allocate memory for IPA ce ring",
724 				  __func__);
725 			return QDF_STATUS_E_NOMEM;
726 		}
727 		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
728 						&scn->ipa_ce_ring->mem_info);
729 		ce_ring->base_addr_owner_space_unaligned =
730 						scn->ipa_ce_ring->vaddr;
731 	} else {
732 		ce_ring->base_addr_owner_space_unaligned =
733 			qdf_mem_alloc_consistent(scn->qdf_dev,
734 						 scn->qdf_dev->dev,
735 						 (nentries * desc_size +
736 						 CE_DESC_RING_ALIGN),
737 						 base_addr);
738 		if (!ce_ring->base_addr_owner_space_unaligned) {
739 			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
740 				  __func__, CE_id);
741 			return QDF_STATUS_E_NOMEM;
742 		}
743 	}
744 	return QDF_STATUS_SUCCESS;
745 }
746 
747 /**
748  * ce_free_desc_ring() - Frees copyengine descriptor ring
749  * @scn: softc instance
750  * @ce_id: ce in question
751  * @ce_ring: copyengine instance
752  * @desc_size: ce desc size
753  *
754  * Return: None
755  */
756 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
757 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
758 {
759 	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
760 		qdf_mem_shared_mem_free(scn->qdf_dev,
761 					scn->ipa_ce_ring);
762 		ce_ring->base_addr_owner_space_unaligned = NULL;
763 	} else {
764 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
765 			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
766 			ce_ring->base_addr_owner_space_unaligned,
767 			ce_ring->base_addr_CE_space, 0);
768 		ce_ring->base_addr_owner_space_unaligned = NULL;
769 	}
770 }
771 #else
772 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
773 				     qdf_dma_addr_t *base_addr,
774 				     struct CE_ring_state *ce_ring,
775 				     unsigned int nentries, uint32_t desc_size)
776 {
777 	ce_ring->base_addr_owner_space_unaligned =
778 		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
779 					 (nentries * desc_size +
780 					 CE_DESC_RING_ALIGN), base_addr);
781 	if (!ce_ring->base_addr_owner_space_unaligned) {
782 		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
783 			  __func__, CE_id);
784 		return QDF_STATUS_E_NOMEM;
785 	}
786 	return QDF_STATUS_SUCCESS;
787 }
788 
789 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
790 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
791 {
792 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
793 		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
794 		ce_ring->base_addr_owner_space_unaligned,
795 		ce_ring->base_addr_CE_space, 0);
796 	ce_ring->base_addr_owner_space_unaligned = NULL;
797 }
798 #endif /* IPA_OFFLOAD */
799 
800 /**
801  * ce_srng_based() - Does this target use srng
802  * @scn: pointer to the hif context
803  *
804  * Description:
805  *   Checks whether the target uses SRNG-based copy engines.
806  *
807  * Return:
808  *  true if the target is SRNG based,
809  *  false otherwise
810  */
811 bool ce_srng_based(struct hif_softc *scn)
812 {
813 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
814 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
815 
816 	switch (tgt_info->target_type) {
817 	case TARGET_TYPE_QCA8074:
818 	case TARGET_TYPE_QCA6290:
819 		return true;
820 	default:
821 		return false;
822 	}
823 	return false;
824 }
825 qdf_export_symbol(ce_srng_based);
826 
827 #ifdef QCA_WIFI_SUPPORT_SRNG
828 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
829 {
830 	if (ce_srng_based(scn))
831 		return ce_services_srng();
832 
833 	return ce_services_legacy();
834 }
835 
836 
837 #else	/* QCA_WIFI_SUPPORT_SRNG */
838 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
839 {
840 	return ce_services_legacy();
841 }
842 #endif /* QCA_WIFI_SUPPORT_SRNG */
843 
844 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
845 		struct pld_shadow_reg_v2_cfg **shadow_config,
846 		int *num_shadow_registers_configured) {
847 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
848 
849 	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
850 			scn, shadow_config, num_shadow_registers_configured);
851 }
852 
853 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
854 						uint8_t ring_type)
855 {
856 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
857 
858 	return hif_state->ce_services->ce_get_desc_size(ring_type);
859 }
860 
861 
862 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
863 		uint8_t ring_type, uint32_t nentries)
864 {
865 	uint32_t ce_nbytes;
866 	char *ptr;
867 	qdf_dma_addr_t base_addr;
868 	struct CE_ring_state *ce_ring;
869 	uint32_t desc_size;
870 	struct hif_softc *scn = CE_state->scn;
871 
872 	ce_nbytes = sizeof(struct CE_ring_state)
873 		+ (nentries * sizeof(void *));
874 	ptr = qdf_mem_malloc(ce_nbytes);
875 	if (!ptr)
876 		return NULL;
877 
878 	ce_ring = (struct CE_ring_state *)ptr;
879 	ptr += sizeof(struct CE_ring_state);
880 	ce_ring->nentries = nentries;
881 	ce_ring->nentries_mask = nentries - 1;
882 
883 	ce_ring->low_water_mark_nentries = 0;
884 	ce_ring->high_water_mark_nentries = nentries;
885 	ce_ring->per_transfer_context = (void **)ptr;
886 
887 	desc_size = ce_get_desc_size(scn, ring_type);
888 
889 	/* Legacy platforms that do not support cache
890 	 * coherent DMA are unsupported
891 	 */
892 	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
893 			       ce_ring, nentries,
894 			       desc_size) !=
895 	    QDF_STATUS_SUCCESS) {
896 		HIF_ERROR("%s: ring has no DMA mem",
897 				__func__);
898 		qdf_mem_free(ptr);
899 		return NULL;
900 	}
901 	ce_ring->base_addr_CE_space_unaligned = base_addr;
902 
903 	/* Initialize the memory to 0 to prevent
904 	 * garbage data from crashing the system
905 	 * when downloading firmware
906 	 */
907 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
908 			nentries * desc_size +
909 			CE_DESC_RING_ALIGN);
910 
911 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
912 
913 		ce_ring->base_addr_CE_space =
914 			(ce_ring->base_addr_CE_space_unaligned +
915 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
916 
917 		ce_ring->base_addr_owner_space = (void *)
918 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
919 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
920 	} else {
921 		ce_ring->base_addr_CE_space =
922 				ce_ring->base_addr_CE_space_unaligned;
923 		ce_ring->base_addr_owner_space =
924 				ce_ring->base_addr_owner_space_unaligned;
925 	}
926 
927 	return ce_ring;
928 }
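/*
 * Alignment example (editor's note, illustrative only, assuming
 * CE_DESC_RING_ALIGN is 64 for the sake of the arithmetic): if the DMA
 * allocation returned a base address of 0x...1010, the aligned CE-space
 * base becomes (0x...1010 + 63) & ~63 = 0x...1040; the owner-space
 * (virtual) base is rounded up the same way, so both sides refer to the
 * same first descriptor.
 */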
929 
930 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
931 			uint32_t ce_id, struct CE_ring_state *ring,
932 			struct CE_attr *attr)
933 {
934 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
935 
936 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
937 					      ring, attr);
938 }
939 
940 int hif_ce_bus_early_suspend(struct hif_softc *scn)
941 {
942 	uint8_t ul_pipe, dl_pipe;
943 	int ce_id, status, ul_is_polled, dl_is_polled;
944 	struct CE_state *ce_state;
945 
946 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
947 					 &ul_pipe, &dl_pipe,
948 					 &ul_is_polled, &dl_is_polled);
949 	if (status) {
950 		HIF_ERROR("%s: pipe_mapping failure", __func__);
951 		return status;
952 	}
953 
954 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
955 		if (ce_id == ul_pipe)
956 			continue;
957 		if (ce_id == dl_pipe)
958 			continue;
959 
960 		ce_state = scn->ce_id_to_state[ce_id];
961 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
962 		if (ce_state->state == CE_RUNNING)
963 			ce_state->state = CE_PAUSED;
964 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
965 	}
966 
967 	return status;
968 }
969 
970 int hif_ce_bus_late_resume(struct hif_softc *scn)
971 {
972 	int ce_id;
973 	struct CE_state *ce_state;
974 	int write_index;
975 	bool index_updated;
976 
977 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
978 		ce_state = scn->ce_id_to_state[ce_id];
979 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
980 		if (ce_state->state == CE_PENDING) {
981 			write_index = ce_state->src_ring->write_index;
982 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
983 					write_index);
984 			ce_state->state = CE_RUNNING;
985 			index_updated = true;
986 		} else {
987 			index_updated = false;
988 		}
989 
990 		if (ce_state->state == CE_PAUSED)
991 			ce_state->state = CE_RUNNING;
992 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
993 
994 		if (index_updated)
995 			hif_record_ce_desc_event(scn, ce_id,
996 				RESUME_WRITE_INDEX_UPDATE,
997 				NULL, NULL, write_index, 0);
998 	}
999 
1000 	return 0;
1001 }
1002 
1003 /**
1004  * ce_oom_recovery() - try to recover rx ce from oom condition
1005  * @context: CE_state of the CE with oom rx ring
1006  *
1007  * The executing work will continue to be rescheduled until
1008  * at least 1 descriptor is successfully posted to the rx ring.
1009  *
1010  * Return: none
1011  */
1012 static void ce_oom_recovery(void *context)
1013 {
1014 	struct CE_state *ce_state = context;
1015 	struct hif_softc *scn = ce_state->scn;
1016 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
1017 	struct HIF_CE_pipe_info *pipe_info =
1018 		&ce_softc->pipe_info[ce_state->id];
1019 
1020 	hif_post_recv_buffers_for_pipe(pipe_info);
1021 }
1022 
1023 #if HIF_CE_DEBUG_DATA_BUF
1024 /**
1025  * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed to by
1026  * the CE descriptors.
1027  * Allocate HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each.
1028  * @scn: hif scn handle
1029  * @ce_id: Copy Engine Id
1030  *
1031  * Return: QDF_STATUS
1032  */
1033 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1034 {
1035 	struct hif_ce_desc_event *event = NULL;
1036 	struct hif_ce_desc_event *hist_ev = NULL;
1037 	uint32_t index = 0;
1038 
1039 	hist_ev =
1040 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1041 
1042 	if (!hist_ev)
1043 		return QDF_STATUS_E_NOMEM;
1044 
1045 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1046 		event = &hist_ev[index];
1047 		event->data =
1048 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
1049 		if (event->data == NULL)
1050 			return QDF_STATUS_E_NOMEM;
1051 	}
1052 	return QDF_STATUS_SUCCESS;
1053 }
1054 
1055 /**
1056  * free_mem_ce_debug_hist_data() - Free mem of the data pointed to by
1057  * the CE descriptors.
1058  * @scn: hif scn handle
1059  * @ce_id: Copy Engine Id
1060  *
1061  * Return: None
1062  */
1063 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1064 {
1065 	struct hif_ce_desc_event *event = NULL;
1066 	struct hif_ce_desc_event *hist_ev = NULL;
1067 	uint32_t index = 0;
1068 
1069 	hist_ev =
1070 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1071 
1072 	if (!hist_ev)
1073 		return;
1074 
1075 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1076 		event = &hist_ev[index];
1077 		if (event->data != NULL)
1078 			qdf_mem_free(event->data);
1079 		event->data = NULL;
1080 		event = NULL;
1081 	}
1082 }
1083 #endif /* HIF_CE_DEBUG_DATA_BUF */
1084 
1085 /*
1086  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to be
1087  * checked here
1088  */
1089 #if HIF_CE_DEBUG_DATA_BUF
1090 /**
1091  * alloc_mem_ce_debug_history() - Allocate mem for storing the CE descriptors
1092  * @scn: hif scn handle
1093  * @ce_id: Copy Engine Id
1094  *
1095  * Return: QDF_STATUS
1096  */
1097 static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
1098 						unsigned int CE_id)
1099 {
1100 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
1101 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
1102 
1103 	if (scn->hif_ce_desc_hist.hist_ev[CE_id] == NULL) {
1104 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
1105 		return QDF_STATUS_E_NOMEM;
1106 	} else {
1107 		scn->hif_ce_desc_hist.enable[CE_id] = 1;
1108 		return QDF_STATUS_SUCCESS;
1109 	}
1110 }
1111 
1112 /**
1113  * free_mem_ce_debug_history() - Free mem allocated for storing the
1114  * CE descriptors.
1115  * @scn: hif scn handle
1116  * @ce_id: Copy Engine Id
1117  *
1118  * Return: None
1119  */
1120 static inline void free_mem_ce_debug_history(struct hif_softc *scn,
1121 						unsigned int CE_id)
1122 {
1123 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1124 	struct hif_ce_desc_event *hist_ev =
1125 			(struct hif_ce_desc_event *)ce_hist->hist_ev[CE_id];
1126 
1127 	if (!hist_ev)
1128 		return;
1129 
1130 #if HIF_CE_DEBUG_DATA_BUF
1131 	if (ce_hist->data_enable[CE_id] == 1) {
1132 		ce_hist->data_enable[CE_id] = 0;
1133 		free_mem_ce_debug_hist_data(scn, CE_id);
1134 	}
1135 #endif
1136 	ce_hist->enable[CE_id] = 0;
1137 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
1138 	ce_hist->hist_ev[CE_id] = NULL;
1139 }
1140 
1141 /**
1142  * reset_ce_debug_history() - reset the index and ce id used for dumping the
1143  * CE records on the console using sysfs.
1144  * @scn: hif scn handle
1145  *
1146  * Return: None
1147  */
1148 static inline void reset_ce_debug_history(struct hif_softc *scn)
1149 {
1150 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1151 	/* Initialise the CE debug history sysfs interface inputs ce_id and
1152 	 * index. Disable data storing
1153 	 */
1154 	ce_hist->hist_index = 0;
1155 	ce_hist->hist_id = 0;
1156 }
1157 #else /*Note: #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
1158 static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
1159 						unsigned int CE_id)
1160 {
1161 	return QDF_STATUS_SUCCESS;
1162 }
1163 
1164 static inline void free_mem_ce_debug_history(struct hif_softc *scn,
1165 						unsigned int CE_id)
1166 {
1167 }
1168 
1169 static inline void reset_ce_debug_history(struct hif_softc *scn)
1170 {
1171 }
1172 #endif /*Note: defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
1173 
1174 /*
1175  * Initialize a Copy Engine based on caller-supplied attributes.
1176  * This may be called once to initialize both source and destination
1177  * rings or it may be called twice for separate source and destination
1178  * initialization. It may be that only one side or the other is
1179  * initialized by software/firmware.
1180  *
1181  * This should be called during the initialization sequence before
1182  * interrupts are enabled, so we don't have to worry about thread safety.
1183  */
1184 struct CE_handle *ce_init(struct hif_softc *scn,
1185 			  unsigned int CE_id, struct CE_attr *attr)
1186 {
1187 	struct CE_state *CE_state;
1188 	uint32_t ctrl_addr;
1189 	unsigned int nentries;
1190 	bool malloc_CE_state = false;
1191 	bool malloc_src_ring = false;
1192 	int status;
1193 
1194 	QDF_ASSERT(CE_id < scn->ce_count);
1195 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
1196 	CE_state = scn->ce_id_to_state[CE_id];
1197 
1198 	if (!CE_state) {
1199 		CE_state =
1200 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
1201 		if (!CE_state) {
1202 			HIF_ERROR("%s: CE_state has no mem", __func__);
1203 			return NULL;
1204 		}
1205 		malloc_CE_state = true;
1206 		qdf_spinlock_create(&CE_state->ce_index_lock);
1207 
1208 		CE_state->id = CE_id;
1209 		CE_state->ctrl_addr = ctrl_addr;
1210 		CE_state->state = CE_RUNNING;
1211 		CE_state->attr_flags = attr->flags;
1212 	}
1213 	CE_state->scn = scn;
1214 
1215 	qdf_atomic_init(&CE_state->rx_pending);
1216 	if (attr == NULL) {
1217 		/* Already initialized; caller wants the handle */
1218 		return (struct CE_handle *)CE_state;
1219 	}
1220 
1221 	if (CE_state->src_sz_max)
1222 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
1223 	else
1224 		CE_state->src_sz_max = attr->src_sz_max;
1225 
1226 	ce_init_ce_desc_event_log(scn, CE_id,
1227 				  attr->src_nentries + attr->dest_nentries);
1228 
1229 	/* source ring setup */
1230 	nentries = attr->src_nentries;
1231 	if (nentries) {
1232 		struct CE_ring_state *src_ring;
1233 
1234 		nentries = roundup_pwr2(nentries);
1235 		if (CE_state->src_ring) {
1236 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
1237 		} else {
1238 			src_ring = CE_state->src_ring =
1239 				ce_alloc_ring_state(CE_state,
1240 						CE_RING_SRC,
1241 						nentries);
1242 			if (!src_ring) {
1243 				/* cannot allocate src ring. If the
1244 				 * CE_state is allocated locally free
1245 				 * CE_State and return error.
1246 				 */
1247 				HIF_ERROR("%s: src ring has no mem", __func__);
1248 				if (malloc_CE_state) {
1249 					/* allocated CE_state locally */
1250 					qdf_mem_free(CE_state);
1251 					malloc_CE_state = false;
1252 				}
1253 				return NULL;
1254 			}
1255 			/* we can allocate src ring. Mark that the src ring is
1256 			 * allocated locally
1257 			 */
1258 			malloc_src_ring = true;
1259 
1260 			/*
1261 			 * Also allocate a shadow src ring in
1262 			 * regular mem to use for faster access.
1263 			 */
1264 			src_ring->shadow_base_unaligned =
1265 				qdf_mem_malloc(nentries *
1266 					       sizeof(struct CE_src_desc) +
1267 					       CE_DESC_RING_ALIGN);
1268 			if (src_ring->shadow_base_unaligned == NULL) {
1269 				HIF_ERROR("%s: src ring no shadow_base mem",
1270 					  __func__);
1271 				goto error_no_dma_mem;
1272 			}
1273 			src_ring->shadow_base = (struct CE_src_desc *)
1274 				(((size_t) src_ring->shadow_base_unaligned +
1275 				CE_DESC_RING_ALIGN - 1) &
1276 				 ~(CE_DESC_RING_ALIGN - 1));
1277 
1278 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
1279 					       src_ring, attr);
1280 			if (status < 0)
1281 				goto error_target_access;
1282 
1283 			ce_ring_test_initial_indexes(CE_id, src_ring,
1284 						     "src_ring");
1285 		}
1286 	}
1287 
1288 	/* destination ring setup */
1289 	nentries = attr->dest_nentries;
1290 	if (nentries) {
1291 		struct CE_ring_state *dest_ring;
1292 
1293 		nentries = roundup_pwr2(nentries);
1294 		if (CE_state->dest_ring) {
1295 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
1296 		} else {
1297 			dest_ring = CE_state->dest_ring =
1298 				ce_alloc_ring_state(CE_state,
1299 						CE_RING_DEST,
1300 						nentries);
1301 			if (!dest_ring) {
1302 				/* cannot allocate dst ring. If the CE_state
1303 				 * or src ring is allocated locally free
1304 				 * CE_State and src ring and return error.
1305 				 */
1306 				HIF_ERROR("%s: dest ring has no mem",
1307 					  __func__);
1308 				goto error_no_dma_mem;
1309 			}
1310 
1311 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
1312 				      dest_ring, attr);
1313 			if (status < 0)
1314 				goto error_target_access;
1315 
1316 			ce_ring_test_initial_indexes(CE_id, dest_ring,
1317 						     "dest_ring");
1318 
1319 			/* For srng based target, init status ring here */
1320 			if (ce_srng_based(CE_state->scn)) {
1321 				CE_state->status_ring =
1322 					ce_alloc_ring_state(CE_state,
1323 							CE_RING_STATUS,
1324 							nentries);
1325 				if (CE_state->status_ring == NULL) {
1326 					/*Allocation failed. Cleanup*/
1327 					qdf_mem_free(CE_state->dest_ring);
1328 					if (malloc_src_ring) {
1329 						qdf_mem_free
1330 							(CE_state->src_ring);
1331 						CE_state->src_ring = NULL;
1332 						malloc_src_ring = false;
1333 					}
1334 					if (malloc_CE_state) {
1335 						/* allocated CE_state locally */
1336 						scn->ce_id_to_state[CE_id] =
1337 							NULL;
1338 						qdf_mem_free(CE_state);
1339 						malloc_CE_state = false;
1340 					}
1341 
1342 					return NULL;
1343 				}
1344 
1345 				status = ce_ring_setup(scn, CE_RING_STATUS,
1346 					       CE_id, CE_state->status_ring,
1347 					       attr);
1348 				if (status < 0)
1349 					goto error_target_access;
1350 
1351 			}
1352 
1353 			/* epping */
1354 			/* poll timer */
1355 			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL) ||
1356 					scn->polled_mode_on) {
1357 				qdf_timer_init(scn->qdf_dev,
1358 						       &CE_state->poll_timer,
1359 						       ce_poll_timeout,
1360 						       CE_state,
1361 						       QDF_TIMER_TYPE_SW);
1362 				CE_state->timer_inited = true;
1363 				qdf_timer_mod(&CE_state->poll_timer,
1364 						      CE_POLL_TIMEOUT);
1365 			}
1366 		}
1367 	}
1368 
1369 	if (!ce_srng_based(scn)) {
1370 		/* Enable CE error interrupts */
1371 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1372 			goto error_target_access;
1373 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1374 		if (Q_TARGET_ACCESS_END(scn) < 0)
1375 			goto error_target_access;
1376 	}
1377 
1378 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1379 			ce_oom_recovery, CE_state);
1380 
1381 	/* update the htt_data attribute */
1382 	ce_mark_datapath(CE_state);
1383 	scn->ce_id_to_state[CE_id] = CE_state;
1384 
1385 	alloc_mem_ce_debug_history(scn, CE_id);
1386 
1387 	return (struct CE_handle *)CE_state;
1388 
1389 error_target_access:
1390 error_no_dma_mem:
1391 	ce_fini((struct CE_handle *)CE_state);
1392 	return NULL;
1393 }
1394 
1395 #ifdef WLAN_FEATURE_FASTPATH
1396 /**
1397  * hif_enable_fastpath() - Update that we have enabled fastpath mode
1398  * @hif_ctx: HIF context
1399  *
1400  * For use in data path
1401  *
1402  * Return: void
1403  */
1404 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1405 {
1406 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1407 
1408 	if (ce_srng_based(scn)) {
1409 		HIF_INFO("%s, srng rings do not support fastpath", __func__);
1410 		return;
1411 	}
1412 	HIF_DBG("%s, Enabling fastpath mode", __func__);
1413 	scn->fastpath_mode_on = true;
1414 }
1415 
1416 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx)
1417 {
1418 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1419 
1420 	HIF_DBG("%s, Enabling polled mode", __func__);
1421 	scn->polled_mode_on = true;
1422 }
1423 
1424 /**
1425  * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
1426  * @hif_ctx: HIF Context
1427  *
1428  * For use in data path to skip HTC
1429  *
1430  * Return: bool
1431  */
1432 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1433 {
1434 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1435 
1436 	return scn->fastpath_mode_on;
1437 }
1438 
1439 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1440 {
1441 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1442 
1443 	return scn->polled_mode_on;
1444 }
1445 
1446 /**
1447  * hif_get_ce_handle - API to get CE handle for FastPath mode
1448  * @hif_ctx: HIF Context
1449  * @id: CopyEngine Id
1450  *
1451  * API to return CE handle for fastpath mode
1452  *
1453  * Return: CE handle (struct CE_state *) for the given Copy Engine id
1454  */
1455 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1456 {
1457 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1458 
1459 	return scn->ce_id_to_state[id];
1460 }
1461 
1462 /**
1463  * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
1464  * No processing is required inside this function.
1465  * @ce_hdl: Copy engine handle
1466  * Using an assert, this function makes sure that
1467  * the TX CE has been processed completely.
1468  *
1469  * This is called while dismantling CE structures. No other thread
1470  * should be using these structures while dismantling is occurring,
1471  * therefore no locking is needed.
1472  *
1473  * Return: none
1474  */
1475 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1476 {
1477 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1478 	struct CE_ring_state *src_ring = ce_state->src_ring;
1479 	struct hif_softc *sc = ce_state->scn;
1480 	uint32_t sw_index, write_index;
1481 
1482 	if (hif_is_nss_wifi_enabled(sc))
1483 		return;
1484 
1485 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
1486 		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
1487 			 __func__, __LINE__);
1488 		sw_index = src_ring->sw_index;
1489 		write_index = src_ring->sw_index;
1490 
1491 		/* At this point Tx CE should be clean */
1492 		qdf_assert_always(sw_index == write_index);
1493 	}
1494 }
1495 
1496 /**
1497  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1498  * @ce_hdl: Handle to CE
1499  *
1500  * These buffers are never allocated on the fly, but
1501  * are allocated only once during HIF start and freed
1502  * only once during HIF stop.
1503  * NOTE:
1504  * The assumption here is there is no in-flight DMA in progress
1505  * currently, so that buffers can be freed up safely.
1506  *
1507  * Return: NONE
1508  */
1509 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1510 {
1511 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1512 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
1513 	qdf_nbuf_t nbuf;
1514 	int i;
1515 
1516 	if (ce_state->scn->fastpath_mode_on == false)
1517 		return;
1518 
1519 	if (!ce_state->htt_rx_data)
1520 		return;
1521 
1522 	/*
1523 	 * This applies when fastpath_mode is on, for datapath CEs. Unlike
1524 	 * other CEs, this CE is kept completely full: it does not leave one
1525 	 * blank space to distinguish between an empty queue and a full queue.
1526 	 * So free all the entries.
1527 	 */
1528 	for (i = 0; i < dst_ring->nentries; i++) {
1529 		nbuf = dst_ring->per_transfer_context[i];
1530 
1531 		/*
1532 		 * The reasons for doing this check are:
1533 		 * 1) Protect against calling cleanup before allocating buffers
1534 		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1535 		 *    could have a partially filled ring, because of a memory
1536 		 *    allocation failure in the middle of allocating ring.
1537 		 *    This check accounts for that case, checking
1538 		 *    fastpath_mode_on flag or started flag would not have
1539 		 *    covered that case. This is not in performance path,
1540 		 *    so OK to do this.
1541 		 */
1542 		if (nbuf) {
1543 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1544 					      QDF_DMA_FROM_DEVICE);
1545 			qdf_nbuf_free(nbuf);
1546 		}
1547 	}
1548 }
1549 
1550 /**
1551  * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1552  * @scn: HIF handle
1553  *
1554  * Datapath Rx CEs are a special case, where we reuse all the message buffers.
1555  * Hence we have to post all the entries in the pipe, even at the beginning,
1556  * unlike other CE pipes where one less than dest_nentries are filled at
1557  * the beginning.
1558  *
1559  * Return: None
1560  */
1561 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1562 {
1563 	int pipe_num;
1564 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1565 
1566 	if (scn->fastpath_mode_on == false)
1567 		return;
1568 
1569 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1570 		struct HIF_CE_pipe_info *pipe_info =
1571 			&hif_state->pipe_info[pipe_num];
1572 		struct CE_state *ce_state =
1573 			scn->ce_id_to_state[pipe_info->pipe_num];
1574 
1575 		if (ce_state->htt_rx_data)
1576 			atomic_inc(&pipe_info->recv_bufs_needed);
1577 	}
1578 }
1579 #else
1580 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1581 {
1582 }
1583 
1584 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
1585 {
1586 	return false;
1587 }
1588 
1589 static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
1590 {
1591 	return false;
1592 }
1593 #endif /* WLAN_FEATURE_FASTPATH */
1594 
1595 void ce_fini(struct CE_handle *copyeng)
1596 {
1597 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1598 	unsigned int CE_id = CE_state->id;
1599 	struct hif_softc *scn = CE_state->scn;
1600 	uint32_t desc_size;
1601 
1602 	bool inited = CE_state->timer_inited;
1603 	CE_state->state = CE_UNUSED;
1604 	scn->ce_id_to_state[CE_id] = NULL;
1605 	/* Set the flag to false first to stop processing in ce_poll_timeout */
1606 	CE_state->timer_inited = false;
1607 	qdf_lro_deinit(CE_state->lro_data);
1608 
1609 	if (CE_state->src_ring) {
1610 		/* Cleanup the datapath Tx ring */
1611 		ce_h2t_tx_ce_cleanup(copyeng);
1612 
1613 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
1614 		if (CE_state->src_ring->shadow_base_unaligned)
1615 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
1616 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
1617 			ce_free_desc_ring(scn, CE_state->id,
1618 					  CE_state->src_ring,
1619 					  desc_size);
1620 		qdf_mem_free(CE_state->src_ring);
1621 	}
1622 	if (CE_state->dest_ring) {
1623 		/* Cleanup the datapath Rx ring */
1624 		ce_t2h_msg_ce_cleanup(copyeng);
1625 
1626 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
1627 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
1628 			ce_free_desc_ring(scn, CE_state->id,
1629 					  CE_state->dest_ring,
1630 					  desc_size);
1631 		qdf_mem_free(CE_state->dest_ring);
1632 
1633 		/* epping */
1634 		if (inited) {
1635 			qdf_timer_free(&CE_state->poll_timer);
1636 		}
1637 	}
1638 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
1639 		/* Cleanup the datapath Tx ring */
1640 		ce_h2t_tx_ce_cleanup(copyeng);
1641 
1642 		if (CE_state->status_ring->shadow_base_unaligned)
1643 			qdf_mem_free(
1644 				CE_state->status_ring->shadow_base_unaligned);
1645 
1646 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
1647 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
1648 			ce_free_desc_ring(scn, CE_state->id,
1649 					  CE_state->status_ring,
1650 					  desc_size);
1651 		qdf_mem_free(CE_state->status_ring);
1652 	}
1653 
1654 	free_mem_ce_debug_history(scn, CE_id);
1655 	reset_ce_debug_history(scn);
1656 	ce_deinit_ce_desc_event_log(scn, CE_id);
1657 
1658 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
1659 	qdf_mem_free(CE_state);
1660 }
1661 
1662 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
1663 {
1664 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1665 
1666 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
1667 		  sizeof(hif_state->msg_callbacks_pending));
1668 	qdf_mem_zero(&hif_state->msg_callbacks_current,
1669 		  sizeof(hif_state->msg_callbacks_current));
1670 }
1671 
1672 /* Send the first nbytes bytes of the buffer */
1673 QDF_STATUS
1674 hif_send_head(struct hif_opaque_softc *hif_ctx,
1675 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
1676 	      qdf_nbuf_t nbuf, unsigned int data_attr)
1677 {
1678 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1679 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1680 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1681 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
1682 	int bytes = nbytes, nfrags = 0;
1683 	struct ce_sendlist sendlist;
1684 	int status, i = 0;
1685 	unsigned int mux_id = 0;
1686 
1687 	QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));
1688 
1689 	transfer_id =
1690 		(mux_id & MUX_ID_MASK) |
1691 		(transfer_id & TRANSACTION_ID_MASK);
1692 	data_attr &= DESC_DATA_FLAG_MASK;
1693 	/*
1694 	 * The common case involves sending multiple fragments within a
1695 	 * single download (the tx descriptor and the tx frame header).
1696 	 * So, optimize for the case of multiple fragments by not even
1697 	 * checking whether it's necessary to use a sendlist.
1698 	 * The overhead of using a sendlist for a single buffer download
1699 	 * is not a big deal, since it happens rarely (for WMI messages).
1700 	 */
1701 	ce_sendlist_init(&sendlist);
1702 	do {
1703 		qdf_dma_addr_t frag_paddr;
1704 		int frag_bytes;
1705 
1706 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
1707 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
1708 		/*
1709 		 * Clear the packet offset for all but the first CE desc.
1710 		 */
1711 		if (i++ > 0)
1712 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
1713 
1714 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
1715 				    frag_bytes >
1716 				    bytes ? bytes : frag_bytes,
1717 				    qdf_nbuf_get_frag_is_wordstream
1718 				    (nbuf,
1719 				    nfrags) ? 0 :
1720 				    CE_SEND_FLAG_SWAP_DISABLE,
1721 				    data_attr);
1722 		if (status != QDF_STATUS_SUCCESS) {
1723 			HIF_ERROR("%s: error, frag_num %d larger than limit",
1724 				__func__, nfrags);
1725 			return status;
1726 		}
1727 		bytes -= frag_bytes;
1728 		nfrags++;
1729 	} while (bytes > 0);
1730 
1731 	/* Make sure we have resources to handle this request */
1732 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
1733 	if (pipe_info->num_sends_allowed < nfrags) {
1734 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1735 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
1736 		return QDF_STATUS_E_RESOURCES;
1737 	}
1738 	pipe_info->num_sends_allowed -= nfrags;
1739 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1740 
1741 	if (qdf_unlikely(ce_hdl == NULL)) {
1742 		HIF_ERROR("%s: error CE handle is null", __func__);
1743 		return A_ERROR;
1744 	}
1745 
1746 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
1747 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
1748 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
1749 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
1750 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
1751 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
1752 
1753 	return status;
1754 }
1755 
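/**
 * hif_send_complete_check() - check for and service send completions
 * @hif_ctx: HIF opaque context
 * @pipe: pipe (copy engine) number to check
 * @force: when set, service the CE even if plenty of resources remain
 *
 * Unless forced, completions are only reaped when no more than half of
 * the pipe's source ring entries remain free, to avoid an unnecessary
 * CE register read.
 *
 * Return: N/A
 */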
1756 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1757 								int force)
1758 {
1759 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1760 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1761 
1762 	if (!force) {
1763 		int resources;
1764 		/*
1765 		 * Decide whether to actually poll for completions, or just
1766 		 * wait for a later chance. If there seem to be plenty of
1767 		 * resources left, then just wait, since checking involves
1768 		 * reading a CE register, which is a relatively expensive
1769 		 * operation.
1770 		 */
1771 		resources = hif_get_free_queue_number(hif_ctx, pipe);
1772 		/*
1773 		 * If at least 50% of the total resources are still available,
1774 		 * don't bother checking again yet.
1775 		 */
1776 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
1777 									 1))
1778 			return;
1779 	}
1780 #if ATH_11AC_TXCOMPACT
1781 	ce_per_engine_servicereap(scn, pipe);
1782 #else
1783 	ce_per_engine_service(scn, pipe);
1784 #endif
1785 }
1786 
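/**
 * hif_get_free_queue_number() - get the number of sends currently allowed
 * @hif_ctx: HIF opaque context
 * @pipe: pipe (copy engine) number
 *
 * Return: number of send slots (num_sends_allowed) available on the pipe
 */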
1787 uint16_t
1788 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
1789 {
1790 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1791 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1792 	uint16_t rv;
1793 
1794 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
1795 	rv = pipe_info->num_sends_allowed;
1796 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1797 	return rv;
1798 }
1799 
1800 /* Called by lower (CE) layer when a send to Target completes. */
1801 static void
1802 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
1803 		     void *transfer_context, qdf_dma_addr_t CE_data,
1804 		     unsigned int nbytes, unsigned int transfer_id,
1805 		     unsigned int sw_index, unsigned int hw_index,
1806 		     unsigned int toeplitz_hash_result)
1807 {
1808 	struct HIF_CE_pipe_info *pipe_info =
1809 		(struct HIF_CE_pipe_info *)ce_context;
1810 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
1811 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1812 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
1813 	struct hif_msg_callbacks *msg_callbacks =
1814 		&pipe_info->pipe_callbacks;
1815 
1816 	do {
1817 		/*
1818 		 * The upper layer callback will be triggered
1819 		 * when the last fragment is completed.
1820 		 */
1821 		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
1822 			if (scn->target_status == TARGET_STATUS_RESET) {
1823 
1824 				qdf_nbuf_unmap_single(scn->qdf_dev,
1825 						      transfer_context,
1826 						      QDF_DMA_TO_DEVICE);
1827 				qdf_nbuf_free(transfer_context);
1828 			} else
1829 				msg_callbacks->txCompletionHandler(
1830 					msg_callbacks->Context,
1831 					transfer_context, transfer_id,
1832 					toeplitz_hash_result);
1833 		}
1834 
1835 		qdf_spin_lock(&pipe_info->completion_freeq_lock);
1836 		pipe_info->num_sends_allowed++;
1837 		qdf_spin_unlock(&pipe_info->completion_freeq_lock);
1838 	} while (ce_completed_send_next(copyeng,
1839 			&ce_context, &transfer_context,
1840 			&CE_data, &nbytes, &transfer_id,
1841 			&sw_idx, &hw_idx,
1842 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
1843 }
1844 
1845 /**
1846  * hif_ce_do_recv(): send message from copy engine to upper layers
1847  * @msg_callbacks: structure containing callback and callback context
1848  * @netbuf: skb containing message
1849  * @nbytes: number of bytes in the message
1850  * @pipe_info: used for the pipe_number info
1851  *
1852  * Checks the packet length, configures the length in the netbuf,
1853  * and calls the upper layer callback.
1854  *
1855  * return: None
1856  */
1857 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
1858 		qdf_nbuf_t netbuf, int nbytes,
1859 		struct HIF_CE_pipe_info *pipe_info) {
1860 	if (nbytes <= pipe_info->buf_sz) {
1861 		qdf_nbuf_set_pktlen(netbuf, nbytes);
1862 		msg_callbacks->
1863 			rxCompletionHandler(msg_callbacks->Context,
1864 					netbuf, pipe_info->pipe_num);
1865 	} else {
1866 		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
1867 				__func__, netbuf, nbytes);
1868 
1869 		qdf_nbuf_free(netbuf);
1870 	}
1871 }
1872 
1873 /* Called by lower (CE) layer when data is received from the Target. */
1874 static void
1875 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
1876 		     void *transfer_context, qdf_dma_addr_t CE_data,
1877 		     unsigned int nbytes, unsigned int transfer_id,
1878 		     unsigned int flags)
1879 {
1880 	struct HIF_CE_pipe_info *pipe_info =
1881 		(struct HIF_CE_pipe_info *)ce_context;
1882 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
1883 	struct CE_state *ce_state = (struct CE_state *) copyeng;
1884 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1885 #ifdef HIF_PCI
1886 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
1887 #endif
1888 	struct hif_msg_callbacks *msg_callbacks =
1889 		 &pipe_info->pipe_callbacks;
1890 
1891 	do {
1892 #ifdef HIF_PCI
1893 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
1894 #endif
1895 		qdf_nbuf_unmap_single(scn->qdf_dev,
1896 				      (qdf_nbuf_t) transfer_context,
1897 				      QDF_DMA_FROM_DEVICE);
1898 
1899 		atomic_inc(&pipe_info->recv_bufs_needed);
1900 		hif_post_recv_buffers_for_pipe(pipe_info);
1901 		if (scn->target_status == TARGET_STATUS_RESET)
1902 			qdf_nbuf_free(transfer_context);
1903 		else
1904 			hif_ce_do_recv(msg_callbacks, transfer_context,
1905 				nbytes, pipe_info);
1906 
1907 		/* Set up force_break flag if num of receives reaches
1908 		 * MAX_NUM_OF_RECEIVES
1909 		 */
1910 		ce_state->receive_count++;
1911 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
1912 			ce_state->force_break = 1;
1913 			break;
1914 		}
1915 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
1916 					&CE_data, &nbytes, &transfer_id,
1917 					&flags) == QDF_STATUS_SUCCESS);
1918 
1919 }
1920 
1921 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
1922 
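/**
 * hif_post_init() - save the HTC message callbacks for later installation
 * @hif_ctx: HIF opaque context
 * @unused: unused parameter
 * @callbacks: message callbacks provided by the upper layer
 *
 * The callbacks are only copied into msg_callbacks_pending here; they
 * are installed by hif_msg_callbacks_install() when HIF starts.
 *
 * Return: N/A
 */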
1923 void
1924 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
1925 	      struct hif_msg_callbacks *callbacks)
1926 {
1927 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1928 
1929 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
1930 	spin_lock_init(&pcie_access_log_lock);
1931 #endif
1932 	/* Save callbacks for later installation */
1933 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
1934 		 sizeof(hif_state->msg_callbacks_pending));
1935 
1936 }
1937 
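/**
 * hif_completion_thread_startup() - register per-pipe completion callbacks
 * @hif_state: HIF CE state
 *
 * Registers hif_pci_ce_send_done() and hif_pci_ce_recv_data() with every
 * copy engine except the diagnostic CE, and copies the current message
 * callbacks into each pipe's pipe_callbacks.
 *
 * Return: 0 on success, -EINVAL if no CEs are configured, -EFAULT if no
 *	   completion handlers have been registered
 */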
1938 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
1939 {
1940 	struct CE_handle *ce_diag = hif_state->ce_diag;
1941 	int pipe_num;
1942 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1943 	struct hif_msg_callbacks *hif_msg_callbacks =
1944 		&hif_state->msg_callbacks_current;
1945 
1946 	/* daemonize("hif_compl_thread"); */
1947 
1948 	if (scn->ce_count == 0) {
1949 		HIF_ERROR("%s: Invalid ce_count", __func__);
1950 		return -EINVAL;
1951 	}
1952 
1953 	if (!hif_msg_callbacks ||
1954 			!hif_msg_callbacks->rxCompletionHandler ||
1955 			!hif_msg_callbacks->txCompletionHandler) {
1956 		HIF_ERROR("%s: no completion handler registered", __func__);
1957 		return -EFAULT;
1958 	}
1959 
1960 	A_TARGET_ACCESS_LIKELY(scn);
1961 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1962 		struct CE_attr attr;
1963 		struct HIF_CE_pipe_info *pipe_info;
1964 
1965 		pipe_info = &hif_state->pipe_info[pipe_num];
1966 		if (pipe_info->ce_hdl == ce_diag)
1967 			continue;       /* Handle Diagnostic CE specially */
1968 		attr = hif_state->host_ce_config[pipe_num];
1969 		if (attr.src_nentries) {
1970 			/* pipe used to send to target */
1971 			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
1972 					 __func__, pipe_num, pipe_info);
1973 			ce_send_cb_register(pipe_info->ce_hdl,
1974 					    hif_pci_ce_send_done, pipe_info,
1975 					    attr.flags & CE_ATTR_DISABLE_INTR);
1976 			pipe_info->num_sends_allowed = attr.src_nentries - 1;
1977 		}
1978 		if (attr.dest_nentries) {
1979 			/* pipe used to receive from target */
1980 			ce_recv_cb_register(pipe_info->ce_hdl,
1981 					    hif_pci_ce_recv_data, pipe_info,
1982 					    attr.flags & CE_ATTR_DISABLE_INTR);
1983 		}
1984 
1985 		if (attr.src_nentries)
1986 			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
1987 
1988 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
1989 					sizeof(pipe_info->pipe_callbacks));
1990 	}
1991 
1992 	A_TARGET_ACCESS_UNLIKELY(scn);
1993 	return 0;
1994 }
1995 
1996 /*
1997  * Install pending msg callbacks.
1998  *
1999  * TBDXXX: This hack is needed because upper layers install msg callbacks
2000  * for use with HTC before BMI is done; yet this HIF implementation
2001  * needs to continue to use BMI msg callbacks. Really, upper layers
2002  * should not register HTC callbacks until AFTER BMI phase.
2003  */
2004 static void hif_msg_callbacks_install(struct hif_softc *scn)
2005 {
2006 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2007 
2008 	qdf_mem_copy(&hif_state->msg_callbacks_current,
2009 		 &hif_state->msg_callbacks_pending,
2010 		 sizeof(hif_state->msg_callbacks_pending));
2011 }
2012 
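/**
 * hif_get_default_pipe() - get the default pipes for control messages
 * @hif_hdl: HIF opaque context
 * @ULPipe: set to the upload (host->target) pipe number
 * @DLPipe: set to the download (target->host) pipe number
 *
 * Resolves the pipes mapped to HTC_CTRL_RSVD_SVC.
 *
 * Return: N/A
 */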
2013 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2014 							uint8_t *DLPipe)
2015 {
2016 	int ul_is_polled, dl_is_polled;
2017 
2018 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
2019 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2020 }
2021 
2022 /**
2023  * hif_dump_pipe_debug_count() - Log error count
2024  * @scn: hif_softc pointer.
2025  *
2026  * Output the pipe error counts of each pipe to log file
2027  *
2028  * Return: N/A
2029  */
2030 void hif_dump_pipe_debug_count(struct hif_softc *scn)
2031 {
2032 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2033 	int pipe_num;
2034 
2035 	if (hif_state == NULL) {
2036 		HIF_ERROR("%s hif_state is NULL", __func__);
2037 		return;
2038 	}
2039 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2040 		struct HIF_CE_pipe_info *pipe_info;
2041 
2042 		pipe_info = &hif_state->pipe_info[pipe_num];
2043 
2044 		if (pipe_info->nbuf_alloc_err_count > 0 ||
2045 				pipe_info->nbuf_dma_err_count > 0 ||
2046 				pipe_info->nbuf_ce_enqueue_err_count)
2047 			HIF_ERROR(
2048 				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
2049 				__func__, pipe_info->pipe_num,
2050 				atomic_read(&pipe_info->recv_bufs_needed),
2051 				pipe_info->nbuf_alloc_err_count,
2052 				pipe_info->nbuf_dma_err_count,
2053 				pipe_info->nbuf_ce_enqueue_err_count);
2054 	}
2055 }
2056 
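/**
 * hif_post_recv_buffers_failure() - record a receive buffer posting failure
 * @pipe_info: pipe on which the failure occurred
 * @nbuf: network buffer involved in the failure
 * @error_cnt: per-pipe error counter to increment
 * @failure_type: CE descriptor event type to record
 * @failure_type_string: printable name of the failure type
 *
 * Updates the failure bookkeeping and, if this was the last outstanding
 * buffer for the pipe, schedules the OOM allocation work so the ring is
 * eventually refilled.
 *
 * Return: N/A
 */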
2057 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2058 					  void *nbuf, uint32_t *error_cnt,
2059 					  enum hif_ce_event_type failure_type,
2060 					  const char *failure_type_string)
2061 {
2062 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2063 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2064 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2065 	int ce_id = CE_state->id;
2066 	uint32_t error_cnt_tmp;
2067 
2068 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2069 	error_cnt_tmp = ++(*error_cnt);
2070 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2071 	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
2072 		  __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2073 		  failure_type_string);
2074 	hif_record_ce_desc_event(scn, ce_id, failure_type,
2075 				 NULL, nbuf, bufs_needed_tmp, 0);
2076 	/* if we fail to allocate the last buffer for an rx pipe,
2077 	 *	there is no trigger to refill the ce and we will
2078 	 *	eventually crash
2079 	 */
2080 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
2081 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
2082 
2083 }
2084 
2085 
2086 
2087 
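/**
 * hif_post_recv_buffers_for_pipe() - post receive buffers to one pipe
 * @pipe_info: pipe to replenish
 *
 * Allocates, DMA-maps and enqueues network buffers to the copy engine
 * until recv_bufs_needed is satisfied. On a failure the per-pipe error
 * counters are updated and the partially posted state is left for the
 * OOM work to recover.
 *
 * Return: QDF_STATUS_SUCCESS, or the failure status of the first error
 */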
2088 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
2089 {
2090 	struct CE_handle *ce_hdl;
2091 	qdf_size_t buf_sz;
2092 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2093 	QDF_STATUS status;
2094 	uint32_t bufs_posted = 0;
2095 
2096 	buf_sz = pipe_info->buf_sz;
2097 	if (buf_sz == 0) {
2098 		/* Unused Copy Engine */
2099 		return QDF_STATUS_SUCCESS;
2100 	}
2101 
2102 	ce_hdl = pipe_info->ce_hdl;
2103 
2104 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2105 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
2106 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
2107 		qdf_nbuf_t nbuf;
2108 
2109 		atomic_dec(&pipe_info->recv_bufs_needed);
2110 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2111 
2112 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
2113 		if (!nbuf) {
2114 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2115 					&pipe_info->nbuf_alloc_err_count,
2116 					 HIF_RX_NBUF_ALLOC_FAILURE,
2117 					"HIF_RX_NBUF_ALLOC_FAILURE");
2118 			return QDF_STATUS_E_NOMEM;
2119 		}
2120 
2121 		/*
2122 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
2123 		 * CE_data = dma_map_single(dev, data, buf_sz,
2124 		 *			    DMA_FROM_DEVICE);
2125 		 */
2126 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
2127 					    QDF_DMA_FROM_DEVICE);
2128 
2129 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2130 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2131 					&pipe_info->nbuf_dma_err_count,
2132 					 HIF_RX_NBUF_MAP_FAILURE,
2133 					"HIF_RX_NBUF_MAP_FAILURE");
2134 			qdf_nbuf_free(nbuf);
2135 			return status;
2136 		}
2137 
2138 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
2139 
2140 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
2141 					       buf_sz, DMA_FROM_DEVICE);
2142 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
2143 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2144 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2145 					&pipe_info->nbuf_ce_enqueue_err_count,
2146 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
2147 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
2148 
2149 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2150 						QDF_DMA_FROM_DEVICE);
2151 			qdf_nbuf_free(nbuf);
2152 			return status;
2153 		}
2154 
2155 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2156 		bufs_posted++;
2157 	}
2158 	pipe_info->nbuf_alloc_err_count =
2159 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
2160 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2161 	pipe_info->nbuf_dma_err_count =
2162 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
2163 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2164 	pipe_info->nbuf_ce_enqueue_err_count =
2165 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
2166 		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
2167 
2168 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2169 
2170 	return QDF_STATUS_SUCCESS;
2171 }
2172 
2173 /*
2174  * Try to post all desired receive buffers for all pipes.
2175  * Returns QDF_STATUS_SUCCESS for non-fastpath rx copy engines, since
2176  * oom_allocation_work will be scheduled to recover any failures, and
2177  * an error status if unable to completely replenish receive buffers
2178  * for a fastpath rx copy engine.
2179  */
2180 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
2181 {
2182 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2183 	int pipe_num;
2184 	struct CE_state *ce_state = NULL;
2185 	QDF_STATUS qdf_status;
2186 
2187 	A_TARGET_ACCESS_LIKELY(scn);
2188 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2189 		struct HIF_CE_pipe_info *pipe_info;
2190 
2191 		ce_state = scn->ce_id_to_state[pipe_num];
2192 		pipe_info = &hif_state->pipe_info[pipe_num];
2193 
2194 		if (hif_is_nss_wifi_enabled(scn) &&
2195 		    ce_state && (ce_state->htt_rx_data))
2196 			continue;
2197 
2198 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
2199 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
2200 			ce_state->htt_rx_data &&
2201 			scn->fastpath_mode_on) {
2202 			A_TARGET_ACCESS_UNLIKELY(scn);
2203 			return qdf_status;
2204 		}
2205 	}
2206 
2207 	A_TARGET_ACCESS_UNLIKELY(scn);
2208 
2209 	return QDF_STATUS_SUCCESS;
2210 }
2211 
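/**
 * hif_start() - start the HIF CE layer
 * @hif_ctx: HIF opaque context
 *
 * Installs the pending message callbacks, registers the per-pipe
 * completion handlers and posts the initial set of receive buffers.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */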
2212 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
2213 {
2214 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2215 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2216 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
2217 
2218 	hif_update_fastpath_recv_bufs_cnt(scn);
2219 
2220 	hif_msg_callbacks_install(scn);
2221 
2222 	if (hif_completion_thread_startup(hif_state))
2223 		return QDF_STATUS_E_FAILURE;
2224 
2225 	/* enable buffer cleanup */
2226 	hif_state->started = true;
2227 
2228 	/* Post buffers once to start things off. */
2229 	qdf_status = hif_post_recv_buffers(scn);
2230 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2231 		/* cleanup is done in hif_ce_disable */
2232 		HIF_ERROR("%s:failed to post buffers", __func__);
2233 		return qdf_status;
2234 	}
2235 
2236 	return qdf_status;
2237 }
2238 
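/**
 * hif_recv_buffer_cleanup_on_pipe() - reclaim posted receive buffers
 * @pipe_info: pipe to clean up
 *
 * Revokes every receive buffer still enqueued on the copy engine, then
 * unmaps and frees it. Does nothing for unused pipes or before HIF has
 * started.
 *
 * Return: N/A
 */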
2239 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2240 {
2241 	struct hif_softc *scn;
2242 	struct CE_handle *ce_hdl;
2243 	uint32_t buf_sz;
2244 	struct HIF_CE_state *hif_state;
2245 	qdf_nbuf_t netbuf;
2246 	qdf_dma_addr_t CE_data;
2247 	void *per_CE_context;
2248 
2249 	buf_sz = pipe_info->buf_sz;
2250 	/* Unused Copy Engine */
2251 	if (buf_sz == 0)
2252 		return;
2253 
2254 
2255 	hif_state = pipe_info->HIF_CE_state;
2256 	if (!hif_state->started)
2257 		return;
2258 
2259 	scn = HIF_GET_SOFTC(hif_state);
2260 	ce_hdl = pipe_info->ce_hdl;
2261 
2262 	if (scn->qdf_dev == NULL)
2263 		return;
2264 	while (ce_revoke_recv_next
2265 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
2266 			&CE_data) == QDF_STATUS_SUCCESS) {
2267 		if (netbuf) {
2268 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2269 					      QDF_DMA_FROM_DEVICE);
2270 			qdf_nbuf_free(netbuf);
2271 		}
2272 	}
2273 }
2274 
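/**
 * hif_send_buffer_cleanup_on_pipe() - reclaim pending send buffers
 * @pipe_info: pipe to clean up
 *
 * Cancels any sends still queued on the copy engine and indicates the
 * completion to the upper layer so the buffers can be freed, except for
 * packets owned by the HTT tx endpoint, which are freed elsewhere.
 *
 * Return: N/A
 */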
2275 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2276 {
2277 	struct CE_handle *ce_hdl;
2278 	struct HIF_CE_state *hif_state;
2279 	struct hif_softc *scn;
2280 	qdf_nbuf_t netbuf;
2281 	void *per_CE_context;
2282 	qdf_dma_addr_t CE_data;
2283 	unsigned int nbytes;
2284 	unsigned int id;
2285 	uint32_t buf_sz;
2286 	uint32_t toeplitz_hash_result;
2287 
2288 	buf_sz = pipe_info->buf_sz;
2289 	if (buf_sz == 0) {
2290 		/* Unused Copy Engine */
2291 		return;
2292 	}
2293 
2294 	hif_state = pipe_info->HIF_CE_state;
2295 	if (!hif_state->started) {
2296 		return;
2297 	}
2298 
2299 	scn = HIF_GET_SOFTC(hif_state);
2300 
2301 	ce_hdl = pipe_info->ce_hdl;
2302 
2303 	while (ce_cancel_send_next
2304 		       (ce_hdl, &per_CE_context,
2305 		       (void **)&netbuf, &CE_data, &nbytes,
2306 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2307 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2308 			/*
2309 			 * Packets enqueued by htt_h2t_ver_req_msg() and
2310 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2311 			 * freed in htt_htc_misc_pkt_pool_free() in
2312 			 * wlantl_close(), so do not free them again
2313 			 * here; instead, detect them by checking whether
2314 			 * they were queued on the HTT tx endpoint.
2315 			 */
2316 			if (id == scn->htc_htt_tx_endpoint)
2317 				return;
2318 			/* Indicate the completion to higher
2319 			 * layer to free the buffer
2320 			 */
2321 			if (pipe_info->pipe_callbacks.txCompletionHandler)
2322 				pipe_info->pipe_callbacks.
2323 				    txCompletionHandler(pipe_info->
2324 					    pipe_callbacks.Context,
2325 					    netbuf, id, toeplitz_hash_result);
2326 		}
2327 	}
2328 }
2329 
2330 /*
2331  * Cleanup residual buffers for device shutdown:
2332  *    buffers that were enqueued for receive
2333  *    buffers that were to be sent
2334  * Note: Buffers that had completed but which were
2335  * not yet processed are on a completion queue. They
2336  * are handled when the completion thread shuts down.
2337  */
2338 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
2339 {
2340 	int pipe_num;
2341 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2342 	struct CE_state *ce_state;
2343 
2344 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2345 		struct HIF_CE_pipe_info *pipe_info;
2346 
2347 		ce_state = scn->ce_id_to_state[pipe_num];
2348 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2349 				((ce_state->htt_tx_data) ||
2350 				 (ce_state->htt_rx_data))) {
2351 			continue;
2352 		}
2353 
2354 		pipe_info = &hif_state->pipe_info[pipe_num];
2355 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
2356 		hif_send_buffer_cleanup_on_pipe(pipe_info);
2357 	}
2358 }
2359 
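/**
 * hif_flush_surprise_remove() - clean up buffers on surprise removal
 * @hif_ctx: HIF opaque context
 *
 * Reclaims the receive and send buffers of every pipe when the device
 * is removed unexpectedly.
 *
 * Return: N/A
 */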
2360 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
2361 {
2362 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2363 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2364 
2365 	hif_buffer_cleanup(hif_state);
2366 }
2367 
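/**
 * hif_destroy_oom_work() - destroy the per-CE OOM allocation work
 * @scn: HIF context
 *
 * Ensures no receive buffer replenish work can run once shutdown begins.
 *
 * Return: N/A
 */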
2368 static void hif_destroy_oom_work(struct hif_softc *scn)
2369 {
2370 	struct CE_state *ce_state;
2371 	int ce_id;
2372 
2373 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2374 		ce_state = scn->ce_id_to_state[ce_id];
2375 		if (ce_state)
2376 			qdf_destroy_work(scn->qdf_dev,
2377 					 &ce_state->oom_allocation_work);
2378 	}
2379 }
2380 
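/**
 * hif_ce_stop() - stop the HIF CE layer
 * @scn: HIF context
 *
 * Disables interrupts, cancels the OOM work, cleans up residual buffers
 * and tears down every copy engine along with its locks and the sleep
 * timer.
 *
 * Return: N/A
 */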
2381 void hif_ce_stop(struct hif_softc *scn)
2382 {
2383 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2384 	int pipe_num;
2385 
2386 	/*
2387 	 * before cleaning up any memory, ensure irq &
2388 	 * bottom half contexts will not be re-entered
2389 	 */
2390 	hif_disable_isr(&scn->osc);
2391 	hif_destroy_oom_work(scn);
2392 	scn->hif_init_done = false;
2393 
2394 	/*
2395 	 * At this point, asynchronous threads are stopped,
2396 	 * The Target should not DMA nor interrupt, Host code may
2397 	 * not initiate anything more.  So we just need to clean
2398 	 * up Host-side state.
2399 	 */
2400 
2401 	if (scn->athdiag_procfs_inited) {
2402 		athdiag_procfs_remove();
2403 		scn->athdiag_procfs_inited = false;
2404 	}
2405 
2406 	hif_buffer_cleanup(hif_state);
2407 
2408 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2409 		struct HIF_CE_pipe_info *pipe_info;
2410 		struct CE_attr attr;
2411 		struct CE_handle *ce_diag = hif_state->ce_diag;
2412 
2413 		pipe_info = &hif_state->pipe_info[pipe_num];
2414 		if (pipe_info->ce_hdl) {
2415 			if (pipe_info->ce_hdl != ce_diag) {
2416 				attr = hif_state->host_ce_config[pipe_num];
2417 				if (attr.src_nentries)
2418 					qdf_spinlock_destroy(&pipe_info->
2419 							completion_freeq_lock);
2420 			}
2421 			ce_fini(pipe_info->ce_hdl);
2422 			pipe_info->ce_hdl = NULL;
2423 			pipe_info->buf_sz = 0;
2424 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2425 		}
2426 	}
2427 
2428 	if (hif_state->sleep_timer_init) {
2429 		qdf_timer_stop(&hif_state->sleep_timer);
2430 		qdf_timer_free(&hif_state->sleep_timer);
2431 		hif_state->sleep_timer_init = false;
2432 	}
2433 
2434 	hif_state->started = false;
2435 }
2436 
2437 
2438 /**
2439  * hif_get_target_ce_config() - get copy engine configuration
2440  * @target_ce_config_ret: basic copy engine configuration
2441  * @target_ce_config_sz_ret: size of the basic configuration in bytes
2442  * @target_service_to_ce_map_ret: service mapping for the copy engines
2443  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2444  * @target_shadow_reg_cfg_ret: shadow register configuration
2445  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2446  * Provides an accessor to these values outside of this file.
2447  * Currently these are stored in static pointers to const sections.
2448  * currently these are stored in static pointers to const sections.
2449  * there are multiple configurations that are selected from at compile time.
2450  * Runtime selection would need to consider mode, target type and bus type.
2451  *
2452  * Return: return by parameter.
2453  */
2454 void hif_get_target_ce_config(struct hif_softc *scn,
2455 		struct CE_pipe_config **target_ce_config_ret,
2456 		uint32_t *target_ce_config_sz_ret,
2457 		struct service_to_pipe **target_service_to_ce_map_ret,
2458 		uint32_t *target_service_to_ce_map_sz_ret,
2459 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
2460 		uint32_t *shadow_cfg_sz_ret)
2461 {
2462 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2463 
2464 	*target_ce_config_ret = hif_state->target_ce_config;
2465 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
2466 
2467 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2468 				       target_service_to_ce_map_sz_ret);
2469 
2470 	if (target_shadow_reg_cfg_ret)
2471 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2472 
2473 	if (shadow_cfg_sz_ret)
2474 		*shadow_cfg_sz_ret = shadow_cfg_sz;
2475 }
2476 
2477 #ifdef CONFIG_SHADOW_V2
2478 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2479 {
2480 	int i;
2481 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2482 		  "%s: num_config %d\n", __func__, cfg->num_shadow_reg_v2_cfg);
2483 
2484 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2485 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2486 		     "%s: i %d, val %x\n", __func__, i,
2487 		     cfg->shadow_reg_v2_cfg[i].addr);
2488 	}
2489 }
2490 
2491 #else
2492 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2493 {
2494 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2495 		  "%s: CONFIG_SHADOW_V2 not defined\n", __func__);
2496 }
2497 #endif
2498 
2499 /**
2500  * hif_wlan_enable(): call the platform driver to enable wlan
2501  * @scn: HIF Context
2502  *
2503  * This function passes the con_mode and CE configuration to
2504  * platform driver to enable wlan.
2505  *
2506  * Return: linux error code
2507  */
2508 int hif_wlan_enable(struct hif_softc *scn)
2509 {
2510 	struct pld_wlan_enable_cfg cfg;
2511 	enum pld_driver_mode mode;
2512 	uint32_t con_mode = hif_get_conparam(scn);
2513 
2514 	hif_get_target_ce_config(scn,
2515 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
2516 			&cfg.num_ce_tgt_cfg,
2517 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
2518 			&cfg.num_ce_svc_pipe_cfg,
2519 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
2520 			&cfg.num_shadow_reg_cfg);
2521 
2522 	/* translate from structure size to array size */
2523 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
2524 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
2525 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
2526 
2527 	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
2528 			      &cfg.num_shadow_reg_v2_cfg);
2529 
2530 	hif_print_hal_shadow_register_cfg(&cfg);
2531 
2532 	if (QDF_GLOBAL_FTM_MODE == con_mode)
2533 		mode = PLD_FTM;
2534 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
2535 		mode = PLD_COLDBOOT_CALIBRATION;
2536 	else if (QDF_IS_EPPING_ENABLED(con_mode))
2537 		mode = PLD_EPPING;
2538 	else
2539 		mode = PLD_MISSION;
2540 
2541 	if (BYPASS_QMI)
2542 		return 0;
2543 	else
2544 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
2545 				       mode, QWLAN_VERSIONSTR);
2546 }
2547 
2548 #ifdef WLAN_FEATURE_EPPING
2549 
2550 #define CE_EPPING_USES_IRQ true
2551 
2552 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
2553 {
2554 	if (CE_EPPING_USES_IRQ)
2555 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
2556 	else
2557 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
2558 	hif_state->target_ce_config = target_ce_config_wlan_epping;
2559 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
2560 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
2561 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
2562 }
2563 #endif
2564 
2565 /**
2566  * hif_ce_prepare_config() - load the correct static tables.
2567  * @scn: hif context
2568  *
2569  * Epping uses different static attribute tables than mission mode.
2570  */
2571 void hif_ce_prepare_config(struct hif_softc *scn)
2572 {
2573 	uint32_t mode = hif_get_conparam(scn);
2574 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2575 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
2576 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2577 
2578 	hif_state->ce_services = ce_services_attach(scn);
2579 
2580 	scn->ce_count = HOST_CE_COUNT;
2581 	/* if epping is enabled we need to use the epping configuration. */
2582 	if (QDF_IS_EPPING_ENABLED(mode)) {
2583 		hif_ce_prepare_epping_config(hif_state);
2584 	}
2585 
2586 	switch (tgt_info->target_type) {
2587 	default:
2588 		hif_state->host_ce_config = host_ce_config_wlan;
2589 		hif_state->target_ce_config = target_ce_config_wlan;
2590 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
2591 		break;
2592 	case TARGET_TYPE_AR900B:
2593 	case TARGET_TYPE_QCA9984:
2594 	case TARGET_TYPE_IPQ4019:
2595 	case TARGET_TYPE_QCA9888:
2596 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
2597 			hif_state->host_ce_config =
2598 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
2599 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2600 			hif_state->host_ce_config =
2601 				host_lowdesc_ce_cfg_wlan_ar900b;
2602 		} else {
2603 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
2604 		}
2605 
2606 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
2607 		hif_state->target_ce_config_sz =
2608 				sizeof(target_ce_config_wlan_ar900b);
2609 
2610 		break;
2611 
2612 	case TARGET_TYPE_AR9888:
2613 	case TARGET_TYPE_AR9888V2:
2614 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2615 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
2616 		} else {
2617 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
2618 		}
2619 
2620 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
2621 		hif_state->target_ce_config_sz =
2622 					sizeof(target_ce_config_wlan_ar9888);
2623 
2624 		break;
2625 
2626 	case TARGET_TYPE_QCA8074:
2627 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
2628 			hif_state->host_ce_config =
2629 					host_ce_config_wlan_qca8074_pci;
2630 			hif_state->target_ce_config =
2631 				target_ce_config_wlan_qca8074_pci;
2632 			hif_state->target_ce_config_sz =
2633 				sizeof(target_ce_config_wlan_qca8074_pci);
2634 		} else {
2635 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
2636 			hif_state->target_ce_config =
2637 					target_ce_config_wlan_qca8074;
2638 			hif_state->target_ce_config_sz =
2639 				sizeof(target_ce_config_wlan_qca8074);
2640 		}
2641 		break;
2642 	case TARGET_TYPE_QCA6290:
2643 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
2644 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
2645 		hif_state->target_ce_config_sz =
2646 					sizeof(target_ce_config_wlan_qca6290);
2647 
2648 		scn->ce_count = QCA_6290_CE_COUNT;
2649 		break;
2650 	}
2651 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
2652 }
2653 
2654 /**
2655  * hif_ce_open() - do ce specific allocations
2656  * @hif_sc: pointer to hif context
2657  *
2658  * Return: QDF_STATUS_SUCCESS
2659  */
2660 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
2661 {
2662 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2663 
2664 	qdf_spinlock_create(&hif_state->irq_reg_lock);
2665 	qdf_spinlock_create(&hif_state->keep_awake_lock);
2666 	return QDF_STATUS_SUCCESS;
2667 }
2668 
2669 /**
2670  * hif_ce_close() - do ce specific free
2671  * @hif_sc: pointer to hif context
2672  */
2673 void hif_ce_close(struct hif_softc *hif_sc)
2674 {
2675 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2676 
2677 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
2678 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
2679 }
2680 
2681 /**
2682  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
2683  * @hif_sc: hif context
2684  *
2685  * uses state variables to support cleaning up when hif_config_ce fails.
2686  */
2687 void hif_unconfig_ce(struct hif_softc *hif_sc)
2688 {
2689 	int pipe_num;
2690 	struct HIF_CE_pipe_info *pipe_info;
2691 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2692 
2693 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
2694 		pipe_info = &hif_state->pipe_info[pipe_num];
2695 		if (pipe_info->ce_hdl) {
2696 			ce_unregister_irq(hif_state, (1 << pipe_num));
2697 			ce_fini(pipe_info->ce_hdl);
2698 			pipe_info->ce_hdl = NULL;
2699 			pipe_info->buf_sz = 0;
2700 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2701 		}
2702 	}
2703 	if (hif_sc->athdiag_procfs_inited) {
2704 		athdiag_procfs_remove();
2705 		hif_sc->athdiag_procfs_inited = false;
2706 	}
2707 }
2708 
2709 #ifdef CONFIG_BYPASS_QMI
2710 #define FW_SHARED_MEM (2 * 1024 * 1024)
2711 
2712 /**
2713  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
2714  * @scn: pointer to HIF structure
2715  *
2716  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
2717  *
2718  * Return: void
2719  */
2720 static void hif_post_static_buf_to_target(struct hif_softc *scn)
2721 {
2722 	void *target_va;
2723 	phys_addr_t target_pa;
2724 
2725 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2726 				FW_SHARED_MEM, &target_pa);
2727 	if (NULL == target_va) {
2728 		HIF_TRACE("Memory allocation failed, could not post target buf");
2729 		return;
2730 	}
2731 	hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
2732 	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
2733 }
2734 #else
2735 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
2736 {
2737 }
2738 #endif
2739 
2740 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
2741 				bool wait_for_it)
2742 {
2743 	/* todo */
2744 	return 0;
2745 }
2746 
2747 /**
2748  * hif_config_ce() - configure copy engines
2749  * @scn: hif context
2750  *
2751  * Prepares fw, copy engine hardware and host sw according
2752  * to the attributes selected by hif_ce_prepare_config.
2753  *
2754  * also calls athdiag_procfs_init
2755  *
2756  * Return: 0 for success, nonzero for failure.
2757  */
2758 int hif_config_ce(struct hif_softc *scn)
2759 {
2760 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2761 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2762 	struct HIF_CE_pipe_info *pipe_info;
2763 	int pipe_num;
2764 	struct CE_state *ce_state = NULL;
2765 
2766 #ifdef ADRASTEA_SHADOW_REGISTERS
2767 	int i;
2768 #endif
2769 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
2770 
2771 	scn->notice_send = true;
2772 
2773 	hif_post_static_buf_to_target(scn);
2774 
2775 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
2776 
2777 	hif_config_rri_on_ddr(scn);
2778 
2779 	if (ce_srng_based(scn))
2780 		scn->bus_ops.hif_target_sleep_state_adjust =
2781 			&hif_srng_sleep_state_adjust;
2782 
2783 	/* Initialise the CE debug history sysfs interface inputs ce_id and
2784 	 * index. Disable data storing
2785 	 */
2786 	reset_ce_debug_history(scn);
2787 
2788 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2789 		struct CE_attr *attr;
2790 
2791 		pipe_info = &hif_state->pipe_info[pipe_num];
2792 		pipe_info->pipe_num = pipe_num;
2793 		pipe_info->HIF_CE_state = hif_state;
2794 		attr = &hif_state->host_ce_config[pipe_num];
2795 
2796 		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
2797 		ce_state = scn->ce_id_to_state[pipe_num];
2798 		if (!ce_state) {
2799 			A_TARGET_ACCESS_UNLIKELY(scn);
2800 			goto err;
2801 		}
2802 		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
2803 		QDF_ASSERT(pipe_info->ce_hdl != NULL);
2804 		if (pipe_info->ce_hdl == NULL) {
2805 			rv = QDF_STATUS_E_FAILURE;
2806 			A_TARGET_ACCESS_UNLIKELY(scn);
2807 			goto err;
2808 		}
2809 
2810 		ce_state->lro_data = qdf_lro_init();
2811 
2812 		if (attr->flags & CE_ATTR_DIAG) {
2813 			/* Reserve the last CE for
2814 			 * Diagnostic Window support
2815 			 */
2816 			hif_state->ce_diag = pipe_info->ce_hdl;
2817 			continue;
2818 		}
2819 
2820 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2821 				(ce_state->htt_rx_data))
2822 			continue;
2823 
2824 		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
2825 		if (attr->dest_nentries > 0) {
2826 			atomic_set(&pipe_info->recv_bufs_needed,
2827 				   init_buffer_count(attr->dest_nentries - 1));
2828 			/*SRNG based CE has one entry less */
2829 			if (ce_srng_based(scn))
2830 				atomic_dec(&pipe_info->recv_bufs_needed);
2831 		} else {
2832 			atomic_set(&pipe_info->recv_bufs_needed, 0);
2833 		}
2834 		ce_tasklet_init(hif_state, (1 << pipe_num));
2835 		ce_register_irq(hif_state, (1 << pipe_num));
2836 	}
2837 
2838 	if (athdiag_procfs_init(scn) != 0) {
2839 		A_TARGET_ACCESS_UNLIKELY(scn);
2840 		goto err;
2841 	}
2842 	scn->athdiag_procfs_inited = true;
2843 
2844 	HIF_DBG("%s: ce_init done", __func__);
2845 
2846 	init_tasklet_workers(hif_hdl);
2847 
2848 	HIF_DBG("%s: X, ret = %d", __func__, rv);
2849 
2850 #ifdef ADRASTEA_SHADOW_REGISTERS
2851 	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
2852 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
2853 		HIF_DBG("%s Shadow Register%d is mapped to address %x",
2854 			  __func__, i,
2855 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
2856 	}
2857 #endif
2858 
2859 	return rv != QDF_STATUS_SUCCESS;
2860 
2861 err:
2862 	/* Failure, so clean up */
2863 	hif_unconfig_ce(scn);
2864 	HIF_TRACE("%s: X, ret = %d", __func__, rv);
2865 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
2866 }
2867 
2868 #ifdef WLAN_FEATURE_FASTPATH
2869 /**
2870  * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
2871  * @handler: Callback function
2872  * @context: handle for callback function
2873  *
2874  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
2875  */
2876 int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
2877 				fastpath_msg_handler handler,
2878 				void *context)
2879 {
2880 	struct CE_state *ce_state;
2881 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2882 	int i;
2883 
2884 	if (!scn) {
2885 		HIF_ERROR("%s: scn is NULL", __func__);
2886 		QDF_ASSERT(0);
2887 		return QDF_STATUS_E_FAILURE;
2888 	}
2889 
2890 	if (!scn->fastpath_mode_on) {
2891 		HIF_WARN("%s: Fastpath mode disabled", __func__);
2892 		return QDF_STATUS_E_FAILURE;
2893 	}
2894 
2895 	for (i = 0; i < scn->ce_count; i++) {
2896 		ce_state = scn->ce_id_to_state[i];
2897 		if (ce_state->htt_rx_data) {
2898 			ce_state->fastpath_handler = handler;
2899 			ce_state->context = context;
2900 		}
2901 	}
2902 
2903 	return QDF_STATUS_SUCCESS;
2904 }
2905 qdf_export_symbol(hif_ce_fastpath_cb_register);
2906 #endif
2907 
2908 #ifdef IPA_OFFLOAD
2909 /**
2910  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
2911  * @scn: bus context
2912  * @ce_sr: copyengine source ring shared memory info
2913  * @ce_sr_ring_size: copyengine source ring size
2914  * @ce_reg_paddr: copyengine register physical address
2915  *
2916  * When the IPA micro controller data path offload feature is enabled,
2917  * HIF releases the copy engine resource information to the IPA uC,
2918  * which then accesses the hardware resources using that information.
2919  *
2920  * Return: None
2921  */
2922 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
2923 			     qdf_shared_mem_t **ce_sr,
2924 			     uint32_t *ce_sr_ring_size,
2925 			     qdf_dma_addr_t *ce_reg_paddr)
2926 {
2927 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2928 	struct HIF_CE_pipe_info *pipe_info =
2929 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
2930 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2931 
2932 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
2933 			    ce_reg_paddr);
2934 }
2935 #endif /* IPA_OFFLOAD */
2936 
2937 
2938 #ifdef ADRASTEA_SHADOW_REGISTERS
2939 
2940 /*
2941  * Current shadow register config
2942  *
2943  * -----------------------------------------------------------
2944  * Shadow Register      |     CE   |    src/dst write index
2945  * -----------------------------------------------------------
2946  *         0            |     0    |           src
2947  *         1     No Config - Doesn't point to anything
2948  *         2     No Config - Doesn't point to anything
2949  *         3            |     3    |           src
2950  *         4            |     4    |           src
2951  *         5            |     5    |           src
2952  *         6     No Config - Doesn't point to anything
2953  *         7            |     7    |           src
2954  *         8     No Config - Doesn't point to anything
2955  *         9     No Config - Doesn't point to anything
2956  *         10    No Config - Doesn't point to anything
2957  *         11    No Config - Doesn't point to anything
2958  * -----------------------------------------------------------
2959  *         12    No Config - Doesn't point to anything
2960  *         13           |     1    |           dst
2961  *         14           |     2    |           dst
2962  *         15    No Config - Doesn't point to anything
2963  *         16    No Config - Doesn't point to anything
2964  *         17    No Config - Doesn't point to anything
2965  *         18    No Config - Doesn't point to anything
2966  *         19           |     7    |           dst
2967  *         20           |     8    |           dst
2968  *         21    No Config - Doesn't point to anything
2969  *         22    No Config - Doesn't point to anything
2970  *         23    No Config - Doesn't point to anything
2971  * -----------------------------------------------------------
2972  *
2973  *
2974  * ToDo - Move shadow register config to following in the future
2975  * This helps free up a block of shadow registers towards the end.
2976  * Can be used for other purposes
2977  *
2978  * -----------------------------------------------------------
2979  * Shadow Register      |     CE   |    src/dst write index
2980  * -----------------------------------------------------------
2981  *      0            |     0    |           src
2982  *      1            |     3    |           src
2983  *      2            |     4    |           src
2984  *      3            |     5    |           src
2985  *      4            |     7    |           src
2986  * -----------------------------------------------------------
2987  *      5            |     1    |           dst
2988  *      6            |     2    |           dst
2989  *      7            |     7    |           dst
2990  *      8            |     8    |           dst
2991  * -----------------------------------------------------------
2992  *      9     No Config - Doesn't point to anything
2993  *      12    No Config - Doesn't point to anything
2994  *      13    No Config - Doesn't point to anything
2995  *      14    No Config - Doesn't point to anything
2996  *      15    No Config - Doesn't point to anything
2997  *      16    No Config - Doesn't point to anything
2998  *      17    No Config - Doesn't point to anything
2999  *      18    No Config - Doesn't point to anything
3000  *      19    No Config - Doesn't point to anything
3001  *      20    No Config - Doesn't point to anything
3002  *      21    No Config - Doesn't point to anything
3003  *      22    No Config - Doesn't point to anything
3004  *      23    No Config - Doesn't point to anything
3005  * -----------------------------------------------------------
3006  */
3007 
3008 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3009 {
3010 	u32 addr = 0;
3011 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3012 
3013 	switch (ce) {
3014 	case 0:
3015 		addr = SHADOW_VALUE0;
3016 		break;
3017 	case 3:
3018 		addr = SHADOW_VALUE3;
3019 		break;
3020 	case 4:
3021 		addr = SHADOW_VALUE4;
3022 		break;
3023 	case 5:
3024 		addr = SHADOW_VALUE5;
3025 		break;
3026 	case 7:
3027 		addr = SHADOW_VALUE7;
3028 		break;
3029 	default:
3030 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3031 		QDF_ASSERT(0);
3032 	}
3033 	return addr;
3034 
3035 }
3036 
3037 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3038 {
3039 	u32 addr = 0;
3040 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3041 
3042 	switch (ce) {
3043 	case 1:
3044 		addr = SHADOW_VALUE13;
3045 		break;
3046 	case 2:
3047 		addr = SHADOW_VALUE14;
3048 		break;
3049 	case 5:
3050 		addr = SHADOW_VALUE17;
3051 		break;
3052 	case 7:
3053 		addr = SHADOW_VALUE19;
3054 		break;
3055 	case 8:
3056 		addr = SHADOW_VALUE20;
3057 		break;
3058 	case 9:
3059 		addr = SHADOW_VALUE21;
3060 		break;
3061 	case 10:
3062 		addr = SHADOW_VALUE22;
3063 		break;
3064 	case 11:
3065 		addr = SHADOW_VALUE23;
3066 		break;
3067 	default:
3068 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3069 		QDF_ASSERT(0);
3070 	}
3071 
3072 	return addr;
3073 
3074 }
3075 #endif
3076 
3077 #if defined(FEATURE_LRO)
3078 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
3079 {
3080 	struct CE_state *ce_state;
3081 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3082 
3083 	ce_state = scn->ce_id_to_state[ctx_id];
3084 
3085 	return ce_state->lro_data;
3086 }
3087 #endif
3088 
3089 /**
3090  * hif_map_service_to_pipe() - returns the ce ids pertaining to
3091  * this service
3092  * @scn: hif_softc pointer.
3093  * @svc_id: Service ID for which the mapping is needed.
3094  * @ul_pipe: address of the container in which ul pipe is returned.
3095  * @dl_pipe: address of the container in which dl pipe is returned.
3096  * @ul_is_polled: address of the container in which a bool
3097  *			indicating if the UL CE for this service
3098  *			is polled is returned.
3099  * @dl_is_polled: address of the container in which a bool
3100  *			indicating if the DL CE for this service
3101  *			is polled is returned.
3102  *
3103  * Return: Indicates whether the service has been found in the table.
3104  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
3105  *         There will be warning logs if either leg has not been updated
3106  *         because it missed the entry in the table (but this is not an err).
3107  */
3108 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
3109 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
3110 			int *dl_is_polled)
3111 {
3112 	int status = QDF_STATUS_E_INVAL;
3113 	unsigned int i;
3114 	struct service_to_pipe element;
3115 	struct service_to_pipe *tgt_svc_map_to_use;
3116 	uint32_t sz_tgt_svc_map_to_use;
3117 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3118 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3119 	bool dl_updated = false;
3120 	bool ul_updated = false;
3121 
3122 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
3123 				       &sz_tgt_svc_map_to_use);
3124 
3125 	*dl_is_polled = 0;  /* polling for received messages not supported */
3126 
3127 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
3128 
3129 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
3130 		if (element.service_id == svc_id) {
3131 			if (element.pipedir == PIPEDIR_OUT) {
3132 				*ul_pipe = element.pipenum;
3133 				*ul_is_polled =
3134 					(hif_state->host_ce_config[*ul_pipe].flags &
3135 					 CE_ATTR_DISABLE_INTR) != 0;
3136 				ul_updated = true;
3137 			} else if (element.pipedir == PIPEDIR_IN) {
3138 				*dl_pipe = element.pipenum;
3139 				dl_updated = true;
3140 			}
3141 			status = QDF_STATUS_SUCCESS;
3142 		}
3143 	}
3144 	if (ul_updated == false)
3145 		HIF_INFO("%s: ul pipe is NOT updated for service %d",
3146 			 __func__, svc_id);
3147 	if (dl_updated == false)
3148 		HIF_INFO("%s: dl pipe is NOT updated for service %d",
3149 			 __func__, svc_id);
3150 
3151 	return status;
3152 }
3153 
3154 #ifdef SHADOW_REG_DEBUG
3155 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
3156 		uint32_t CE_ctrl_addr)
3157 {
3158 	uint32_t read_from_hw, srri_from_ddr = 0;
3159 
3160 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
3161 
3162 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3163 
3164 	if (read_from_hw != srri_from_ddr) {
3165 		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3166 		       __func__, srri_from_ddr, read_from_hw,
3167 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3168 		QDF_ASSERT(0);
3169 	}
3170 	return srri_from_ddr;
3171 }
3172 
3173 
3174 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
3175 		uint32_t CE_ctrl_addr)
3176 {
3177 	uint32_t read_from_hw, drri_from_ddr = 0;
3178 
3179 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
3180 
3181 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3182 
3183 	if (read_from_hw != drri_from_ddr) {
3184 		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3185 		       drri_from_ddr, read_from_hw,
3186 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3187 		QDF_ASSERT(0);
3188 	}
3189 	return drri_from_ddr;
3190 }
3191 
3192 #endif
3193 
3194 #ifdef ADRASTEA_RRI_ON_DDR
3195 /**
3196  * hif_get_src_ring_read_index(): Called to get the SRRI
3197  *
3198  * @scn: hif_softc pointer
3199  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3200  *
3201  * This function returns the SRRI to the caller. For CEs that
3202  * don't have interrupts enabled, we look at the DDR based SRRI
3203  *
3204  * Return: SRRI
3205  */
3206 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
3207 		uint32_t CE_ctrl_addr)
3208 {
3209 	struct CE_attr attr;
3210 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3211 
3212 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3213 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3214 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3215 	} else {
3216 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3217 			return A_TARGET_READ(scn,
3218 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
3219 		else
3220 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
3221 					CE_ctrl_addr);
3222 	}
3223 }
3224 
3225 /**
3226  * hif_get_dst_ring_read_index(): Called to get the DRRI
3227  *
3228  * @scn: hif_softc pointer
3229  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3230  *
3231  * This function returns the DRRI to the caller. For CEs that
3232  * don't have interrupts enabled, we look at the DDR based DRRI
3233  *
3234  * Return: DRRI
3235  */
3236 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
3237 		uint32_t CE_ctrl_addr)
3238 {
3239 	struct CE_attr attr;
3240 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3241 
3242 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3243 
3244 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3245 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3246 	} else {
3247 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3248 			return A_TARGET_READ(scn,
3249 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
3250 		else
3251 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
3252 					CE_ctrl_addr);
3253 	}
3254 }
3255 
3256 /**
3257  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3258  *
3259  * @scn: hif_softc pointer
3260  *
3261  * This function allocates non cached memory on ddr and sends
3262  * the physical address of this memory to the CE hardware. The
3263  * hardware updates the RRI on this particular location.
3264  *
3265  * Return: None
3266  */
3267 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3268 {
3269 	unsigned int i;
3270 	qdf_dma_addr_t paddr_rri_on_ddr;
3271 	uint32_t high_paddr, low_paddr;
3272 
3273 	scn->vaddr_rri_on_ddr =
3274 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3275 		scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)),
3276 		&paddr_rri_on_ddr);
3277 
3278 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
3279 	low_paddr  = BITS0_TO_31(paddr_rri_on_ddr);
3280 	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
3281 
3282 	HIF_DBG("%s using srri and drri from DDR", __func__);
3283 
3284 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
3285 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
3286 
3287 	for (i = 0; i < CE_COUNT; i++)
3288 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
3289 
3290 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
3291 
3292 }
3293 #else
3294 
3295 /**
3296  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3297  *
3298  * @scn: hif_softc pointer
3299  *
3300  * This is a dummy implementation for platforms that don't
3301  * support this functionality.
3302  *
3303  * Return: None
3304  */
3305 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3306 {
3307 }
3308 #endif
3309 
3310 /**
3311  * hif_dump_ce_registers() - dump ce registers
3312  * @scn: hif_opaque_softc pointer.
3313  *
3314  * Output the copy engine registers
3315  *
3316  * Return: 0 for success or error code
3317  */
3318 int hif_dump_ce_registers(struct hif_softc *scn)
3319 {
3320 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3321 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
3322 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
3323 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
3324 	uint16_t i;
3325 	QDF_STATUS status;
3326 
3327 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
3328 		if (scn->ce_id_to_state[i] == NULL) {
3329 			HIF_DBG("CE%d not used.", i);
3330 			continue;
3331 		}
3332 
3333 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
3334 					   (uint8_t *) &ce_reg_values[0],
3335 					   ce_reg_word_size * sizeof(uint32_t));
3336 
3337 		if (status != QDF_STATUS_SUCCESS) {
3338 			HIF_ERROR("Dumping CE register failed!");
3339 			return -EACCES;
3340 		}
3341 		HIF_ERROR("CE%d=>\n", i);
3342 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
3343 				   (uint8_t *) &ce_reg_values[0],
3344 				   ce_reg_word_size * sizeof(uint32_t));
3345 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d\n", (ce_reg_address
3346 				+ SR_WR_INDEX_ADDRESS),
3347 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
3348 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d\n", (ce_reg_address
3349 				+ CURRENT_SRRI_ADDRESS),
3350 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
3351 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d\n", (ce_reg_address
3352 				+ DST_WR_INDEX_ADDRESS),
3353 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
3354 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d\n", (ce_reg_address
3355 				+ CURRENT_DRRI_ADDRESS),
3356 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
3357 		qdf_print("---\n");
3358 	}
3359 	return 0;
3360 }
3361 qdf_export_symbol(hif_dump_ce_registers);
3362 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
3363 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
3364 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
3365 {
3366 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3367 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3368 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
3369 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3370 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3371 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
3372 	struct CE_ring_state *src_ring = ce_state->src_ring;
3373 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
3374 
3375 	if (src_ring) {
3376 		hif_info->ul_pipe.nentries = src_ring->nentries;
3377 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
3378 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
3379 		hif_info->ul_pipe.write_index = src_ring->write_index;
3380 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
3381 		hif_info->ul_pipe.base_addr_CE_space =
3382 			src_ring->base_addr_CE_space;
3383 		hif_info->ul_pipe.base_addr_owner_space =
3384 			src_ring->base_addr_owner_space;
3385 	}
3386 
3387 
3388 	if (dest_ring) {
3389 		hif_info->dl_pipe.nentries = dest_ring->nentries;
3390 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
3391 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
3392 		hif_info->dl_pipe.write_index = dest_ring->write_index;
3393 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
3394 		hif_info->dl_pipe.base_addr_CE_space =
3395 			dest_ring->base_addr_CE_space;
3396 		hif_info->dl_pipe.base_addr_owner_space =
3397 			dest_ring->base_addr_owner_space;
3398 	}
3399 
3400 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
3401 	hif_info->ctrl_addr = ce_state->ctrl_addr;
3402 
3403 	return hif_info;
3404 }
3405 qdf_export_symbol(hif_get_addl_pipe_info);
3406 
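/**
 * hif_set_nss_wifiol_mode() - store the NSS WiFi offload mode
 * @osc: hif opaque context
 * @mode: NSS WiFi offload mode
 *
 * Return: 0
 */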
3407 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
3408 {
3409 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3410 
3411 	scn->nss_wifi_ol_mode = mode;
3412 	return 0;
3413 }
3414 qdf_export_symbol(hif_set_nss_wifiol_mode);
3415 #endif
3416 
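/**
 * hif_set_attribute() - store the hif attribute in the hif context
 * @osc: hif opaque context
 * @hif_attrib: attribute to store
 *
 * Return: none
 */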
3417 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
3418 {
3419 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3420 	scn->hif_attribute = hif_attrib;
3421 }
3422 
3423 
3424 /* disable interrupts (only applicable to the legacy copy engine currently) */
3425 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
3426 {
3427 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3428 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
3429 	uint32_t ctrl_addr = CE_state->ctrl_addr;
3430 
3431 	Q_TARGET_ACCESS_BEGIN(scn);
3432 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
3433 	Q_TARGET_ACCESS_END(scn);
3434 }
3435 qdf_export_symbol(hif_disable_interrupt);
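
/*
 * Illustrative usage sketch (not part of the original source): a caller
 * that wants to quiesce a single legacy copy engine can disable its
 * copy-complete interrupt by pipe number.  The caller's handle and the
 * pipe id below are hypothetical.
 *
 *	hif_disable_interrupt(hif_hdl, pipe_num);
 */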
3436 
3437 /**
3438  * hif_fw_event_handler() - hif fw event handler
3439  * @hif_state: pointer to hif ce state structure
3440  *
3441  * Process fw events and raise HTC callback to process fw events.
3442  *
3443  * Return: none
3444  */
3445 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
3446 {
3447 	struct hif_msg_callbacks *msg_callbacks =
3448 		&hif_state->msg_callbacks_current;
3449 
3450 	if (!msg_callbacks->fwEventHandler)
3451 		return;
3452 
3453 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
3454 			QDF_STATUS_E_FAILURE);
3455 }
3456 
3457 #ifndef QCA_WIFI_3_0
3458 /**
3459  * hif_fw_interrupt_handler() - FW interrupt handler
3460  * @irq: irq number
3461  * @arg: the user pointer
3462  *
3463  * Called from the PCI interrupt handler when the firmware raises an
3464  * interrupt to the Host.
3465  *
3466  * Only registered for legacy copy engine (CE) devices.
3467  *
3468  * Return: status of handled irq
3469  */
3470 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3471 {
3472 	struct hif_softc *scn = arg;
3473 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3474 	uint32_t fw_indicator_address, fw_indicator;
3475 
3476 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
3477 		return ATH_ISR_NOSCHED;
3478 
3479 	fw_indicator_address = hif_state->fw_indicator_address;
3480 	/* For sudden unplug this will return ~0 */
3481 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
3482 
3483 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
3484 		/* ACK: clear Target-side pending event */
3485 		A_TARGET_WRITE(scn, fw_indicator_address,
3486 			       fw_indicator & ~FW_IND_EVENT_PENDING);
3487 		if (Q_TARGET_ACCESS_END(scn) < 0)
3488 			return ATH_ISR_SCHED;
3489 
3490 		if (hif_state->started) {
3491 			hif_fw_event_handler(hif_state);
3492 		} else {
3493 			/*
3494 			 * Probable Target failure before we're prepared
3495 			 * to handle it.  Generally unexpected.
3496 			 */
3497 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
3498 				("%s: Early firmware event indicated\n",
3499 				 __func__));
3500 		}
3501 	} else {
3502 		if (Q_TARGET_ACCESS_END(scn) < 0)
3503 			return ATH_ISR_SCHED;
3504 	}
3505 
3506 	return ATH_ISR_SCHED;
3507 }
3508 #else
3509 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3510 {
3511 	return ATH_ISR_SCHED;
3512 }
3513 #endif /* #ifndef QCA_WIFI_3_0 */
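
/*
 * Illustrative registration sketch (not part of the original source):
 * on legacy targets a handler like this would typically be hooked to
 * the firmware interrupt line via request_irq(); the irq number and
 * name used below are hypothetical.
 *
 *	if (request_irq(fw_irq, hif_fw_interrupt_handler, IRQF_SHARED,
 *			"wlan_firmware", scn))
 *		HIF_ERROR("%s: request_irq failed", __func__);
 */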
3514 
3515 
3516 /**
3517  * hif_wlan_disable(): call the platform driver to disable wlan
3518  * @scn: HIF Context
3519  *
3520  * This function converts the con_mode to a platform driver mode and
3521  * calls into the platform driver to disable wlan.
3522  *
3523  * Return: void
3524  */
3525 void hif_wlan_disable(struct hif_softc *scn)
3526 {
3527 	enum pld_driver_mode mode;
3528 	uint32_t con_mode = hif_get_conparam(scn);
3529 
3530 	if (scn->target_status == TARGET_STATUS_RESET)
3531 		return;
3532 
3533 	if (QDF_GLOBAL_FTM_MODE == con_mode)
3534 		mode = PLD_FTM;
3535 	else if (QDF_IS_EPPING_ENABLED(con_mode))
3536 		mode = PLD_EPPING;
3537 	else
3538 		mode = PLD_MISSION;
3539 
3540 	pld_wlan_disable(scn->qdf_dev->dev, mode);
3541 }
3542 
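/**
 * hif_get_wake_ce_id() - find the copy engine used as the wake CE
 * @scn: hif context
 * @ce_id: output buffer for the wake CE id
 *
 * The DL pipe of the HTC_CTRL_RSVD_SVC service maps to the copy engine
 * that can wake the host, so that pipe id is returned through @ce_id.
 *
 * Return: 0 on success or a negative error code on failure
 */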
3543 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
3544 {
3545 	QDF_STATUS status;
3546 	uint8_t ul_pipe, dl_pipe;
3547 	int ul_is_polled, dl_is_polled;
3548 
3549 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
3550 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
3551 					 HTC_CTRL_RSVD_SVC,
3552 					 &ul_pipe, &dl_pipe,
3553 					 &ul_is_polled, &dl_is_polled);
3554 	if (status) {
3555 		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
3556 		return qdf_status_to_os_return(status);
3557 	}
3558 
3559 	*ce_id = dl_pipe;
3560 
3561 	return 0;
3562 }
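
/*
 * Illustrative usage sketch (not part of the original source): a wake
 * interrupt setup path could look up the wake CE before arming it.  The
 * calling context and logging below are hypothetical.
 *
 *	uint8_t wake_ce_id;
 *
 *	if (hif_get_wake_ce_id(scn, &wake_ce_id))
 *		HIF_ERROR("%s: could not find the wake CE", __func__);
 *	else
 *		HIF_INFO("%s: wake CE is CE%d", __func__, wake_ce_id);
 */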
3563