xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision a175314c51a4ce5cec2835cc8a8c7dc0c1810915)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "ce_api.h"
33 #include "qdf_trace.h"
34 #include "pld_common.h"
35 #include "hif_debug.h"
36 #include "ce_internal.h"
37 #include "ce_reg.h"
38 #include "ce_assignment.h"
39 #include "ce_tasklet.h"
40 #ifndef CONFIG_WIN
41 #include "qwlan_version.h"
42 #endif
43 #include "qdf_module.h"
44 
45 #define CE_POLL_TIMEOUT 10      /* ms */
46 
47 #define AGC_DUMP         1
48 #define CHANINFO_DUMP    2
49 #define BB_WATCHDOG_DUMP 3
50 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
51 #define PCIE_ACCESS_DUMP 4
52 #endif
53 #include "mp_dev.h"
54 
55 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
56 	!defined(QCA_WIFI_SUPPORT_SRNG)
57 #define QCA_WIFI_SUPPORT_SRNG
58 #endif
59 
60 /* Forward references */
61 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
62 
63 /*
64  * Fix EV118783: poll to check whether a BMI response has arrived,
65  * rather than waiting for an interrupt that may be lost.
66  */
67 /* #define BMI_RSP_POLLING */
68 #define BMI_RSP_TO_MILLISEC  1000
69 
70 #ifdef CONFIG_BYPASS_QMI
71 #define BYPASS_QMI 1
72 #else
73 #define BYPASS_QMI 0
74 #endif
75 
76 #ifdef CONFIG_WIN
77 #if ENABLE_10_4_FW_HDR
78 #define WDI_IPA_SERVICE_GROUP 5
79 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
80 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
81 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
82 #endif /* ENABLE_10_4_FW_HDR */
83 #endif
84 
85 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
86 static void hif_config_rri_on_ddr(struct hif_softc *scn);
87 
88 /**
89  * hif_target_access_log_dump() - dump access log
90  *
91  * dump access log
92  *
93  * Return: n/a
94  */
95 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
96 static void hif_target_access_log_dump(void)
97 {
98 	hif_target_dump_access_log();
99 }
100 #endif
101 
102 
103 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
104 		      uint8_t cmd_id, bool start)
105 {
106 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
107 
108 	switch (cmd_id) {
109 	case AGC_DUMP:
110 		if (start)
111 			priv_start_agc(scn);
112 		else
113 			priv_dump_agc(scn);
114 		break;
115 	case CHANINFO_DUMP:
116 		if (start)
117 			priv_start_cap_chaninfo(scn);
118 		else
119 			priv_dump_chaninfo(scn);
120 		break;
121 	case BB_WATCHDOG_DUMP:
122 		priv_dump_bbwatchdog(scn);
123 		break;
124 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
125 	case PCIE_ACCESS_DUMP:
126 		hif_target_access_log_dump();
127 		break;
128 #endif
129 	default:
130 		HIF_ERROR("%s: Invalid htc dump command", __func__);
131 		break;
132 	}
133 }
134 
135 static void ce_poll_timeout(void *arg)
136 {
137 	struct CE_state *CE_state = (struct CE_state *)arg;
138 
139 	if (CE_state->timer_inited) {
140 		ce_per_engine_service(CE_state->scn, CE_state->id);
141 		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
142 	}
143 }
144 
145 static unsigned int roundup_pwr2(unsigned int n)
146 {
147 	int i;
148 	unsigned int test_pwr2;
149 
150 	if (!(n & (n - 1)))
151 		return n; /* already a power of 2 */
152 
153 	test_pwr2 = 4;
154 	for (i = 0; i < 29; i++) {
155 		if (test_pwr2 > n)
156 			return test_pwr2;
157 		test_pwr2 = test_pwr2 << 1;
158 	}
159 
160 	QDF_ASSERT(0); /* n too large */
161 	return 0;
162 }
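
/*
 * For illustration: roundup_pwr2(1) returns 1 and roundup_pwr2(64) returns 64,
 * since both already satisfy !(n & (n - 1)); roundup_pwr2(5) returns 8 and
 * roundup_pwr2(100) returns 128, the next powers of two above the requested
 * sizes. ce_init() below relies on this when rounding attr->src_nentries and
 * attr->dest_nentries before allocating the rings.
 */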
163 
164 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
165 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
166 
167 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
168 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
169 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
170 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
171 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
172 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
173 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
174 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
175 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
176 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
177 #ifdef QCA_WIFI_3_0_ADRASTEA
178 	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
179 	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
180 	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
181 #endif
182 };
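
/*
 * A reading of the table above (an interpretation based on the field names;
 * see the shadow_reg_cfg definition for the authoritative layout): each entry
 * pairs a CE id with the offset of the write-index register shadowed for it,
 * e.g. { 0, ADRASTEA_SRC_WR_INDEX_OFFSET } shadows CE 0's source-ring write
 * index at offset 0x3C and { 1, ADRASTEA_DST_WR_INDEX_OFFSET } shadows CE 1's
 * destination-ring write index at offset 0x40.
 */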
183 
184 #ifdef QCN7605_SUPPORT
185 static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
186 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
187 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
188 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
189 	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
190 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
191 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
192 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
193 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
194 };
195 #endif
196 
197 #ifdef WLAN_FEATURE_EPPING
198 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
199 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
200 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
201 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
202 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
203 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
204 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
205 	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
206 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
207 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
208 };
209 #endif
210 
211 /* CE_PCI TABLE */
212 /*
213  * NOTE: the table below is out of date, though still a useful reference.
214  * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
215  * mapping of HTC services to HIF pipes.
216  */
217 /*
218  * This authoritative table defines Copy Engine configuration and the mapping
219  * of services/endpoints to CEs.  A subset of this information is passed to
220  * the Target during startup as a prerequisite to entering BMI phase.
221  * See:
222  *    target_service_to_ce_map - Target-side mapping
223  *    hif_map_service_to_pipe      - Host-side mapping
224  *    target_ce_config         - Target-side configuration
225  *    host_ce_config           - Host-side configuration
226    ============================================================================
227    Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
228  |                      |      | ctio | Size     | Frequency
229  |                      |      | n    |          |
230    ============================================================================
231    tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
232    descriptor |                      |      |      | O(100B)  | and regular
233    download   |                      |      |      |          |
234    ----------------------------------------------------------------------------
235    rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
236    indication |                      |      |      | O(10B)   | regular
237    upload     |                      |      |      |          |
238    ----------------------------------------------------------------------------
239    MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
240    upload     |                      |      |      | O(1000B) | (frequent
241    e.g. noise |                      |      |      |          | during IP1.0
242    packets    |                      |      |      |          | testing)
243    ----------------------------------------------------------------------------
244    MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
245    download   |                      |      |      | O(1000B) | (frequent
246    e.g.       |                      |      |      |          | during IP1.0
247    misdirecte |                      |      |      |          | testing)
248    d EAPOL    |                      |      |      |          |
249    packets    |                      |      |      |          |
250    ----------------------------------------------------------------------------
251    n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
252  | DATA_VO (uplink)     |      |      |          |
253    ----------------------------------------------------------------------------
254    n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
255  | DATA_VO (downlink)   |      |      |          |
256    ----------------------------------------------------------------------------
257    WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
258  |                      |      |      | O(100B)  |
259    ----------------------------------------------------------------------------
260    WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
261    messages   | (downlink)           |      |      | O(100B)  |
262  |                      |      |      |          |
263    ----------------------------------------------------------------------------
264    n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
265  | HTC_RAW_STREAMS      |      |      |          |
266  | (uplink)             |      |      |          |
267    ----------------------------------------------------------------------------
268    n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
269  | HTC_RAW_STREAMS      |      |      |          |
270  | (downlink)           |      |      |          |
271    ----------------------------------------------------------------------------
272    diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
273  |                      |      |      |          | infrequent
274    ============================================================================
275  */
276 
277 /*
278  * Map from service/endpoint to Copy Engine.
279  * This table is derived from the CE_PCI TABLE, above.
280  * It is passed to the Target at startup for use by firmware.
281  */
282 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
283 	{
284 		WMI_DATA_VO_SVC,
285 		PIPEDIR_OUT,    /* out = UL = host -> target */
286 		3,
287 	},
288 	{
289 		WMI_DATA_VO_SVC,
290 		PIPEDIR_IN,     /* in = DL = target -> host */
291 		2,
292 	},
293 	{
294 		WMI_DATA_BK_SVC,
295 		PIPEDIR_OUT,    /* out = UL = host -> target */
296 		3,
297 	},
298 	{
299 		WMI_DATA_BK_SVC,
300 		PIPEDIR_IN,     /* in = DL = target -> host */
301 		2,
302 	},
303 	{
304 		WMI_DATA_BE_SVC,
305 		PIPEDIR_OUT,    /* out = UL = host -> target */
306 		3,
307 	},
308 	{
309 		WMI_DATA_BE_SVC,
310 		PIPEDIR_IN,     /* in = DL = target -> host */
311 		2,
312 	},
313 	{
314 		WMI_DATA_VI_SVC,
315 		PIPEDIR_OUT,    /* out = UL = host -> target */
316 		3,
317 	},
318 	{
319 		WMI_DATA_VI_SVC,
320 		PIPEDIR_IN,     /* in = DL = target -> host */
321 		2,
322 	},
323 	{
324 		WMI_CONTROL_SVC,
325 		PIPEDIR_OUT,    /* out = UL = host -> target */
326 		3,
327 	},
328 	{
329 		WMI_CONTROL_SVC,
330 		PIPEDIR_IN,     /* in = DL = target -> host */
331 		2,
332 	},
333 	{
334 		HTC_CTRL_RSVD_SVC,
335 		PIPEDIR_OUT,    /* out = UL = host -> target */
336 		0,              /* could be moved to 3 (share with WMI) */
337 	},
338 	{
339 		HTC_CTRL_RSVD_SVC,
340 		PIPEDIR_IN,     /* in = DL = target -> host */
341 		2,
342 	},
343 	{
344 		HTC_RAW_STREAMS_SVC, /* not currently used */
345 		PIPEDIR_OUT,    /* out = UL = host -> target */
346 		0,
347 	},
348 	{
349 		HTC_RAW_STREAMS_SVC, /* not currently used */
350 		PIPEDIR_IN,     /* in = DL = target -> host */
351 		2,
352 	},
353 	{
354 		HTT_DATA_MSG_SVC,
355 		PIPEDIR_OUT,    /* out = UL = host -> target */
356 		4,
357 	},
358 	{
359 		HTT_DATA_MSG_SVC,
360 		PIPEDIR_IN,     /* in = DL = target -> host */
361 		1,
362 	},
363 	{
364 		WDI_IPA_TX_SVC,
365 		PIPEDIR_OUT,    /* out = UL = host -> target */
366 		5,
367 	},
368 #if defined(QCA_WIFI_3_0_ADRASTEA)
369 	{
370 		HTT_DATA2_MSG_SVC,
371 		PIPEDIR_IN,    /* in = DL = target -> host */
372 		9,
373 	},
374 	{
375 		HTT_DATA3_MSG_SVC,
376 		PIPEDIR_IN,    /* in = DL = target -> host */
377 		10,
378 	},
379 	{
380 		PACKET_LOG_SVC,
381 		PIPEDIR_IN,    /* in = DL = target -> host */
382 		11,
383 	},
384 #endif
385 	/* (Additions here) */
386 
387 	{                       /* Must be last */
388 		0,
389 		0,
390 		0,
391 	},
392 };
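
/*
 * Reading the map above: each tuple is { service id, pipe direction, pipe
 * number }, matching the service_to_pipe fields used by ce_mark_datapath()
 * below. For example, { WMI_DATA_VO_SVC, PIPEDIR_OUT, 3 } carries
 * host -> target WMI VO traffic on CE 3, and { HTT_DATA_MSG_SVC, PIPEDIR_IN,
 * 1 } carries HTT rx indications from the target to the host on CE 1.
 */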
393 
394 /* PIPEDIR_OUT = HOST to Target */
395 /* PIPEDIR_IN  = TARGET to HOST */
396 #if (defined(QCA_WIFI_QCA8074))
397 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
398 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
399 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
400 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
401 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
402 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
403 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
404 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
405 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
406 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
407 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
408 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
409 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
410 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
411 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
412 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
413 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
414 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
415 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
416 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
417 	/* (Additions here) */
418 	{ 0, 0, 0, },
419 };
420 #else
421 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
422 };
423 #endif
424 
425 /* PIPEDIR_OUT = HOST to Target */
426 /* PIPEDIR_IN  = TARGET to HOST */
427 #ifdef QCN7605_SUPPORT
428 static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
429 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
430 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
431 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
432 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
433 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
434 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
435 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
436 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
437 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
438 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
439 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
440 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
441 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
442 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
443 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
444 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
445 	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
446 #ifdef IPA_OFFLOAD
447 	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
448 #else
449 	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
450 #endif
451 	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
452 	/* (Additions here) */
453 	{ 0, 0, 0, },
454 };
455 #endif
456 
457 #if (defined(QCA_WIFI_QCA6290))
458 #ifdef CONFIG_WIN
459 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
460 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
461 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
462 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
463 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
464 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
465 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
466 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
467 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
468 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
469 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
470 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
471 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
472 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
473 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
474 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
475 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
476 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
477 	/* (Additions here) */
478 	{ 0, 0, 0, },
479 };
480 #else
481 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
482 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
483 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
484 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
485 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
486 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
487 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
488 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
489 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
490 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
491 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
492 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
493 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
494 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
495 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
496 	/* (Additions here) */
497 	{ 0, 0, 0, },
498 };
499 #endif
500 #else
501 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
502 };
503 #endif
504 
505 static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
506 	{
507 		WMI_DATA_VO_SVC,
508 		PIPEDIR_OUT,    /* out = UL = host -> target */
509 		3,
510 	},
511 	{
512 		WMI_DATA_VO_SVC,
513 		PIPEDIR_IN,     /* in = DL = target -> host */
514 		2,
515 	},
516 	{
517 		WMI_DATA_BK_SVC,
518 		PIPEDIR_OUT,    /* out = UL = host -> target */
519 		3,
520 	},
521 	{
522 		WMI_DATA_BK_SVC,
523 		PIPEDIR_IN,     /* in = DL = target -> host */
524 		2,
525 	},
526 	{
527 		WMI_DATA_BE_SVC,
528 		PIPEDIR_OUT,    /* out = UL = host -> target */
529 		3,
530 	},
531 	{
532 		WMI_DATA_BE_SVC,
533 		PIPEDIR_IN,     /* in = DL = target -> host */
534 		2,
535 	},
536 	{
537 		WMI_DATA_VI_SVC,
538 		PIPEDIR_OUT,    /* out = UL = host -> target */
539 		3,
540 	},
541 	{
542 		WMI_DATA_VI_SVC,
543 		PIPEDIR_IN,     /* in = DL = target -> host */
544 		2,
545 	},
546 	{
547 		WMI_CONTROL_SVC,
548 		PIPEDIR_OUT,    /* out = UL = host -> target */
549 		3,
550 	},
551 	{
552 		WMI_CONTROL_SVC,
553 		PIPEDIR_IN,     /* in = DL = target -> host */
554 		2,
555 	},
556 	{
557 		HTC_CTRL_RSVD_SVC,
558 		PIPEDIR_OUT,    /* out = UL = host -> target */
559 		0,              /* could be moved to 3 (share with WMI) */
560 	},
561 	{
562 		HTC_CTRL_RSVD_SVC,
563 		PIPEDIR_IN,     /* in = DL = target -> host */
564 		1,
565 	},
566 	{
567 		HTC_RAW_STREAMS_SVC, /* not currently used */
568 		PIPEDIR_OUT,    /* out = UL = host -> target */
569 		0,
570 	},
571 	{
572 		HTC_RAW_STREAMS_SVC, /* not currently used */
573 		PIPEDIR_IN,     /* in = DL = target -> host */
574 		1,
575 	},
576 	{
577 		HTT_DATA_MSG_SVC,
578 		PIPEDIR_OUT,    /* out = UL = host -> target */
579 		4,
580 	},
581 #ifdef WLAN_FEATURE_FASTPATH
582 	{
583 		HTT_DATA_MSG_SVC,
584 		PIPEDIR_IN,     /* in = DL = target -> host */
585 		5,
586 	},
587 #else /* WLAN_FEATURE_FASTPATH */
588 	{
589 		HTT_DATA_MSG_SVC,
590 		PIPEDIR_IN,  /* in = DL = target -> host */
591 		1,
592 	},
593 #endif /* WLAN_FEATURE_FASTPATH */
594 
595 	/* (Additions here) */
596 
597 	{                       /* Must be last */
598 		0,
599 		0,
600 		0,
601 	},
602 };
603 
604 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
605 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
606 
607 #ifdef WLAN_FEATURE_EPPING
608 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
609 	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
610 	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
611 	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
612 	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
613 	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
614 	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
615 	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
616 	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
617 	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
618 	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
619 	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
620 	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
621 	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
622 	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
623 	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
624 	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
625 	{0, 0, 0,},             /* Must be last */
626 };
627 
628 void hif_select_epping_service_to_pipe_map(struct service_to_pipe
629 					   **tgt_svc_map_to_use,
630 					   uint32_t *sz_tgt_svc_map_to_use)
631 {
632 	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
633 	*sz_tgt_svc_map_to_use =
634 			sizeof(target_service_to_ce_map_wlan_epping);
635 }
636 #endif
637 
638 #ifdef QCN7605_SUPPORT
639 static inline
640 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
641 			       uint32_t *sz_tgt_svc_map_to_use)
642 {
643 	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
644 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
645 }
646 #else
647 static inline
648 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
649 			       uint32_t *sz_tgt_svc_map_to_use)
650 {
651 	HIF_ERROR("%s: QCN7605 not supported", __func__);
652 }
653 #endif
654 
655 static void hif_select_service_to_pipe_map(struct hif_softc *scn,
656 				    struct service_to_pipe **tgt_svc_map_to_use,
657 				    uint32_t *sz_tgt_svc_map_to_use)
658 {
659 	uint32_t mode = hif_get_conparam(scn);
660 	struct hif_target_info *tgt_info = &scn->target_info;
661 
662 	if (QDF_IS_EPPING_ENABLED(mode)) {
663 		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
664 						      sz_tgt_svc_map_to_use);
665 	} else {
666 		switch (tgt_info->target_type) {
667 		default:
668 			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
669 			*sz_tgt_svc_map_to_use =
670 				sizeof(target_service_to_ce_map_wlan);
671 			break;
672 		case TARGET_TYPE_QCN7605:
673 			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
674 						  sz_tgt_svc_map_to_use);
675 			break;
676 		case TARGET_TYPE_AR900B:
677 		case TARGET_TYPE_QCA9984:
678 		case TARGET_TYPE_IPQ4019:
679 		case TARGET_TYPE_QCA9888:
680 		case TARGET_TYPE_AR9888:
681 		case TARGET_TYPE_AR9888V2:
682 			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
683 			*sz_tgt_svc_map_to_use =
684 				sizeof(target_service_to_ce_map_ar900b);
685 			break;
686 		case TARGET_TYPE_QCA6290:
687 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
688 			*sz_tgt_svc_map_to_use =
689 				sizeof(target_service_to_ce_map_qca6290);
690 			break;
691 		case TARGET_TYPE_QCA8074:
692 			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
693 			*sz_tgt_svc_map_to_use =
694 				sizeof(target_service_to_ce_map_qca8074);
695 			break;
696 		}
697 	}
698 }
699 
700 /**
701  * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
702  * @ce_state: pointer to the state context of the CE
703  *
704  * Description:
705  *   Sets htt_rx_data attribute of the state structure if the
706  *   CE serves one of the HTT DATA services.
707  *
708  * Return:
709  *  true if the CE serves one of the HTT DATA services
710  *  false otherwise
711  */
712 static bool ce_mark_datapath(struct CE_state *ce_state)
713 {
714 	struct service_to_pipe *svc_map;
715 	uint32_t map_sz, map_len;
716 	int    i;
717 	bool   rc = false;
718 
719 	if (ce_state != NULL) {
720 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
721 					       &map_sz);
722 
723 		map_len = map_sz / sizeof(struct service_to_pipe);
724 		for (i = 0; i < map_len; i++) {
725 			if ((svc_map[i].pipenum == ce_state->id) &&
726 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
727 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
728 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
729 				/* HTT CEs are unidirectional */
730 				if (svc_map[i].pipedir == PIPEDIR_IN)
731 					ce_state->htt_rx_data = true;
732 				else
733 					ce_state->htt_tx_data = true;
734 				rc = true;
735 			}
736 		}
737 	}
738 	return rc;
739 }
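
/*
 * A worked example against the default target_service_to_ce_map_wlan above:
 * CE 1 matches the { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1 } entry, so its CE_state
 * gets htt_rx_data = true; CE 4 matches { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4 },
 * so it gets htt_tx_data = true (on Adrastea the HTT_DATA2/3 entries mark
 * CE 9 and CE 10 as rx as well). CEs that carry no HTT DATA service leave
 * both flags false, and ce_mark_datapath() returns false for them.
 */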
740 
741 /**
742  * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
743  * @ce_id: ce in question
744  * @ring: ring state being examined
745  * @type: "src_ring" or "dest_ring" string for identifying the ring
746  *
747  * Warns on non-zero index values.
748  * Causes a kernel panic if the ring is not empty during initialization.
749  */
750 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
751 					 char *type)
752 {
753 	if (ring->write_index != 0 || ring->sw_index != 0)
754 		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
755 			  ce_id, type, ring->sw_index, ring->write_index);
756 	if (ring->write_index != ring->sw_index)
757 		QDF_BUG(0);
758 }
759 
760 #ifdef IPA_OFFLOAD
761 /**
762  * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
763  * @scn: softc instance
764  * @ce_id: ce in question
765  * @base_addr: pointer to copyengine ring base address
766  * @ce_ring: copyengine instance
767  * @nentries: number of entries to be allocated
768  * @desc_size: ce desc size
769  *
770  * Return: QDF_STATUS_SUCCESS - for success
771  */
772 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
773 				     qdf_dma_addr_t *base_addr,
774 				     struct CE_ring_state *ce_ring,
775 				     unsigned int nentries, uint32_t desc_size)
776 {
777 	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
778 		scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(scn->qdf_dev,
779 			nentries * desc_size + CE_DESC_RING_ALIGN);
780 		if (!scn->ipa_ce_ring) {
781 			HIF_ERROR("%s: Failed to allocate memory for IPA ce ring",
782 				  __func__);
783 			return QDF_STATUS_E_NOMEM;
784 		}
785 		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
786 						&scn->ipa_ce_ring->mem_info);
787 		ce_ring->base_addr_owner_space_unaligned =
788 						scn->ipa_ce_ring->vaddr;
789 	} else {
790 		ce_ring->base_addr_owner_space_unaligned =
791 			qdf_mem_alloc_consistent(scn->qdf_dev,
792 						 scn->qdf_dev->dev,
793 						 (nentries * desc_size +
794 						 CE_DESC_RING_ALIGN),
795 						 base_addr);
796 		if (!ce_ring->base_addr_owner_space_unaligned) {
797 			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
798 				  __func__, CE_id);
799 			return QDF_STATUS_E_NOMEM;
800 		}
801 	}
802 	return QDF_STATUS_SUCCESS;
803 }
804 
805 /**
806  * ce_free_desc_ring() - Frees copyengine descriptor ring
807  * @scn: softc instance
808  * @ce_id: ce in question
809  * @ce_ring: copyengine instance
810  * @desc_size: ce desc size
811  *
812  * Return: None
813  */
814 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
815 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
816 {
817 	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
818 		qdf_mem_shared_mem_free(scn->qdf_dev,
819 					scn->ipa_ce_ring);
820 		ce_ring->base_addr_owner_space_unaligned = NULL;
821 	} else {
822 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
823 			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
824 			ce_ring->base_addr_owner_space_unaligned,
825 			ce_ring->base_addr_CE_space, 0);
826 		ce_ring->base_addr_owner_space_unaligned = NULL;
827 	}
828 }
829 #else
830 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
831 				     qdf_dma_addr_t *base_addr,
832 				     struct CE_ring_state *ce_ring,
833 				     unsigned int nentries, uint32_t desc_size)
834 {
835 	ce_ring->base_addr_owner_space_unaligned =
836 		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
837 					 (nentries * desc_size +
838 					 CE_DESC_RING_ALIGN), base_addr);
839 	if (!ce_ring->base_addr_owner_space_unaligned) {
840 		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
841 			  __func__, CE_id);
842 		return QDF_STATUS_E_NOMEM;
843 	}
844 	return QDF_STATUS_SUCCESS;
845 }
846 
847 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
848 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
849 {
850 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
851 		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
852 		ce_ring->base_addr_owner_space_unaligned,
853 		ce_ring->base_addr_CE_space, 0);
854 	ce_ring->base_addr_owner_space_unaligned = NULL;
855 }
856 #endif /* IPA_OFFLOAD */
857 
858 /**
859  * ce_srng_based() - Does this target use srng
860  * @scn: pointer to the hif context
861  *
862  * Description:
863  *   returns true if the target is SRNG based
864  *
865  * Return:
866  *  true if the target uses SRNG-based copy engines
867  *  false otherwise
868  */
869 bool ce_srng_based(struct hif_softc *scn)
870 {
871 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
872 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
873 
874 	switch (tgt_info->target_type) {
875 	case TARGET_TYPE_QCA8074:
876 	case TARGET_TYPE_QCA6290:
877 		return true;
878 	default:
879 		return false;
880 	}
881 	return false;
882 }
883 qdf_export_symbol(ce_srng_based);
884 
885 #ifdef QCA_WIFI_SUPPORT_SRNG
886 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
887 {
888 	if (ce_srng_based(scn))
889 		return ce_services_srng();
890 
891 	return ce_services_legacy();
892 }
893 
894 
895 #else	/* QCA_WIFI_SUPPORT_SRNG */
896 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
897 {
898 	return ce_services_legacy();
899 }
900 #endif /* QCA_WIFI_SUPPORT_SRNG */
901 
902 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
903 		struct pld_shadow_reg_v2_cfg **shadow_config,
904 		int *num_shadow_registers_configured) {
905 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
906 
907 	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
908 			scn, shadow_config, num_shadow_registers_configured);
909 }
910 
911 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
912 						uint8_t ring_type)
913 {
914 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
915 
916 	return hif_state->ce_services->ce_get_desc_size(ring_type);
917 }
918 
919 
920 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
921 		uint8_t ring_type, uint32_t nentries)
922 {
923 	uint32_t ce_nbytes;
924 	char *ptr;
925 	qdf_dma_addr_t base_addr;
926 	struct CE_ring_state *ce_ring;
927 	uint32_t desc_size;
928 	struct hif_softc *scn = CE_state->scn;
929 
930 	ce_nbytes = sizeof(struct CE_ring_state)
931 		+ (nentries * sizeof(void *));
932 	ptr = qdf_mem_malloc(ce_nbytes);
933 	if (!ptr)
934 		return NULL;
935 
936 	ce_ring = (struct CE_ring_state *)ptr;
937 	ptr += sizeof(struct CE_ring_state);
938 	ce_ring->nentries = nentries;
939 	ce_ring->nentries_mask = nentries - 1;
940 
941 	ce_ring->low_water_mark_nentries = 0;
942 	ce_ring->high_water_mark_nentries = nentries;
943 	ce_ring->per_transfer_context = (void **)ptr;
944 
945 	desc_size = ce_get_desc_size(scn, ring_type);
946 
947 	/* Legacy platforms that do not support cache
948 	 * coherent DMA are unsupported
949 	 */
950 	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
951 			       ce_ring, nentries,
952 			       desc_size) !=
953 	    QDF_STATUS_SUCCESS) {
954 		HIF_ERROR("%s: ring has no DMA mem",
955 				__func__);
956 		qdf_mem_free(ptr);
957 		return NULL;
958 	}
959 	ce_ring->base_addr_CE_space_unaligned = base_addr;
960 
961 	/* Correctly initialize memory to 0 to
962 	 * prevent garbage data crashing system
963 	 * when download firmware
964 	 */
965 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
966 			nentries * desc_size +
967 			CE_DESC_RING_ALIGN);
968 
969 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
970 
971 		ce_ring->base_addr_CE_space =
972 			(ce_ring->base_addr_CE_space_unaligned +
973 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
974 
975 		ce_ring->base_addr_owner_space = (void *)
976 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
977 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
978 	} else {
979 		ce_ring->base_addr_CE_space =
980 				ce_ring->base_addr_CE_space_unaligned;
981 		ce_ring->base_addr_owner_space =
982 				ce_ring->base_addr_owner_space_unaligned;
983 	}
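
	/*
	 * For illustration (assuming CE_DESC_RING_ALIGN is a power of two, as
	 * the mask arithmetic above requires): with an alignment of 8 and an
	 * unaligned DMA base of 0x1004, the aligned CE-space base becomes
	 * (0x1004 + 7) & ~7 = 0x1008, and the owner-space pointer is advanced
	 * by the same 4 bytes so the host and CE views stay in step.
	 */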
984 
985 	return ce_ring;
986 }
987 
988 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
989 			uint32_t ce_id, struct CE_ring_state *ring,
990 			struct CE_attr *attr)
991 {
992 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
993 
994 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
995 					      ring, attr);
996 }
997 
998 int hif_ce_bus_early_suspend(struct hif_softc *scn)
999 {
1000 	uint8_t ul_pipe, dl_pipe;
1001 	int ce_id, status, ul_is_polled, dl_is_polled;
1002 	struct CE_state *ce_state;
1003 
1004 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
1005 					 &ul_pipe, &dl_pipe,
1006 					 &ul_is_polled, &dl_is_polled);
1007 	if (status) {
1008 		HIF_ERROR("%s: pipe_mapping failure", __func__);
1009 		return status;
1010 	}
1011 
1012 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1013 		if (ce_id == ul_pipe)
1014 			continue;
1015 		if (ce_id == dl_pipe)
1016 			continue;
1017 
1018 		ce_state = scn->ce_id_to_state[ce_id];
1019 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1020 		if (ce_state->state == CE_RUNNING)
1021 			ce_state->state = CE_PAUSED;
1022 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1023 	}
1024 
1025 	return status;
1026 }
1027 
1028 int hif_ce_bus_late_resume(struct hif_softc *scn)
1029 {
1030 	int ce_id;
1031 	struct CE_state *ce_state;
1032 	int write_index;
1033 	bool index_updated;
1034 
1035 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1036 		ce_state = scn->ce_id_to_state[ce_id];
1037 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1038 		if (ce_state->state == CE_PENDING) {
1039 			write_index = ce_state->src_ring->write_index;
1040 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
1041 					write_index);
1042 			ce_state->state = CE_RUNNING;
1043 			index_updated = true;
1044 		} else {
1045 			index_updated = false;
1046 		}
1047 
1048 		if (ce_state->state == CE_PAUSED)
1049 			ce_state->state = CE_RUNNING;
1050 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1051 
1052 		if (index_updated)
1053 			hif_record_ce_desc_event(scn, ce_id,
1054 				RESUME_WRITE_INDEX_UPDATE,
1055 				NULL, NULL, write_index, 0);
1056 	}
1057 
1058 	return 0;
1059 }
1060 
1061 /**
1062  * ce_oom_recovery() - try to recover rx ce from oom condition
1063  * @context: CE_state of the CE with oom rx ring
1064  *
1065  * The executing work will continue to be rescheduled until
1066  * at least 1 descriptor is successfully posted to the rx ring.
1067  *
1068  * return: none
1069  */
1070 static void ce_oom_recovery(void *context)
1071 {
1072 	struct CE_state *ce_state = context;
1073 	struct hif_softc *scn = ce_state->scn;
1074 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
1075 	struct HIF_CE_pipe_info *pipe_info =
1076 		&ce_softc->pipe_info[ce_state->id];
1077 
1078 	hif_post_recv_buffers_for_pipe(pipe_info);
1079 }
1080 
1081 #if HIF_CE_DEBUG_DATA_BUF
1082 /**
1083  * alloc_mem_ce_debug_hist_data() - Allocate memory for the data pointed to
1084  * by the CE descriptors.
1085  * Allocates HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE bytes each
1086  * @scn: hif scn handle
1087  * @ce_id: Copy Engine Id
1088  *
1089  * Return: QDF_STATUS
1090  */
1091 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1092 {
1093 	struct hif_ce_desc_event *event = NULL;
1094 	struct hif_ce_desc_event *hist_ev = NULL;
1095 	uint32_t index = 0;
1096 
1097 	hist_ev =
1098 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1099 
1100 	if (!hist_ev)
1101 		return QDF_STATUS_E_NOMEM;
1102 
1103 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1104 		event = &hist_ev[index];
1105 		event->data =
1106 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
1107 		if (event->data == NULL)
1108 			return QDF_STATUS_E_NOMEM;
1109 	}
1110 	return QDF_STATUS_SUCCESS;
1111 }
1112 
1113 /**
1114  * free_mem_ce_debug_hist_data() - Free the memory for the data pointed to
1115  * by the CE descriptors.
1116  * @scn: hif scn handle
1117  * @ce_id: Copy Engine Id
1118  *
1119  * Return: None
1120  */
1121 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1122 {
1123 	struct hif_ce_desc_event *event = NULL;
1124 	struct hif_ce_desc_event *hist_ev = NULL;
1125 	uint32_t index = 0;
1126 
1127 	hist_ev =
1128 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1129 
1130 	if (!hist_ev)
1131 		return;
1132 
1133 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1134 		event = &hist_ev[index];
1135 		if (event->data != NULL)
1136 			qdf_mem_free(event->data);
1137 		event->data = NULL;
1138 		event = NULL;
1139 	}
1140 }
1141 #endif /* HIF_CE_DEBUG_DATA_BUF */
1142 
1143 /*
1144  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to be
1145  * checked here.
1146  */
1147 #if HIF_CE_DEBUG_DATA_BUF
1148 /**
1149  * alloc_mem_ce_debug_history() - Allocate memory for storing the CE descriptor history
1150  * @scn: hif scn handle
1151  * @CE_id: Copy Engine Id
1152  *
1153  * Return: QDF_STATUS
1154  */
1155 static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
1156 						unsigned int CE_id)
1157 {
1158 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
1159 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
1160 
1161 	if (scn->hif_ce_desc_hist.hist_ev[CE_id] == NULL) {
1162 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
1163 		return QDF_STATUS_E_NOMEM;
1164 	} else {
1165 		scn->hif_ce_desc_hist.enable[CE_id] = 1;
1166 		return QDF_STATUS_SUCCESS;
1167 	}
1168 }
1169 
1170 /**
1171  * free_mem_ce_debug_history() - Free the memory allocated for storing the
1172  * CE descriptor history.
1173  * @scn: hif scn handle
1174  * @CE_id: Copy Engine Id
1175  *
1176  * Return: None
1177  */
1178 static inline void free_mem_ce_debug_history(struct hif_softc *scn,
1179 						unsigned int CE_id)
1180 {
1181 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1182 	struct hif_ce_desc_event *hist_ev =
1183 			(struct hif_ce_desc_event *)ce_hist->hist_ev[CE_id];
1184 
1185 	if (!hist_ev)
1186 		return;
1187 
1188 #if HIF_CE_DEBUG_DATA_BUF
1189 	if (ce_hist->data_enable[CE_id] == 1) {
1190 		ce_hist->data_enable[CE_id] = 0;
1191 		free_mem_ce_debug_hist_data(scn, CE_id);
1192 	}
1193 #endif
1194 	ce_hist->enable[CE_id] = 0;
1195 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
1196 	ce_hist->hist_ev[CE_id] = NULL;
1197 }
1198 
1199 /**
1200  * reset_ce_debug_history() - reset the index and ce id used for dumping the
1201  * CE records on the console using sysfs.
1202  * @scn: hif scn handle
1203  *
1204  * Return: None
1205  */
1206 static inline void reset_ce_debug_history(struct hif_softc *scn)
1207 {
1208 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1209 	/* Initialise the CE debug history sysfs interface inputs ce_id and
1210 	 * index. Disable data storing
1211 	 */
1212 	ce_hist->hist_index = 0;
1213 	ce_hist->hist_id = 0;
1214 }
1215 #else /*Note: #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
1216 static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
1217 						unsigned int CE_id)
1218 {
1219 	return QDF_STATUS_SUCCESS;
1220 }
1221 
1222 static inline void free_mem_ce_debug_history(struct hif_softc *scn,
1223 						unsigned int CE_id)
1224 {
1225 }
1226 
1227 static inline void reset_ce_debug_history(struct hif_softc *scn)
1228 {
1229 }
1230 #endif /*Note: defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
1231 
1232 /*
1233  * Initialize a Copy Engine based on caller-supplied attributes.
1234  * This may be called once to initialize both source and destination
1235  * rings or it may be called twice for separate source and destination
1236  * initialization. It may be that only one side or the other is
1237  * initialized by software/firmware.
1238  *
1239  * This should be called during the initialization sequence before
1240  * interrupts are enabled, so we don't have to worry about thread safety.
1241  */
1242 struct CE_handle *ce_init(struct hif_softc *scn,
1243 			  unsigned int CE_id, struct CE_attr *attr)
1244 {
1245 	struct CE_state *CE_state;
1246 	uint32_t ctrl_addr;
1247 	unsigned int nentries;
1248 	bool malloc_CE_state = false;
1249 	bool malloc_src_ring = false;
1250 	int status;
1251 
1252 	QDF_ASSERT(CE_id < scn->ce_count);
1253 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
1254 	CE_state = scn->ce_id_to_state[CE_id];
1255 
1256 	if (!CE_state) {
1257 		CE_state =
1258 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
1259 		if (!CE_state) {
1260 			HIF_ERROR("%s: CE_state has no mem", __func__);
1261 			return NULL;
1262 		}
1263 		malloc_CE_state = true;
1264 		qdf_spinlock_create(&CE_state->ce_index_lock);
1265 
1266 		CE_state->id = CE_id;
1267 		CE_state->ctrl_addr = ctrl_addr;
1268 		CE_state->state = CE_RUNNING;
1269 		CE_state->attr_flags = attr->flags;
1270 	}
1271 	CE_state->scn = scn;
1272 
1273 	qdf_atomic_init(&CE_state->rx_pending);
1274 	if (attr == NULL) {
1275 		/* Already initialized; caller wants the handle */
1276 		return (struct CE_handle *)CE_state;
1277 	}
1278 
1279 	if (CE_state->src_sz_max)
1280 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
1281 	else
1282 		CE_state->src_sz_max = attr->src_sz_max;
1283 
1284 	ce_init_ce_desc_event_log(scn, CE_id,
1285 				  attr->src_nentries + attr->dest_nentries);
1286 
1287 	/* source ring setup */
1288 	nentries = attr->src_nentries;
1289 	if (nentries) {
1290 		struct CE_ring_state *src_ring;
1291 
1292 		nentries = roundup_pwr2(nentries);
1293 		if (CE_state->src_ring) {
1294 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
1295 		} else {
1296 			src_ring = CE_state->src_ring =
1297 				ce_alloc_ring_state(CE_state,
1298 						CE_RING_SRC,
1299 						nentries);
1300 			if (!src_ring) {
1301 				/* cannot allocate src ring. If the
1302 				 * CE_state is allocated locally free
1303 				 * CE_State and return error.
1304 				 */
1305 				HIF_ERROR("%s: src ring has no mem", __func__);
1306 				if (malloc_CE_state) {
1307 					/* allocated CE_state locally */
1308 					qdf_mem_free(CE_state);
1309 					malloc_CE_state = false;
1310 				}
1311 				return NULL;
1312 			}
1313 			/* we can allocate src ring. Mark that the src ring is
1314 			 * allocated locally
1315 			 */
1316 			malloc_src_ring = true;
1317 
1318 			/*
1319 			 * Also allocate a shadow src ring in
1320 			 * regular mem to use for faster access.
1321 			 */
1322 			src_ring->shadow_base_unaligned =
1323 				qdf_mem_malloc(nentries *
1324 					       sizeof(struct CE_src_desc) +
1325 					       CE_DESC_RING_ALIGN);
1326 			if (src_ring->shadow_base_unaligned == NULL) {
1327 				HIF_ERROR("%s: src ring no shadow_base mem",
1328 					  __func__);
1329 				goto error_no_dma_mem;
1330 			}
1331 			src_ring->shadow_base = (struct CE_src_desc *)
1332 				(((size_t) src_ring->shadow_base_unaligned +
1333 				CE_DESC_RING_ALIGN - 1) &
1334 				 ~(CE_DESC_RING_ALIGN - 1));
1335 
1336 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
1337 					       src_ring, attr);
1338 			if (status < 0)
1339 				goto error_target_access;
1340 
1341 			ce_ring_test_initial_indexes(CE_id, src_ring,
1342 						     "src_ring");
1343 		}
1344 	}
1345 
1346 	/* destination ring setup */
1347 	nentries = attr->dest_nentries;
1348 	if (nentries) {
1349 		struct CE_ring_state *dest_ring;
1350 
1351 		nentries = roundup_pwr2(nentries);
1352 		if (CE_state->dest_ring) {
1353 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
1354 		} else {
1355 			dest_ring = CE_state->dest_ring =
1356 				ce_alloc_ring_state(CE_state,
1357 						CE_RING_DEST,
1358 						nentries);
1359 			if (!dest_ring) {
1360 				/* cannot allocate dst ring. If the CE_state
1361 				 * or src ring is allocated locally free
1362 				 * CE_State and src ring and return error.
1363 				 */
1364 				HIF_ERROR("%s: dest ring has no mem",
1365 					  __func__);
1366 				goto error_no_dma_mem;
1367 			}
1368 
1369 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
1370 				      dest_ring, attr);
1371 			if (status < 0)
1372 				goto error_target_access;
1373 
1374 			ce_ring_test_initial_indexes(CE_id, dest_ring,
1375 						     "dest_ring");
1376 
1377 			/* For srng based target, init status ring here */
1378 			if (ce_srng_based(CE_state->scn)) {
1379 				CE_state->status_ring =
1380 					ce_alloc_ring_state(CE_state,
1381 							CE_RING_STATUS,
1382 							nentries);
1383 				if (CE_state->status_ring == NULL) {
1384 					/*Allocation failed. Cleanup*/
1385 					qdf_mem_free(CE_state->dest_ring);
1386 					if (malloc_src_ring) {
1387 						qdf_mem_free
1388 							(CE_state->src_ring);
1389 						CE_state->src_ring = NULL;
1390 						malloc_src_ring = false;
1391 					}
1392 					if (malloc_CE_state) {
1393 						/* allocated CE_state locally */
1394 						scn->ce_id_to_state[CE_id] =
1395 							NULL;
1396 						qdf_mem_free(CE_state);
1397 						malloc_CE_state = false;
1398 					}
1399 
1400 					return NULL;
1401 				}
1402 
1403 				status = ce_ring_setup(scn, CE_RING_STATUS,
1404 					       CE_id, CE_state->status_ring,
1405 					       attr);
1406 				if (status < 0)
1407 					goto error_target_access;
1408 
1409 			}
1410 
1411 			/* epping */
1412 			/* poll timer */
1413 			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL) ||
1414 					scn->polled_mode_on) {
1415 				qdf_timer_init(scn->qdf_dev,
1416 						       &CE_state->poll_timer,
1417 						       ce_poll_timeout,
1418 						       CE_state,
1419 						       QDF_TIMER_TYPE_SW);
1420 				CE_state->timer_inited = true;
1421 				qdf_timer_mod(&CE_state->poll_timer,
1422 						      CE_POLL_TIMEOUT);
1423 			}
1424 		}
1425 	}
1426 
1427 	if (!ce_srng_based(scn)) {
1428 		/* Enable CE error interrupts */
1429 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1430 			goto error_target_access;
1431 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1432 		if (Q_TARGET_ACCESS_END(scn) < 0)
1433 			goto error_target_access;
1434 	}
1435 
1436 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1437 			ce_oom_recovery, CE_state);
1438 
1439 	/* update the htt_data attribute */
1440 	ce_mark_datapath(CE_state);
1441 	scn->ce_id_to_state[CE_id] = CE_state;
1442 
1443 	alloc_mem_ce_debug_history(scn, CE_id);
1444 
1445 	return (struct CE_handle *)CE_state;
1446 
1447 error_target_access:
1448 error_no_dma_mem:
1449 	ce_fini((struct CE_handle *)CE_state);
1450 	return NULL;
1451 }
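
/*
 * A sketch of typical usage (an illustration, not the exact call site, which
 * lives elsewhere in this file; it assumes the per-pipe CE_attr tables pulled
 * in from ce_assignment.h and stored in hif_state->host_ce_config):
 *
 *	struct CE_attr *attr = &hif_state->host_ce_config[CE_id];
 *	struct CE_handle *ce_hdl = ce_init(scn, CE_id, attr);
 *
 *	if (!ce_hdl)
 *		return QDF_STATUS_E_NOMEM;
 *
 * Calling ce_init() again for an already-initialized CE with attr == NULL
 * simply returns the existing handle, as the early return above shows.
 */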
1452 
1453 #ifdef WLAN_FEATURE_FASTPATH
1454 /**
1455  * hif_enable_fastpath() - Update that we have enabled fastpath mode
1456  * @hif_ctx: HIF context
1457  *
1458  * For use in data path
1459  *
1460  * Return: void
1461  */
1462 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1463 {
1464 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1465 
1466 	if (ce_srng_based(scn)) {
1467 		HIF_INFO("%s, srng rings do not support fastpath", __func__);
1468 		return;
1469 	}
1470 	HIF_DBG("%s, Enabling fastpath mode", __func__);
1471 	scn->fastpath_mode_on = true;
1472 }
1473 
1474 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx)
1475 {
1476 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1477 
1478 	HIF_DBG("%s, Enabling polled mode", __func__);
1479 	scn->polled_mode_on = true;
1480 }
1481 
1482 /**
1483  * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
1484  * @hif_ctx: HIF Context
1485  *
1486  * For use in data path to skip HTC
1487  *
1488  * Return: bool
1489  */
1490 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1491 {
1492 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1493 
1494 	return scn->fastpath_mode_on;
1495 }
1496 
1497 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1498 {
1499 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1500 
1501 	return scn->polled_mode_on;
1502 }
1503 
1504 /**
1505  * hif_get_ce_handle - API to get CE handle for FastPath mode
1506  * @hif_ctx: HIF Context
1507  * @id: CopyEngine Id
1508  *
1509  * API to return CE handle for fastpath mode
1510  *
1511  * Return: opaque pointer to the CE state for the given copy engine id
1512  */
1513 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1514 {
1515 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1516 
1517 	return scn->ce_id_to_state[id];
1518 }
1519 
1520 /**
1521  * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
1522  * No processing is required inside this function.
1523  * @ce_hdl: Copy engine handle
1524  * Using an assert, this function makes sure that
1525  * the TX CE has been processed completely.
1526  *
1527  * This is called while dismantling CE structures. No other thread
1528  * should be using these structures while dismantling is occurring,
1529  * therefore no locking is needed.
1530  *
1531  * Return: none
1532  */
1533 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1534 {
1535 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1536 	struct CE_ring_state *src_ring = ce_state->src_ring;
1537 	struct hif_softc *sc = ce_state->scn;
1538 	uint32_t sw_index, write_index;
1539 
1540 	if (hif_is_nss_wifi_enabled(sc))
1541 		return;
1542 
1543 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
1544 		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
1545 			 __func__, __LINE__);
1546 		sw_index = src_ring->sw_index;
1547 		write_index = src_ring->sw_index;
1548 
1549 		/* At this point Tx CE should be clean */
1550 		qdf_assert_always(sw_index == write_index);
1551 	}
1552 }
1553 
1554 /**
1555  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1556  * @ce_hdl: Handle to CE
1557  *
1558  * These buffers are never allocated on the fly, but
1559  * are allocated only once during HIF start and freed
1560  * only once during HIF stop.
1561  * NOTE:
1562  * The assumption here is there is no in-flight DMA in progress
1563  * currently, so that buffers can be freed up safely.
1564  *
1565  * Return: NONE
1566  */
1567 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1568 {
1569 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1570 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
1571 	qdf_nbuf_t nbuf;
1572 	int i;
1573 
1574 	if (ce_state->scn->fastpath_mode_on == false)
1575 		return;
1576 
1577 	if (!ce_state->htt_rx_data)
1578 		return;
1579 
1580 	/*
1581 	 * When fastpath mode is on, this datapath CE is kept completely
1582 	 * full: unlike other CEs, no blank entry is left to distinguish
1583 	 * between an empty queue and a full queue. So free all the
1584 	 * entries.
1585 	 */
1586 	for (i = 0; i < dst_ring->nentries; i++) {
1587 		nbuf = dst_ring->per_transfer_context[i];
1588 
1589 		/*
1590 		 * The reasons for doing this check are:
1591 		 * 1) Protect against calling cleanup before allocating buffers
1592 		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1593 		 *    could have a partially filled ring, because of a memory
1594 		 *    allocation failure in the middle of allocating ring.
1595 		 *    This check accounts for that case, checking
1596 		 *    fastpath_mode_on flag or started flag would not have
1597 		 *    covered that case. This is not in performance path,
1598 		 *    so OK to do this.
1599 		 */
1600 		if (nbuf) {
1601 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1602 					      QDF_DMA_FROM_DEVICE);
1603 			qdf_nbuf_free(nbuf);
1604 		}
1605 	}
1606 }
1607 
1608 /**
1609  * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1610  * @scn: HIF handle
1611  *
1612  * Datapath Rx CEs are a special case, where all the message buffers are
1613  * reused. Hence all the entries in the pipe have to be posted, even at the
1614  * beginning, unlike other CE pipes where one less than dest_nentries is
1615  * filled at the beginning.
1616  *
1617  * Return: None
1618  */
1619 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1620 {
1621 	int pipe_num;
1622 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1623 
1624 	if (scn->fastpath_mode_on == false)
1625 		return;
1626 
1627 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1628 		struct HIF_CE_pipe_info *pipe_info =
1629 			&hif_state->pipe_info[pipe_num];
1630 		struct CE_state *ce_state =
1631 			scn->ce_id_to_state[pipe_info->pipe_num];
1632 
1633 		if (ce_state->htt_rx_data)
1634 			atomic_inc(&pipe_info->recv_bufs_needed);
1635 	}
1636 }
1637 #else
1638 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1639 {
1640 }
1641 
1642 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
1643 {
1644 	return false;
1645 }
1646 
1647 static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
1648 {
1649 	return false;
1650 }
1651 #endif /* WLAN_FEATURE_FASTPATH */
1652 
1653 void ce_fini(struct CE_handle *copyeng)
1654 {
1655 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1656 	unsigned int CE_id = CE_state->id;
1657 	struct hif_softc *scn = CE_state->scn;
1658 	uint32_t desc_size;
1659 
1660 	bool inited = CE_state->timer_inited;
1661 	CE_state->state = CE_UNUSED;
1662 	scn->ce_id_to_state[CE_id] = NULL;
1663 	/* Set the flag to false first to stop processing in ce_poll_timeout */
1664 	CE_state->timer_inited = false;
1665 	qdf_lro_deinit(CE_state->lro_data);
1666 
1667 	if (CE_state->src_ring) {
1668 		/* Cleanup the datapath Tx ring */
1669 		ce_h2t_tx_ce_cleanup(copyeng);
1670 
1671 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
1672 		if (CE_state->src_ring->shadow_base_unaligned)
1673 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
1674 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
1675 			ce_free_desc_ring(scn, CE_state->id,
1676 					  CE_state->src_ring,
1677 					  desc_size);
1678 		qdf_mem_free(CE_state->src_ring);
1679 	}
1680 	if (CE_state->dest_ring) {
1681 		/* Cleanup the datapath Rx ring */
1682 		ce_t2h_msg_ce_cleanup(copyeng);
1683 
1684 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
1685 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
1686 			ce_free_desc_ring(scn, CE_state->id,
1687 					  CE_state->dest_ring,
1688 					  desc_size);
1689 		qdf_mem_free(CE_state->dest_ring);
1690 
1691 		/* epping */
1692 		if (inited) {
1693 			qdf_timer_free(&CE_state->poll_timer);
1694 		}
1695 	}
1696 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
1697 		/* Cleanup the datapath Tx ring */
1698 		ce_h2t_tx_ce_cleanup(copyeng);
1699 
1700 		if (CE_state->status_ring->shadow_base_unaligned)
1701 			qdf_mem_free(
1702 				CE_state->status_ring->shadow_base_unaligned);
1703 
1704 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
1705 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
1706 			ce_free_desc_ring(scn, CE_state->id,
1707 					  CE_state->status_ring,
1708 					  desc_size);
1709 		qdf_mem_free(CE_state->status_ring);
1710 	}
1711 
1712 	free_mem_ce_debug_history(scn, CE_id);
1713 	reset_ce_debug_history(scn);
1714 	ce_deinit_ce_desc_event_log(scn, CE_id);
1715 
1716 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
1717 	qdf_mem_free(CE_state);
1718 }
1719 
1720 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
1721 {
1722 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1723 
1724 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
1725 		  sizeof(hif_state->msg_callbacks_pending));
1726 	qdf_mem_zero(&hif_state->msg_callbacks_current,
1727 		  sizeof(hif_state->msg_callbacks_current));
1728 }
1729 
1730 /* Send the first nbytes bytes of the buffer */
1731 QDF_STATUS
1732 hif_send_head(struct hif_opaque_softc *hif_ctx,
1733 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
1734 	      qdf_nbuf_t nbuf, unsigned int data_attr)
1735 {
1736 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1737 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1738 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1739 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
1740 	int bytes = nbytes, nfrags = 0;
1741 	struct ce_sendlist sendlist;
1742 	int status, i = 0;
1743 	unsigned int mux_id = 0;
1744 
1745 	QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));
1746 
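	/*
	 * Pack the (currently fixed) mux id and the caller's transaction id
	 * into the single transfer_id value handed to the copy engine.
	 */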
1747 	transfer_id =
1748 		(mux_id & MUX_ID_MASK) |
1749 		(transfer_id & TRANSACTION_ID_MASK);
1750 	data_attr &= DESC_DATA_FLAG_MASK;
1751 	/*
1752 	 * The common case involves sending multiple fragments within a
1753 	 * single download (the tx descriptor and the tx frame header).
1754 	 * So, optimize for the case of multiple fragments by not even
1755 	 * checking whether it's necessary to use a sendlist.
1756 	 * The overhead of using a sendlist for a single buffer download
1757 	 * is not a big deal, since it happens rarely (for WMI messages).
1758 	 */
1759 	ce_sendlist_init(&sendlist);
1760 	do {
1761 		qdf_dma_addr_t frag_paddr;
1762 		int frag_bytes;
1763 
1764 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
1765 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
1766 		/*
1767 		 * Clear the packet offset for all but the first CE desc.
1768 		 */
1769 		if (i++ > 0)
1770 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
1771 
1772 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
1773 				    frag_bytes >
1774 				    bytes ? bytes : frag_bytes,
1775 				    qdf_nbuf_get_frag_is_wordstream
1776 				    (nbuf,
1777 				    nfrags) ? 0 :
1778 				    CE_SEND_FLAG_SWAP_DISABLE,
1779 				    data_attr);
1780 		if (status != QDF_STATUS_SUCCESS) {
1781 			HIF_ERROR("%s: error, frag_num %d larger than limit",
1782 				__func__, nfrags);
1783 			return status;
1784 		}
1785 		bytes -= frag_bytes;
1786 		nfrags++;
1787 	} while (bytes > 0);
1788 
1789 	/* Make sure we have resources to handle this request */
1790 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
1791 	if (pipe_info->num_sends_allowed < nfrags) {
1792 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1793 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
1794 		return QDF_STATUS_E_RESOURCES;
1795 	}
1796 	pipe_info->num_sends_allowed -= nfrags;
1797 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1798 
1799 	if (qdf_unlikely(ce_hdl == NULL)) {
1800 		HIF_ERROR("%s: error CE handle is null", __func__);
1801 		return A_ERROR;
1802 	}
1803 
1804 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
1805 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
1806 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
1807 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
1808 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
1809 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
1810 
1811 	return status;
1812 }
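
/*
 * Illustrative sketch (not part of this file): a caller such as the HTC
 * layer hands hif_send_head() an nbuf whose fragments are already DMA
 * mapped, roughly as follows. The pipe, transfer_id and data_attr values
 * are the caller's own.
 *
 *	status = hif_send_head(hif_ctx, pipe, transfer_id,
 *			       qdf_nbuf_len(nbuf), nbuf, data_attr);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		qdf_nbuf_free(nbuf);
 *
 * On error the caller frees or requeues the nbuf itself, since the buffer
 * was not handed to the CE and no send completion will be reported for it.
 */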
1813 
1814 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1815 								int force)
1816 {
1817 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1818 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1819 
1820 	if (!force) {
1821 		int resources;
1822 		/*
1823 		 * Decide whether to actually poll for completions, or just
1824 		 * wait for a later chance. If there seem to be plenty of
1825 		 * resources left, then just wait, since checking involves
1826 		 * reading a CE register, which is a relatively expensive
1827 		 * operation.
1828 		 */
1829 		resources = hif_get_free_queue_number(hif_ctx, pipe);
1830 		/*
1831 		 * If at least 50% of the total resources are still available,
1832 		 * don't bother checking again yet.
1833 		 */
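		/*
		 * For example, with src_nentries of 32 the register poll is
		 * skipped while more than 16 send slots remain free.
		 */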
1834 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
1835 									 1))
1836 			return;
1837 	}
1838 #if ATH_11AC_TXCOMPACT
1839 	ce_per_engine_servicereap(scn, pipe);
1840 #else
1841 	ce_per_engine_service(scn, pipe);
1842 #endif
1843 }
1844 
1845 uint16_t
1846 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
1847 {
1848 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1849 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1850 	uint16_t rv;
1851 
1852 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
1853 	rv = pipe_info->num_sends_allowed;
1854 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1855 	return rv;
1856 }
1857 
1858 /* Called by lower (CE) layer when a send to Target completes. */
1859 static void
1860 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
1861 		     void *transfer_context, qdf_dma_addr_t CE_data,
1862 		     unsigned int nbytes, unsigned int transfer_id,
1863 		     unsigned int sw_index, unsigned int hw_index,
1864 		     unsigned int toeplitz_hash_result)
1865 {
1866 	struct HIF_CE_pipe_info *pipe_info =
1867 		(struct HIF_CE_pipe_info *)ce_context;
1868 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
1869 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1870 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
1871 	struct hif_msg_callbacks *msg_callbacks =
1872 		&pipe_info->pipe_callbacks;
1873 
1874 	do {
1875 		/*
1876 		 * The upper layer callback will be triggered
1877 		 * when the last fragment is completed.
1878 		 */
1879 		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
1880 			if (scn->target_status == TARGET_STATUS_RESET) {
1881 
1882 				qdf_nbuf_unmap_single(scn->qdf_dev,
1883 						      transfer_context,
1884 						      QDF_DMA_TO_DEVICE);
1885 				qdf_nbuf_free(transfer_context);
1886 			} else
1887 				msg_callbacks->txCompletionHandler(
1888 					msg_callbacks->Context,
1889 					transfer_context, transfer_id,
1890 					toeplitz_hash_result);
1891 		}
1892 
1893 		qdf_spin_lock(&pipe_info->completion_freeq_lock);
1894 		pipe_info->num_sends_allowed++;
1895 		qdf_spin_unlock(&pipe_info->completion_freeq_lock);
1896 	} while (ce_completed_send_next(copyeng,
1897 			&ce_context, &transfer_context,
1898 			&CE_data, &nbytes, &transfer_id,
1899 			&sw_idx, &hw_idx,
1900 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
1901 }
1902 
1903 /**
1904  * hif_ce_do_recv(): send message from copy engine to upper layers
1905  * @msg_callbacks: structure containing callback and callback context
1906  * @netbuf: skb containing the message
1907  * @nbytes: number of bytes in the message
1908  * @pipe_info: used for the pipe_number info
1909  *
1910  * Checks the packet length, configures the length in the netbuf,
1911  * and calls the upper layer callback.
1912  *
1913  * Return: None
1914  */
1915 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
1916 		qdf_nbuf_t netbuf, int nbytes,
1917 		struct HIF_CE_pipe_info *pipe_info) {
1918 	if (nbytes <= pipe_info->buf_sz) {
1919 		qdf_nbuf_set_pktlen(netbuf, nbytes);
1920 		msg_callbacks->
1921 			rxCompletionHandler(msg_callbacks->Context,
1922 					netbuf, pipe_info->pipe_num);
1923 	} else {
1924 		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
1925 				__func__, netbuf, nbytes);
1926 
1927 		qdf_nbuf_free(netbuf);
1928 	}
1929 }
1930 
1931 /* Called by lower (CE) layer when data is received from the Target. */
1932 static void
1933 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
1934 		     void *transfer_context, qdf_dma_addr_t CE_data,
1935 		     unsigned int nbytes, unsigned int transfer_id,
1936 		     unsigned int flags)
1937 {
1938 	struct HIF_CE_pipe_info *pipe_info =
1939 		(struct HIF_CE_pipe_info *)ce_context;
1940 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
1941 	struct CE_state *ce_state = (struct CE_state *) copyeng;
1942 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1943 #ifdef HIF_PCI
1944 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
1945 #endif
1946 	struct hif_msg_callbacks *msg_callbacks =
1947 		 &pipe_info->pipe_callbacks;
1948 
1949 	do {
1950 #ifdef HIF_PCI
1951 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
1952 #endif
1953 		qdf_nbuf_unmap_single(scn->qdf_dev,
1954 				      (qdf_nbuf_t) transfer_context,
1955 				      QDF_DMA_FROM_DEVICE);
1956 
1957 		atomic_inc(&pipe_info->recv_bufs_needed);
1958 		hif_post_recv_buffers_for_pipe(pipe_info);
1959 		if (scn->target_status == TARGET_STATUS_RESET)
1960 			qdf_nbuf_free(transfer_context);
1961 		else
1962 			hif_ce_do_recv(msg_callbacks, transfer_context,
1963 				nbytes, pipe_info);
1964 
1965 		/* Set the force_break flag if the number of receives reaches
1966 		 * MAX_NUM_OF_RECEIVES
1967 		 */
1968 		ce_state->receive_count++;
1969 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
1970 			ce_state->force_break = 1;
1971 			break;
1972 		}
1973 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
1974 					&CE_data, &nbytes, &transfer_id,
1975 					&flags) == QDF_STATUS_SUCCESS);
1976 
1977 }
1978 
1979 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
1980 
1981 void
1982 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
1983 	      struct hif_msg_callbacks *callbacks)
1984 {
1985 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1986 
1987 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
1988 	spin_lock_init(&pcie_access_log_lock);
1989 #endif
1990 	/* Save callbacks for later installation */
1991 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
1992 		 sizeof(hif_state->msg_callbacks_pending));
1993 
1994 }
1995 
1996 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
1997 {
1998 	struct CE_handle *ce_diag = hif_state->ce_diag;
1999 	int pipe_num;
2000 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2001 	struct hif_msg_callbacks *hif_msg_callbacks =
2002 		&hif_state->msg_callbacks_current;
2003 
2004 	/* daemonize("hif_compl_thread"); */
2005 
2006 	if (scn->ce_count == 0) {
2007 		HIF_ERROR("%s: Invalid ce_count", __func__);
2008 		return -EINVAL;
2009 	}
2010 
2011 	if (!hif_msg_callbacks ||
2012 			!hif_msg_callbacks->rxCompletionHandler ||
2013 			!hif_msg_callbacks->txCompletionHandler) {
2014 		HIF_ERROR("%s: no completion handler registered", __func__);
2015 		return -EFAULT;
2016 	}
2017 
2018 	A_TARGET_ACCESS_LIKELY(scn);
2019 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2020 		struct CE_attr attr;
2021 		struct HIF_CE_pipe_info *pipe_info;
2022 
2023 		pipe_info = &hif_state->pipe_info[pipe_num];
2024 		if (pipe_info->ce_hdl == ce_diag)
2025 			continue;       /* Handle Diagnostic CE specially */
2026 		attr = hif_state->host_ce_config[pipe_num];
2027 		if (attr.src_nentries) {
2028 			/* pipe used to send to target */
2029 			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
2030 					 __func__, pipe_num, pipe_info);
2031 			ce_send_cb_register(pipe_info->ce_hdl,
2032 					    hif_pci_ce_send_done, pipe_info,
2033 					    attr.flags & CE_ATTR_DISABLE_INTR);
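			/*
			 * Keep one source ring entry in reserve: at most
			 * src_nentries - 1 sends may be outstanding per pipe.
			 */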
2034 			pipe_info->num_sends_allowed = attr.src_nentries - 1;
2035 		}
2036 		if (attr.dest_nentries) {
2037 			/* pipe used to receive from target */
2038 			ce_recv_cb_register(pipe_info->ce_hdl,
2039 					    hif_pci_ce_recv_data, pipe_info,
2040 					    attr.flags & CE_ATTR_DISABLE_INTR);
2041 		}
2042 
2043 		if (attr.src_nentries)
2044 			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
2045 
2046 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
2047 					sizeof(pipe_info->pipe_callbacks));
2048 	}
2049 
2050 	A_TARGET_ACCESS_UNLIKELY(scn);
2051 	return 0;
2052 }
2053 
2054 /*
2055  * Install pending msg callbacks.
2056  *
2057  * TBDXXX: This hack is needed because upper layers install msg callbacks
2058  * for use with HTC before BMI is done; yet this HIF implementation
2059  * needs to continue to use BMI msg callbacks. Really, upper layers
2060  * should not register HTC callbacks until AFTER BMI phase.
2061  */
2062 static void hif_msg_callbacks_install(struct hif_softc *scn)
2063 {
2064 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2065 
2066 	qdf_mem_copy(&hif_state->msg_callbacks_current,
2067 		 &hif_state->msg_callbacks_pending,
2068 		 sizeof(hif_state->msg_callbacks_pending));
2069 }
2070 
2071 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2072 							uint8_t *DLPipe)
2073 {
2074 	int ul_is_polled, dl_is_polled;
2075 
2076 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
2077 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2078 }
2079 
2080 /**
2081  * hif_dump_pipe_debug_count() - Log error count
2082  * @scn: hif_softc pointer.
2083  *
2084  * Output the pipe error counts of each pipe to log file
2085  *
2086  * Return: N/A
2087  */
2088 void hif_dump_pipe_debug_count(struct hif_softc *scn)
2089 {
2090 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2091 	int pipe_num;
2092 
2093 	if (hif_state == NULL) {
2094 		HIF_ERROR("%s hif_state is NULL", __func__);
2095 		return;
2096 	}
2097 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2098 		struct HIF_CE_pipe_info *pipe_info;
2099 
2100 		pipe_info = &hif_state->pipe_info[pipe_num];
2101 
2102 		if (pipe_info->nbuf_alloc_err_count > 0 ||
2103 		    pipe_info->nbuf_dma_err_count > 0 ||
2104 		    pipe_info->nbuf_ce_enqueue_err_count)
2105 			HIF_ERROR(
2106 				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
2107 				__func__, pipe_info->pipe_num,
2108 				atomic_read(&pipe_info->recv_bufs_needed),
2109 				pipe_info->nbuf_alloc_err_count,
2110 				pipe_info->nbuf_dma_err_count,
2111 				pipe_info->nbuf_ce_enqueue_err_count);
2112 	}
2113 }
2114 
2115 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2116 					  void *nbuf, uint32_t *error_cnt,
2117 					  enum hif_ce_event_type failure_type,
2118 					  const char *failure_type_string)
2119 {
2120 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2121 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2122 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2123 	int ce_id = CE_state->id;
2124 	uint32_t error_cnt_tmp;
2125 
2126 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2127 	error_cnt_tmp = ++(*error_cnt);
2128 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2129 	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
2130 		  __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2131 		  failure_type_string);
2132 	hif_record_ce_desc_event(scn, ce_id, failure_type,
2133 				 NULL, nbuf, bufs_needed_tmp, 0);
2134 	/* If we fail to allocate the last buffer for an rx pipe,
2135 	 * there is no trigger to refill the CE and we will
2136 	 * eventually crash.
2137 	 */
2138 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
2139 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
2140 
2141 }
2142 
2143 
2144 
2145 
2146 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
2147 {
2148 	struct CE_handle *ce_hdl;
2149 	qdf_size_t buf_sz;
2150 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2151 	QDF_STATUS status;
2152 	uint32_t bufs_posted = 0;
2153 
2154 	buf_sz = pipe_info->buf_sz;
2155 	if (buf_sz == 0) {
2156 		/* Unused Copy Engine */
2157 		return QDF_STATUS_SUCCESS;
2158 	}
2159 
2160 	ce_hdl = pipe_info->ce_hdl;
2161 
2162 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2163 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
2164 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
2165 		qdf_nbuf_t nbuf;
2166 
2167 		atomic_dec(&pipe_info->recv_bufs_needed);
2168 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2169 
2170 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
2171 		if (!nbuf) {
2172 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2173 					&pipe_info->nbuf_alloc_err_count,
2174 					 HIF_RX_NBUF_ALLOC_FAILURE,
2175 					"HIF_RX_NBUF_ALLOC_FAILURE");
2176 			return QDF_STATUS_E_NOMEM;
2177 		}
2178 
2179 		/*
2180 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
2181 		 * CE_data = dma_map_single(dev, data, buf_sz,
2182 		 *			    DMA_FROM_DEVICE);
2183 		 */
2184 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
2185 					    QDF_DMA_FROM_DEVICE);
2186 
2187 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2188 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2189 					&pipe_info->nbuf_dma_err_count,
2190 					 HIF_RX_NBUF_MAP_FAILURE,
2191 					"HIF_RX_NBUF_MAP_FAILURE");
2192 			qdf_nbuf_free(nbuf);
2193 			return status;
2194 		}
2195 
2196 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
2197 
2198 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
2199 					       buf_sz, DMA_FROM_DEVICE);
2200 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
2201 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2202 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2203 					&pipe_info->nbuf_ce_enqueue_err_count,
2204 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
2205 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
2206 
2207 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2208 						QDF_DMA_FROM_DEVICE);
2209 			qdf_nbuf_free(nbuf);
2210 			return status;
2211 		}
2212 
2213 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2214 		bufs_posted++;
2215 	}
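	/*
	 * Scale the accumulated error counters down by the number of
	 * buffers just posted, so stale errors do not keep the counts
	 * inflated.
	 */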
2216 	pipe_info->nbuf_alloc_err_count =
2217 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
2218 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2219 	pipe_info->nbuf_dma_err_count =
2220 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
2221 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2222 	pipe_info->nbuf_ce_enqueue_err_count =
2223 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
2224 	pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
2225 
2226 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2227 
2228 	return QDF_STATUS_SUCCESS;
2229 }
2230 
2231 /*
2232  * Try to post all desired receive buffers for all pipes.
2233  * Returns QDF_STATUS_SUCCESS for non-fastpath rx copy engines,
2234  * since oom_allocation_work will be scheduled to recover any
2235  * failures; returns an error status if receive buffers cannot be
2236  * completely replenished for a fastpath rx copy engine.
2237  */
2238 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
2239 {
2240 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2241 	int pipe_num;
2242 	struct CE_state *ce_state = NULL;
2243 	QDF_STATUS qdf_status;
2244 
2245 	A_TARGET_ACCESS_LIKELY(scn);
2246 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2247 		struct HIF_CE_pipe_info *pipe_info;
2248 
2249 		ce_state = scn->ce_id_to_state[pipe_num];
2250 		pipe_info = &hif_state->pipe_info[pipe_num];
2251 
2252 		if (hif_is_nss_wifi_enabled(scn) &&
2253 		    ce_state && (ce_state->htt_rx_data))
2254 			continue;
2255 
2256 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
2257 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
2258 			ce_state->htt_rx_data &&
2259 			scn->fastpath_mode_on) {
2260 			A_TARGET_ACCESS_UNLIKELY(scn);
2261 			return qdf_status;
2262 		}
2263 	}
2264 
2265 	A_TARGET_ACCESS_UNLIKELY(scn);
2266 
2267 	return QDF_STATUS_SUCCESS;
2268 }
2269 
2270 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
2271 {
2272 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2273 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2274 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
2275 
2276 	hif_update_fastpath_recv_bufs_cnt(scn);
2277 
2278 	hif_msg_callbacks_install(scn);
2279 
2280 	if (hif_completion_thread_startup(hif_state))
2281 		return QDF_STATUS_E_FAILURE;
2282 
2283 	/* enable buffer cleanup */
2284 	hif_state->started = true;
2285 
2286 	/* Post buffers once to start things off. */
2287 	qdf_status = hif_post_recv_buffers(scn);
2288 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2289 		/* cleanup is done in hif_ce_disable */
2290 		HIF_ERROR("%s:failed to post buffers", __func__);
2291 		return qdf_status;
2292 	}
2293 
2294 	return qdf_status;
2295 }
2296 
2297 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2298 {
2299 	struct hif_softc *scn;
2300 	struct CE_handle *ce_hdl;
2301 	uint32_t buf_sz;
2302 	struct HIF_CE_state *hif_state;
2303 	qdf_nbuf_t netbuf;
2304 	qdf_dma_addr_t CE_data;
2305 	void *per_CE_context;
2306 
2307 	buf_sz = pipe_info->buf_sz;
2308 	/* Unused Copy Engine */
2309 	if (buf_sz == 0)
2310 		return;
2311 
2312 
2313 	hif_state = pipe_info->HIF_CE_state;
2314 	if (!hif_state->started)
2315 		return;
2316 
2317 	scn = HIF_GET_SOFTC(hif_state);
2318 	ce_hdl = pipe_info->ce_hdl;
2319 
2320 	if (scn->qdf_dev == NULL)
2321 		return;
2322 	while (ce_revoke_recv_next
2323 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
2324 			&CE_data) == QDF_STATUS_SUCCESS) {
2325 		if (netbuf) {
2326 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2327 					      QDF_DMA_FROM_DEVICE);
2328 			qdf_nbuf_free(netbuf);
2329 		}
2330 	}
2331 }
2332 
2333 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2334 {
2335 	struct CE_handle *ce_hdl;
2336 	struct HIF_CE_state *hif_state;
2337 	struct hif_softc *scn;
2338 	qdf_nbuf_t netbuf;
2339 	void *per_CE_context;
2340 	qdf_dma_addr_t CE_data;
2341 	unsigned int nbytes;
2342 	unsigned int id;
2343 	uint32_t buf_sz;
2344 	uint32_t toeplitz_hash_result;
2345 
2346 	buf_sz = pipe_info->buf_sz;
2347 	if (buf_sz == 0) {
2348 		/* Unused Copy Engine */
2349 		return;
2350 	}
2351 
2352 	hif_state = pipe_info->HIF_CE_state;
2353 	if (!hif_state->started) {
2354 		return;
2355 	}
2356 
2357 	scn = HIF_GET_SOFTC(hif_state);
2358 
2359 	ce_hdl = pipe_info->ce_hdl;
2360 
2361 	while (ce_cancel_send_next
2362 		       (ce_hdl, &per_CE_context,
2363 		       (void **)&netbuf, &CE_data, &nbytes,
2364 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2365 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2366 			/*
2367 			 * Packets enqueued by htt_h2t_ver_req_msg() and
2368 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2369 			 * freed in htt_htc_misc_pkt_pool_free() in
2370 			 * wlantl_close(), so do not free them here again
2371 			 * by checking whether it's the endpoint
2372 			 * which they are queued in.
2373 			 */
2374 			if (id == scn->htc_htt_tx_endpoint)
2375 				return;
2376 			/* Indicate the completion to higher
2377 			 * layer to free the buffer
2378 			 */
2379 			if (pipe_info->pipe_callbacks.txCompletionHandler)
2380 				pipe_info->pipe_callbacks.
2381 				    txCompletionHandler(pipe_info->
2382 					    pipe_callbacks.Context,
2383 					    netbuf, id, toeplitz_hash_result);
2384 		}
2385 	}
2386 }
2387 
2388 /*
2389  * Cleanup residual buffers for device shutdown:
2390  *    buffers that were enqueued for receive
2391  *    buffers that were to be sent
2392  * Note: Buffers that had completed but which were
2393  * not yet processed are on a completion queue. They
2394  * are handled when the completion thread shuts down.
2395  */
2396 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
2397 {
2398 	int pipe_num;
2399 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2400 	struct CE_state *ce_state;
2401 
2402 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2403 		struct HIF_CE_pipe_info *pipe_info;
2404 
2405 		ce_state = scn->ce_id_to_state[pipe_num];
2406 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2407 				((ce_state->htt_tx_data) ||
2408 				 (ce_state->htt_rx_data))) {
2409 			continue;
2410 		}
2411 
2412 		pipe_info = &hif_state->pipe_info[pipe_num];
2413 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
2414 		hif_send_buffer_cleanup_on_pipe(pipe_info);
2415 	}
2416 }
2417 
2418 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
2419 {
2420 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2421 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2422 
2423 	hif_buffer_cleanup(hif_state);
2424 }
2425 
2426 static void hif_destroy_oom_work(struct hif_softc *scn)
2427 {
2428 	struct CE_state *ce_state;
2429 	int ce_id;
2430 
2431 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2432 		ce_state = scn->ce_id_to_state[ce_id];
2433 		if (ce_state)
2434 			qdf_destroy_work(scn->qdf_dev,
2435 					 &ce_state->oom_allocation_work);
2436 	}
2437 }
2438 
2439 void hif_ce_stop(struct hif_softc *scn)
2440 {
2441 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2442 	int pipe_num;
2443 
2444 	/*
2445 	 * before cleaning up any memory, ensure irq &
2446 	 * bottom half contexts will not be re-entered
2447 	 */
2448 	hif_disable_isr(&scn->osc);
2449 	hif_destroy_oom_work(scn);
2450 	scn->hif_init_done = false;
2451 
2452 	/*
2453 	 * At this point, asynchronous threads are stopped,
2454 	 * The Target should not DMA nor interrupt, Host code may
2455 	 * not initiate anything more.  So we just need to clean
2456 	 * up Host-side state.
2457 	 */
2458 
2459 	if (scn->athdiag_procfs_inited) {
2460 		athdiag_procfs_remove();
2461 		scn->athdiag_procfs_inited = false;
2462 	}
2463 
2464 	hif_buffer_cleanup(hif_state);
2465 
2466 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2467 		struct HIF_CE_pipe_info *pipe_info;
2468 		struct CE_attr attr;
2469 		struct CE_handle *ce_diag = hif_state->ce_diag;
2470 
2471 		pipe_info = &hif_state->pipe_info[pipe_num];
2472 		if (pipe_info->ce_hdl) {
2473 			if (pipe_info->ce_hdl != ce_diag) {
2474 				attr = hif_state->host_ce_config[pipe_num];
2475 				if (attr.src_nentries)
2476 					qdf_spinlock_destroy(&pipe_info->
2477 							completion_freeq_lock);
2478 			}
2479 			ce_fini(pipe_info->ce_hdl);
2480 			pipe_info->ce_hdl = NULL;
2481 			pipe_info->buf_sz = 0;
2482 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2483 		}
2484 	}
2485 
2486 	if (hif_state->sleep_timer_init) {
2487 		qdf_timer_stop(&hif_state->sleep_timer);
2488 		qdf_timer_free(&hif_state->sleep_timer);
2489 		hif_state->sleep_timer_init = false;
2490 	}
2491 
2492 	hif_state->started = false;
2493 }
2494 
2495 #ifdef QCN7605_SUPPORT
2496 static inline
2497 void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg
2498 				    **target_shadow_reg_cfg_ret,
2499 				    uint32_t *shadow_cfg_sz_ret)
2500 {
2501 	if (target_shadow_reg_cfg_ret)
2502 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg_map_qcn7605;
2503 	if (shadow_cfg_sz_ret)
2504 		*shadow_cfg_sz_ret = sizeof(target_shadow_reg_cfg_map_qcn7605);
2505 }
2506 #else
2507 static inline
2508 void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg
2509 				    **target_shadow_reg_cfg_ret,
2510 				    uint32_t *shadow_cfg_sz_ret)
2511 {
2512 	HIF_ERROR("QCN7605 not supported");
2513 }
2514 #endif
2515 
2516 static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
2517 				   struct shadow_reg_cfg
2518 				   **target_shadow_reg_cfg_ret,
2519 				   uint32_t *shadow_cfg_sz_ret)
2520 {
2521 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2522 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
2523 
2524 	switch (tgt_info->target_type) {
2525 	case TARGET_TYPE_QCN7605:
2526 		hif_get_shadow_reg_cfg_qcn7605(target_shadow_reg_cfg_ret,
2527 					       shadow_cfg_sz_ret);
2528 		break;
2529 	default:
2530 		if (target_shadow_reg_cfg_ret)
2531 			*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2532 		if (shadow_cfg_sz_ret)
2533 			*shadow_cfg_sz_ret = shadow_cfg_sz;
2534 	}
2535 }
2536 
2537 /**
2538  * hif_get_target_ce_config() - get copy engine configuration
2539  * @target_ce_config_ret: basic copy engine configuration
2540  * @target_ce_config_sz_ret: size of the basic configuration in bytes
2541  * @target_service_to_ce_map_ret: service mapping for the copy engines
2542  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2543  * @target_shadow_reg_cfg_ret: shadow register configuration
2544  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2545  *
2546  * providing accessor to these values outside of this file.
2547  * Provides accessors to these values outside of this file.
2548  * Currently these are stored in static pointers to const sections.
2549  * There are multiple configurations that are selected from at compile time.
2550  *
2551  * Return: return by parameter.
2552  */
2553 void hif_get_target_ce_config(struct hif_softc *scn,
2554 		struct CE_pipe_config **target_ce_config_ret,
2555 		uint32_t *target_ce_config_sz_ret,
2556 		struct service_to_pipe **target_service_to_ce_map_ret,
2557 		uint32_t *target_service_to_ce_map_sz_ret,
2558 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
2559 		uint32_t *shadow_cfg_sz_ret)
2560 {
2561 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2562 
2563 	*target_ce_config_ret = hif_state->target_ce_config;
2564 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
2565 
2566 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2567 				       target_service_to_ce_map_sz_ret);
2568 	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
2569 			       shadow_cfg_sz_ret);
2570 }
2571 
2572 #ifdef CONFIG_SHADOW_V2
2573 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2574 {
2575 	int i;
2576 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2577 		  "%s: num_config %d\n", __func__, cfg->num_shadow_reg_v2_cfg);
2578 
2579 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2580 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2581 		     "%s: i %d, val %x\n", __func__, i,
2582 		     cfg->shadow_reg_v2_cfg[i].addr);
2583 	}
2584 }
2585 
2586 #else
2587 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2588 {
2589 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2590 		  "%s: CONFIG_SHADOW_V2 not defined\n", __func__);
2591 }
2592 #endif
2593 
2594 /**
2595  * hif_wlan_enable(): call the platform driver to enable wlan
2596  * @scn: HIF Context
2597  *
2598  * This function passes the con_mode and CE configuration to
2599  * platform driver to enable wlan.
2600  *
2601  * Return: linux error code
2602  */
2603 int hif_wlan_enable(struct hif_softc *scn)
2604 {
2605 	struct pld_wlan_enable_cfg cfg;
2606 	enum pld_driver_mode mode;
2607 	uint32_t con_mode = hif_get_conparam(scn);
2608 
2609 	hif_get_target_ce_config(scn,
2610 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
2611 			&cfg.num_ce_tgt_cfg,
2612 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
2613 			&cfg.num_ce_svc_pipe_cfg,
2614 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
2615 			&cfg.num_shadow_reg_cfg);
2616 
2617 	/* translate from structure size to array size */
2618 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
2619 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
2620 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
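	/*
	 * cfg.num_* now hold entry counts rather than byte sizes; e.g. a
	 * target CE table with 8 CE_pipe_config entries yields
	 * num_ce_tgt_cfg = 8.
	 */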
2621 
2622 	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
2623 			      &cfg.num_shadow_reg_v2_cfg);
2624 
2625 	hif_print_hal_shadow_register_cfg(&cfg);
2626 
2627 	if (QDF_GLOBAL_FTM_MODE == con_mode)
2628 		mode = PLD_FTM;
2629 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
2630 		mode = PLD_COLDBOOT_CALIBRATION;
2631 	else if (QDF_IS_EPPING_ENABLED(con_mode))
2632 		mode = PLD_EPPING;
2633 	else
2634 		mode = PLD_MISSION;
2635 
2636 	if (BYPASS_QMI)
2637 		return 0;
2638 	else
2639 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
2640 				       mode, QWLAN_VERSIONSTR);
2641 }
2642 
2643 #ifdef WLAN_FEATURE_EPPING
2644 
2645 #define CE_EPPING_USES_IRQ true
2646 
2647 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
2648 {
2649 	if (CE_EPPING_USES_IRQ)
2650 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
2651 	else
2652 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
2653 	hif_state->target_ce_config = target_ce_config_wlan_epping;
2654 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
2655 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
2656 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
2657 }
2658 #endif
2659 
2660 #ifdef QCN7605_SUPPORT
2661 static inline
2662 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
2663 			       struct HIF_CE_state *hif_state)
2664 {
2665 	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
2666 	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
2667 	hif_state->target_ce_config_sz =
2668 				 sizeof(target_ce_config_wlan_qcn7605);
2669 	scn->ce_count = QCN7605_CE_COUNT;
2670 }
2671 #else
2672 static inline
2673 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
2674 			       struct HIF_CE_state *hif_state)
2675 {
2676 	HIF_ERROR("QCN7605 not supported");
2677 }
2678 #endif
2679 
2680 /**
2681  * hif_ce_prepare_config() - load the correct static tables.
2682  * @scn: hif context
2683  *
2684  * Epping uses different static attribute tables than mission mode.
2685  */
2686 void hif_ce_prepare_config(struct hif_softc *scn)
2687 {
2688 	uint32_t mode = hif_get_conparam(scn);
2689 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2690 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
2691 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2692 
2693 	hif_state->ce_services = ce_services_attach(scn);
2694 
2695 	scn->ce_count = HOST_CE_COUNT;
2696 	/* if epping is enabled we need to use the epping configuration. */
2697 	if (QDF_IS_EPPING_ENABLED(mode)) {
2698 		hif_ce_prepare_epping_config(hif_state);
2699 	}
2700 
2701 	switch (tgt_info->target_type) {
2702 	default:
2703 		hif_state->host_ce_config = host_ce_config_wlan;
2704 		hif_state->target_ce_config = target_ce_config_wlan;
2705 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
2706 		break;
2707 	case TARGET_TYPE_QCN7605:
2708 		hif_set_ce_config_qcn7605(scn, hif_state);
2709 		break;
2710 	case TARGET_TYPE_AR900B:
2711 	case TARGET_TYPE_QCA9984:
2712 	case TARGET_TYPE_IPQ4019:
2713 	case TARGET_TYPE_QCA9888:
2714 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
2715 			hif_state->host_ce_config =
2716 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
2717 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2718 			hif_state->host_ce_config =
2719 				host_lowdesc_ce_cfg_wlan_ar900b;
2720 		} else {
2721 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
2722 		}
2723 
2724 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
2725 		hif_state->target_ce_config_sz =
2726 				sizeof(target_ce_config_wlan_ar900b);
2727 
2728 		break;
2729 
2730 	case TARGET_TYPE_AR9888:
2731 	case TARGET_TYPE_AR9888V2:
2732 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2733 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
2734 		} else {
2735 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
2736 		}
2737 
2738 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
2739 		hif_state->target_ce_config_sz =
2740 					sizeof(target_ce_config_wlan_ar9888);
2741 
2742 		break;
2743 
2744 	case TARGET_TYPE_QCA8074:
2745 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
2746 			hif_state->host_ce_config =
2747 					host_ce_config_wlan_qca8074_pci;
2748 			hif_state->target_ce_config =
2749 				target_ce_config_wlan_qca8074_pci;
2750 			hif_state->target_ce_config_sz =
2751 				sizeof(target_ce_config_wlan_qca8074_pci);
2752 		} else {
2753 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
2754 			hif_state->target_ce_config =
2755 					target_ce_config_wlan_qca8074;
2756 			hif_state->target_ce_config_sz =
2757 				sizeof(target_ce_config_wlan_qca8074);
2758 		}
2759 		break;
2760 	case TARGET_TYPE_QCA6290:
2761 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
2762 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
2763 		hif_state->target_ce_config_sz =
2764 					sizeof(target_ce_config_wlan_qca6290);
2765 
2766 		scn->ce_count = QCA_6290_CE_COUNT;
2767 		break;
2768 	}
2769 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
2770 }
2771 
2772 /**
2773  * hif_ce_open() - do ce specific allocations
2774  * @hif_sc: pointer to hif context
2775  *
2776  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_NOMEM
2777  */
2778 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
2779 {
2780 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2781 
2782 	qdf_spinlock_create(&hif_state->irq_reg_lock);
2783 	qdf_spinlock_create(&hif_state->keep_awake_lock);
2784 	return QDF_STATUS_SUCCESS;
2785 }
2786 
2787 /**
2788  * hif_ce_close() - do ce specific free
2789  * @hif_sc: pointer to hif context
2790  */
2791 void hif_ce_close(struct hif_softc *hif_sc)
2792 {
2793 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2794 
2795 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
2796 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
2797 }
2798 
2799 /**
2800  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
2801  * @hif_sc: hif context
2802  *
2803  * Uses state variables to support cleaning up when hif_config_ce fails.
2804  */
2805 void hif_unconfig_ce(struct hif_softc *hif_sc)
2806 {
2807 	int pipe_num;
2808 	struct HIF_CE_pipe_info *pipe_info;
2809 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2810 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
2811 
2812 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
2813 		pipe_info = &hif_state->pipe_info[pipe_num];
2814 		if (pipe_info->ce_hdl) {
2815 			ce_unregister_irq(hif_state, (1 << pipe_num));
2816 			ce_fini(pipe_info->ce_hdl);
2817 			pipe_info->ce_hdl = NULL;
2818 			pipe_info->buf_sz = 0;
2819 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2820 		}
2821 	}
2822 	deinit_tasklet_workers(hif_hdl);
2823 	if (hif_sc->athdiag_procfs_inited) {
2824 		athdiag_procfs_remove();
2825 		hif_sc->athdiag_procfs_inited = false;
2826 	}
2827 }
2828 
2829 #ifdef CONFIG_BYPASS_QMI
2830 #define FW_SHARED_MEM (2 * 1024 * 1024)
2831 
2832 /**
2833  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
2834  * @scn: pointer to HIF structure
2835  *
2836  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
2837  *
2838  * Return: void
2839  */
2840 static void hif_post_static_buf_to_target(struct hif_softc *scn)
2841 {
2842 	void *target_va;
2843 	phys_addr_t target_pa;
2844 
2845 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2846 				FW_SHARED_MEM, &target_pa);
2847 	if (NULL == target_va) {
2848 		HIF_TRACE("Memory allocation failed could not post target buf");
2849 		return;
2850 	}
2851 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
2852 	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
2853 }
2854 #else
2855 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
2856 {
2857 }
2858 #endif
2859 
2860 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
2861 				bool wait_for_it)
2862 {
2863 	/* todo */
2864 	return 0;
2865 }
2866 
2867 /**
2868  * hif_config_ce() - configure copy engines
2869  * @scn: hif context
2870  *
2871  * Prepares fw, copy engine hardware and host sw according
2872  * to the attributes selected by hif_ce_prepare_config.
2873  *
2874  * Also calls athdiag_procfs_init.
2875  *
2876  * Return: 0 for success, nonzero for failure.
2877  */
2878 int hif_config_ce(struct hif_softc *scn)
2879 {
2880 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2881 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2882 	struct HIF_CE_pipe_info *pipe_info;
2883 	int pipe_num;
2884 	struct CE_state *ce_state = NULL;
2885 
2886 #ifdef ADRASTEA_SHADOW_REGISTERS
2887 	int i;
2888 #endif
2889 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
2890 
2891 	scn->notice_send = true;
2892 	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
2893 
2894 	hif_post_static_buf_to_target(scn);
2895 
2896 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
2897 
2898 	hif_config_rri_on_ddr(scn);
2899 
2900 	if (ce_srng_based(scn))
2901 		scn->bus_ops.hif_target_sleep_state_adjust =
2902 			&hif_srng_sleep_state_adjust;
2903 
2904 	/* Initialise the CE debug history sysfs interface inputs ce_id and
2905 	 * index, and disable data storing.
2906 	 */
2907 	reset_ce_debug_history(scn);
2908 
2909 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2910 		struct CE_attr *attr;
2911 
2912 		pipe_info = &hif_state->pipe_info[pipe_num];
2913 		pipe_info->pipe_num = pipe_num;
2914 		pipe_info->HIF_CE_state = hif_state;
2915 		attr = &hif_state->host_ce_config[pipe_num];
2916 
2917 		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
2918 		ce_state = scn->ce_id_to_state[pipe_num];
2919 		if (!ce_state) {
2920 			A_TARGET_ACCESS_UNLIKELY(scn);
2921 			goto err;
2922 		}
2923 		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
2924 		QDF_ASSERT(pipe_info->ce_hdl != NULL);
2925 		if (pipe_info->ce_hdl == NULL) {
2926 			rv = QDF_STATUS_E_FAILURE;
2927 			A_TARGET_ACCESS_UNLIKELY(scn);
2928 			goto err;
2929 		}
2930 
2931 		ce_state->lro_data = qdf_lro_init();
2932 
2933 		if (attr->flags & CE_ATTR_DIAG) {
2934 			/* Reserve the last CE for
2935 			 * Diagnostic Window support
2936 			 */
2937 			hif_state->ce_diag = pipe_info->ce_hdl;
2938 			continue;
2939 		}
2940 
2941 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2942 				(ce_state->htt_rx_data))
2943 			continue;
2944 
2945 		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
2946 		if (attr->dest_nentries > 0) {
2947 			atomic_set(&pipe_info->recv_bufs_needed,
2948 				   init_buffer_count(attr->dest_nentries - 1));
2949 			/* SRNG based CE has one entry less */
2950 			if (ce_srng_based(scn))
2951 				atomic_dec(&pipe_info->recv_bufs_needed);
2952 		} else {
2953 			atomic_set(&pipe_info->recv_bufs_needed, 0);
2954 		}
2955 		ce_tasklet_init(hif_state, (1 << pipe_num));
2956 		ce_register_irq(hif_state, (1 << pipe_num));
2957 	}
2958 
2959 	if (athdiag_procfs_init(scn) != 0) {
2960 		A_TARGET_ACCESS_UNLIKELY(scn);
2961 		goto err;
2962 	}
2963 	scn->athdiag_procfs_inited = true;
2964 
2965 	HIF_DBG("%s: ce_init done", __func__);
2966 
2967 	init_tasklet_workers(hif_hdl);
2968 
2969 	HIF_DBG("%s: X, ret = %d", __func__, rv);
2970 
2971 #ifdef ADRASTEA_SHADOW_REGISTERS
2972 	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
2973 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
2974 		HIF_DBG("%s Shadow Register%d is mapped to address %x",
2975 			  __func__, i,
2976 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
2977 	}
2978 #endif
2979 
2980 	return rv != QDF_STATUS_SUCCESS;
2981 
2982 err:
2983 	/* Failure, so clean up */
2984 	hif_unconfig_ce(scn);
2985 	HIF_TRACE("%s: X, ret = %d", __func__, rv);
2986 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
2987 }
2988 
2989 #ifdef WLAN_FEATURE_FASTPATH
2990 /**
2991  * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
2992  * @handler: Callback function
2993  * @context: handle for callback function
2994  *
2995  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
2996  */
2997 int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
2998 				fastpath_msg_handler handler,
2999 				void *context)
3000 {
3001 	struct CE_state *ce_state;
3002 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3003 	int i;
3004 
3005 	if (!scn) {
3006 		HIF_ERROR("%s: scn is NULL", __func__);
3007 		QDF_ASSERT(0);
3008 		return QDF_STATUS_E_FAILURE;
3009 	}
3010 
3011 	if (!scn->fastpath_mode_on) {
3012 		HIF_WARN("%s: Fastpath mode disabled", __func__);
3013 		return QDF_STATUS_E_FAILURE;
3014 	}
3015 
3016 	for (i = 0; i < scn->ce_count; i++) {
3017 		ce_state = scn->ce_id_to_state[i];
3018 		if (ce_state->htt_rx_data) {
3019 			ce_state->fastpath_handler = handler;
3020 			ce_state->context = context;
3021 		}
3022 	}
3023 
3024 	return QDF_STATUS_SUCCESS;
3025 }
3026 qdf_export_symbol(hif_ce_fastpath_cb_register);
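
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * datapath client registers its rx handler once fastpath mode is on:
 *
 *	if (hif_ce_fastpath_cb_register(hif_ctx, my_htt_rx_handler, pdev))
 *		HIF_ERROR("fastpath cb registration failed");
 *
 * where my_htt_rx_handler and pdev are the caller's own handler and context.
 */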
3027 #endif
3028 
3029 #ifdef IPA_OFFLOAD
3030 /**
3031  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
3032  * @scn: bus context
3033  * @ce_sr: copyengine source ring shared memory info
3034  * @ce_sr_ring_size: copyengine source ring size
3035  * @ce_reg_paddr: copyengine register physical address
3036  *
3037  * When the IPA micro controller data path offload feature is enabled,
3038  * HIF should release copy engine related resource information to the IPA UC.
3039  * The IPA UC will access the hardware resources with the released information.
3040  *
3041  * Return: None
3042  */
3043 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
3044 			     qdf_shared_mem_t **ce_sr,
3045 			     uint32_t *ce_sr_ring_size,
3046 			     qdf_dma_addr_t *ce_reg_paddr)
3047 {
3048 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3049 	struct HIF_CE_pipe_info *pipe_info =
3050 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
3051 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3052 
3053 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
3054 			    ce_reg_paddr);
3055 }
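
/*
 * Illustrative sketch (hypothetical, not part of this file): an IPA setup
 * path fetches the CE resources before handing them to the IPA micro
 * controller:
 *
 *	qdf_shared_mem_t *ce_sr;
 *	uint32_t ce_sr_ring_size;
 *	qdf_dma_addr_t ce_reg_paddr;
 *
 *	hif_ce_ipa_get_ce_resource(scn, &ce_sr, &ce_sr_ring_size,
 *				   &ce_reg_paddr);
 */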
3056 #endif /* IPA_OFFLOAD */
3057 
3058 
3059 #ifdef ADRASTEA_SHADOW_REGISTERS
3060 
3061 /*
3062  * Current shadow register config
3063  *
3064  * -----------------------------------------------------------
3065  * Shadow Register      |     CE   |    src/dst write index
3066  * -----------------------------------------------------------
3067  *         0            |     0    |           src
3068  *         1     No Config - Doesn't point to anything
3069  *         2     No Config - Doesn't point to anything
3070  *         3            |     3    |           src
3071  *         4            |     4    |           src
3072  *         5            |     5    |           src
3073  *         6     No Config - Doesn't point to anything
3074  *         7            |     7    |           src
3075  *         8     No Config - Doesn't point to anything
3076  *         9     No Config - Doesn't point to anything
3077  *         10    No Config - Doesn't point to anything
3078  *         11    No Config - Doesn't point to anything
3079  * -----------------------------------------------------------
3080  *         12    No Config - Doesn't point to anything
3081  *         13           |     1    |           dst
3082  *         14           |     2    |           dst
3083  *         15    No Config - Doesn't point to anything
3084  *         16    No Config - Doesn't point to anything
3085  *         17    No Config - Doesn't point to anything
3086  *         18    No Config - Doesn't point to anything
3087  *         19           |     7    |           dst
3088  *         20           |     8    |           dst
3089  *         21    No Config - Doesn't point to anything
3090  *         22    No Config - Doesn't point to anything
3091  *         23    No Config - Doesn't point to anything
3092  * -----------------------------------------------------------
3093  *
3094  *
3095  * ToDo - Move shadow register config to following in the future
3096  * This helps free up a block of shadow registers towards the end.
3097  * Can be used for other purposes
3098  *
3099  * -----------------------------------------------------------
3100  * Shadow Register      |     CE   |    src/dst write index
3101  * -----------------------------------------------------------
3102  *      0            |     0    |           src
3103  *      1            |     3    |           src
3104  *      2            |     4    |           src
3105  *      3            |     5    |           src
3106  *      4            |     7    |           src
3107  * -----------------------------------------------------------
3108  *      5            |     1    |           dst
3109  *      6            |     2    |           dst
3110  *      7            |     7    |           dst
3111  *      8            |     8    |           dst
3112  * -----------------------------------------------------------
3113  *      9     No Config - Doesn't point to anything
3114  *      12    No Config - Doesn't point to anything
3115  *      13    No Config - Doesn't point to anything
3116  *      14    No Config - Doesn't point to anything
3117  *      15    No Config - Doesn't point to anything
3118  *      16    No Config - Doesn't point to anything
3119  *      17    No Config - Doesn't point to anything
3120  *      18    No Config - Doesn't point to anything
3121  *      19    No Config - Doesn't point to anything
3122  *      20    No Config - Doesn't point to anything
3123  *      21    No Config - Doesn't point to anything
3124  *      22    No Config - Doesn't point to anything
3125  *      23    No Config - Doesn't point to anything
3126  * -----------------------------------------------------------
3127 */
3128 #ifndef QCN7605_SUPPORT
3129 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3130 {
3131 	u32 addr = 0;
3132 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3133 
3134 	switch (ce) {
3135 	case 0:
3136 		addr = SHADOW_VALUE0;
3137 		break;
3138 	case 3:
3139 		addr = SHADOW_VALUE3;
3140 		break;
3141 	case 4:
3142 		addr = SHADOW_VALUE4;
3143 		break;
3144 	case 5:
3145 		addr = SHADOW_VALUE5;
3146 		break;
3147 	case 7:
3148 		addr = SHADOW_VALUE7;
3149 		break;
3150 	default:
3151 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3152 		QDF_ASSERT(0);
3153 	}
3154 	return addr;
3155 
3156 }
3157 
3158 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3159 {
3160 	u32 addr = 0;
3161 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3162 
3163 	switch (ce) {
3164 	case 1:
3165 		addr = SHADOW_VALUE13;
3166 		break;
3167 	case 2:
3168 		addr = SHADOW_VALUE14;
3169 		break;
3170 	case 5:
3171 		addr = SHADOW_VALUE17;
3172 		break;
3173 	case 7:
3174 		addr = SHADOW_VALUE19;
3175 		break;
3176 	case 8:
3177 		addr = SHADOW_VALUE20;
3178 		break;
3179 	case 9:
3180 		addr = SHADOW_VALUE21;
3181 		break;
3182 	case 10:
3183 		addr = SHADOW_VALUE22;
3184 		break;
3185 	case 11:
3186 		addr = SHADOW_VALUE23;
3187 		break;
3188 	default:
3189 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3190 		QDF_ASSERT(0);
3191 	}
3192 
3193 	return addr;
3194 
3195 }
3196 #else
3197 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3198 {
3199 	u32 addr = 0;
3200 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3201 
3202 	switch (ce) {
3203 	case 0:
3204 		addr = SHADOW_VALUE0;
3205 		break;
3206 	case 4:
3207 		addr = SHADOW_VALUE4;
3208 		break;
3209 	case 5:
3210 		addr = SHADOW_VALUE5;
3211 		break;
3212 	default:
3213 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3214 		QDF_ASSERT(0);
3215 	}
3216 	return addr;
3217 }
3218 
3219 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3220 {
3221 	u32 addr = 0;
3222 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3223 
3224 	switch (ce) {
3225 	case 1:
3226 		addr = SHADOW_VALUE13;
3227 		break;
3228 	case 2:
3229 		addr = SHADOW_VALUE14;
3230 		break;
3231 	case 3:
3232 		addr = SHADOW_VALUE15;
3233 		break;
3234 	case 5:
3235 		addr = SHADOW_VALUE17;
3236 		break;
3237 	case 7:
3238 		addr = SHADOW_VALUE19;
3239 		break;
3240 	case 8:
3241 		addr = SHADOW_VALUE20;
3242 		break;
3243 	case 9:
3244 		addr = SHADOW_VALUE21;
3245 		break;
3246 	case 10:
3247 		addr = SHADOW_VALUE22;
3248 		break;
3249 	case 11:
3250 		addr = SHADOW_VALUE23;
3251 		break;
3252 	default:
3253 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3254 		QDF_ASSERT(0);
3255 	}
3256 
3257 	return addr;
3258 }
3259 #endif
3260 #endif
3261 
3262 #if defined(FEATURE_LRO)
3263 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
3264 {
3265 	struct CE_state *ce_state;
3266 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3267 
3268 	ce_state = scn->ce_id_to_state[ctx_id];
3269 
3270 	return ce_state->lro_data;
3271 }
3272 #endif
3273 
3274 /**
3275  * hif_map_service_to_pipe() - returns the ce ids pertaining to
3276  * this service
3277  * @hif_hdl: hif_opaque_softc pointer.
3278  * @svc_id: Service ID for which the mapping is needed.
3279  * @ul_pipe: address of the container in which ul pipe is returned.
3280  * @dl_pipe: address of the container in which dl pipe is returned.
3281  * @ul_is_polled: address of the container in which a bool
3282  *			indicating if the UL CE for this service
3283  *			is polled is returned.
3284  * @dl_is_polled: address of the container in which a bool
3285  *			indicating if the DL CE for this service
3286  *			is polled is returned.
3287  *
3288  * Return: Indicates whether the service has been found in the table.
3289  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
3290  *         There will be warning logs if either leg has not been updated
3291  *         because it missed the entry in the table (but this is not an error).
3292  */
3293 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
3294 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
3295 			int *dl_is_polled)
3296 {
3297 	int status = QDF_STATUS_E_INVAL;
3298 	unsigned int i;
3299 	struct service_to_pipe element;
3300 	struct service_to_pipe *tgt_svc_map_to_use;
3301 	uint32_t sz_tgt_svc_map_to_use;
3302 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3303 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3304 	bool dl_updated = false;
3305 	bool ul_updated = false;
3306 
3307 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
3308 				       &sz_tgt_svc_map_to_use);
3309 
3310 	*dl_is_polled = 0;  /* polling for received messages not supported */
3311 
3312 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
3313 
3314 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
3315 		if (element.service_id == svc_id) {
3316 			if (element.pipedir == PIPEDIR_OUT) {
3317 				*ul_pipe = element.pipenum;
3318 				*ul_is_polled =
3319 					(hif_state->host_ce_config[*ul_pipe].flags &
3320 					 CE_ATTR_DISABLE_INTR) != 0;
3321 				ul_updated = true;
3322 			} else if (element.pipedir == PIPEDIR_IN) {
3323 				*dl_pipe = element.pipenum;
3324 				dl_updated = true;
3325 			}
3326 			status = QDF_STATUS_SUCCESS;
3327 		}
3328 	}
3329 	if (ul_updated == false)
3330 		HIF_INFO("%s: ul pipe is NOT updated for service %d",
3331 			 __func__, svc_id);
3332 	if (dl_updated == false)
3333 		HIF_INFO("%s: dl pipe is NOT updated for service %d",
3334 			 __func__, svc_id);
3335 
3336 	return status;
3337 }
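
/*
 * Example (illustrative): resolving the HTC control service, as
 * hif_get_default_pipe() does earlier in this file:
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_is_polled, dl_is_polled;
 *
 *	hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
 *				&ul_pipe, &dl_pipe,
 *				&ul_is_polled, &dl_is_polled);
 */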
3338 
3339 #ifdef SHADOW_REG_DEBUG
3340 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
3341 		uint32_t CE_ctrl_addr)
3342 {
3343 	uint32_t read_from_hw, srri_from_ddr = 0;
3344 
3345 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
3346 
3347 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3348 
3349 	if (read_from_hw != srri_from_ddr) {
3350 		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3351 		       __func__, srri_from_ddr, read_from_hw,
3352 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3353 		QDF_ASSERT(0);
3354 	}
3355 	return srri_from_ddr;
3356 }
3357 
3358 
3359 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
3360 		uint32_t CE_ctrl_addr)
3361 {
3362 	uint32_t read_from_hw, drri_from_ddr = 0;
3363 
3364 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
3365 
3366 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3367 
3368 	if (read_from_hw != drri_from_ddr) {
3369 		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3370 		       drri_from_ddr, read_from_hw,
3371 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3372 		QDF_ASSERT(0);
3373 	}
3374 	return drri_from_ddr;
3375 }
3376 
3377 #endif
3378 
3379 #ifdef ADRASTEA_RRI_ON_DDR
3380 /**
3381  * hif_get_src_ring_read_index(): Called to get the SRRI
3382  *
3383  * @scn: hif_softc pointer
3384  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3385  *
3386  * This function returns the SRRI to the caller. For CEs that
3387  * don't have interrupts enabled, we look at the DDR based SRRI.
3388  *
3389  * Return: SRRI
3390  */
3391 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
3392 		uint32_t CE_ctrl_addr)
3393 {
3394 	struct CE_attr attr;
3395 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3396 
3397 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3398 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3399 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3400 	} else {
3401 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3402 			return A_TARGET_READ(scn,
3403 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
3404 		else
3405 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
3406 					CE_ctrl_addr);
3407 	}
3408 }
3409 
3410 /**
3411  * hif_get_dst_ring_read_index(): Called to get the DRRI
3412  *
3413  * @scn: hif_softc pointer
3414  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3415  *
3416  * This function returns the DRRI to the caller. For CEs that
3417  * don't have interrupts enabled, we look at the DDR based DRRI.
3418  *
3419  * Return: DRRI
3420  */
3421 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
3422 		uint32_t CE_ctrl_addr)
3423 {
3424 	struct CE_attr attr;
3425 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3426 
3427 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3428 
3429 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3430 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3431 	} else {
3432 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3433 			return A_TARGET_READ(scn,
3434 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
3435 		else
3436 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
3437 					CE_ctrl_addr);
3438 	}
3439 }
3440 
3441 /**
3442  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3443  *
3444  * @scn: hif_softc pointer
3445  *
3446  * This function allocates non-cached memory on DDR and sends
3447  * the physical address of this memory to the CE hardware. The
3448  * hardware then updates the RRI at this location.
3449  *
3450  * Return: None
3451  */
3452 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3453 {
3454 	unsigned int i;
3455 	qdf_dma_addr_t paddr_rri_on_ddr;
3456 	uint32_t high_paddr, low_paddr;
3457 
3458 	scn->vaddr_rri_on_ddr =
3459 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3460 		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
3461 		&paddr_rri_on_ddr);
	/* bail out if the consistent allocation failed */
	if (!scn->vaddr_rri_on_ddr) {
		HIF_ERROR("%s: RRI on DDR alloc failed", __func__);
		return;
	}
3462 
3463 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
3464 	low_paddr  = BITS0_TO_31(paddr_rri_on_ddr);
3465 	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
3466 
3467 	HIF_DBG("%s using srri and drri from DDR", __func__);
3468 
3469 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
3470 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
3471 
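	/* enable DDR-based read index updates on every copy engine */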
3472 	for (i = 0; i < CE_COUNT; i++)
3473 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
3474 
3475 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
3476 
3477 }
3478 #else
3479 
3480 /**
3481  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3482  *
3483  * @scn: hif_softc pointer
3484  *
3485  * This is a dummy implementation for platforms that don't
3486  * support this functionality.
3487  *
3488  * Return: None
3489  */
3490 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3491 {
3492 }
3493 #endif
3494 
3495 /**
3496  * hif_dump_ce_registers() - dump ce registers
3497  * @scn: hif_softc pointer.
3498  *
3499  * Output the copy engine registers
3500  *
3501  * Return: 0 for success or error code
3502  */
3503 int hif_dump_ce_registers(struct hif_softc *scn)
3504 {
3505 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3506 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
3507 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
3508 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
3509 	uint16_t i;
3510 	QDF_STATUS status;
3511 
3512 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
3513 		if (scn->ce_id_to_state[i] == NULL) {
3514 			HIF_DBG("CE%d not used.", i);
3515 			continue;
3516 		}
3517 
3518 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
3519 					   (uint8_t *) &ce_reg_values[0],
3520 					   ce_reg_word_size * sizeof(uint32_t));
3521 
3522 		if (status != QDF_STATUS_SUCCESS) {
3523 			HIF_ERROR("Dumping CE register failed!");
3524 			return -EACCES;
3525 		}
3526 		HIF_ERROR("CE%d=>\n", i);
3527 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
3528 				   (uint8_t *) &ce_reg_values[0],
3529 				   ce_reg_word_size * sizeof(uint32_t));
3530 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d\n", (ce_reg_address
3531 				+ SR_WR_INDEX_ADDRESS),
3532 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
3533 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d\n", (ce_reg_address
3534 				+ CURRENT_SRRI_ADDRESS),
3535 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
3536 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d\n", (ce_reg_address
3537 				+ DST_WR_INDEX_ADDRESS),
3538 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
3539 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d\n", (ce_reg_address
3540 				+ CURRENT_DRRI_ADDRESS),
3541 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
3542 		qdf_print("---\n");
3543 	}
3544 	return 0;
3545 }
3546 qdf_export_symbol(hif_dump_ce_registers);
3547 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
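/**
 * hif_get_addl_pipe_info() - collect additional info for a pipe
 * @osc: hif opaque context
 * @hif_info: structure to be filled with the ring details
 * @pipe: pipe (copy engine) number of interest
 *
 * Copies the source and destination ring parameters (entry counts,
 * indices and base addresses) of the given pipe, along with the PCI
 * memory base and CE control address, into @hif_info.
 *
 * Return: pointer to the filled @hif_info structure
 */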
3548 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
3549 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
3550 {
3551 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3552 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3553 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
3554 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3555 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3556 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
3557 	struct CE_ring_state *src_ring = ce_state->src_ring;
3558 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
3559 
3560 	if (src_ring) {
3561 		hif_info->ul_pipe.nentries = src_ring->nentries;
3562 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
3563 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
3564 		hif_info->ul_pipe.write_index = src_ring->write_index;
3565 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
3566 		hif_info->ul_pipe.base_addr_CE_space =
3567 			src_ring->base_addr_CE_space;
3568 		hif_info->ul_pipe.base_addr_owner_space =
3569 			src_ring->base_addr_owner_space;
3570 	}
3571 
3572 
3573 	if (dest_ring) {
3574 		hif_info->dl_pipe.nentries = dest_ring->nentries;
3575 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
3576 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
3577 		hif_info->dl_pipe.write_index = dest_ring->write_index;
3578 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
3579 		hif_info->dl_pipe.base_addr_CE_space =
3580 			dest_ring->base_addr_CE_space;
3581 		hif_info->dl_pipe.base_addr_owner_space =
3582 			dest_ring->base_addr_owner_space;
3583 	}
3584 
3585 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
3586 	hif_info->ctrl_addr = ce_state->ctrl_addr;
3587 
3588 	return hif_info;
3589 }
3590 qdf_export_symbol(hif_get_addl_pipe_info);
3591 
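/**
 * hif_set_nss_wifiol_mode() - set the NSS WiFi offload mode
 * @osc: hif opaque context
 * @mode: offload mode to be stored in the hif context
 *
 * Return: 0
 */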
3592 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
3593 {
3594 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3595 
3596 	scn->nss_wifi_ol_mode = mode;
3597 	return 0;
3598 }
3599 qdf_export_symbol(hif_set_nss_wifiol_mode);
3600 #endif
3601 
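/**
 * hif_set_attribute() - store the hif attribute in the hif context
 * @osc: hif opaque context
 * @hif_attrib: attribute value to be stored
 *
 * Return: None
 */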
3602 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
3603 {
3604 	struct hif_softc *scn = HIF_GET_SOFTC(osc);

3605 	scn->hif_attribute = hif_attrib;
3606 }
3607 
3608 
3609 /* disable interrupts (only applicable to legacy copy engines currently) */
3610 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
3611 {
3612 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3613 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
3614 	uint32_t ctrl_addr = CE_state->ctrl_addr;
3615 
3616 	Q_TARGET_ACCESS_BEGIN(scn);
3617 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
3618 	Q_TARGET_ACCESS_END(scn);
3619 }
3620 qdf_export_symbol(hif_disable_interrupt);
3621 
3622 /**
3623  * hif_fw_event_handler() - hif fw event handler
3624  * @hif_state: pointer to hif ce state structure
3625  *
3626  * Process fw events and raise HTC callback to process fw events.
3627  *
3628  * Return: none
3629  */
3630 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
3631 {
3632 	struct hif_msg_callbacks *msg_callbacks =
3633 		&hif_state->msg_callbacks_current;
3634 
3635 	if (!msg_callbacks->fwEventHandler)
3636 		return;
3637 
3638 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
3639 			QDF_STATUS_E_FAILURE);
3640 }
3641 
3642 #ifndef QCA_WIFI_3_0
3643 /**
3644  * hif_fw_interrupt_handler() - FW interrupt handler
3645  * @irq: irq number
3646  * @arg: the user pointer
3647  *
3648  * Called from the PCI interrupt handler when the Target raises
3649  * a firmware-generated interrupt to the Host.
3650  *
3651  * Only registered for legacy CE devices.
3652  *
3653  * Return: status of handled irq
3654  */
3655 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3656 {
3657 	struct hif_softc *scn = arg;
3658 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3659 	uint32_t fw_indicator_address, fw_indicator;
3660 
3661 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
3662 		return ATH_ISR_NOSCHED;
3663 
3664 	fw_indicator_address = hif_state->fw_indicator_address;
3665 	/* For sudden unplug this will return ~0 */
3666 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
3667 
3668 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
3669 		/* ACK: clear Target-side pending event */
3670 		A_TARGET_WRITE(scn, fw_indicator_address,
3671 			       fw_indicator & ~FW_IND_EVENT_PENDING);
3672 		if (Q_TARGET_ACCESS_END(scn) < 0)
3673 			return ATH_ISR_SCHED;
3674 
3675 		if (hif_state->started) {
3676 			hif_fw_event_handler(hif_state);
3677 		} else {
3678 			/*
3679 			 * Probable Target failure before we're prepared
3680 			 * to handle it.  Generally unexpected.
3681 			 * fw_indicator used as bitmap, and defined as below:
3682 			 *     FW_IND_EVENT_PENDING    0x1
3683 			 *     FW_IND_INITIALIZED      0x2
3684 			 *     FW_IND_NEEDRECOVER      0x4
3685 			 */
3686 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
3687 				("%s: Early firmware event indicated 0x%x\n",
3688 				 __func__, fw_indicator));
3689 		}
3690 	} else {
3691 		if (Q_TARGET_ACCESS_END(scn) < 0)
3692 			return ATH_ISR_SCHED;
3693 	}
3694 
3695 	return ATH_ISR_SCHED;
3696 }
3697 #else
3698 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3699 {
3700 	return ATH_ISR_SCHED;
3701 }
3702 #endif /* #ifndef QCA_WIFI_3_0 */
3703 
3704 
3705 /**
3706  * hif_wlan_disable(): call the platform driver to disable wlan
3707  * @scn: HIF Context
3708  *
3709  * This function passes the con_mode to the platform driver to disable
3710  * wlan.
3711  *
3712  * Return: void
3713  */
3714 void hif_wlan_disable(struct hif_softc *scn)
3715 {
3716 	enum pld_driver_mode mode;
3717 	uint32_t con_mode = hif_get_conparam(scn);
3718 
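	/* nothing to disable if the target has already been reset */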
3719 	if (scn->target_status == TARGET_STATUS_RESET)
3720 		return;
3721 
3722 	if (QDF_GLOBAL_FTM_MODE == con_mode)
3723 		mode = PLD_FTM;
3724 	else if (QDF_IS_EPPING_ENABLED(con_mode))
3725 		mode = PLD_EPPING;
3726 	else
3727 		mode = PLD_MISSION;
3728 
3729 	pld_wlan_disable(scn->qdf_dev->dev, mode);
3730 }
3731 
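/**
 * hif_get_wake_ce_id() - get the copy engine id used for wake up
 * @scn: hif_softc pointer
 * @ce_id: output parameter filled with the wake CE id
 *
 * The wake CE is the DL pipe that HTC_CTRL_RSVD_SVC maps to; this
 * function looks up that mapping and returns the pipe number.
 *
 * Return: 0 on success, negative OS error code if the mapping fails
 */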
3732 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
3733 {
3734 	QDF_STATUS status;
3735 	uint8_t ul_pipe, dl_pipe;
3736 	int ul_is_polled, dl_is_polled;
3737 
3738 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
3739 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
3740 					 HTC_CTRL_RSVD_SVC,
3741 					 &ul_pipe, &dl_pipe,
3742 					 &ul_is_polled, &dl_is_polled);
3743 	if (QDF_IS_STATUS_ERROR(status)) {
3744 		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
3745 		return qdf_status_to_os_return(status);
3746 	}
3747 
3748 	*ce_id = dl_pipe;
3749 
3750 	return 0;
3751 }
3752