xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision e1d3d092f61a07549ab97f6f1f0c86554e0c642f)
1 /*
2  * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 #include "targcfg.h"
28 #include "qdf_lock.h"
29 #include "qdf_status.h"
31 #include <qdf_atomic.h>         /* qdf_atomic_read */
32 #include <targaddrs.h>
33 #include "hif_io32.h"
34 #include <hif.h>
35 #include "regtable.h"
36 #define ATH_MODULE_NAME hif
37 #include <a_debug.h>
38 #include "hif_main.h"
39 #include "ce_api.h"
40 #include "qdf_trace.h"
41 #include "pld_common.h"
42 #include "hif_debug.h"
43 #include "ce_internal.h"
44 #include "ce_reg.h"
45 #include "ce_assignment.h"
46 #include "ce_tasklet.h"
47 #ifndef CONFIG_WIN
48 #include "qwlan_version.h"
49 #endif
50 
51 #define CE_POLL_TIMEOUT 10      /* ms */
52 
53 #define AGC_DUMP         1
54 #define CHANINFO_DUMP    2
55 #define BB_WATCHDOG_DUMP 3
56 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
57 #define PCIE_ACCESS_DUMP 4
58 #endif
59 #include "mp_dev.h"
60 
61 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
62 	!defined(QCA_WIFI_SUPPORT_SRNG)
63 #define QCA_WIFI_SUPPORT_SRNG
64 #endif
65 
66 /* Forward references */
67 static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
68 
69 /*
70  * Fix EV118783: poll to check whether a BMI response has arrived,
71  * rather than waiting for an interrupt that may be lost.
72  */
73 /* #define BMI_RSP_POLLING */
74 #define BMI_RSP_TO_MILLISEC  1000
75 
76 #ifdef CONFIG_BYPASS_QMI
77 #define BYPASS_QMI 1
78 #else
79 #define BYPASS_QMI 0
80 #endif
81 
82 #ifdef CONFIG_WIN
83 #if ENABLE_10_4_FW_HDR
84 #define WDI_IPA_SERVICE_GROUP 5
85 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
86 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
87 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
88 #endif /* ENABLE_10_4_FW_HDR */
89 #endif
90 
91 static int hif_post_recv_buffers(struct hif_softc *scn);
92 static void hif_config_rri_on_ddr(struct hif_softc *scn);
93 
94 /**
95  * hif_target_access_log_dump() - dump access log
96  *
97  * dump access log
98  *
99  * Return: n/a
100  */
101 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
102 static void hif_target_access_log_dump(void)
103 {
104 	hif_target_dump_access_log();
105 }
106 #endif
107 
108 
109 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
110 		      uint8_t cmd_id, bool start)
111 {
112 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
113 
114 	switch (cmd_id) {
115 	case AGC_DUMP:
116 		if (start)
117 			priv_start_agc(scn);
118 		else
119 			priv_dump_agc(scn);
120 		break;
121 	case CHANINFO_DUMP:
122 		if (start)
123 			priv_start_cap_chaninfo(scn);
124 		else
125 			priv_dump_chaninfo(scn);
126 		break;
127 	case BB_WATCHDOG_DUMP:
128 		priv_dump_bbwatchdog(scn);
129 		break;
130 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
131 	case PCIE_ACCESS_DUMP:
132 		hif_target_access_log_dump();
133 		break;
134 #endif
135 	default:
136 		HIF_ERROR("%s: Invalid htc dump command", __func__);
137 		break;
138 	}
139 }
140 
141 static void ce_poll_timeout(void *arg)
142 {
143 	struct CE_state *CE_state = (struct CE_state *)arg;
144 
145 	if (CE_state->timer_inited) {
146 		ce_per_engine_service(CE_state->scn, CE_state->id);
147 		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
148 	}
149 }
150 
151 static unsigned int roundup_pwr2(unsigned int n)
152 {
153 	int i;
154 	unsigned int test_pwr2;
155 
156 	if (!(n & (n - 1)))
157 		return n; /* already a power of 2 */
158 
159 	test_pwr2 = 4;
160 	for (i = 0; i < 29; i++) {
161 		if (test_pwr2 > n)
162 			return test_pwr2;
163 		test_pwr2 = test_pwr2 << 1;
164 	}
165 
166 	QDF_ASSERT(0); /* n too large */
167 	return 0;
168 }
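/*
 * Example (illustrative): roundup_pwr2(400) returns 512 and
 * roundup_pwr2(512) returns 512, so ring sizes requested via
 * attr->src_nentries / attr->dest_nentries always end up as a power of
 * two, which keeps nentries_mask (nentries - 1) usable as a wrap mask.
 */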
169 
170 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
171 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
172 
173 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
174 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
175 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
176 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
177 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
178 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
179 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
180 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
181 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
182 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
183 #ifdef QCA_WIFI_3_0_ADRASTEA
184 	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
185 	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
186 	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
187 #endif
188 };
189 
190 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
191 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
192 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
193 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
194 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
195 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
196 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
197 	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
198 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
199 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
200 };
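/*
 * Note: each entry above pairs a copy engine id with the offset of the
 * shadow write-index register used for that ring (source rings use
 * ADRASTEA_SRC_WR_INDEX_OFFSET, destination rings use
 * ADRASTEA_DST_WR_INDEX_OFFSET).
 */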
201 
202 /* CE_PCI TABLE */
203 /*
204  * NOTE: the table below is out of date, though still a useful reference.
205  * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
206  * mapping of HTC services to HIF pipes.
207  */
208 /*
209  * This authoritative table defines Copy Engine configuration and the mapping
210  * of services/endpoints to CEs.  A subset of this information is passed to
211  * the Target during startup as a prerequisite to entering BMI phase.
212  * See:
213  *    target_service_to_ce_map - Target-side mapping
214  *    hif_map_service_to_pipe      - Host-side mapping
215  *    target_ce_config         - Target-side configuration
216  *    host_ce_config           - Host-side configuration
217    ============================================================================
218    Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
219  |                      |      | ctio | Size     | Frequency
220  |                      |      | n    |          |
221    ============================================================================
222    tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
223    descriptor |                      |      |      | O(100B)  | and regular
224    download   |                      |      |      |          |
225    ----------------------------------------------------------------------------
226    rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
227    indication |                      |      |      | O(10B)   | regular
228    upload     |                      |      |      |          |
229    ----------------------------------------------------------------------------
230    MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
231    upload     |                      |      |      | O(1000B) | (frequent
232    e.g. noise |                      |      |      |          | during IP1.0
233    packets    |                      |      |      |          | testing)
234    ----------------------------------------------------------------------------
235    MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
236    download   |                      |      |      | O(1000B) | (frequent
237    e.g.       |                      |      |      |          | during IP1.0
238    misdirecte |                      |      |      |          | testing)
239    d EAPOL    |                      |      |      |          |
240    packets    |                      |      |      |          |
241    ----------------------------------------------------------------------------
242    n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
243  | DATA_VO (uplink)     |      |      |          |
244    ----------------------------------------------------------------------------
245    n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
246  | DATA_VO (downlink)   |      |      |          |
247    ----------------------------------------------------------------------------
248    WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
249  |                      |      |      | O(100B)  |
250    ----------------------------------------------------------------------------
251    WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
252    messages   | (downlink)           |      |      | O(100B)  |
253  |                      |      |      |          |
254    ----------------------------------------------------------------------------
255    n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
256  | HTC_RAW_STREAMS      |      |      |          |
257  | (uplink)             |      |      |          |
258    ----------------------------------------------------------------------------
259    n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
260  | HTC_RAW_STREAMS      |      |      |          |
261  | (downlink)           |      |      |          |
262    ----------------------------------------------------------------------------
263    diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
264  |                      |      |      |          | infrequent
265    ============================================================================
266  */
267 
268 /*
269  * Map from service/endpoint to Copy Engine.
270  * This table is derived from the CE_PCI TABLE, above.
271  * It is passed to the Target at startup for use by firmware.
272  */
273 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
274 	{
275 		WMI_DATA_VO_SVC,
276 		PIPEDIR_OUT,    /* out = UL = host -> target */
277 		3,
278 	},
279 	{
280 		WMI_DATA_VO_SVC,
281 		PIPEDIR_IN,     /* in = DL = target -> host */
282 		2,
283 	},
284 	{
285 		WMI_DATA_BK_SVC,
286 		PIPEDIR_OUT,    /* out = UL = host -> target */
287 		3,
288 	},
289 	{
290 		WMI_DATA_BK_SVC,
291 		PIPEDIR_IN,     /* in = DL = target -> host */
292 		2,
293 	},
294 	{
295 		WMI_DATA_BE_SVC,
296 		PIPEDIR_OUT,    /* out = UL = host -> target */
297 		3,
298 	},
299 	{
300 		WMI_DATA_BE_SVC,
301 		PIPEDIR_IN,     /* in = DL = target -> host */
302 		2,
303 	},
304 	{
305 		WMI_DATA_VI_SVC,
306 		PIPEDIR_OUT,    /* out = UL = host -> target */
307 		3,
308 	},
309 	{
310 		WMI_DATA_VI_SVC,
311 		PIPEDIR_IN,     /* in = DL = target -> host */
312 		2,
313 	},
314 	{
315 		WMI_CONTROL_SVC,
316 		PIPEDIR_OUT,    /* out = UL = host -> target */
317 		3,
318 	},
319 	{
320 		WMI_CONTROL_SVC,
321 		PIPEDIR_IN,     /* in = DL = target -> host */
322 		2,
323 	},
324 	{
325 		WMI_CONTROL_SVC_WMAC1,
326 		PIPEDIR_OUT,    /* out = UL = host -> target */
327 		7,
328 	},
329 	{
330 		WMI_CONTROL_SVC_WMAC1,
331 		PIPEDIR_IN,     /* in = DL = target -> host */
332 		2,
333 	},
334 	{
335 		WMI_CONTROL_SVC_WMAC2,
336 		PIPEDIR_OUT,    /* out = UL = host -> target */
337 		9,
338 	},
339 	{
340 		WMI_CONTROL_SVC_WMAC2,
341 		PIPEDIR_IN,     /* in = DL = target -> host */
342 		2,
343 	},
344 	{
345 		HTC_CTRL_RSVD_SVC,
346 		PIPEDIR_OUT,    /* out = UL = host -> target */
347 		0,              /* could be moved to 3 (share with WMI) */
348 	},
349 	{
350 		HTC_CTRL_RSVD_SVC,
351 		PIPEDIR_IN,     /* in = DL = target -> host */
352 		2,
353 	},
354 	{
355 		HTC_RAW_STREAMS_SVC, /* not currently used */
356 		PIPEDIR_OUT,    /* out = UL = host -> target */
357 		0,
358 	},
359 	{
360 		HTC_RAW_STREAMS_SVC, /* not currently used */
361 		PIPEDIR_IN,     /* in = DL = target -> host */
362 		2,
363 	},
364 	{
365 		HTT_DATA_MSG_SVC,
366 		PIPEDIR_OUT,    /* out = UL = host -> target */
367 		4,
368 	},
369 	{
370 		HTT_DATA_MSG_SVC,
371 		PIPEDIR_IN,     /* in = DL = target -> host */
372 		1,
373 	},
374 	{
375 		WDI_IPA_TX_SVC,
376 		PIPEDIR_OUT,    /* out = UL = host -> target */
377 		5,
378 	},
379 #if defined(QCA_WIFI_3_0_ADRASTEA)
380 	{
381 		HTT_DATA2_MSG_SVC,
382 		PIPEDIR_IN,    /* in = DL = target -> host */
383 		9,
384 	},
385 	{
386 		HTT_DATA3_MSG_SVC,
387 		PIPEDIR_IN,    /* in = DL = target -> host */
388 		10,
389 	},
390 	{
391 		PACKET_LOG_SVC,
392 		PIPEDIR_IN,    /* in = DL = target -> host */
393 		11,
394 	},
395 #endif
396 	/* (Additions here) */
397 
398 	{                       /* Must be last */
399 		0,
400 		0,
401 		0,
402 	},
403 };
404 
405 /* PIPEDIR_OUT = HOST to Target */
406 /* PIPEDIR_IN  = TARGET to HOST */
407 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
408 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
409 	{ WMI_DATA_VO_SVC, PIPEDIR_IN , 2, },
410 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
411 	{ WMI_DATA_BK_SVC, PIPEDIR_IN , 2, },
412 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
413 	{ WMI_DATA_BE_SVC, PIPEDIR_IN , 2, },
414 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
415 	{ WMI_DATA_VI_SVC, PIPEDIR_IN , 2, },
416 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
417 	{ WMI_CONTROL_SVC, PIPEDIR_IN , 2, },
418 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
419 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN , 2, },
420 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
421 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN , 1, },
422 	/* (Additions here) */
423 	{ 0, 0, 0, },
424 };
425 
426 static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
427 	{
428 		WMI_DATA_VO_SVC,
429 		PIPEDIR_OUT,    /* out = UL = host -> target */
430 		3,
431 	},
432 	{
433 		WMI_DATA_VO_SVC,
434 		PIPEDIR_IN,     /* in = DL = target -> host */
435 		2,
436 	},
437 	{
438 		WMI_DATA_BK_SVC,
439 		PIPEDIR_OUT,    /* out = UL = host -> target */
440 		3,
441 	},
442 	{
443 		WMI_DATA_BK_SVC,
444 		PIPEDIR_IN,     /* in = DL = target -> host */
445 		2,
446 	},
447 	{
448 		WMI_DATA_BE_SVC,
449 		PIPEDIR_OUT,    /* out = UL = host -> target */
450 		3,
451 	},
452 	{
453 		WMI_DATA_BE_SVC,
454 		PIPEDIR_IN,     /* in = DL = target -> host */
455 		2,
456 	},
457 	{
458 		WMI_DATA_VI_SVC,
459 		PIPEDIR_OUT,    /* out = UL = host -> target */
460 		3,
461 	},
462 	{
463 		WMI_DATA_VI_SVC,
464 		PIPEDIR_IN,     /* in = DL = target -> host */
465 		2,
466 	},
467 	{
468 		WMI_CONTROL_SVC,
469 		PIPEDIR_OUT,    /* out = UL = host -> target */
470 		3,
471 	},
472 	{
473 		WMI_CONTROL_SVC,
474 		PIPEDIR_IN,     /* in = DL = target -> host */
475 		2,
476 	},
477 	{
478 		HTC_CTRL_RSVD_SVC,
479 		PIPEDIR_OUT,    /* out = UL = host -> target */
480 		0,              /* could be moved to 3 (share with WMI) */
481 	},
482 	{
483 		HTC_CTRL_RSVD_SVC,
484 		PIPEDIR_IN,     /* in = DL = target -> host */
485 		1,
486 	},
487 	{
488 		HTC_RAW_STREAMS_SVC, /* not currently used */
489 		PIPEDIR_OUT,    /* out = UL = host -> target */
490 		0,
491 	},
492 	{
493 		HTC_RAW_STREAMS_SVC, /* not currently used */
494 		PIPEDIR_IN,     /* in = DL = target -> host */
495 		1,
496 	},
497 	{
498 		HTT_DATA_MSG_SVC,
499 		PIPEDIR_OUT,    /* out = UL = host -> target */
500 		4,
501 	},
502 #if WLAN_FEATURE_FASTPATH
503 	{
504 		HTT_DATA_MSG_SVC,
505 		PIPEDIR_IN,     /* in = DL = target -> host */
506 		5,
507 	},
508 #else /* WLAN_FEATURE_FASTPATH */
509 	{
510 		HTT_DATA_MSG_SVC,
511 		PIPEDIR_IN,  /* in = DL = target -> host */
512 		1,
513 	},
514 #endif /* WLAN_FEATURE_FASTPATH */
515 
516 	/* (Additions here) */
517 
518 	{                       /* Must be last */
519 		0,
520 		0,
521 		0,
522 	},
523 };
524 
525 
526 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
527 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
528 
529 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
530 	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
531 	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
532 	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
533 	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
534 	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
535 	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
536 	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
537 	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
538 	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
539 	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
540 	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
541 	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
542 	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
543 	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
544 	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
545 	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
546 	{0, 0, 0,},             /* Must be last */
547 };
548 
549 static void hif_select_service_to_pipe_map(struct hif_softc *scn,
550 				    struct service_to_pipe **tgt_svc_map_to_use,
551 				    uint32_t *sz_tgt_svc_map_to_use)
552 {
553 	uint32_t mode = hif_get_conparam(scn);
554 	struct hif_target_info *tgt_info = &scn->target_info;
555 
556 	if (QDF_IS_EPPING_ENABLED(mode)) {
557 		*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
558 		*sz_tgt_svc_map_to_use =
559 			sizeof(target_service_to_ce_map_wlan_epping);
560 	} else {
561 		switch (tgt_info->target_type) {
562 		default:
563 			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
564 			*sz_tgt_svc_map_to_use =
565 				sizeof(target_service_to_ce_map_wlan);
566 			break;
567 		case TARGET_TYPE_AR900B:
568 		case TARGET_TYPE_QCA9984:
569 		case TARGET_TYPE_IPQ4019:
570 		case TARGET_TYPE_QCA9888:
571 		case TARGET_TYPE_AR9888:
572 		case TARGET_TYPE_AR9888V2:
573 			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
574 			*sz_tgt_svc_map_to_use =
575 				sizeof(target_service_to_ce_map_ar900b);
576 			break;
577 		case TARGET_TYPE_QCA6290:
578 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
579 			*sz_tgt_svc_map_to_use =
580 				sizeof(target_service_to_ce_map_qca6290);
581 			break;
582 		}
583 	}
584 }
585 
586 /**
587  * ce_mark_datapath() - mark a CE that serves the HTT data path
588  * @ce_state : pointer to the state context of the CE
589  *
590  * Description:
591  *   Sets the htt_rx_data (or htt_tx_data) attribute of the state
592  *   structure if the CE serves one of the HTT DATA services.
593  *
594  * Return:
595  *  true if the CE serves an HTT DATA service
596  *  false otherwise
597  */
598 static bool ce_mark_datapath(struct CE_state *ce_state)
599 {
600 	struct service_to_pipe *svc_map;
601 	uint32_t map_sz, map_len;
602 	int    i;
603 	bool   rc = false;
604 
605 	if (ce_state != NULL) {
606 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
607 					       &map_sz);
608 
609 		map_len = map_sz / sizeof(struct service_to_pipe);
610 		for (i = 0; i < map_len; i++) {
611 			if ((svc_map[i].pipenum == ce_state->id) &&
612 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
613 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
614 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
615 				/* HTT CEs are unidirectional */
616 				if (svc_map[i].pipedir == PIPEDIR_IN)
617 					ce_state->htt_rx_data = true;
618 				else
619 					ce_state->htt_tx_data = true;
620 				rc = true;
621 			}
622 		}
623 	}
624 	return rc;
625 }
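/*
 * Worked example (based on target_service_to_ce_map_wlan above): the entry
 * { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1 } means CE 1 carries HTT data towards
 * the host, so ce_mark_datapath() sets htt_rx_data on CE 1, while
 * { HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4 } causes htt_tx_data to be set on CE 4.
 */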
626 
627 /**
628  * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
629  * @ce_id: ce in question
630  * @ring: ring state being examined
631  * @type: "src_ring" or "dest_ring" string for identifying the ring
632  *
633  * Warns on non-zero index values.
634  * Causes a kernel panic if the ring is not empty during initialization.
635  */
636 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
637 					 char *type)
638 {
639 	if (ring->write_index != 0 || ring->sw_index != 0)
640 		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
641 			  ce_id, type, ring->sw_index, ring->write_index);
642 	if (ring->write_index != ring->sw_index)
643 		QDF_BUG(0);
644 }
645 
646 /**
647  * ce_srng_based() - Does this target use srng
648  * @scn : pointer to the hif context
649  *
650  * Description:
651  *   returns true if the target is SRNG based
652  *
653  * Return:
654  *  true if the target uses SRNG-based copy engines
655  *  false otherwise
656  */
657 bool ce_srng_based(struct hif_softc *scn)
658 {
659 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
660 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
661 
662 	switch (tgt_info->target_type) {
663 	case TARGET_TYPE_QCA8074:
664 	case TARGET_TYPE_QCA6290:
665 		return true;
666 	default:
667 		return false;
668 	}
669 	return false;
670 }
671 
672 #ifdef QCA_WIFI_SUPPORT_SRNG
673 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
674 {
675 	if (ce_srng_based(scn))
676 		return ce_services_srng();
677 
678 	return ce_services_legacy();
679 }
680 
681 
682 #else	/* QCA_WIFI_SUPPORT_SRNG */
683 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
684 {
685 	return ce_services_legacy();
686 }
687 #endif /* QCA_WIFI_SUPPORT_SRNG */
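/*
 * Note: the ops table selected here is what the wrappers below (for example
 * ce_get_desc_size() and ce_ring_setup()) dispatch through via
 * hif_state->ce_services, so SRNG and legacy targets share one code path.
 * The assignment of this table into hif_state->ce_services happens elsewhere
 * in this file (not shown in this excerpt).
 */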
688 
689 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
690 		struct pld_shadow_reg_v2_cfg **shadow_config,
691 		int *num_shadow_registers_configured) {
692 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
693 
694 	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
695 			scn, shadow_config, num_shadow_registers_configured);
696 }
697 
698 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
699 						uint8_t ring_type)
700 {
701 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
702 
703 	return hif_state->ce_services->ce_get_desc_size(ring_type);
704 }
705 
706 
707 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
708 		uint8_t ring_type, uint32_t nentries)
709 {
710 	uint32_t ce_nbytes;
711 	char *ptr;
712 	qdf_dma_addr_t base_addr;
713 	struct CE_ring_state *ce_ring;
714 	uint32_t desc_size;
715 	struct hif_softc *scn = CE_state->scn;
716 
717 	ce_nbytes = sizeof(struct CE_ring_state)
718 		+ (nentries * sizeof(void *));
719 	ptr = qdf_mem_malloc(ce_nbytes);
720 	if (!ptr)
721 		return NULL;
722 
723 	ce_ring = (struct CE_ring_state *)ptr;
724 	ptr += sizeof(struct CE_ring_state);
725 	ce_ring->nentries = nentries;
726 	ce_ring->nentries_mask = nentries - 1;
727 
728 	ce_ring->low_water_mark_nentries = 0;
729 	ce_ring->high_water_mark_nentries = nentries;
730 	ce_ring->per_transfer_context = (void **)ptr;
731 
732 	desc_size = ce_get_desc_size(scn, ring_type);
733 
734 	/* Legacy platforms that do not support cache
735 	 * coherent DMA are unsupported
736 	 */
737 	ce_ring->base_addr_owner_space_unaligned =
738 		qdf_mem_alloc_consistent(scn->qdf_dev,
739 				scn->qdf_dev->dev,
740 				(nentries *
741 				 desc_size +
742 				 CE_DESC_RING_ALIGN),
743 				&base_addr);
744 	if (ce_ring->base_addr_owner_space_unaligned
745 			== NULL) {
746 		HIF_ERROR("%s: ring has no DMA mem",
747 				__func__);
748 		qdf_mem_free(ptr);
749 		return NULL;
750 	}
751 	ce_ring->base_addr_CE_space_unaligned = base_addr;
752 
753 	/* Correctly initialize memory to 0 to
754 	 * prevent garbage data from crashing the system
755 	 * when downloading firmware
756 	 */
757 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
758 			nentries * desc_size +
759 			CE_DESC_RING_ALIGN);
760 
761 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
762 
763 		ce_ring->base_addr_CE_space =
764 			(ce_ring->base_addr_CE_space_unaligned +
765 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
766 
767 		ce_ring->base_addr_owner_space = (void *)
768 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
769 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
770 	} else {
771 		ce_ring->base_addr_CE_space =
772 				ce_ring->base_addr_CE_space_unaligned;
773 		ce_ring->base_addr_owner_space =
774 				ce_ring->base_addr_owner_space_unaligned;
775 	}
776 
777 	return ce_ring;
778 }
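/*
 * Alignment sketch (illustrative): base_addr_CE_space_unaligned is rounded
 * up to the next CE_DESC_RING_ALIGN boundary with
 *   aligned = (unaligned + CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
 * e.g. with a (hypothetical) CE_DESC_RING_ALIGN of 8, an unaligned address
 * of 0x1003 becomes 0x1008, and an already aligned 0x1008 is left unchanged.
 * The extra CE_DESC_RING_ALIGN bytes allocated above guarantee the aligned
 * ring still fits inside the DMA buffer.
 */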
779 
780 static void ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
781 			uint32_t ce_id, struct CE_ring_state *ring,
782 			struct CE_attr *attr)
783 {
784 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
785 
786 	hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
787 					      ring, attr);
788 }
789 
790 int hif_ce_bus_early_suspend(struct hif_softc *scn)
791 {
792 	uint8_t ul_pipe, dl_pipe;
793 	int ce_id, status, ul_is_polled, dl_is_polled;
794 	struct CE_state *ce_state;
795 
796 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
797 					 &ul_pipe, &dl_pipe,
798 					 &ul_is_polled, &dl_is_polled);
799 	if (status) {
800 		HIF_ERROR("%s: pipe_mapping failure", __func__);
801 		return status;
802 	}
803 
804 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
805 		if (ce_id == ul_pipe)
806 			continue;
807 		if (ce_id == dl_pipe)
808 			continue;
809 
810 		ce_state = scn->ce_id_to_state[ce_id];
811 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
812 		if (ce_state->state == CE_RUNNING)
813 			ce_state->state = CE_PAUSED;
814 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
815 	}
816 
817 	return status;
818 }
819 
820 int hif_ce_bus_late_resume(struct hif_softc *scn)
821 {
822 	int ce_id;
823 	struct CE_state *ce_state;
824 	int write_index;
825 	bool index_updated;
826 
827 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
828 		ce_state = scn->ce_id_to_state[ce_id];
829 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
830 		if (ce_state->state == CE_PENDING) {
831 			write_index = ce_state->src_ring->write_index;
832 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
833 					write_index);
834 			ce_state->state = CE_RUNNING;
835 			index_updated = true;
836 		} else {
837 			index_updated = false;
838 		}
839 
840 		if (ce_state->state == CE_PAUSED)
841 			ce_state->state = CE_RUNNING;
842 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
843 
844 		if (index_updated)
845 			hif_record_ce_desc_event(scn, ce_id,
846 				RESUME_WRITE_INDEX_UPDATE,
847 				NULL, NULL, write_index);
848 	}
849 
850 	return 0;
851 }
852 
853 /**
854  * ce_oom_recovery() - try to recover rx ce from oom condition
855  * @context: CE_state of the CE with oom rx ring
856  *
857  * the executing work will continue to be rescheduled until
858  * at least 1 descriptor is successfully posted to the rx ring.
859  *
860  * Return: none
861  */
862 static void ce_oom_recovery(void *context)
863 {
864 	struct CE_state *ce_state = context;
865 	struct hif_softc *scn = ce_state->scn;
866 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
867 	struct HIF_CE_pipe_info *pipe_info =
868 		&ce_softc->pipe_info[ce_state->id];
869 
870 	hif_post_recv_buffers_for_pipe(pipe_info);
871 }
872 
873 /*
874  * Initialize a Copy Engine based on caller-supplied attributes.
875  * This may be called once to initialize both source and destination
876  * rings or it may be called twice for separate source and destination
877  * initialization. It may be that only one side or the other is
878  * initialized by software/firmware.
879  *
880  * This should be called during the initialization sequence before
881  * interrupts are enabled, so we don't have to worry about thread safety.
882  */
883 struct CE_handle *ce_init(struct hif_softc *scn,
884 			  unsigned int CE_id, struct CE_attr *attr)
885 {
886 	struct CE_state *CE_state;
887 	uint32_t ctrl_addr;
888 	unsigned int nentries;
889 	bool malloc_CE_state = false;
890 	bool malloc_src_ring = false;
891 
892 	QDF_ASSERT(CE_id < scn->ce_count);
893 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
894 	CE_state = scn->ce_id_to_state[CE_id];
895 
896 	if (!CE_state) {
897 		CE_state =
898 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
899 		if (!CE_state) {
900 			HIF_ERROR("%s: CE_state has no mem", __func__);
901 			return NULL;
902 		}
903 		malloc_CE_state = true;
904 		qdf_spinlock_create(&CE_state->ce_index_lock);
905 
906 		CE_state->id = CE_id;
907 		CE_state->ctrl_addr = ctrl_addr;
908 		CE_state->state = CE_RUNNING;
909 		CE_state->attr_flags = attr->flags;
910 		qdf_spinlock_create(&CE_state->lro_unloading_lock);
911 	}
912 	CE_state->scn = scn;
913 
914 	qdf_atomic_init(&CE_state->rx_pending);
915 	if (attr == NULL) {
916 		/* Already initialized; caller wants the handle */
917 		return (struct CE_handle *)CE_state;
918 	}
919 
920 	if (CE_state->src_sz_max)
921 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
922 	else
923 		CE_state->src_sz_max = attr->src_sz_max;
924 
925 	ce_init_ce_desc_event_log(CE_id,
926 			attr->src_nentries + attr->dest_nentries);
927 
928 	/* source ring setup */
929 	nentries = attr->src_nentries;
930 	if (nentries) {
931 		struct CE_ring_state *src_ring;
932 
933 		nentries = roundup_pwr2(nentries);
934 		if (CE_state->src_ring) {
935 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
936 		} else {
937 			src_ring = CE_state->src_ring =
938 				ce_alloc_ring_state(CE_state,
939 						CE_RING_SRC,
940 						nentries);
941 			if (!src_ring) {
942 				/* cannot allocate src ring. If the
943 				 * CE_state is allocated locally free
944 				 * CE_State and return error.
945 				 */
946 				HIF_ERROR("%s: src ring has no mem", __func__);
947 				if (malloc_CE_state) {
948 					/* allocated CE_state locally */
949 					qdf_mem_free(CE_state);
950 					malloc_CE_state = false;
951 				}
952 				return NULL;
953 			}
954 			/* we can allocate src ring. Mark that the src ring is
955 			 * allocated locally
956 			 */
957 			malloc_src_ring = true;
958 
959 			/*
960 			 * Also allocate a shadow src ring in
961 			 * regular mem to use for faster access.
962 			 */
963 			src_ring->shadow_base_unaligned =
964 				qdf_mem_malloc(nentries *
965 					       sizeof(struct CE_src_desc) +
966 					       CE_DESC_RING_ALIGN);
967 			if (src_ring->shadow_base_unaligned == NULL) {
968 				HIF_ERROR("%s: src ring no shadow_base mem",
969 					  __func__);
970 				goto error_no_dma_mem;
971 			}
972 			src_ring->shadow_base = (struct CE_src_desc *)
973 				(((size_t) src_ring->shadow_base_unaligned +
974 				CE_DESC_RING_ALIGN - 1) &
975 				 ~(CE_DESC_RING_ALIGN - 1));
976 
977 			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
978 				goto error_target_access;
979 
980 			ce_ring_setup(scn, CE_RING_SRC, CE_id, src_ring, attr);
981 
982 			if (Q_TARGET_ACCESS_END(scn) < 0)
983 				goto error_target_access;
984 			ce_ring_test_initial_indexes(CE_id, src_ring,
985 						     "src_ring");
986 		}
987 	}
988 
989 	/* destination ring setup */
990 	nentries = attr->dest_nentries;
991 	if (nentries) {
992 		struct CE_ring_state *dest_ring;
993 
994 		nentries = roundup_pwr2(nentries);
995 		if (CE_state->dest_ring) {
996 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
997 		} else {
998 			dest_ring = CE_state->dest_ring =
999 				ce_alloc_ring_state(CE_state,
1000 						CE_RING_DEST,
1001 						nentries);
1002 			if (!dest_ring) {
1003 				/* cannot allocate dst ring. If the CE_state
1004 				 * or src ring is allocated locally free
1005 				 * CE_State and src ring and return error.
1006 				 */
1007 				HIF_ERROR("%s: dest ring has no mem",
1008 					  __func__);
1009 				goto error_no_dma_mem;
1010 			}
1011 
1012 			if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1013 				goto error_target_access;
1014 
1015 			ce_ring_setup(scn, CE_RING_DEST, CE_id,
1016 				      dest_ring, attr);
1017 
1018 			if (Q_TARGET_ACCESS_END(scn) < 0)
1019 				goto error_target_access;
1020 
1021 			ce_ring_test_initial_indexes(CE_id, dest_ring,
1022 						     "dest_ring");
1023 
1024 			/* For srng based target, init status ring here */
1025 			if (ce_srng_based(CE_state->scn)) {
1026 				CE_state->status_ring =
1027 					ce_alloc_ring_state(CE_state,
1028 							CE_RING_STATUS,
1029 							nentries);
1030 				if (CE_state->status_ring == NULL) {
1031 					/*Allocation failed. Cleanup*/
1032 					/* Allocation failed. Cleanup */
1033 					if (malloc_src_ring) {
1034 						qdf_mem_free
1035 							(CE_state->src_ring);
1036 						CE_state->src_ring = NULL;
1037 						malloc_src_ring = false;
1038 					}
1039 					if (malloc_CE_state) {
1040 						/* allocated CE_state locally */
1041 						scn->ce_id_to_state[CE_id] =
1042 							NULL;
1043 						qdf_mem_free(CE_state);
1044 						malloc_CE_state = false;
1045 					}
1046 
1047 					return NULL;
1048 				}
1049 				if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1050 					goto error_target_access;
1051 
1052 				ce_ring_setup(scn, CE_RING_STATUS, CE_id,
1053 						CE_state->status_ring, attr);
1054 
1055 				if (Q_TARGET_ACCESS_END(scn) < 0)
1056 					goto error_target_access;
1057 
1058 			}
1059 
1060 			/* epping */
1061 			/* poll timer */
1062 			if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
1063 				qdf_timer_init(scn->qdf_dev,
1064 						       &CE_state->poll_timer,
1065 						       ce_poll_timeout,
1066 						       CE_state,
1067 						       QDF_TIMER_TYPE_SW);
1068 				CE_state->timer_inited = true;
1069 				qdf_timer_mod(&CE_state->poll_timer,
1070 						      CE_POLL_TIMEOUT);
1071 			}
1072 		}
1073 	}
1074 
1075 	if (!ce_srng_based(scn)) {
1076 		/* Enable CE error interrupts */
1077 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1078 			goto error_target_access;
1079 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1080 		if (Q_TARGET_ACCESS_END(scn) < 0)
1081 			goto error_target_access;
1082 	}
1083 
1084 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1085 			ce_oom_recovery, CE_state);
1086 
1087 	/* update the htt_data attribute */
1088 	ce_mark_datapath(CE_state);
1089 	scn->ce_id_to_state[CE_id] = CE_state;
1090 
1091 	return (struct CE_handle *)CE_state;
1092 
1093 error_target_access:
1094 error_no_dma_mem:
1095 	ce_fini((struct CE_handle *)CE_state);
1096 	return NULL;
1097 }
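/*
 * Illustrative caller sketch (not from this file; attribute values are
 * hypothetical): a host-to-target pipe with a 16-entry source ring and no
 * destination ring could be brought up as
 *
 *	struct CE_attr attr = {
 *		.flags = 0,
 *		.src_nentries = 16,
 *		.src_sz_max = 2048,
 *		.dest_nentries = 0,
 *	};
 *	struct CE_handle *ce_hdl = ce_init(scn, CE_id, &attr);
 *
 *	if (!ce_hdl)
 *		... handle allocation/target-access failure ...
 */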
1098 
1099 #ifdef WLAN_FEATURE_FASTPATH
1100 /**
1101  * hif_enable_fastpath() - Update that we have enabled fastpath mode
1102  * @hif_ctx: HIF context
1103  *
1104  * For use in data path
1105  *
1106  * Return: void
1107  */
1108 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1109 {
1110 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1111 
1112 	if (ce_srng_based(scn)) {
1113 		HIF_INFO("%s, srng rings do not support fastpath", __func__);
1114 		return;
1115 	}
1116 	HIF_DBG("%s, Enabling fastpath mode", __func__);
1117 	scn->fastpath_mode_on = true;
1118 }
1119 
1120 /**
1121  * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
1122  * @hif_ctx: HIF Context
1123  *
1124  * For use in data path to skip HTC
1125  *
1126  * Return: bool
1127  */
1128 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1129 {
1130 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1131 
1132 	return scn->fastpath_mode_on;
1133 }
1134 
1135 /**
1136  * hif_get_ce_handle - API to get CE handle for FastPath mode
1137  * @hif_ctx: HIF Context
1138  * @id: CopyEngine Id
1139  *
1140  * API to return CE handle for fastpath mode
1141  *
1142  * Return: CE handle
1143  */
1144 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1145 {
1146 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1147 
1148 	return scn->ce_id_to_state[id];
1149 }
1150 
1151 /**
1152  * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
1153  * No processing is required inside this function.
1154  * @ce_hdl: Copy engine handle
1155  * Using an assert, this function makes sure that
1156  * the TX CE has been processed completely.
1157  *
1158  * This is called while dismantling CE structures. No other thread
1159  * should be using these structures while dismantling is occurring,
1160  * therefore no locking is needed.
1161  *
1162  * Return: none
1163  */
1164 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1165 {
1166 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1167 	struct CE_ring_state *src_ring = ce_state->src_ring;
1168 	struct hif_softc *sc = ce_state->scn;
1169 	uint32_t sw_index, write_index;
1170 
1171 	if (hif_is_nss_wifi_enabled(sc))
1172 		return;
1173 
1174 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
1175 		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
1176 			 __func__, __LINE__);
1177 		sw_index = src_ring->sw_index;
1178 		write_index = src_ring->write_index;
1179 
1180 		/* At this point Tx CE should be clean */
1181 		qdf_assert_always(sw_index == write_index);
1182 	}
1183 }
1184 
1185 /**
1186  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1187  * @ce_hdl: Handle to CE
1188  *
1189  * These buffers are never allocated on the fly, but
1190  * are allocated only once during HIF start and freed
1191  * only once during HIF stop.
1192  * NOTE:
1193  * The assumption here is there is no in-flight DMA in progress
1194  * currently, so that buffers can be freed up safely.
1195  *
1196  * Return: NONE
1197  */
1198 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1199 {
1200 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1201 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
1202 	qdf_nbuf_t nbuf;
1203 	int i;
1204 
1205 	if (ce_state->scn->fastpath_mode_on == false)
1206 		return;
1207 
1208 	if (!ce_state->htt_rx_data)
1209 		return;
1210 
1211 	/*
1212 	 * When fastpath_mode is on, for datapath CEs: unlike other CEs,
1213 	 * this CE is kept completely full and does not leave one blank space
1214 	 * to distinguish between an empty queue and a full queue, so free all
1215 	 * the entries.
1216 	 */
1217 	for (i = 0; i < dst_ring->nentries; i++) {
1218 		nbuf = dst_ring->per_transfer_context[i];
1219 
1220 		/*
1221 		 * The reasons for doing this check are:
1222 		 * 1) Protect against calling cleanup before allocating buffers
1223 		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1224 		 *    could have a partially filled ring, because of a memory
1225 		 *    allocation failure in the middle of allocating ring.
1226 		 *    This check accounts for that case, checking
1227 		 *    fastpath_mode_on flag or started flag would not have
1228 		 *    covered that case. This is not in performance path,
1229 		 *    so OK to do this.
1230 		 */
1231 		if (nbuf) {
1232 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1233 					      QDF_DMA_FROM_DEVICE);
1234 			qdf_nbuf_free(nbuf);
1235 		}
1236 	}
1237 }
1238 
1239 /**
1240  * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1241  * @scn: HIF handle
1242  *
1243  * Datapath Rx CEs are a special case, where we reuse all the message buffers.
1244  * Hence we have to post all the entries in the pipe, even at the beginning,
1245  * unlike other CE pipes where one less than dest_nentries is filled at
1246  * the start.
1247  *
1248  * Return: None
1249  */
1250 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1251 {
1252 	int pipe_num;
1253 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1254 
1255 	if (scn->fastpath_mode_on == false)
1256 		return;
1257 
1258 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1259 		struct HIF_CE_pipe_info *pipe_info =
1260 			&hif_state->pipe_info[pipe_num];
1261 		struct CE_state *ce_state =
1262 			scn->ce_id_to_state[pipe_info->pipe_num];
1263 
1264 		if (ce_state->htt_rx_data)
1265 			atomic_inc(&pipe_info->recv_bufs_needed);
1266 	}
1267 }
1268 #else
1269 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1270 {
1271 }
1272 
1273 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
1274 {
1275 	return false;
1276 }
1277 
1278 static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
1279 {
1280 	return false;
1281 }
1282 #endif /* WLAN_FEATURE_FASTPATH */
1283 
1284 void ce_fini(struct CE_handle *copyeng)
1285 {
1286 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1287 	unsigned int CE_id = CE_state->id;
1288 	struct hif_softc *scn = CE_state->scn;
1289 
1290 	CE_state->state = CE_UNUSED;
1291 	scn->ce_id_to_state[CE_id] = NULL;
1292 
1293 	qdf_spinlock_destroy(&CE_state->lro_unloading_lock);
1294 
1295 	if (CE_state->src_ring) {
1296 		/* Cleanup the datapath Tx ring */
1297 		ce_h2t_tx_ce_cleanup(copyeng);
1298 
1299 		if (CE_state->src_ring->shadow_base_unaligned)
1300 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
1301 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
1302 			qdf_mem_free_consistent(scn->qdf_dev,
1303 						scn->qdf_dev->dev,
1304 					    (CE_state->src_ring->nentries *
1305 					     sizeof(struct CE_src_desc) +
1306 					     CE_DESC_RING_ALIGN),
1307 					    CE_state->src_ring->
1308 					    base_addr_owner_space_unaligned,
1309 					    CE_state->src_ring->
1310 					    base_addr_CE_space, 0);
1311 		qdf_mem_free(CE_state->src_ring);
1312 	}
1313 	if (CE_state->dest_ring) {
1314 		/* Cleanup the datapath Rx ring */
1315 		ce_t2h_msg_ce_cleanup(copyeng);
1316 
1317 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
1318 			qdf_mem_free_consistent(scn->qdf_dev,
1319 						scn->qdf_dev->dev,
1320 					    (CE_state->dest_ring->nentries *
1321 					     sizeof(struct CE_dest_desc) +
1322 					     CE_DESC_RING_ALIGN),
1323 					    CE_state->dest_ring->
1324 					    base_addr_owner_space_unaligned,
1325 					    CE_state->dest_ring->
1326 					    base_addr_CE_space, 0);
1327 		qdf_mem_free(CE_state->dest_ring);
1328 
1329 		/* epping */
1330 		if (CE_state->timer_inited) {
1331 			CE_state->timer_inited = false;
1332 			qdf_timer_free(&CE_state->poll_timer);
1333 		}
1334 	}
1335 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
1336 		/* Cleanup the datapath Tx ring */
1337 		ce_h2t_tx_ce_cleanup(copyeng);
1338 
1339 		if (CE_state->status_ring->shadow_base_unaligned)
1340 			qdf_mem_free(
1341 				CE_state->status_ring->shadow_base_unaligned);
1342 
1343 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
1344 			qdf_mem_free_consistent(scn->qdf_dev,
1345 						scn->qdf_dev->dev,
1346 					    (CE_state->status_ring->nentries *
1347 					     sizeof(struct CE_src_desc) +
1348 					     CE_DESC_RING_ALIGN),
1349 					    CE_state->status_ring->
1350 					    base_addr_owner_space_unaligned,
1351 					    CE_state->status_ring->
1352 					    base_addr_CE_space, 0);
1353 		qdf_mem_free(CE_state->status_ring);
1354 	}
1355 
1356 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
1357 	qdf_mem_free(CE_state);
1358 }
1359 
1360 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
1361 {
1362 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1363 
1364 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
1365 		  sizeof(hif_state->msg_callbacks_pending));
1366 	qdf_mem_zero(&hif_state->msg_callbacks_current,
1367 		  sizeof(hif_state->msg_callbacks_current));
1368 }
1369 
1370 /* Send the first nbytes bytes of the buffer */
1371 QDF_STATUS
1372 hif_send_head(struct hif_opaque_softc *hif_ctx,
1373 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
1374 	      qdf_nbuf_t nbuf, unsigned int data_attr)
1375 {
1376 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1377 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1378 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1379 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
1380 	int bytes = nbytes, nfrags = 0;
1381 	struct ce_sendlist sendlist;
1382 	int status, i = 0;
1383 	unsigned int mux_id = 0;
1384 
1385 	QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));
1386 
1387 	transfer_id =
1388 		(mux_id & MUX_ID_MASK) |
1389 		(transfer_id & TRANSACTION_ID_MASK);
1390 	data_attr &= DESC_DATA_FLAG_MASK;
1391 	/*
1392 	 * The common case involves sending multiple fragments within a
1393 	 * single download (the tx descriptor and the tx frame header).
1394 	 * So, optimize for the case of multiple fragments by not even
1395 	 * checking whether it's necessary to use a sendlist.
1396 	 * The overhead of using a sendlist for a single buffer download
1397 	 * is not a big deal, since it happens rarely (for WMI messages).
1398 	 */
1399 	ce_sendlist_init(&sendlist);
1400 	do {
1401 		qdf_dma_addr_t frag_paddr;
1402 		int frag_bytes;
1403 
1404 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
1405 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
1406 		/*
1407 		 * Clear the packet offset for all but the first CE desc.
1408 		 */
1409 		if (i++ > 0)
1410 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
1411 
1412 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
1413 				    frag_bytes >
1414 				    bytes ? bytes : frag_bytes,
1415 				    qdf_nbuf_get_frag_is_wordstream
1416 				    (nbuf,
1417 				    nfrags) ? 0 :
1418 				    CE_SEND_FLAG_SWAP_DISABLE,
1419 				    data_attr);
1420 		if (status != QDF_STATUS_SUCCESS) {
1421 			HIF_ERROR("%s: error, frag_num %d larger than limit",
1422 				__func__, nfrags);
1423 			return status;
1424 		}
1425 		bytes -= frag_bytes;
1426 		nfrags++;
1427 	} while (bytes > 0);
1428 
1429 	/* Make sure we have resources to handle this request */
1430 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
1431 	if (pipe_info->num_sends_allowed < nfrags) {
1432 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1433 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
1434 		return QDF_STATUS_E_RESOURCES;
1435 	}
1436 	pipe_info->num_sends_allowed -= nfrags;
1437 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1438 
1439 	if (qdf_unlikely(ce_hdl == NULL)) {
1440 		HIF_ERROR("%s: error CE handle is null", __func__);
1441 		return A_ERROR;
1442 	}
1443 
1444 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
1445 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
1446 				qdf_nbuf_data_addr(nbuf),
1447 				sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
1448 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
1449 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
1450 
1451 	return status;
1452 }
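/*
 * Usage sketch (illustrative): for the common two-fragment tx download
 * described above, the nbuf carries the tx descriptor as fragment 0 and the
 * frame header as fragment 1, so the loop adds two sendlist entries and
 * ce_sendlist_send() consumes two source-ring descriptors, which is why
 * num_sends_allowed is decremented by nfrags rather than by 1.
 */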
1453 
1454 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1455 								int force)
1456 {
1457 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1458 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1459 
1460 	if (!force) {
1461 		int resources;
1462 		/*
1463 		 * Decide whether to actually poll for completions, or just
1464 		 * wait for a later chance. If there seem to be plenty of
1465 		 * resources left, then just wait, since checking involves
1466 		 * reading a CE register, which is a relatively expensive
1467 		 * operation.
1468 		 */
1469 		resources = hif_get_free_queue_number(hif_ctx, pipe);
1470 		/*
1471 		 * If at least 50% of the total resources are still available,
1472 		 * don't bother checking again yet.
1473 		 */
1474 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
1475 									 1))
1476 			return;
1477 	}
1478 #if ATH_11AC_TXCOMPACT
1479 	ce_per_engine_servicereap(scn, pipe);
1480 #else
1481 	ce_per_engine_service(scn, pipe);
1482 #endif
1483 }
1484 
1485 uint16_t
1486 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
1487 {
1488 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1489 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1490 	uint16_t rv;
1491 
1492 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
1493 	rv = pipe_info->num_sends_allowed;
1494 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1495 	return rv;
1496 }
1497 
1498 /* Called by lower (CE) layer when a send to Target completes. */
1499 static void
1500 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
1501 		     void *transfer_context, qdf_dma_addr_t CE_data,
1502 		     unsigned int nbytes, unsigned int transfer_id,
1503 		     unsigned int sw_index, unsigned int hw_index,
1504 		     unsigned int toeplitz_hash_result)
1505 {
1506 	struct HIF_CE_pipe_info *pipe_info =
1507 		(struct HIF_CE_pipe_info *)ce_context;
1508 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
1509 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1510 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
1511 	struct hif_msg_callbacks *msg_callbacks =
1512 		&pipe_info->pipe_callbacks;
1513 
1514 	do {
1515 		/*
1516 		 * The upper layer callback will be triggered
1517 		 * when the last fragment is completed.
1518 		 */
1519 		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
1520 			if (scn->target_status == TARGET_STATUS_RESET) {
1521 
1522 				qdf_nbuf_unmap_single(scn->qdf_dev,
1523 						      transfer_context,
1524 						      QDF_DMA_TO_DEVICE);
1525 				qdf_nbuf_free(transfer_context);
1526 			} else
1527 				msg_callbacks->txCompletionHandler(
1528 					msg_callbacks->Context,
1529 					transfer_context, transfer_id,
1530 					toeplitz_hash_result);
1531 		}
1532 
1533 		qdf_spin_lock(&pipe_info->completion_freeq_lock);
1534 		pipe_info->num_sends_allowed++;
1535 		qdf_spin_unlock(&pipe_info->completion_freeq_lock);
1536 	} while (ce_completed_send_next(copyeng,
1537 			&ce_context, &transfer_context,
1538 			&CE_data, &nbytes, &transfer_id,
1539 			&sw_idx, &hw_idx,
1540 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
1541 }
1542 
1543 /**
1544  * hif_ce_do_recv(): send message from copy engine to upper layers
1545  * @msg_callbacks: structure containing callback and callback context
1546  * @netbuf: skb containing message
1547  * @nbytes: number of bytes in the message
1548  * @pipe_info: used for the pipe_number info
1549  *
1550  * Checks the packet length, sets the length in the netbuf,
1551  * and calls the upper layer callback.
1552  *
1553  * Return: None
1554  */
1555 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
1556 		qdf_nbuf_t netbuf, int nbytes,
1557 		struct HIF_CE_pipe_info *pipe_info) {
1558 	if (nbytes <= pipe_info->buf_sz) {
1559 		qdf_nbuf_set_pktlen(netbuf, nbytes);
1560 		msg_callbacks->
1561 			rxCompletionHandler(msg_callbacks->Context,
1562 					netbuf, pipe_info->pipe_num);
1563 	} else {
1564 		HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
1565 				__func__, netbuf, nbytes);
1566 
1567 		qdf_nbuf_free(netbuf);
1568 	}
1569 }
1570 
1571 /* Called by lower (CE) layer when data is received from the Target. */
1572 static void
1573 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
1574 		     void *transfer_context, qdf_dma_addr_t CE_data,
1575 		     unsigned int nbytes, unsigned int transfer_id,
1576 		     unsigned int flags)
1577 {
1578 	struct HIF_CE_pipe_info *pipe_info =
1579 		(struct HIF_CE_pipe_info *)ce_context;
1580 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
1581 	struct CE_state *ce_state = (struct CE_state *) copyeng;
1582 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1583 #ifdef HIF_PCI
1584 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
1585 #endif
1586 	struct hif_msg_callbacks *msg_callbacks =
1587 		 &pipe_info->pipe_callbacks;
1588 
1589 	do {
1590 #ifdef HIF_PCI
1591 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
1592 #endif
1593 		qdf_nbuf_unmap_single(scn->qdf_dev,
1594 				      (qdf_nbuf_t) transfer_context,
1595 				      QDF_DMA_FROM_DEVICE);
1596 
1597 		atomic_inc(&pipe_info->recv_bufs_needed);
1598 		hif_post_recv_buffers_for_pipe(pipe_info);
1599 		if (scn->target_status == TARGET_STATUS_RESET)
1600 			qdf_nbuf_free(transfer_context);
1601 		else
1602 			hif_ce_do_recv(msg_callbacks, transfer_context,
1603 				nbytes, pipe_info);
1604 
1605 		/* Set up force_break flag if num of receives reaches
1606 		 * MAX_NUM_OF_RECEIVES
1607 		 */
1608 		ce_state->receive_count++;
1609 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
1610 			ce_state->force_break = 1;
1611 			break;
1612 		}
1613 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
1614 					&CE_data, &nbytes, &transfer_id,
1615 					&flags) == QDF_STATUS_SUCCESS);
1616 
1617 }
1618 
1619 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
1620 
1621 void
1622 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
1623 	      struct hif_msg_callbacks *callbacks)
1624 {
1625 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1626 
1627 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
1628 	spin_lock_init(&pcie_access_log_lock);
1629 #endif
1630 	/* Save callbacks for later installation */
1631 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
1632 		 sizeof(hif_state->msg_callbacks_pending));
1633 
1634 }
1635 
1636 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
1637 {
1638 	struct CE_handle *ce_diag = hif_state->ce_diag;
1639 	int pipe_num;
1640 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1641 	struct hif_msg_callbacks *hif_msg_callbacks =
1642 		&hif_state->msg_callbacks_current;
1643 
1644 	/* daemonize("hif_compl_thread"); */
1645 
1646 	if (scn->ce_count == 0) {
1647 		HIF_ERROR("%s: Invalid ce_count", __func__);
1648 		return -EINVAL;
1649 	}
1650 
1651 	if (!hif_msg_callbacks ||
1652 			!hif_msg_callbacks->rxCompletionHandler ||
1653 			!hif_msg_callbacks->txCompletionHandler) {
1654 		HIF_ERROR("%s: no completion handler registered", __func__);
1655 		return -EFAULT;
1656 	}
1657 
1658 	A_TARGET_ACCESS_LIKELY(scn);
1659 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1660 		struct CE_attr attr;
1661 		struct HIF_CE_pipe_info *pipe_info;
1662 
1663 		pipe_info = &hif_state->pipe_info[pipe_num];
1664 		if (pipe_info->ce_hdl == ce_diag)
1665 			continue;       /* Handle Diagnostic CE specially */
1666 		attr = hif_state->host_ce_config[pipe_num];
1667 		if (attr.src_nentries) {
1668 			/* pipe used to send to target */
1669 			HIF_DBG("%s: pipe_num:%d pipe_info:0x%p",
1670 					 __func__, pipe_num, pipe_info);
1671 			ce_send_cb_register(pipe_info->ce_hdl,
1672 					    hif_pci_ce_send_done, pipe_info,
1673 					    attr.flags & CE_ATTR_DISABLE_INTR);
1674 			pipe_info->num_sends_allowed = attr.src_nentries - 1;
1675 		}
1676 		if (attr.dest_nentries) {
1677 			/* pipe used to receive from target */
1678 			ce_recv_cb_register(pipe_info->ce_hdl,
1679 					    hif_pci_ce_recv_data, pipe_info,
1680 					    attr.flags & CE_ATTR_DISABLE_INTR);
1681 		}
1682 
1683 		if (attr.src_nentries)
1684 			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
1685 
1686 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
1687 					sizeof(pipe_info->pipe_callbacks));
1688 	}
1689 
1690 	A_TARGET_ACCESS_UNLIKELY(scn);
1691 	return 0;
1692 }
1693 
1694 /*
1695  * Install pending msg callbacks.
1696  *
1697  * TBDXXX: This hack is needed because upper layers install msg callbacks
1698  * for use with HTC before BMI is done; yet this HIF implementation
1699  * needs to continue to use BMI msg callbacks. Really, upper layers
1700  * should not register HTC callbacks until AFTER BMI phase.
1701  */
1702 static void hif_msg_callbacks_install(struct hif_softc *scn)
1703 {
1704 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1705 
1706 	qdf_mem_copy(&hif_state->msg_callbacks_current,
1707 		 &hif_state->msg_callbacks_pending,
1708 		 sizeof(hif_state->msg_callbacks_pending));
1709 }
1710 
1711 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
1712 							uint8_t *DLPipe)
1713 {
1714 	int ul_is_polled, dl_is_polled;
1715 
1716 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
1717 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
1718 }
1719 
1720 /**
1721  * hif_dump_pipe_debug_count() - Log error count
1722  * @scn: hif_softc pointer.
1723  *
1724  * Output the pipe error counts of each pipe to log file
1725  *
1726  * Return: N/A
1727  */
1728 void hif_dump_pipe_debug_count(struct hif_softc *scn)
1729 {
1730 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1731 	int pipe_num;
1732 
1733 	if (hif_state == NULL) {
1734 		HIF_ERROR("%s hif_state is NULL", __func__);
1735 		return;
1736 	}
1737 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1738 		struct HIF_CE_pipe_info *pipe_info;
1739 
1740 		pipe_info = &hif_state->pipe_info[pipe_num];
1741 
1742 		if (pipe_info->nbuf_alloc_err_count > 0 ||
1743 				pipe_info->nbuf_dma_err_count > 0 ||
1744 				pipe_info->nbuf_ce_enqueue_err_count)
1745 			HIF_ERROR(
1746 				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
1747 				__func__, pipe_info->pipe_num,
1748 				atomic_read(&pipe_info->recv_bufs_needed),
1749 				pipe_info->nbuf_alloc_err_count,
1750 				pipe_info->nbuf_dma_err_count,
1751 				pipe_info->nbuf_ce_enqueue_err_count);
1752 	}
1753 }
1754 
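/**
 * hif_post_recv_buffers_failure() - record a failed rx buffer post
 * @pipe_info: rx pipe that could not be replenished
 * @nbuf: network buffer involved in the failure
 * @error_cnt: per-pipe error counter to increment
 * @failure_type: CE event type recorded in the descriptor history
 * @failure_type_string: human readable name of the failure
 *
 * Bumps the "buffers needed" count, logs the failure and, if the ring is
 * about to run completely dry, schedules the OOM allocation work so the
 * ring is eventually refilled.
 */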
1755 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
1756 					  void *nbuf, uint32_t *error_cnt,
1757 					  enum hif_ce_event_type failure_type,
1758 					  const char *failure_type_string)
1759 {
1760 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
1761 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
1762 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
1763 	int ce_id = CE_state->id;
1764 	uint32_t error_cnt_tmp;
1765 
1766 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
1767 	error_cnt_tmp = ++(*error_cnt);
1768 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
1769 	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
1770 		  __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
1771 		  failure_type_string);
1772 	hif_record_ce_desc_event(scn, ce_id, failure_type,
1773 				 NULL, nbuf, bufs_needed_tmp);
1774 	/* if we fail to allocate the last buffer for an rx pipe,
1775 	 *	there is no trigger to refill the ce and we will
1776 	 *	eventually crash
1777 	 */
1778 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
1779 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
1780 
1781 }
1782 
1783 
1784 
1785 
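/**
 * hif_post_recv_buffers_for_pipe() - post rx buffers to one copy engine
 * @pipe_info: rx pipe to replenish
 *
 * Allocates, DMA maps and enqueues nbufs until the pipe's
 * recv_bufs_needed count has been drained.
 *
 * Return: 0 on success, 1 if a buffer could not be posted
 */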
1786 static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
1787 {
1788 	struct CE_handle *ce_hdl;
1789 	qdf_size_t buf_sz;
1790 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
1791 	QDF_STATUS ret;
1792 	uint32_t bufs_posted = 0;
1793 
1794 	buf_sz = pipe_info->buf_sz;
1795 	if (buf_sz == 0) {
1796 		/* Unused Copy Engine */
1797 		return 0;
1798 	}
1799 
1800 	ce_hdl = pipe_info->ce_hdl;
1801 
1802 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
1803 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
1804 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
1805 		qdf_nbuf_t nbuf;
1806 		int status;
1807 
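		/*
		 * Claim one buffer slot, then drop the lock across the
		 * allocation and DMA mapping; it is re-taken once the
		 * buffer has been enqueued.
		 */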
1808 		atomic_dec(&pipe_info->recv_bufs_needed);
1809 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
1810 
1811 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
1812 		if (!nbuf) {
1813 			hif_post_recv_buffers_failure(pipe_info, nbuf,
1814 					&pipe_info->nbuf_alloc_err_count,
1815 					 HIF_RX_NBUF_ALLOC_FAILURE,
1816 					"HIF_RX_NBUF_ALLOC_FAILURE");
1817 			return 1;
1818 		}
1819 
1820 		/*
1821 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
1822 		 * CE_data = dma_map_single(dev, data, buf_sz, );
1823 		 * DMA_FROM_DEVICE);
1824 		 */
1825 		ret = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
1826 					    QDF_DMA_FROM_DEVICE);
1827 
1828 		if (unlikely(ret != QDF_STATUS_SUCCESS)) {
1829 			hif_post_recv_buffers_failure(pipe_info, nbuf,
1830 					&pipe_info->nbuf_dma_err_count,
1831 					 HIF_RX_NBUF_MAP_FAILURE,
1832 					"HIF_RX_NBUF_MAP_FAILURE");
1833 			qdf_nbuf_free(nbuf);
1834 			return 1;
1835 		}
1836 
1837 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
1838 
1839 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
1840 					       buf_sz, DMA_FROM_DEVICE);
1841 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
1842 		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
1843 		if (unlikely(status != EOK)) {
1844 			hif_post_recv_buffers_failure(pipe_info, nbuf,
1845 					&pipe_info->nbuf_ce_enqueue_err_count,
1846 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
1847 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
1848 
1849 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
1850 						QDF_DMA_FROM_DEVICE);
1851 			qdf_nbuf_free(nbuf);
1852 			return 1;
1853 		}
1854 
1855 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
1856 		bufs_posted++;
1857 	}
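	/*
	 * Replenishment succeeded, so scale each error counter down by the
	 * number of buffers just posted (never below zero).
	 */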
1858 	pipe_info->nbuf_alloc_err_count =
1859 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
1860 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
1861 	pipe_info->nbuf_dma_err_count =
1862 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
1863 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
1864 	pipe_info->nbuf_ce_enqueue_err_count =
1865 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
1866 	pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
1867 
1868 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
1869 
1870 	return 0;
1871 }
1872 
1873 /*
1874  * Try to post all desired receive buffers for all pipes.
1875  * Returns 0 if all desired buffers are posted,
1876  * non-zero if we were unable to completely
1877  * replenish receive buffers.
1878  */
1879 static int hif_post_recv_buffers(struct hif_softc *scn)
1880 {
1881 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1882 	int pipe_num, rv = 0;
1883 	struct CE_state *ce_state;
1884 
1885 	A_TARGET_ACCESS_LIKELY(scn);
1886 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1887 		struct HIF_CE_pipe_info *pipe_info;
1888 
1889 		ce_state = scn->ce_id_to_state[pipe_num];
1890 		pipe_info = &hif_state->pipe_info[pipe_num];
1891 
1892 		if (hif_is_nss_wifi_enabled(scn) &&
1893 		    ce_state && (ce_state->htt_rx_data))
1894 			continue;
1895 
1896 		if (hif_post_recv_buffers_for_pipe(pipe_info)) {
1897 			rv = 1;
1898 			goto done;
1899 		}
1900 	}
1901 
1902 done:
1903 	A_TARGET_ACCESS_UNLIKELY(scn);
1904 
1905 	return rv;
1906 }
1907 
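/**
 * hif_start() - start the HIF layer
 * @hif_ctx: HIF opaque context
 *
 * Refreshes the fastpath receive buffer count, installs the pending
 * message callbacks, registers the per-pipe completion handlers, marks
 * HIF as started and posts the initial set of receive buffers.
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_FAILURE
 */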
1908 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
1909 {
1910 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1911 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1912 
1913 	hif_update_fastpath_recv_bufs_cnt(scn);
1914 
1915 	hif_msg_callbacks_install(scn);
1916 
1917 	if (hif_completion_thread_startup(hif_state))
1918 		return QDF_STATUS_E_FAILURE;
1919 
1920 	/* enable buffer cleanup */
1921 	hif_state->started = true;
1922 
1923 	/* Post buffers once to start things off. */
1924 	if (hif_post_recv_buffers(scn)) {
1925 		/* cleanup is done in hif_ce_disable */
1926 		HIF_ERROR("%s:failed to post buffers", __func__);
1927 		return QDF_STATUS_E_FAILURE;
1928 	}
1929 
1930 	return QDF_STATUS_SUCCESS;
1931 }
1932 
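/**
 * hif_recv_buffer_cleanup_on_pipe() - reclaim posted rx buffers on a pipe
 * @pipe_info: pipe whose outstanding receive buffers should be freed
 *
 * Revokes every buffer still queued on the copy engine destination ring,
 * unmaps it and frees it.
 */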
1933 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
1934 {
1935 	struct hif_softc *scn;
1936 	struct CE_handle *ce_hdl;
1937 	uint32_t buf_sz;
1938 	struct HIF_CE_state *hif_state;
1939 	qdf_nbuf_t netbuf;
1940 	qdf_dma_addr_t CE_data;
1941 	void *per_CE_context;
1942 
1943 	buf_sz = pipe_info->buf_sz;
1944 	/* Unused Copy Engine */
1945 	if (buf_sz == 0)
1946 		return;
1947 
1948 
1949 	hif_state = pipe_info->HIF_CE_state;
1950 	if (!hif_state->started)
1951 		return;
1952 
1953 	scn = HIF_GET_SOFTC(hif_state);
1954 	ce_hdl = pipe_info->ce_hdl;
1955 
1956 	if (scn->qdf_dev == NULL)
1957 		return;
1958 	while (ce_revoke_recv_next
1959 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
1960 			&CE_data) == QDF_STATUS_SUCCESS) {
1961 		qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
1962 				      QDF_DMA_FROM_DEVICE);
1963 		qdf_nbuf_free(netbuf);
1964 	}
1965 }
1966 
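/**
 * hif_send_buffer_cleanup_on_pipe() - complete pending tx buffers on a pipe
 * @pipe_info: pipe whose outstanding send buffers should be completed
 *
 * Cancels every send still queued on the copy engine source ring and,
 * where a completion handler is registered, indicates the buffer back to
 * the higher layer so it can be freed.
 */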
1967 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
1968 {
1969 	struct CE_handle *ce_hdl;
1970 	struct HIF_CE_state *hif_state;
1971 	struct hif_softc *scn;
1972 	qdf_nbuf_t netbuf;
1973 	void *per_CE_context;
1974 	qdf_dma_addr_t CE_data;
1975 	unsigned int nbytes;
1976 	unsigned int id;
1977 	uint32_t buf_sz;
1978 	uint32_t toeplitz_hash_result;
1979 
1980 	buf_sz = pipe_info->buf_sz;
1981 	if (buf_sz == 0) {
1982 		/* Unused Copy Engine */
1983 		return;
1984 	}
1985 
1986 	hif_state = pipe_info->HIF_CE_state;
1987 	if (!hif_state->started) {
1988 		return;
1989 	}
1990 
1991 	scn = HIF_GET_SOFTC(hif_state);
1992 
1993 	ce_hdl = pipe_info->ce_hdl;
1994 
1995 	while (ce_cancel_send_next
1996 		       (ce_hdl, &per_CE_context,
1997 		       (void **)&netbuf, &CE_data, &nbytes,
1998 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1999 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2000 			/*
2001 			 * Packets enqueued by htt_h2t_ver_req_msg() and
2002 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2003 			 * freed in htt_htc_misc_pkt_pool_free() in
2004 			 * wlantl_close(), so do not free them here again
2005 			 * by checking whether it's the endpoint
2006 			 * which they are queued in.
2007 			 */
2008 			if (id == scn->htc_htt_tx_endpoint)
2009 				return;
2010 			/* Indicate the completion to higher
2011 			 * layer to free the buffer
2012 			 */
2013 			if (pipe_info->pipe_callbacks.txCompletionHandler)
2014 				pipe_info->pipe_callbacks.
2015 				    txCompletionHandler(pipe_info->
2016 					    pipe_callbacks.Context,
2017 					    netbuf, id, toeplitz_hash_result);
2018 		}
2019 	}
2020 }
2021 
2022 /*
2023  * Cleanup residual buffers for device shutdown:
2024  *    buffers that were enqueued for receive
2025  *    buffers that were to be sent
2026  * Note: Buffers that had completed but which were
2027  * not yet processed are on a completion queue. They
2028  * are handled when the completion thread shuts down.
2029  */
2030 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
2031 {
2032 	int pipe_num;
2033 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2034 	struct CE_state *ce_state;
2035 
2036 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2037 		struct HIF_CE_pipe_info *pipe_info;
2038 
2039 		ce_state = scn->ce_id_to_state[pipe_num];
2040 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2041 				((ce_state->htt_tx_data) ||
2042 				 (ce_state->htt_rx_data))) {
2043 			continue;
2044 		}
2045 
2046 		pipe_info = &hif_state->pipe_info[pipe_num];
2047 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
2048 		hif_send_buffer_cleanup_on_pipe(pipe_info);
2049 	}
2050 }
2051 
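/**
 * hif_flush_surprise_remove() - clean up buffers after a surprise removal
 * @hif_ctx: HIF opaque context
 *
 * Frees the residual send and receive buffers on every pipe.
 */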
2052 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
2053 {
2054 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2055 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2056 
2057 	hif_buffer_cleanup(hif_state);
2058 }
2059 
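/**
 * hif_destroy_oom_work() - destroy the per-CE OOM allocation work
 * @scn: HIF context
 *
 * Ensures the deferred rx buffer allocation work of every copy engine is
 * destroyed before the CE state is torn down.
 */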
2060 static void hif_destroy_oom_work(struct hif_softc *scn)
2061 {
2062 	struct CE_state *ce_state;
2063 	int ce_id;
2064 
2065 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2066 		ce_state = scn->ce_id_to_state[ce_id];
2067 		if (ce_state)
2068 			qdf_destroy_work(scn->qdf_dev,
2069 					 &ce_state->oom_allocation_work);
2070 	}
2071 }
2072 
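/**
 * hif_ce_stop() - stop copy engine activity and free CE resources
 * @scn: HIF context
 *
 * Disables interrupts, destroys pending work, cleans up residual
 * buffers, finalizes every copy engine and stops the sleep timer.
 */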
2073 void hif_ce_stop(struct hif_softc *scn)
2074 {
2075 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2076 	int pipe_num;
2077 
2078 	/*
2079 	 * before cleaning up any memory, ensure irq &
2080 	 * bottom half contexts will not be re-entered
2081 	 */
2082 	hif_nointrs(scn);
2083 	hif_destroy_oom_work(scn);
2084 	scn->hif_init_done = false;
2085 
2086 	/*
2087 	 * At this point, asynchronous threads are stopped,
2088 	 * The Target should not DMA nor interrupt, Host code may
2089 	 * not initiate anything more.  So we just need to clean
2090 	 * up Host-side state.
2091 	 */
2092 
2093 	if (scn->athdiag_procfs_inited) {
2094 		athdiag_procfs_remove();
2095 		scn->athdiag_procfs_inited = false;
2096 	}
2097 
2098 	hif_buffer_cleanup(hif_state);
2099 
2100 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2101 		struct HIF_CE_pipe_info *pipe_info;
2102 
2103 		pipe_info = &hif_state->pipe_info[pipe_num];
2104 		if (pipe_info->ce_hdl) {
2105 			ce_fini(pipe_info->ce_hdl);
2106 			pipe_info->ce_hdl = NULL;
2107 			pipe_info->buf_sz = 0;
2108 		}
2109 	}
2110 
2111 	if (hif_state->sleep_timer_init) {
2112 		qdf_timer_stop(&hif_state->sleep_timer);
2113 		qdf_timer_free(&hif_state->sleep_timer);
2114 		hif_state->sleep_timer_init = false;
2115 	}
2116 
2117 	hif_state->started = false;
2118 }
2119 
2120 
2121 /**
2122  * hif_get_target_ce_config() - get copy engine configuration
2123  * @target_ce_config_ret: basic copy engine configuration
2124  * @target_ce_config_sz_ret: size of the basic configuration in bytes
2125  * @target_service_to_ce_map_ret: service mapping for the copy engines
2126  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2127  * @target_shadow_reg_cfg_ret: shadow register configuration
2128  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2129  *
2130  * Provides accessors to these values outside of this file.
2131  * Currently these are stored in static pointers to const sections.
2132  * There are multiple configurations that are selected from at compile time.
2133  * Runtime selection would need to consider mode, target type and bus type.
2134  *
2135  * Return: return by parameter.
2136  */
2137 void hif_get_target_ce_config(struct hif_softc *scn,
2138 		struct CE_pipe_config **target_ce_config_ret,
2139 		uint32_t *target_ce_config_sz_ret,
2140 		struct service_to_pipe **target_service_to_ce_map_ret,
2141 		uint32_t *target_service_to_ce_map_sz_ret,
2142 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
2143 		uint32_t *shadow_cfg_sz_ret)
2144 {
2145 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2146 
2147 	*target_ce_config_ret = hif_state->target_ce_config;
2148 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
2149 
2150 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2151 				       target_service_to_ce_map_sz_ret);
2152 
2153 	if (target_shadow_reg_cfg_ret)
2154 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2155 
2156 	if (shadow_cfg_sz_ret)
2157 		*shadow_cfg_sz_ret = shadow_cfg_sz;
2158 }
2159 
2160 #ifdef CONFIG_SHADOW_V2
2161 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2162 {
2163 	int i;
2164 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2165 		  "%s: num_config %d\n", __func__, cfg->num_shadow_reg_v2_cfg);
2166 
2167 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2168 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2169 		     "%s: i %d, val %x\n", __func__, i,
2170 		     cfg->shadow_reg_v2_cfg[i].addr);
2171 	}
2172 }
2173 
2174 #else
2175 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2176 {
2177 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2178 		  "%s: CONFIG_SHADOW_V2 not defined\n", __func__);
2179 }
2180 #endif
2181 
2182 /**
2183  * hif_wlan_enable(): call the platform driver to enable wlan
2184  * @scn: HIF Context
2185  *
2186  * This function passes the con_mode and CE configuration to
2187  * platform driver to enable wlan.
2188  *
2189  * Return: linux error code
2190  */
2191 int hif_wlan_enable(struct hif_softc *scn)
2192 {
2193 	struct pld_wlan_enable_cfg cfg;
2194 	enum pld_driver_mode mode;
2195 	uint32_t con_mode = hif_get_conparam(scn);
2196 
2197 	hif_get_target_ce_config(scn,
2198 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
2199 			&cfg.num_ce_tgt_cfg,
2200 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
2201 			&cfg.num_ce_svc_pipe_cfg,
2202 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
2203 			&cfg.num_shadow_reg_cfg);
2204 
2205 	/* translate from structure size to array size */
2206 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
2207 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
2208 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
2209 
2210 	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
2211 			      &cfg.num_shadow_reg_v2_cfg);
2212 
2213 	hif_print_hal_shadow_register_cfg(&cfg);
2214 
2215 	if (QDF_GLOBAL_FTM_MODE == con_mode)
2216 		mode = PLD_FTM;
2217 	else if (QDF_IS_EPPING_ENABLED(con_mode))
2218 		mode = PLD_EPPING;
2219 	else
2220 		mode = PLD_MISSION;
2221 
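	/* when QMI is bypassed, skip the platform driver enable call */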
2222 	if (BYPASS_QMI)
2223 		return 0;
2224 	else
2225 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
2226 				       mode, QWLAN_VERSIONSTR);
2227 }
2228 
2229 #define CE_EPPING_USES_IRQ true
2230 
2231 /**
2232  * hif_ce_prepare_config() - load the correct static tables.
2233  * @scn: hif context
2234  *
2235  * Epping uses different static attribute tables than mission mode.
2236  */
2237 void hif_ce_prepare_config(struct hif_softc *scn)
2238 {
2239 	uint32_t mode = hif_get_conparam(scn);
2240 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2241 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
2242 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2243 
2244 	hif_state->ce_services = ce_services_attach(scn);
2245 
2246 	scn->ce_count = HOST_CE_COUNT;
2247 	/* if epping is enabled we need to use the epping configuration. */
2248 	if (QDF_IS_EPPING_ENABLED(mode)) {
2249 		if (CE_EPPING_USES_IRQ)
2250 			hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
2251 		else
2252 			hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
2253 		hif_state->target_ce_config = target_ce_config_wlan_epping;
2254 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
2255 		target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
2256 		shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
2257 	}
2258 
2259 	switch (tgt_info->target_type) {
2260 	default:
2261 		hif_state->host_ce_config = host_ce_config_wlan;
2262 		hif_state->target_ce_config = target_ce_config_wlan;
2263 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
2264 		break;
2265 	case TARGET_TYPE_AR900B:
2266 	case TARGET_TYPE_QCA9984:
2267 	case TARGET_TYPE_IPQ4019:
2268 	case TARGET_TYPE_QCA9888:
2269 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
2270 			hif_state->host_ce_config =
2271 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
2272 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2273 			hif_state->host_ce_config =
2274 				host_lowdesc_ce_cfg_wlan_ar900b;
2275 		} else {
2276 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
2277 		}
2278 
2279 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
2280 		hif_state->target_ce_config_sz =
2281 				sizeof(target_ce_config_wlan_ar900b);
2282 
2283 		break;
2284 
2285 	case TARGET_TYPE_AR9888:
2286 	case TARGET_TYPE_AR9888V2:
2287 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2288 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
2289 		} else {
2290 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
2291 		}
2292 
2293 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
2294 		hif_state->target_ce_config_sz =
2295 					sizeof(target_ce_config_wlan_ar9888);
2296 
2297 		break;
2298 
2299 	case TARGET_TYPE_QCA8074:
2300 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
2301 			hif_state->host_ce_config =
2302 					host_ce_config_wlan_qca8074_pci;
2303 			hif_state->target_ce_config =
2304 				target_ce_config_wlan_qca8074_pci;
2305 			hif_state->target_ce_config_sz =
2306 				sizeof(target_ce_config_wlan_qca8074_pci);
2307 		} else {
2308 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
2309 			hif_state->target_ce_config =
2310 					target_ce_config_wlan_qca8074;
2311 			hif_state->target_ce_config_sz =
2312 				sizeof(target_ce_config_wlan_qca8074);
2313 		}
2314 		break;
2315 	case TARGET_TYPE_QCA6290:
2316 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
2317 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
2318 		hif_state->target_ce_config_sz =
2319 					sizeof(target_ce_config_wlan_qca6290);
2320 
2321 		scn->ce_count = QCA_6290_CE_COUNT;
2322 		break;
2323 	}
2324 }
2325 
2326 /**
2327  * hif_ce_open() - do ce specific allocations
2328  * @hif_sc: pointer to hif context
2329  *
2330  * return: 0 for success or QDF_STATUS_E_NOMEM
2331  */
2332 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
2333 {
2334 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2335 
2336 	qdf_spinlock_create(&hif_state->irq_reg_lock);
2337 	qdf_spinlock_create(&hif_state->keep_awake_lock);
2338 	return QDF_STATUS_SUCCESS;
2339 }
2340 
2341 /**
2342  * hif_ce_close() - do ce specific free
2343  * @hif_sc: pointer to hif context
2344  */
2345 void hif_ce_close(struct hif_softc *hif_sc)
2346 {
2347 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2348 
2349 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
2350 }
2351 
2352 /**
2353  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
2354  * @hif_sc: hif context
2355  *
2356  * uses state variables to support cleaning up when hif_config_ce fails.
2357  */
2358 void hif_unconfig_ce(struct hif_softc *hif_sc)
2359 {
2360 	int pipe_num;
2361 	struct HIF_CE_pipe_info *pipe_info;
2362 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2363 
2364 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
2365 		pipe_info = &hif_state->pipe_info[pipe_num];
2366 		if (pipe_info->ce_hdl) {
2367 			ce_unregister_irq(hif_state, (1 << pipe_num));
2368 			ce_fini(pipe_info->ce_hdl);
2369 			pipe_info->ce_hdl = NULL;
2370 			pipe_info->buf_sz = 0;
2371 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2372 		}
2373 	}
2374 	if (hif_sc->athdiag_procfs_inited) {
2375 		athdiag_procfs_remove();
2376 		hif_sc->athdiag_procfs_inited = false;
2377 	}
2378 }
2379 
2380 #ifdef CONFIG_BYPASS_QMI
2381 #define FW_SHARED_MEM (2 * 1024 * 1024)
2382 
2383 /**
2384  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
2385  * @scn: pointer to HIF structure
2386  *
2387  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
2388  *
2389  * Return: void
2390  */
2391 static void hif_post_static_buf_to_target(struct hif_softc *scn)
2392 {
2393 	void *target_va;
2394 	phys_addr_t target_pa;
2395 
2396 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2397 				FW_SHARED_MEM, &target_pa);
2398 	if (NULL == target_va) {
2399 		HIF_TRACE("Memory allocation failed, could not post target buf");
2400 		return;
2401 	}
2402 	hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
2403 	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
2404 }
2405 #else
2406 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
2407 {
2408 }
2409 #endif
2410 
2411 #ifdef WLAN_SUSPEND_RESUME_TEST
2412 static void hif_fake_apps_init_ctx(struct hif_softc *scn)
2413 {
2414 	INIT_WORK(&scn->fake_apps_ctx.resume_work,
2415 		  hif_fake_apps_resume_work);
2416 }
2417 #else
2418 static inline void hif_fake_apps_init_ctx(struct hif_softc *scn) {}
2419 #endif
2420 
2421 /**
2422  * hif_config_ce() - configure copy engines
2423  * @scn: hif context
2424  *
2425  * Prepares fw, copy engine hardware and host sw according
2426  * to the attributes selected by hif_ce_prepare_config.
2427  *
2428  * also calls athdiag_procfs_init
2429  *
2430  * return: 0 for success nonzero for failure.
2431  */
2432 int hif_config_ce(struct hif_softc *scn)
2433 {
2434 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2435 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2436 	struct HIF_CE_pipe_info *pipe_info;
2437 	int pipe_num;
2438 	struct CE_state *ce_state;
2439 #ifdef ADRASTEA_SHADOW_REGISTERS
2440 	int i;
2441 #endif
2442 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
2443 
2444 	scn->notice_send = true;
2445 
2446 	hif_post_static_buf_to_target(scn);
2447 
2448 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
2449 
2450 	hif_config_rri_on_ddr(scn);
2451 
2452 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2453 		struct CE_attr *attr;
2454 
2455 		pipe_info = &hif_state->pipe_info[pipe_num];
2456 		pipe_info->pipe_num = pipe_num;
2457 		pipe_info->HIF_CE_state = hif_state;
2458 		attr = &hif_state->host_ce_config[pipe_num];
2459 
2460 		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
2461 		ce_state = scn->ce_id_to_state[pipe_num];
2462 		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
2463 		QDF_ASSERT(pipe_info->ce_hdl != NULL);
2464 		if (pipe_info->ce_hdl == NULL) {
2465 			rv = QDF_STATUS_E_FAILURE;
2466 			A_TARGET_ACCESS_UNLIKELY(scn);
2467 			goto err;
2468 		}
2469 
2470 		if (attr->flags & CE_ATTR_DIAG) {
2471 			/* Reserve the ultimate CE for
2472 			 * Diagnostic Window support
2473 			 */
2474 			hif_state->ce_diag = pipe_info->ce_hdl;
2475 			continue;
2476 		}
2477 
2478 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2479 				(ce_state->htt_rx_data))
2480 			continue;
2481 
2482 		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
2483 		if (attr->dest_nentries > 0) {
2484 			atomic_set(&pipe_info->recv_bufs_needed,
2485 				   init_buffer_count(attr->dest_nentries - 1));
2486 			/*SRNG based CE has one entry less */
2487 			if (ce_srng_based(scn))
2488 				atomic_dec(&pipe_info->recv_bufs_needed);
2489 		} else {
2490 			atomic_set(&pipe_info->recv_bufs_needed, 0);
2491 		}
2492 		ce_tasklet_init(hif_state, (1 << pipe_num));
2493 		ce_register_irq(hif_state, (1 << pipe_num));
2494 	}
2495 
2496 	if (athdiag_procfs_init(scn) != 0) {
2497 		A_TARGET_ACCESS_UNLIKELY(scn);
2498 		goto err;
2499 	}
2500 	scn->athdiag_procfs_inited = true;
2501 
2502 	HIF_DBG("%s: ce_init done", __func__);
2503 
2504 	init_tasklet_workers(hif_hdl);
2505 	hif_fake_apps_init_ctx(scn);
2506 
2507 	HIF_DBG("%s: X, ret = %d", __func__, rv);
2508 
2509 #ifdef ADRASTEA_SHADOW_REGISTERS
2510 	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
2511 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
2512 		HIF_DBG("%s Shadow Register%d is mapped to address %x",
2513 			  __func__, i,
2514 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
2515 	}
2516 #endif
2517 
2518 	return rv != QDF_STATUS_SUCCESS;
2519 
2520 err:
2521 	/* Failure, so clean up */
2522 	hif_unconfig_ce(scn);
2523 	HIF_TRACE("%s: X, ret = %d", __func__, rv);
2524 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
2525 }
2526 
2527 #ifdef WLAN_FEATURE_FASTPATH
2528 /**
2529  * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
2530  * @handler: Callback function
2531  * @context: handle for callback function
2532  *
2533  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
2534  */
2535 int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
2536 				fastpath_msg_handler handler,
2537 				void *context)
2538 {
2539 	struct CE_state *ce_state;
2540 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2541 	int i;
2542 
2543 	if (!scn) {
2544 		HIF_ERROR("%s: scn is NULL", __func__);
2545 		QDF_ASSERT(0);
2546 		return QDF_STATUS_E_FAILURE;
2547 	}
2548 
2549 	if (!scn->fastpath_mode_on) {
2550 		HIF_WARN("%s: Fastpath mode disabled", __func__);
2551 		return QDF_STATUS_E_FAILURE;
2552 	}
2553 
2554 	for (i = 0; i < scn->ce_count; i++) {
2555 		ce_state = scn->ce_id_to_state[i];
2556 		if (ce_state->htt_rx_data) {
2557 			ce_state->fastpath_handler = handler;
2558 			ce_state->context = context;
2559 		}
2560 	}
2561 
2562 	return QDF_STATUS_SUCCESS;
2563 }
2564 #endif
2565 
2566 #ifdef IPA_OFFLOAD
2567 /**
2568  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
2569  * @scn: bus context
2570  * @ce_sr_base_paddr: copyengine source ring base physical address
2571  * @ce_sr_ring_size: copyengine source ring size
2572  * @ce_reg_paddr: copyengine register physical address
2573  *
2574  * IPA micro controller data path offload feature enabled,
2575  * When the IPA micro controller data path offload feature is enabled,
2576  * HIF should release copy engine related resource information to the IPA UC.
2577  * The IPA UC will then access the hardware resources with the released information.
2578  * Return: None
2579  */
2580 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
2581 			     qdf_dma_addr_t *ce_sr_base_paddr,
2582 			     uint32_t *ce_sr_ring_size,
2583 			     qdf_dma_addr_t *ce_reg_paddr)
2584 {
2585 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2586 	struct HIF_CE_pipe_info *pipe_info =
2587 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
2588 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
2589 
2590 	ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
2591 			    ce_reg_paddr);
2592 }
2593 #endif /* IPA_OFFLOAD */
2594 
2595 
2596 #ifdef ADRASTEA_SHADOW_REGISTERS
2597 
2598 /*
2599  * Current shadow register config
2600  *
2601  * -----------------------------------------------------------
2602  * Shadow Register      |     CE   |    src/dst write index
2603  * -----------------------------------------------------------
2604  *         0            |     0    |           src
2605  *         1     No Config - Doesn't point to anything
2606  *         2     No Config - Doesn't point to anything
2607  *         3            |     3    |           src
2608  *         4            |     4    |           src
2609  *         5            |     5    |           src
2610  *         6     No Config - Doesn't point to anything
2611  *         7            |     7    |           src
2612  *         8     No Config - Doesn't point to anything
2613  *         9     No Config - Doesn't point to anything
2614  *         10    No Config - Doesn't point to anything
2615  *         11    No Config - Doesn't point to anything
2616  * -----------------------------------------------------------
2617  *         12    No Config - Doesn't point to anything
2618  *         13           |     1    |           dst
2619  *         14           |     2    |           dst
2620  *         15    No Config - Doesn't point to anything
2621  *         16    No Config - Doesn't point to anything
2622  *         17    No Config - Doesn't point to anything
2623  *         18    No Config - Doesn't point to anything
2624  *         19           |     7    |           dst
2625  *         20           |     8    |           dst
2626  *         21    No Config - Doesn't point to anything
2627  *         22    No Config - Doesn't point to anything
2628  *         23    No Config - Doesn't point to anything
2629  * -----------------------------------------------------------
2630  *
2631  *
2632  * ToDo - Move shadow register config to following in the future
2633  * This helps free up a block of shadow registers towards the end.
2634  * Can be used for other purposes
2635  *
2636  * -----------------------------------------------------------
2637  * Shadow Register      |     CE   |    src/dst write index
2638  * -----------------------------------------------------------
2639  *      0            |     0    |           src
2640  *      1            |     3    |           src
2641  *      2            |     4    |           src
2642  *      3            |     5    |           src
2643  *      4            |     7    |           src
2644  * -----------------------------------------------------------
2645  *      5            |     1    |           dst
2646  *      6            |     2    |           dst
2647  *      7            |     7    |           dst
2648  *      8            |     8    |           dst
2649  * -----------------------------------------------------------
2650  *      9     No Config - Doesn't point to anything
2651  *      12    No Config - Doesn't point to anything
2652  *      13    No Config - Doesn't point to anything
2653  *      14    No Config - Doesn't point to anything
2654  *      15    No Config - Doesn't point to anything
2655  *      16    No Config - Doesn't point to anything
2656  *      17    No Config - Doesn't point to anything
2657  *      18    No Config - Doesn't point to anything
2658  *      19    No Config - Doesn't point to anything
2659  *      20    No Config - Doesn't point to anything
2660  *      21    No Config - Doesn't point to anything
2661  *      22    No Config - Doesn't point to anything
2662  *      23    No Config - Doesn't point to anything
2663  * -----------------------------------------------------------
2664 */
2665 
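/**
 * shadow_sr_wr_ind_addr() - get the source ring write index shadow address
 * @scn: HIF context
 * @ctrl_addr: CE control address used to derive the copy engine id
 *
 * Return: address of the shadow register tracking the CE's source ring
 *         write index, per the mapping table above
 */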
2666 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
2667 {
2668 	u32 addr = 0;
2669 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
2670 
2671 	switch (ce) {
2672 	case 0:
2673 		addr = SHADOW_VALUE0;
2674 		break;
2675 	case 3:
2676 		addr = SHADOW_VALUE3;
2677 		break;
2678 	case 4:
2679 		addr = SHADOW_VALUE4;
2680 		break;
2681 	case 5:
2682 		addr = SHADOW_VALUE5;
2683 		break;
2684 	case 7:
2685 		addr = SHADOW_VALUE7;
2686 		break;
2687 	default:
2688 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
2689 		QDF_ASSERT(0);
2690 	}
2691 	return addr;
2692 
2693 }
2694 
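/**
 * shadow_dst_wr_ind_addr() - get the dest ring write index shadow address
 * @scn: HIF context
 * @ctrl_addr: CE control address used to derive the copy engine id
 *
 * Return: address of the shadow register tracking the CE's destination
 *         ring write index, per the mapping table above
 */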
2695 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
2696 {
2697 	u32 addr = 0;
2698 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
2699 
2700 	switch (ce) {
2701 	case 1:
2702 		addr = SHADOW_VALUE13;
2703 		break;
2704 	case 2:
2705 		addr = SHADOW_VALUE14;
2706 		break;
2707 	case 5:
2708 		addr = SHADOW_VALUE17;
2709 		break;
2710 	case 7:
2711 		addr = SHADOW_VALUE19;
2712 		break;
2713 	case 8:
2714 		addr = SHADOW_VALUE20;
2715 		break;
2716 	case 9:
2717 		addr = SHADOW_VALUE21;
2718 		break;
2719 	case 10:
2720 		addr = SHADOW_VALUE22;
2721 		break;
2722 	case 11:
2723 		addr = SHADOW_VALUE23;
2724 		break;
2725 	default:
2726 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
2727 		QDF_ASSERT(0);
2728 	}
2729 
2730 	return addr;
2731 
2732 }
2733 #endif
2734 
2735 #if defined(FEATURE_LRO)
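/**
 * hif_ce_get_lro_ctx() - get the LRO context of a copy engine
 * @hif_hdl: HIF opaque handle
 * @ctx_id: copy engine id
 *
 * Return: the lro_data pointer registered for that copy engine
 */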
2736 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
2737 {
2738 	struct CE_state *ce_state;
2739 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
2740 
2741 	ce_state = scn->ce_id_to_state[ctx_id];
2742 
2743 	return ce_state->lro_data;
2744 }
2745 
2746 /**
2747  * ce_lro_flush_cb_register() - register the LRO flush
2748  * callback
2749  * @hif_hdl: HIF opaque context
2750  * @handler: flush callback function
2751  * @lro_init_handler: callback invoked to allocate the per-CE LRO context
2752  *
2753  * Store the LRO flush callback provided
2754  *
2755  * Return: Number of instances the callback is registered for
2756  */
2757 int ce_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
2758 			     void (handler)(void *),
2759 			     void *(lro_init_handler)(void))
2760 {
2761 	int rc = 0;
2762 	int i;
2763 	struct CE_state *ce_state;
2764 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
2765 	void *data = NULL;
2766 
2767 	QDF_ASSERT(scn != NULL);
2768 
2769 	if (scn != NULL) {
2770 		for (i = 0; i < scn->ce_count; i++) {
2771 			ce_state = scn->ce_id_to_state[i];
2772 			if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
2773 				data = lro_init_handler();
2774 				if (data == NULL) {
2775 					HIF_ERROR("%s: Failed to init LRO for CE %d",
2776 						  __func__, i);
2777 					continue;
2778 				}
2779 				ce_state->lro_flush_cb = handler;
2780 				ce_state->lro_data = data;
2781 				rc++;
2782 			}
2783 		}
2784 	} else {
2785 		HIF_ERROR("%s: hif_state NULL!", __func__);
2786 	}
2787 	return rc;
2788 }
2789 
2790 /**
2791  * ce_lro_flush_cb_deregister() - deregister the LRO flush
2792  * callback
2793  * @hif_hdl: HIF opaque context
2794  *
2795  * Remove the LRO flush callback
2796  *
2797  * Return: Number of instances the callback is de-registered
2798  */
2799 int ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
2800 			       void (lro_deinit_cb)(void *))
2801 {
2802 	int rc = 0;
2803 	int i;
2804 	struct CE_state *ce_state;
2805 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
2806 
2807 	QDF_ASSERT(scn != NULL);
2808 	if (scn != NULL) {
2809 		for (i = 0; i < scn->ce_count; i++) {
2810 			ce_state = scn->ce_id_to_state[i];
2811 			if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
2812 				qdf_spin_lock_bh(
2813 					&ce_state->lro_unloading_lock);
2814 				ce_state->lro_flush_cb = NULL;
2815 				lro_deinit_cb(ce_state->lro_data);
2816 				ce_state->lro_data = NULL;
2817 				qdf_spin_unlock_bh(
2818 					&ce_state->lro_unloading_lock);
2819 				qdf_spinlock_destroy(
2820 					&ce_state->lro_unloading_lock);
2821 				rc++;
2822 			}
2823 		}
2824 	} else {
2825 		HIF_ERROR("%s: hif_state NULL!", __func__);
2826 	}
2827 	return rc;
2828 }
2829 #endif
2830 
2831 /**
2832  * hif_map_service_to_pipe() - returns the ce ids pertaining to
2833  * this service
2834  * @hif_hdl: HIF opaque handle.
2835  * @svc_id: Service ID for which the mapping is needed.
2836  * @ul_pipe: address of the container in which ul pipe is returned.
2837  * @dl_pipe: address of the container in which dl pipe is returned.
2838  * @ul_is_polled: address of the container in which a bool
2839  *			indicating if the UL CE for this service
2840  *			is polled is returned.
2841  * @dl_is_polled: address of the container in which a bool
2842  *			indicating if the DL CE for this service
2843  *			is polled is returned.
2844  *
2845  * Return: Indicates whether the service has been found in the table.
2846  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
2847  *         There will be warning logs if either leg has not been updated
2848  *         because it missed the entry in the table (but this is not an error).
2849  */
2850 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
2851 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
2852 			int *dl_is_polled)
2853 {
2854 	int status = QDF_STATUS_E_INVAL;
2855 	unsigned int i;
2856 	struct service_to_pipe element;
2857 	struct service_to_pipe *tgt_svc_map_to_use;
2858 	uint32_t sz_tgt_svc_map_to_use;
2859 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
2860 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2861 	bool dl_updated = false;
2862 	bool ul_updated = false;
2863 
2864 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
2865 				       &sz_tgt_svc_map_to_use);
2866 
2867 	*dl_is_polled = 0;  /* polling for received messages not supported */
2868 
2869 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
2870 
2871 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
2872 		if (element.service_id == svc_id) {
2873 			if (element.pipedir == PIPEDIR_OUT) {
2874 				*ul_pipe = element.pipenum;
2875 				*ul_is_polled =
2876 					(hif_state->host_ce_config[*ul_pipe].flags &
2877 					 CE_ATTR_DISABLE_INTR) != 0;
2878 				ul_updated = true;
2879 			} else if (element.pipedir == PIPEDIR_IN) {
2880 				*dl_pipe = element.pipenum;
2881 				dl_updated = true;
2882 			}
2883 			status = QDF_STATUS_SUCCESS;
2884 		}
2885 	}
2886 	if (ul_updated == false)
2887 		HIF_INFO("%s: ul pipe is NOT updated for service %d",
2888 			 __func__, svc_id);
2889 	if (dl_updated == false)
2890 		HIF_INFO("%s: dl pipe is NOT updated for service %d",
2891 			 __func__, svc_id);
2892 
2893 	return status;
2894 }
2895 
2896 #ifdef SHADOW_REG_DEBUG
2897 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
2898 		uint32_t CE_ctrl_addr)
2899 {
2900 	uint32_t read_from_hw, srri_from_ddr = 0;
2901 
2902 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
2903 
2904 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2905 
2906 	if (read_from_hw != srri_from_ddr) {
2907 		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
2908 		       __func__, srri_from_ddr, read_from_hw,
2909 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
2910 		QDF_ASSERT(0);
2911 	}
2912 	return srri_from_ddr;
2913 }
2914 
2915 
2916 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
2917 		uint32_t CE_ctrl_addr)
2918 {
2919 	uint32_t read_from_hw, drri_from_ddr = 0;
2920 
2921 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
2922 
2923 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
2924 
2925 	if (read_from_hw != drri_from_ddr) {
2926 		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
2927 		       drri_from_ddr, read_from_hw,
2928 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
2929 		QDF_ASSERT(0);
2930 	}
2931 	return drri_from_ddr;
2932 }
2933 
2934 #endif
2935 
2936 #ifdef ADRASTEA_RRI_ON_DDR
2937 /**
2938  * hif_get_src_ring_read_index(): Called to get the SRRI
2939  *
2940  * @scn: hif_softc pointer
2941  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2942  *
2943  * This function returns the SRRI to the caller. For CEs that
2944  * don't have interrupts enabled, we look at the DDR based SRRI
2945  *
2946  * Return: SRRI
2947  */
2948 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
2949 		uint32_t CE_ctrl_addr)
2950 {
2951 	struct CE_attr attr;
2952 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2953 
2954 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2955 	if (attr.flags & CE_ATTR_DISABLE_INTR)
2956 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2957 	else
2958 		return A_TARGET_READ(scn,
2959 				(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2960 }
2961 
2962 /**
2963  * hif_get_dst_ring_read_index(): Called to get the DRRI
2964  *
2965  * @scn: hif_softc pointer
2966  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2967  *
2968  * This function returns the DRRI to the caller. For CEs that
2969  * don't have interrupts enabled, we look at the DDR based DRRI
2970  *
2971  * Return: DRRI
2972  */
2973 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
2974 		uint32_t CE_ctrl_addr)
2975 {
2976 	struct CE_attr attr;
2977 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2978 
2979 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2980 
2981 	if (attr.flags & CE_ATTR_DISABLE_INTR)
2982 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2983 	else
2984 		return A_TARGET_READ(scn,
2985 				(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2986 }
2987 
2988 /**
2989  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2990  *
2991  * @scn: hif_softc pointer
2992  *
2993  * This function allocates non-cached memory on DDR and sends
2994  * the physical address of this memory to the CE hardware. The
2995  * hardware updates the RRI on this particular location.
2996  *
2997  * Return: None
2998  */
2999 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3000 {
3001 	unsigned int i;
3002 	qdf_dma_addr_t paddr_rri_on_ddr;
3003 	uint32_t high_paddr, low_paddr;
3004 
3005 	scn->vaddr_rri_on_ddr =
3006 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3007 		scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)),
3008 		&paddr_rri_on_ddr);
3009 
3010 	low_paddr  = BITS0_TO_31(paddr_rri_on_ddr);
3011 	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
3012 
3013 	HIF_DBG("%s using srri and drri from DDR", __func__);
3014 
3015 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
3016 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
3017 
3018 	for (i = 0; i < CE_COUNT; i++)
3019 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
3020 
3021 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
3022 
3023 }
3024 #else
3025 
3026 /**
3027  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3028  *
3029  * @scn: hif_softc pointer
3030  *
3031  * This is a dummy implementation for platforms that don't
3032  * support this functionality.
3033  *
3034  * Return: None
3035  */
3036 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3037 {
3038 }
3039 #endif
3040 
3041 /**
3042  * hif_dump_ce_registers() - dump ce registers
3043  * @scn: hif_opaque_softc pointer.
3044  *
3045  * Output the copy engine registers
3046  *
3047  * Return: 0 for success or error code
3048  */
3049 int hif_dump_ce_registers(struct hif_softc *scn)
3050 {
3051 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3052 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
3053 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
3054 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
3055 	uint16_t i;
3056 	QDF_STATUS status;
3057 
3058 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
3059 		if (scn->ce_id_to_state[i] == NULL) {
3060 			HIF_DBG("CE%d not used.", i);
3061 			continue;
3062 		}
3063 
3064 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
3065 					   (uint8_t *) &ce_reg_values[0],
3066 					   ce_reg_word_size * sizeof(uint32_t));
3067 
3068 		if (status != QDF_STATUS_SUCCESS) {
3069 			HIF_ERROR("Dumping CE register failed!");
3070 			return -EACCES;
3071 		}
3072 		HIF_ERROR("CE%d=>\n", i);
3073 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
3074 				   (uint8_t *) &ce_reg_values[0],
3075 				   ce_reg_word_size * sizeof(uint32_t));
3076 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d\n", (ce_reg_address
3077 				+ SR_WR_INDEX_ADDRESS),
3078 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
3079 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d\n", (ce_reg_address
3080 				+ CURRENT_SRRI_ADDRESS),
3081 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
3082 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d\n", (ce_reg_address
3083 				+ DST_WR_INDEX_ADDRESS),
3084 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
3085 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d\n", (ce_reg_address
3086 				+ CURRENT_DRRI_ADDRESS),
3087 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
3088 		qdf_print("---\n");
3089 	}
3090 	return 0;
3091 }
3092 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
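/**
 * hif_get_addl_pipe_info() - collect additional ring state for a pipe
 * @osc: HIF opaque context
 * @hif_info: structure filled with the UL/DL ring state of the pipe
 * @pipe: copy engine id
 *
 * Return: the filled hif_info structure
 */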
3093 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
3094 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
3095 {
3096 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3097 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3098 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
3099 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3100 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3101 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
3102 	struct CE_ring_state *src_ring = ce_state->src_ring;
3103 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
3104 
3105 	if (src_ring) {
3106 		hif_info->ul_pipe.nentries = src_ring->nentries;
3107 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
3108 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
3109 		hif_info->ul_pipe.write_index = src_ring->write_index;
3110 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
3111 		hif_info->ul_pipe.base_addr_CE_space =
3112 			src_ring->base_addr_CE_space;
3113 		hif_info->ul_pipe.base_addr_owner_space =
3114 			src_ring->base_addr_owner_space;
3115 	}
3116 
3117 
3118 	if (dest_ring) {
3119 		hif_info->dl_pipe.nentries = dest_ring->nentries;
3120 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
3121 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
3122 		hif_info->dl_pipe.write_index = dest_ring->write_index;
3123 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
3124 		hif_info->dl_pipe.base_addr_CE_space =
3125 			dest_ring->base_addr_CE_space;
3126 		hif_info->dl_pipe.base_addr_owner_space =
3127 			dest_ring->base_addr_owner_space;
3128 	}
3129 
3130 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
3131 	hif_info->ctrl_addr = ce_state->ctrl_addr;
3132 
3133 	return hif_info;
3134 }
3135 
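/**
 * hif_set_nss_wifiol_mode() - record the NSS wifi offload mode
 * @osc: HIF opaque context
 * @mode: offload mode to store in the HIF context
 *
 * Return: 0
 */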
3136 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
3137 {
3138 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3139 
3140 	scn->nss_wifi_ol_mode = mode;
3141 	return 0;
3142 }
3143 
3144 #endif
3145 
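/**
 * hif_set_attribute() - store the HIF attribute flags in the HIF context
 * @osc: HIF opaque context
 * @hif_attrib: attribute flags to store
 */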
3146 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
3147 {
3148 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3149 	scn->hif_attribute = hif_attrib;
3150 }
3151 
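/**
 * hif_disable_interrupt() - disable copy-complete interrupts for a pipe
 * @osc: HIF opaque context
 * @pipe_num: copy engine id whose interrupt should be disabled
 */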
3152 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
3153 {
3154 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3155 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
3156 	uint32_t ctrl_addr = CE_state->ctrl_addr;
3157 
3158 	Q_TARGET_ACCESS_BEGIN(scn);
3159 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
3160 	Q_TARGET_ACCESS_END(scn);
3161 }
3162 
3163 /**
3164  * hif_fw_event_handler() - hif fw event handler
3165  * @hif_state: pointer to hif ce state structure
3166  *
3167  * Process fw events and raise HTC callback to process fw events.
3168  *
3169  * Return: none
3170  */
3171 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
3172 {
3173 	struct hif_msg_callbacks *msg_callbacks =
3174 		&hif_state->msg_callbacks_current;
3175 
3176 	if (!msg_callbacks->fwEventHandler)
3177 		return;
3178 
3179 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
3180 			QDF_STATUS_E_FAILURE);
3181 }
3182 
3183 #ifndef QCA_WIFI_3_0
3184 /**
3185  * hif_fw_interrupt_handler() - FW interrupt handler
3186  * @irq: irq number
3187  * @arg: the user pointer
3188  *
3189  * Called from the PCI interrupt handler when the Target raises a
3190  * firmware-generated interrupt to the Host.
3191  *
3192  * Return: status of handled irq
3193  */
3194 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3195 {
3196 	struct hif_softc *scn = arg;
3197 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3198 	uint32_t fw_indicator_address, fw_indicator;
3199 
3200 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
3201 		return ATH_ISR_NOSCHED;
3202 
3203 	fw_indicator_address = hif_state->fw_indicator_address;
3204 	/* For sudden unplug this will return ~0 */
3205 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
3206 
3207 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
3208 		/* ACK: clear Target-side pending event */
3209 		A_TARGET_WRITE(scn, fw_indicator_address,
3210 			       fw_indicator & ~FW_IND_EVENT_PENDING);
3211 		if (Q_TARGET_ACCESS_END(scn) < 0)
3212 			return ATH_ISR_SCHED;
3213 
3214 		if (hif_state->started) {
3215 			hif_fw_event_handler(hif_state);
3216 		} else {
3217 			/*
3218 			 * Probable Target failure before we're prepared
3219 			 * to handle it.  Generally unexpected.
3220 			 */
3221 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
3222 				("%s: Early firmware event indicated\n",
3223 				 __func__));
3224 		}
3225 	} else {
3226 		if (Q_TARGET_ACCESS_END(scn) < 0)
3227 			return ATH_ISR_SCHED;
3228 	}
3229 
3230 	return ATH_ISR_SCHED;
3231 }
3232 #else
3233 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3234 {
3235 	return ATH_ISR_SCHED;
3236 }
3237 #endif /* #ifndef QCA_WIFI_3_0 */
3238 
3239 
3240 /**
3241  * hif_wlan_disable(): call the platform driver to disable wlan
3242  * @scn: HIF Context
3243  *
3244  * This function passes the con_mode to platform driver to disable
3245  * wlan.
3246  *
3247  * Return: void
3248  */
3249 void hif_wlan_disable(struct hif_softc *scn)
3250 {
3251 	enum pld_driver_mode mode;
3252 	uint32_t con_mode = hif_get_conparam(scn);
3253 
3254 	if (QDF_GLOBAL_FTM_MODE == con_mode)
3255 		mode = PLD_FTM;
3256 	else if (QDF_IS_EPPING_ENABLED(con_mode))
3257 		mode = PLD_EPPING;
3258 	else
3259 		mode = PLD_MISSION;
3260 
3261 	pld_wlan_disable(scn->qdf_dev->dev, mode);
3262 }
3263 
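/**
 * hif_get_wake_ce_id() - get the copy engine id used as the wake CE
 * @scn: HIF context
 * @ce_id: filled with the CE id of the HTC_CTRL_RSVD_SVC downlink pipe
 *
 * Return: 0 on success, negative errno if the service could not be mapped
 */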
3264 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
3265 {
3266 	QDF_STATUS status;
3267 	uint8_t ul_pipe, dl_pipe;
3268 	int ul_is_polled, dl_is_polled;
3269 
3270 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
3271 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
3272 					 HTC_CTRL_RSVD_SVC,
3273 					 &ul_pipe, &dl_pipe,
3274 					 &ul_is_polled, &dl_is_polled);
3275 	if (status) {
3276 		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
3277 		return qdf_status_to_os_return(status);
3278 	}
3279 
3280 	*ce_id = dl_pipe;
3281 
3282 	return 0;
3283 }
3284