xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision 87a8e4458319c60b618522e263ed900e36aab528)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "ce_api.h"
33 #include "qdf_trace.h"
34 #include "pld_common.h"
35 #include "hif_debug.h"
36 #include "ce_internal.h"
37 #include "ce_reg.h"
38 #include "ce_assignment.h"
39 #include "ce_tasklet.h"
40 #ifndef CONFIG_WIN
41 #include "qwlan_version.h"
42 #endif
43 #include "qdf_module.h"
44 
45 #define CE_POLL_TIMEOUT 10      /* ms */
46 
47 #define AGC_DUMP         1
48 #define CHANINFO_DUMP    2
49 #define BB_WATCHDOG_DUMP 3
50 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
51 #define PCIE_ACCESS_DUMP 4
52 #endif
53 #include "mp_dev.h"
54 
55 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
56 	!defined(QCA_WIFI_SUPPORT_SRNG)
57 #define QCA_WIFI_SUPPORT_SRNG
58 #endif
59 
60 /* Forward references */
61 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
62 
63 /*
64  * Fix EV118783: poll to check whether a BMI response has arrived,
65  * rather than only waiting for an interrupt that may be lost.
66  */
67 /* #define BMI_RSP_POLLING */
68 #define BMI_RSP_TO_MILLISEC  1000
69 
70 #ifdef CONFIG_BYPASS_QMI
71 #define BYPASS_QMI 1
72 #else
73 #define BYPASS_QMI 0
74 #endif
75 
76 #ifdef CONFIG_WIN
77 #if ENABLE_10_4_FW_HDR
78 #define WDI_IPA_SERVICE_GROUP 5
79 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
80 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
81 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
82 #endif /* ENABLE_10_4_FW_HDR */
83 #endif
84 
85 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
86 static void hif_config_rri_on_ddr(struct hif_softc *scn);
87 
88 /**
89  * hif_target_access_log_dump() - dump access log
90  *
91  * dump access log
92  *
93  * Return: n/a
94  */
95 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
96 static void hif_target_access_log_dump(void)
97 {
98 	hif_target_dump_access_log();
99 }
100 #endif
101 
102 
103 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
104 		      uint8_t cmd_id, bool start)
105 {
106 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
107 
108 	switch (cmd_id) {
109 	case AGC_DUMP:
110 		if (start)
111 			priv_start_agc(scn);
112 		else
113 			priv_dump_agc(scn);
114 		break;
115 	case CHANINFO_DUMP:
116 		if (start)
117 			priv_start_cap_chaninfo(scn);
118 		else
119 			priv_dump_chaninfo(scn);
120 		break;
121 	case BB_WATCHDOG_DUMP:
122 		priv_dump_bbwatchdog(scn);
123 		break;
124 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
125 	case PCIE_ACCESS_DUMP:
126 		hif_target_access_log_dump();
127 		break;
128 #endif
129 	default:
130 		HIF_ERROR("%s: Invalid htc dump command", __func__);
131 		break;
132 	}
133 }
134 
135 static void ce_poll_timeout(void *arg)
136 {
137 	struct CE_state *CE_state = (struct CE_state *)arg;
138 
139 	if (CE_state->timer_inited) {
140 		ce_per_engine_service(CE_state->scn, CE_state->id);
141 		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
142 	}
143 }
144 
145 static unsigned int roundup_pwr2(unsigned int n)
146 {
147 	int i;
148 	unsigned int test_pwr2;
149 
150 	if (!(n & (n - 1)))
151 		return n; /* already a power of 2 */
152 
153 	test_pwr2 = 4;
154 	for (i = 0; i < 29; i++) {
155 		if (test_pwr2 > n)
156 			return test_pwr2;
157 		test_pwr2 = test_pwr2 << 1;
158 	}
159 
160 	QDF_ASSERT(0); /* n too large */
161 	return 0;
162 }
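/*
 * Worked examples of roundup_pwr2() (illustrative only):
 *   roundup_pwr2(512) == 512   - already a power of 2, returned as-is
 *   roundup_pwr2(100) == 128   - rounded up to the next power of 2
 *   roundup_pwr2(3)   == 4     - the scan starts at test_pwr2 = 4
 * ce_init() normalizes attr->src_nentries/dest_nentries this way so that
 * ce_ring->nentries_mask = nentries - 1 works as a wrap-around mask.
 */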
163 
164 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
165 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
166 
167 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
168 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
169 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
170 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
171 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
172 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
173 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
174 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
175 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
176 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
177 #ifdef QCA_WIFI_3_0_ADRASTEA
178 	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
179 	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
180 	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
181 #endif
182 };
183 
184 #ifdef QCN7605_SUPPORT
185 static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
186 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
187 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
188 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
189 	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
190 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
191 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
192 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
193 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
194 };
195 #endif
196 
197 #ifdef WLAN_FEATURE_EPPING
198 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
199 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
200 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
201 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
202 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
203 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
204 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
205 	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
206 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
207 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
208 };
209 #endif
210 
211 /* CE_PCI TABLE */
212 /*
213  * NOTE: the table below is out of date, though still a useful reference.
214  * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
215  * mapping of HTC services to HIF pipes.
216  */
217 /*
218  * This authoritative table defines Copy Engine configuration and the mapping
219  * of services/endpoints to CEs.  A subset of this information is passed to
220  * the Target during startup as a prerequisite to entering BMI phase.
221  * See:
222  *    target_service_to_ce_map - Target-side mapping
223  *    hif_map_service_to_pipe      - Host-side mapping
224  *    target_ce_config         - Target-side configuration
225  *    host_ce_config           - Host-side configuration
226    ============================================================================
227    Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
228  |                      |      | ctio | Size     | Frequency
229  |                      |      | n    |          |
230    ============================================================================
231    tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
232    descriptor |                      |      |      | O(100B)  | and regular
233    download   |                      |      |      |          |
234    ----------------------------------------------------------------------------
235    rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
236    indication |                      |      |      | O(10B)   | regular
237    upload     |                      |      |      |          |
238    ----------------------------------------------------------------------------
239    MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
240    upload     |                      |      |      | O(1000B) | (frequent
241    e.g. noise |                      |      |      |          | during IP1.0
242    packets    |                      |      |      |          | testing)
243    ----------------------------------------------------------------------------
244    MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
245    download   |                      |      |      | O(1000B) | (frequent
246    e.g.       |                      |      |      |          | during IP1.0
247    misdirecte |                      |      |      |          | testing)
248    d EAPOL    |                      |      |      |          |
249    packets    |                      |      |      |          |
250    ----------------------------------------------------------------------------
251    n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
252  | DATA_VO (uplink)     |      |      |          |
253    ----------------------------------------------------------------------------
254    n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
255  | DATA_VO (downlink)   |      |      |          |
256    ----------------------------------------------------------------------------
257    WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
258  |                      |      |      | O(100B)  |
259    ----------------------------------------------------------------------------
260    WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
261    messages   | (downlink)           |      |      | O(100B)  |
262  |                      |      |      |          |
263    ----------------------------------------------------------------------------
264    n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
265  | HTC_RAW_STREAMS      |      |      |          |
266  | (uplink)             |      |      |          |
267    ----------------------------------------------------------------------------
268    n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
269  | HTC_RAW_STREAMS      |      |      |          |
270  | (downlink)           |      |      |          |
271    ----------------------------------------------------------------------------
272    diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
273  |                      |      |      |          | infrequent
274    ============================================================================
275  */
276 
277 /*
278  * Map from service/endpoint to Copy Engine.
279  * This table is derived from the CE_PCI TABLE, above.
280  * It is passed to the Target at startup for use by firmware.
281  */
282 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
283 	{
284 		WMI_DATA_VO_SVC,
285 		PIPEDIR_OUT,    /* out = UL = host -> target */
286 		3,
287 	},
288 	{
289 		WMI_DATA_VO_SVC,
290 		PIPEDIR_IN,     /* in = DL = target -> host */
291 		2,
292 	},
293 	{
294 		WMI_DATA_BK_SVC,
295 		PIPEDIR_OUT,    /* out = UL = host -> target */
296 		3,
297 	},
298 	{
299 		WMI_DATA_BK_SVC,
300 		PIPEDIR_IN,     /* in = DL = target -> host */
301 		2,
302 	},
303 	{
304 		WMI_DATA_BE_SVC,
305 		PIPEDIR_OUT,    /* out = UL = host -> target */
306 		3,
307 	},
308 	{
309 		WMI_DATA_BE_SVC,
310 		PIPEDIR_IN,     /* in = DL = target -> host */
311 		2,
312 	},
313 	{
314 		WMI_DATA_VI_SVC,
315 		PIPEDIR_OUT,    /* out = UL = host -> target */
316 		3,
317 	},
318 	{
319 		WMI_DATA_VI_SVC,
320 		PIPEDIR_IN,     /* in = DL = target -> host */
321 		2,
322 	},
323 	{
324 		WMI_CONTROL_SVC,
325 		PIPEDIR_OUT,    /* out = UL = host -> target */
326 		3,
327 	},
328 	{
329 		WMI_CONTROL_SVC,
330 		PIPEDIR_IN,     /* in = DL = target -> host */
331 		2,
332 	},
333 	{
334 		HTC_CTRL_RSVD_SVC,
335 		PIPEDIR_OUT,    /* out = UL = host -> target */
336 		0,              /* could be moved to 3 (share with WMI) */
337 	},
338 	{
339 		HTC_CTRL_RSVD_SVC,
340 		PIPEDIR_IN,     /* in = DL = target -> host */
341 		2,
342 	},
343 	{
344 		HTC_RAW_STREAMS_SVC, /* not currently used */
345 		PIPEDIR_OUT,    /* out = UL = host -> target */
346 		0,
347 	},
348 	{
349 		HTC_RAW_STREAMS_SVC, /* not currently used */
350 		PIPEDIR_IN,     /* in = DL = target -> host */
351 		2,
352 	},
353 	{
354 		HTT_DATA_MSG_SVC,
355 		PIPEDIR_OUT,    /* out = UL = host -> target */
356 		4,
357 	},
358 	{
359 		HTT_DATA_MSG_SVC,
360 		PIPEDIR_IN,     /* in = DL = target -> host */
361 		1,
362 	},
363 	{
364 		WDI_IPA_TX_SVC,
365 		PIPEDIR_OUT,    /* out = UL = host -> target */
366 		5,
367 	},
368 #if defined(QCA_WIFI_3_0_ADRASTEA)
369 	{
370 		HTT_DATA2_MSG_SVC,
371 		PIPEDIR_IN,    /* in = DL = target -> host */
372 		9,
373 	},
374 	{
375 		HTT_DATA3_MSG_SVC,
376 		PIPEDIR_IN,    /* in = DL = target -> host */
377 		10,
378 	},
379 	{
380 		PACKET_LOG_SVC,
381 		PIPEDIR_IN,    /* in = DL = target -> host */
382 		11,
383 	},
384 #endif
385 	/* (Additions here) */
386 
387 	{                       /* Must be last */
388 		0,
389 		0,
390 		0,
391 	},
392 };
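/*
 * Example of reading the map above (illustrative only): WMI_CONTROL_SVC
 * appears once per direction, so hif_map_service_to_pipe() would resolve
 * it to UL pipe 3 (host->target) and DL pipe 2 (target->host), while
 * HTT_DATA_MSG_SVC resolves to UL pipe 4 and DL pipe 1.  The all-zero
 * entry terminates the table.
 */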
393 
394 /* PIPEDIR_OUT = HOST to Target */
395 /* PIPEDIR_IN  = TARGET to HOST */
396 #if (defined(QCA_WIFI_QCA8074))
397 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
398 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
399 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
400 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
401 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
402 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
403 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
404 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
405 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
406 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
407 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
408 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
409 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
410 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
411 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
412 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
413 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
414 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
415 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
416 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
417 	/* (Additions here) */
418 	{ 0, 0, 0, },
419 };
420 #else
421 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
422 };
423 #endif
424 
425 /* PIPEDIR_OUT = HOST to Target */
426 /* PIPEDIR_IN  = TARGET to HOST */
427 #ifdef QCN7605_SUPPORT
428 static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
429 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
430 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
431 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
432 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
433 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
434 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
435 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
436 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
437 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
438 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
439 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
440 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
441 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
442 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
443 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
444 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
445 	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
446 #ifdef IPA_OFFLOAD
447 	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
448 #else
449 	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
450 #endif
451 	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
452 	/* (Additions here) */
453 	{ 0, 0, 0, },
454 };
455 #endif
456 
457 #if (defined(QCA_WIFI_QCA6290))
458 #ifdef CONFIG_WIN
459 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
460 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
461 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
462 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
463 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
464 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
465 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
466 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
467 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
468 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
469 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
470 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
471 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
472 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
473 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
474 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
475 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
476 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
477 	/* (Additions here) */
478 	{ 0, 0, 0, },
479 };
480 #else
481 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
482 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
483 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
484 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
485 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
486 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
487 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
488 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
489 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
490 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
491 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
492 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
493 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
494 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
495 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
496 	/* (Additions here) */
497 	{ 0, 0, 0, },
498 };
499 #endif
500 #else
501 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
502 };
503 #endif
504 
505 #if (defined(QCA_WIFI_QCA6390))
506 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
507 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
508 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
509 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
510 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
511 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
512 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
513 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
514 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
515 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
516 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
517 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
518 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
519 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
520 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
521 	/* (Additions here) */
522 	{ 0, 0, 0, },
523 };
524 #else
525 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
526 };
527 #endif
528 
529 static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
530 	{
531 		WMI_DATA_VO_SVC,
532 		PIPEDIR_OUT,    /* out = UL = host -> target */
533 		3,
534 	},
535 	{
536 		WMI_DATA_VO_SVC,
537 		PIPEDIR_IN,     /* in = DL = target -> host */
538 		2,
539 	},
540 	{
541 		WMI_DATA_BK_SVC,
542 		PIPEDIR_OUT,    /* out = UL = host -> target */
543 		3,
544 	},
545 	{
546 		WMI_DATA_BK_SVC,
547 		PIPEDIR_IN,     /* in = DL = target -> host */
548 		2,
549 	},
550 	{
551 		WMI_DATA_BE_SVC,
552 		PIPEDIR_OUT,    /* out = UL = host -> target */
553 		3,
554 	},
555 	{
556 		WMI_DATA_BE_SVC,
557 		PIPEDIR_IN,     /* in = DL = target -> host */
558 		2,
559 	},
560 	{
561 		WMI_DATA_VI_SVC,
562 		PIPEDIR_OUT,    /* out = UL = host -> target */
563 		3,
564 	},
565 	{
566 		WMI_DATA_VI_SVC,
567 		PIPEDIR_IN,     /* in = DL = target -> host */
568 		2,
569 	},
570 	{
571 		WMI_CONTROL_SVC,
572 		PIPEDIR_OUT,    /* out = UL = host -> target */
573 		3,
574 	},
575 	{
576 		WMI_CONTROL_SVC,
577 		PIPEDIR_IN,     /* in = DL = target -> host */
578 		2,
579 	},
580 	{
581 		HTC_CTRL_RSVD_SVC,
582 		PIPEDIR_OUT,    /* out = UL = host -> target */
583 		0,              /* could be moved to 3 (share with WMI) */
584 	},
585 	{
586 		HTC_CTRL_RSVD_SVC,
587 		PIPEDIR_IN,     /* in = DL = target -> host */
588 		1,
589 	},
590 	{
591 		HTC_RAW_STREAMS_SVC, /* not currently used */
592 		PIPEDIR_OUT,    /* out = UL = host -> target */
593 		0,
594 	},
595 	{
596 		HTC_RAW_STREAMS_SVC, /* not currently used */
597 		PIPEDIR_IN,     /* in = DL = target -> host */
598 		1,
599 	},
600 	{
601 		HTT_DATA_MSG_SVC,
602 		PIPEDIR_OUT,    /* out = UL = host -> target */
603 		4,
604 	},
605 #ifdef WLAN_FEATURE_FASTPATH
606 	{
607 		HTT_DATA_MSG_SVC,
608 		PIPEDIR_IN,     /* in = DL = target -> host */
609 		5,
610 	},
611 #else /* WLAN_FEATURE_FASTPATH */
612 	{
613 		HTT_DATA_MSG_SVC,
614 		PIPEDIR_IN,  /* in = DL = target -> host */
615 		1,
616 	},
617 #endif /* WLAN_FEATURE_FASTPATH */
618 
619 	/* (Additions here) */
620 
621 	{                       /* Must be last */
622 		0,
623 		0,
624 		0,
625 	},
626 };
627 
628 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
629 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
630 
631 #ifdef WLAN_FEATURE_EPPING
632 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
633 	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
634 	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
635 	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
636 	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
637 	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
638 	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
639 	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
640 	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
641 	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
642 	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
643 	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
644 	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
645 	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
646 	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
647 	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
648 	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
649 	{0, 0, 0,},             /* Must be last */
650 };
651 
652 void hif_select_epping_service_to_pipe_map(struct service_to_pipe
653 					   **tgt_svc_map_to_use,
654 					   uint32_t *sz_tgt_svc_map_to_use)
655 {
656 	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
657 	*sz_tgt_svc_map_to_use =
658 			sizeof(target_service_to_ce_map_wlan_epping);
659 }
660 #endif
661 
662 #ifdef QCN7605_SUPPORT
663 static inline
664 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
665 			       uint32_t *sz_tgt_svc_map_to_use)
666 {
667 	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
668 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
669 }
670 #else
671 static inline
672 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
673 			       uint32_t *sz_tgt_svc_map_to_use)
674 {
675 	HIF_ERROR("%s: QCN7605 not supported", __func__);
676 }
677 #endif
678 
679 static void hif_select_service_to_pipe_map(struct hif_softc *scn,
680 				    struct service_to_pipe **tgt_svc_map_to_use,
681 				    uint32_t *sz_tgt_svc_map_to_use)
682 {
683 	uint32_t mode = hif_get_conparam(scn);
684 	struct hif_target_info *tgt_info = &scn->target_info;
685 
686 	if (QDF_IS_EPPING_ENABLED(mode)) {
687 		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
688 						      sz_tgt_svc_map_to_use);
689 	} else {
690 		switch (tgt_info->target_type) {
691 		default:
692 			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
693 			*sz_tgt_svc_map_to_use =
694 				sizeof(target_service_to_ce_map_wlan);
695 			break;
696 		case TARGET_TYPE_QCN7605:
697 			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
698 						  sz_tgt_svc_map_to_use);
699 			break;
700 		case TARGET_TYPE_AR900B:
701 		case TARGET_TYPE_QCA9984:
702 		case TARGET_TYPE_IPQ4019:
703 		case TARGET_TYPE_QCA9888:
704 		case TARGET_TYPE_AR9888:
705 		case TARGET_TYPE_AR9888V2:
706 			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
707 			*sz_tgt_svc_map_to_use =
708 				sizeof(target_service_to_ce_map_ar900b);
709 			break;
710 		case TARGET_TYPE_QCA6290:
711 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
712 			*sz_tgt_svc_map_to_use =
713 				sizeof(target_service_to_ce_map_qca6290);
714 			break;
715 		case TARGET_TYPE_QCA6390:
716 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
717 			*sz_tgt_svc_map_to_use =
718 				sizeof(target_service_to_ce_map_qca6390);
719 			break;
720 		case TARGET_TYPE_QCA8074:
721 		case TARGET_TYPE_QCA8074V2:
722 			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
723 			*sz_tgt_svc_map_to_use =
724 				sizeof(target_service_to_ce_map_qca8074);
725 			break;
726 		}
727 	}
728 }
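/*
 * Illustrative example: for an MCL (non-CONFIG_WIN) build on a
 * TARGET_TYPE_QCA6290 target with epping disabled, the selection above
 * would pick target_service_to_ce_map_qca6290, i.e. HTT_DATA_MSG_SVC on
 * pipe 4 (out) and pipe 1 (in); any target type not listed falls back to
 * target_service_to_ce_map_wlan via the default case.
 */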
729 
730 /**
731  * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
732  * @ce_state: pointer to the state context of the CE
733  *
734  * Description:
735  *   Sets the htt_rx_data or htt_tx_data attribute of the state
736  *   structure if the CE serves one of the HTT DATA services.
737  *
738  * Return:
739  *  true  if the CE serves one of the HTT DATA services
740  *  false otherwise
741  */
742 static bool ce_mark_datapath(struct CE_state *ce_state)
743 {
744 	struct service_to_pipe *svc_map;
745 	uint32_t map_sz, map_len;
746 	int    i;
747 	bool   rc = false;
748 
749 	if (ce_state != NULL) {
750 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
751 					       &map_sz);
752 
753 		map_len = map_sz / sizeof(struct service_to_pipe);
754 		for (i = 0; i < map_len; i++) {
755 			if ((svc_map[i].pipenum == ce_state->id) &&
756 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
757 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
758 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
759 				/* HTT CEs are unidirectional */
760 				if (svc_map[i].pipedir == PIPEDIR_IN)
761 					ce_state->htt_rx_data = true;
762 				else
763 					ce_state->htt_tx_data = true;
764 				rc = true;
765 			}
766 		}
767 	}
768 	return rc;
769 }
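/*
 * Illustrative example: with the default target_service_to_ce_map_wlan,
 * ce_mark_datapath() on CE 1 would set htt_rx_data (CE 1 carries
 * HTT_DATA_MSG_SVC in the PIPEDIR_IN direction), while CE 4 would get
 * htt_tx_data set for the PIPEDIR_OUT entry of the same service.
 */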
770 
771 /**
772  * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
773  * @ce_id: ce in question
774  * @ring: ring state being examined
775  * @type: "src_ring" or "dest_ring" string for identifying the ring
776  *
777  * Warns on non-zero index values.
778  * Causes a kernel panic if the ring is not empty during initialization.
779  */
780 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
781 					 char *type)
782 {
783 	if (ring->write_index != 0 || ring->sw_index != 0)
784 		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
785 			  ce_id, type, ring->sw_index, ring->write_index);
786 	if (ring->write_index != ring->sw_index)
787 		QDF_BUG(0);
788 }
789 
790 #ifdef IPA_OFFLOAD
791 /**
792  * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
793  * @scn: softc instance
794  * @ce_id: ce in question
795  * @base_addr: pointer to copyengine ring base address
796  * @ce_ring: copyengine instance
797  * @nentries: number of entries to be allocated
798  * @desc_size: ce desc size
799  *
800  * Return: QDF_STATUS_SUCCESS - for success
801  */
802 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
803 				     qdf_dma_addr_t *base_addr,
804 				     struct CE_ring_state *ce_ring,
805 				     unsigned int nentries, uint32_t desc_size)
806 {
807 	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
808 		scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(scn->qdf_dev,
809 			nentries * desc_size + CE_DESC_RING_ALIGN);
810 		if (!scn->ipa_ce_ring) {
811 			HIF_ERROR("%s: Failed to allocate memory for IPA ce ring",
812 				  __func__);
813 			return QDF_STATUS_E_NOMEM;
814 		}
815 		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
816 						&scn->ipa_ce_ring->mem_info);
817 		ce_ring->base_addr_owner_space_unaligned =
818 						scn->ipa_ce_ring->vaddr;
819 	} else {
820 		ce_ring->base_addr_owner_space_unaligned =
821 			qdf_mem_alloc_consistent(scn->qdf_dev,
822 						 scn->qdf_dev->dev,
823 						 (nentries * desc_size +
824 						 CE_DESC_RING_ALIGN),
825 						 base_addr);
826 		if (!ce_ring->base_addr_owner_space_unaligned) {
827 			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
828 				  __func__, CE_id);
829 			return QDF_STATUS_E_NOMEM;
830 		}
831 	}
832 	return QDF_STATUS_SUCCESS;
833 }
834 
835 /**
836  * ce_free_desc_ring() - Frees copyengine descriptor ring
837  * @scn: softc instance
838  * @ce_id: ce in question
839  * @ce_ring: copyengine instance
840  * @desc_size: ce desc size
841  *
842  * Return: None
843  */
844 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
845 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
846 {
847 	if (CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) {
848 		qdf_mem_shared_mem_free(scn->qdf_dev,
849 					scn->ipa_ce_ring);
850 		ce_ring->base_addr_owner_space_unaligned = NULL;
851 	} else {
852 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
853 			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
854 			ce_ring->base_addr_owner_space_unaligned,
855 			ce_ring->base_addr_CE_space, 0);
856 		ce_ring->base_addr_owner_space_unaligned = NULL;
857 	}
858 }
859 #else
860 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
861 				     qdf_dma_addr_t *base_addr,
862 				     struct CE_ring_state *ce_ring,
863 				     unsigned int nentries, uint32_t desc_size)
864 {
865 	ce_ring->base_addr_owner_space_unaligned =
866 		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
867 					 (nentries * desc_size +
868 					 CE_DESC_RING_ALIGN), base_addr);
869 	if (!ce_ring->base_addr_owner_space_unaligned) {
870 		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
871 			  __func__, CE_id);
872 		return QDF_STATUS_E_NOMEM;
873 	}
874 	return QDF_STATUS_SUCCESS;
875 }
876 
877 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
878 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
879 {
880 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
881 		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
882 		ce_ring->base_addr_owner_space_unaligned,
883 		ce_ring->base_addr_CE_space, 0);
884 	ce_ring->base_addr_owner_space_unaligned = NULL;
885 }
886 #endif /* IPA_OFFLOAD */
887 
888 /**
889  * ce_srng_based() - Does this target use srng
890  * @scn: pointer to the hif context
891  *
892  * Description:
893  *   returns true if the target is SRNG based
894  *
895  * Return:
896  *  true  if the target is SRNG based
897  *  false otherwise
898  */
899 bool ce_srng_based(struct hif_softc *scn)
900 {
901 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
902 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
903 
904 	switch (tgt_info->target_type) {
905 	case TARGET_TYPE_QCA8074:
906 	case TARGET_TYPE_QCA8074V2:
907 	case TARGET_TYPE_QCA6290:
908 	case TARGET_TYPE_QCA6390:
909 		return true;
910 	default:
911 		return false;
912 	}
913 	return false;
914 }
915 qdf_export_symbol(ce_srng_based);
916 
917 #ifdef QCA_WIFI_SUPPORT_SRNG
918 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
919 {
920 	if (ce_srng_based(scn))
921 		return ce_services_srng();
922 
923 	return ce_services_legacy();
924 }
925 
926 
927 #else	/* QCA_LITHIUM */
928 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
929 {
930 	return ce_services_legacy();
931 }
932 #endif /* QCA_LITHIUM */
933 
934 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
935 		struct pld_shadow_reg_v2_cfg **shadow_config,
936 		int *num_shadow_registers_configured) {
937 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
938 
939 	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
940 			scn, shadow_config, num_shadow_registers_configured);
941 }
942 
943 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
944 						uint8_t ring_type)
945 {
946 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
947 
948 	return hif_state->ce_services->ce_get_desc_size(ring_type);
949 }
950 
951 
952 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
953 		uint8_t ring_type, uint32_t nentries)
954 {
955 	uint32_t ce_nbytes;
956 	char *ptr;
957 	qdf_dma_addr_t base_addr;
958 	struct CE_ring_state *ce_ring;
959 	uint32_t desc_size;
960 	struct hif_softc *scn = CE_state->scn;
961 
962 	ce_nbytes = sizeof(struct CE_ring_state)
963 		+ (nentries * sizeof(void *));
964 	ptr = qdf_mem_malloc(ce_nbytes);
965 	if (!ptr)
966 		return NULL;
967 
968 	ce_ring = (struct CE_ring_state *)ptr;
969 	ptr += sizeof(struct CE_ring_state);
970 	ce_ring->nentries = nentries;
971 	ce_ring->nentries_mask = nentries - 1;
972 
973 	ce_ring->low_water_mark_nentries = 0;
974 	ce_ring->high_water_mark_nentries = nentries;
975 	ce_ring->per_transfer_context = (void **)ptr;
976 
977 	desc_size = ce_get_desc_size(scn, ring_type);
978 
979 	/* Legacy platforms that do not support cache
980 	 * coherent DMA are unsupported
981 	 */
982 	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
983 			       ce_ring, nentries,
984 			       desc_size) !=
985 	    QDF_STATUS_SUCCESS) {
986 		HIF_ERROR("%s: ring has no DMA mem",
987 				__func__);
988 		qdf_mem_free(ptr);
989 		return NULL;
990 	}
991 	ce_ring->base_addr_CE_space_unaligned = base_addr;
992 
993 	/* Correctly initialize memory to 0 to
994 	 * prevent garbage data from crashing the system
995 	 * when downloading firmware
996 	 */
997 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
998 			nentries * desc_size +
999 			CE_DESC_RING_ALIGN);
1000 
1001 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
1002 
1003 		ce_ring->base_addr_CE_space =
1004 			(ce_ring->base_addr_CE_space_unaligned +
1005 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
1006 
1007 		ce_ring->base_addr_owner_space = (void *)
1008 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
1009 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
1010 	} else {
1011 		ce_ring->base_addr_CE_space =
1012 				ce_ring->base_addr_CE_space_unaligned;
1013 		ce_ring->base_addr_owner_space =
1014 				ce_ring->base_addr_owner_space_unaligned;
1015 	}
1016 
1017 	return ce_ring;
1018 }
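/*
 * Worked example of the alignment fix-up above (illustrative values,
 * assuming CE_DESC_RING_ALIGN were 8): an unaligned CE-space base of
 * 0x1004 becomes (0x1004 + 7) & ~7 == 0x1008, and the owner-space
 * pointer is rounded up with the same formula so both views of the
 * ring stay in step.
 */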
1019 
1020 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
1021 			uint32_t ce_id, struct CE_ring_state *ring,
1022 			struct CE_attr *attr)
1023 {
1024 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1025 
1026 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
1027 					      ring, attr);
1028 }
1029 
1030 int hif_ce_bus_early_suspend(struct hif_softc *scn)
1031 {
1032 	uint8_t ul_pipe, dl_pipe;
1033 	int ce_id, status, ul_is_polled, dl_is_polled;
1034 	struct CE_state *ce_state;
1035 
1036 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
1037 					 &ul_pipe, &dl_pipe,
1038 					 &ul_is_polled, &dl_is_polled);
1039 	if (status) {
1040 		HIF_ERROR("%s: pipe_mapping failure", __func__);
1041 		return status;
1042 	}
1043 
1044 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1045 		if (ce_id == ul_pipe)
1046 			continue;
1047 		if (ce_id == dl_pipe)
1048 			continue;
1049 
1050 		ce_state = scn->ce_id_to_state[ce_id];
1051 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1052 		if (ce_state->state == CE_RUNNING)
1053 			ce_state->state = CE_PAUSED;
1054 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1055 	}
1056 
1057 	return status;
1058 }
1059 
1060 int hif_ce_bus_late_resume(struct hif_softc *scn)
1061 {
1062 	int ce_id;
1063 	struct CE_state *ce_state;
1064 	int write_index;
1065 	bool index_updated;
1066 
1067 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1068 		ce_state = scn->ce_id_to_state[ce_id];
1069 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1070 		if (ce_state->state == CE_PENDING) {
1071 			write_index = ce_state->src_ring->write_index;
1072 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
1073 					write_index);
1074 			ce_state->state = CE_RUNNING;
1075 			index_updated = true;
1076 		} else {
1077 			index_updated = false;
1078 		}
1079 
1080 		if (ce_state->state == CE_PAUSED)
1081 			ce_state->state = CE_RUNNING;
1082 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1083 
1084 		if (index_updated)
1085 			hif_record_ce_desc_event(scn, ce_id,
1086 				RESUME_WRITE_INDEX_UPDATE,
1087 				NULL, NULL, write_index, 0);
1088 	}
1089 
1090 	return 0;
1091 }
1092 
1093 /**
1094  * ce_oom_recovery() - try to recover rx ce from oom condition
1095  * @context: CE_state of the CE with oom rx ring
1096  *
1097  * The executing work will continue to be rescheduled until
1098  * at least 1 descriptor is successfully posted to the rx ring.
1099  *
1100  * Return: none
1101  */
1102 static void ce_oom_recovery(void *context)
1103 {
1104 	struct CE_state *ce_state = context;
1105 	struct hif_softc *scn = ce_state->scn;
1106 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
1107 	struct HIF_CE_pipe_info *pipe_info =
1108 		&ce_softc->pipe_info[ce_state->id];
1109 
1110 	hif_post_recv_buffers_for_pipe(pipe_info);
1111 }
1112 
1113 #if HIF_CE_DEBUG_DATA_BUF
1114 /**
1115  * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed to
1116  * by the CE descriptors.
1117  * Allocate HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each
1118  * @scn: hif scn handle
1119  * @ce_id: Copy Engine Id
1120  *
1121  * Return: QDF_STATUS
1122  */
1123 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1124 {
1125 	struct hif_ce_desc_event *event = NULL;
1126 	struct hif_ce_desc_event *hist_ev = NULL;
1127 	uint32_t index = 0;
1128 
1129 	hist_ev =
1130 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1131 
1132 	if (!hist_ev)
1133 		return QDF_STATUS_E_NOMEM;
1134 
1135 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1136 		event = &hist_ev[index];
1137 		event->data =
1138 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
1139 		if (event->data == NULL)
1140 			return QDF_STATUS_E_NOMEM;
1141 	}
1142 	return QDF_STATUS_SUCCESS;
1143 }
1144 
1145 /**
1146  * free_mem_ce_debug_hist_data() - Free mem of the data pointed to by
1147  * the CE descriptors.
1148  * @scn: hif scn handle
1149  * @ce_id: Copy Engine Id
1150  *
1151  * Return: None
1152  */
1153 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1154 {
1155 	struct hif_ce_desc_event *event = NULL;
1156 	struct hif_ce_desc_event *hist_ev = NULL;
1157 	uint32_t index = 0;
1158 
1159 	hist_ev =
1160 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1161 
1162 	if (!hist_ev)
1163 		return;
1164 
1165 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1166 		event = &hist_ev[index];
1167 		if (event->data != NULL)
1168 			qdf_mem_free(event->data);
1169 		event->data = NULL;
1170 		event = NULL;
1171 	}
1172 }
1173 #endif /* HIF_CE_DEBUG_DATA_BUF */
1174 
1175 /*
1176  * Note: For MCL, #if defined(HIF_CONFIG_SLUB_DEBUG_ON) also needs to be
1177  * checked here
1178  */
1179 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF
1180 /**
1181  * alloc_mem_ce_debug_history() - Allocate mem for storing the CE descriptors
1182  * @scn: hif scn handle
1183  * @CE_id: Copy Engine Id
1184  *
1185  * Return: QDF_STATUS
1186  */
1187 static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
1188 						unsigned int CE_id)
1189 {
1190 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
1191 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
1192 
1193 	if (scn->hif_ce_desc_hist.hist_ev[CE_id] == NULL) {
1194 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
1195 		return QDF_STATUS_E_NOMEM;
1196 	} else {
1197 		scn->hif_ce_desc_hist.enable[CE_id] = 1;
1198 		return QDF_STATUS_SUCCESS;
1199 	}
1200 }
1201 
1202 /**
1203  * free_mem_ce_debug_history() - Free mem allocated for storing the CE
1204  * descriptors.
1205  * @scn: hif scn handle
1206  * @CE_id: Copy Engine Id
1207  *
1208  * Return: None
1209  */
1210 static inline void free_mem_ce_debug_history(struct hif_softc *scn,
1211 						unsigned int CE_id)
1212 {
1213 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1214 	struct hif_ce_desc_event *hist_ev =
1215 			(struct hif_ce_desc_event *)ce_hist->hist_ev[CE_id];
1216 
1217 	if (!hist_ev)
1218 		return;
1219 
1220 #if HIF_CE_DEBUG_DATA_BUF
1221 	if (ce_hist->data_enable[CE_id] == 1) {
1222 		ce_hist->data_enable[CE_id] = 0;
1223 		free_mem_ce_debug_hist_data(scn, CE_id);
1224 	}
1225 #endif
1226 	ce_hist->enable[CE_id] = 0;
1227 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
1228 	ce_hist->hist_ev[CE_id] = NULL;
1229 }
1230 
1231 /**
1232  * reset_ce_debug_history() - reset the index and ce id used for dumping the
1233  * CE records on the console using sysfs.
1234  * @scn: hif scn handle
1235  *
1236  * Return: None
1237  */
1238 static inline void reset_ce_debug_history(struct hif_softc *scn)
1239 {
1240 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1241 	/* Initialise the CE debug history sysfs interface inputs ce_id and
1242 	 * index. Disable data storing
1243 	 */
1244 	ce_hist->hist_index = 0;
1245 	ce_hist->hist_id = 0;
1246 }
1247 #else /*Note: #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
1248 static inline QDF_STATUS alloc_mem_ce_debug_history(struct hif_softc *scn,
1249 						unsigned int CE_id)
1250 {
1251 	return QDF_STATUS_SUCCESS;
1252 }
1253 
1254 static inline void free_mem_ce_debug_history(struct hif_softc *scn,
1255 						unsigned int CE_id)
1256 {
1257 }
1258 
1259 static inline void reset_ce_debug_history(struct hif_softc *scn)
1260 {
1261 }
1262 #endif /*Note: defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
1263 
1264 void ce_enable_polling(void *cestate)
1265 {
1266 	struct CE_state *CE_state = (struct CE_state *)cestate;
1267 
1268 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1269 		CE_state->timer_inited = true;
1270 }
1271 
1272 void ce_disable_polling(void *cestate)
1273 {
1274 	struct CE_state *CE_state = (struct CE_state *)cestate;
1275 
1276 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1277 		CE_state->timer_inited = false;
1278 }
1279 
1280 /*
1281  * Initialize a Copy Engine based on caller-supplied attributes.
1282  * This may be called once to initialize both source and destination
1283  * rings or it may be called twice for separate source and destination
1284  * initialization. It may be that only one side or the other is
1285  * initialized by software/firmware.
1286  *
1287  * This should be called during the initialization sequence before
1288  * interrupts are enabled, so we don't have to worry about thread safety.
1289  */
1290 struct CE_handle *ce_init(struct hif_softc *scn,
1291 			  unsigned int CE_id, struct CE_attr *attr)
1292 {
1293 	struct CE_state *CE_state;
1294 	uint32_t ctrl_addr;
1295 	unsigned int nentries;
1296 	bool malloc_CE_state = false;
1297 	bool malloc_src_ring = false;
1298 	int status;
1299 
1300 	QDF_ASSERT(CE_id < scn->ce_count);
1301 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
1302 	CE_state = scn->ce_id_to_state[CE_id];
1303 
1304 	if (!CE_state) {
1305 		CE_state =
1306 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
1307 		if (!CE_state) {
1308 			HIF_ERROR("%s: CE_state has no mem", __func__);
1309 			return NULL;
1310 		}
1311 		malloc_CE_state = true;
1312 		qdf_spinlock_create(&CE_state->ce_index_lock);
1313 
1314 		CE_state->id = CE_id;
1315 		CE_state->ctrl_addr = ctrl_addr;
1316 		CE_state->state = CE_RUNNING;
1317 		CE_state->attr_flags = attr->flags;
1318 	}
1319 	CE_state->scn = scn;
1320 
1321 	qdf_atomic_init(&CE_state->rx_pending);
1322 	if (attr == NULL) {
1323 		/* Already initialized; caller wants the handle */
1324 		return (struct CE_handle *)CE_state;
1325 	}
1326 
1327 	if (CE_state->src_sz_max)
1328 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
1329 	else
1330 		CE_state->src_sz_max = attr->src_sz_max;
1331 
1332 	ce_init_ce_desc_event_log(scn, CE_id,
1333 				  attr->src_nentries + attr->dest_nentries);
1334 
1335 	/* source ring setup */
1336 	nentries = attr->src_nentries;
1337 	if (nentries) {
1338 		struct CE_ring_state *src_ring;
1339 
1340 		nentries = roundup_pwr2(nentries);
1341 		if (CE_state->src_ring) {
1342 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
1343 		} else {
1344 			src_ring = CE_state->src_ring =
1345 				ce_alloc_ring_state(CE_state,
1346 						CE_RING_SRC,
1347 						nentries);
1348 			if (!src_ring) {
1349 				/* cannot allocate src ring. If the
1350 				 * CE_state is allocated locally free
1351 				 * CE_State and return error.
1352 				 */
1353 				HIF_ERROR("%s: src ring has no mem", __func__);
1354 				if (malloc_CE_state) {
1355 					/* allocated CE_state locally */
1356 					qdf_mem_free(CE_state);
1357 					malloc_CE_state = false;
1358 				}
1359 				return NULL;
1360 			}
1361 			/* we can allocate src ring. Mark that the src ring is
1362 			 * allocated locally
1363 			 */
1364 			malloc_src_ring = true;
1365 
1366 			/*
1367 			 * Also allocate a shadow src ring in
1368 			 * regular mem to use for faster access.
1369 			 */
1370 			src_ring->shadow_base_unaligned =
1371 				qdf_mem_malloc(nentries *
1372 					       sizeof(struct CE_src_desc) +
1373 					       CE_DESC_RING_ALIGN);
1374 			if (src_ring->shadow_base_unaligned == NULL) {
1375 				HIF_ERROR("%s: src ring no shadow_base mem",
1376 					  __func__);
1377 				goto error_no_dma_mem;
1378 			}
1379 			src_ring->shadow_base = (struct CE_src_desc *)
1380 				(((size_t) src_ring->shadow_base_unaligned +
1381 				CE_DESC_RING_ALIGN - 1) &
1382 				 ~(CE_DESC_RING_ALIGN - 1));
1383 
1384 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
1385 					       src_ring, attr);
1386 			if (status < 0)
1387 				goto error_target_access;
1388 
1389 			ce_ring_test_initial_indexes(CE_id, src_ring,
1390 						     "src_ring");
1391 		}
1392 	}
1393 
1394 	/* destination ring setup */
1395 	nentries = attr->dest_nentries;
1396 	if (nentries) {
1397 		struct CE_ring_state *dest_ring;
1398 
1399 		nentries = roundup_pwr2(nentries);
1400 		if (CE_state->dest_ring) {
1401 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
1402 		} else {
1403 			dest_ring = CE_state->dest_ring =
1404 				ce_alloc_ring_state(CE_state,
1405 						CE_RING_DEST,
1406 						nentries);
1407 			if (!dest_ring) {
1408 				/* cannot allocate dst ring. If the CE_state
1409 				 * or src ring is allocated locally free
1410 				 * CE_State and src ring and return error.
1411 				 */
1412 				HIF_ERROR("%s: dest ring has no mem",
1413 					  __func__);
1414 				goto error_no_dma_mem;
1415 			}
1416 
1417 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
1418 				      dest_ring, attr);
1419 			if (status < 0)
1420 				goto error_target_access;
1421 
1422 			ce_ring_test_initial_indexes(CE_id, dest_ring,
1423 						     "dest_ring");
1424 
1425 			/* For srng based target, init status ring here */
1426 			if (ce_srng_based(CE_state->scn)) {
1427 				CE_state->status_ring =
1428 					ce_alloc_ring_state(CE_state,
1429 							CE_RING_STATUS,
1430 							nentries);
1431 				if (CE_state->status_ring == NULL) {
1432 					/*Allocation failed. Cleanup*/
1433 					qdf_mem_free(CE_state->dest_ring);
1434 					if (malloc_src_ring) {
1435 						qdf_mem_free
1436 							(CE_state->src_ring);
1437 						CE_state->src_ring = NULL;
1438 						malloc_src_ring = false;
1439 					}
1440 					if (malloc_CE_state) {
1441 						/* allocated CE_state locally */
1442 						scn->ce_id_to_state[CE_id] =
1443 							NULL;
1444 						qdf_mem_free(CE_state);
1445 						malloc_CE_state = false;
1446 					}
1447 
1448 					return NULL;
1449 				}
1450 
1451 				status = ce_ring_setup(scn, CE_RING_STATUS,
1452 					       CE_id, CE_state->status_ring,
1453 					       attr);
1454 				if (status < 0)
1455 					goto error_target_access;
1456 
1457 			}
1458 
1459 			/* epping */
1460 			/* poll timer */
1461 			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
1462 				qdf_timer_init(scn->qdf_dev,
1463 						&CE_state->poll_timer,
1464 						ce_poll_timeout,
1465 						CE_state,
1466 						QDF_TIMER_TYPE_WAKE_APPS);
1467 				ce_enable_polling(CE_state);
1468 				qdf_timer_mod(&CE_state->poll_timer,
1469 						      CE_POLL_TIMEOUT);
1470 			}
1471 		}
1472 	}
1473 
1474 	if (!ce_srng_based(scn)) {
1475 		/* Enable CE error interrupts */
1476 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1477 			goto error_target_access;
1478 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1479 		if (Q_TARGET_ACCESS_END(scn) < 0)
1480 			goto error_target_access;
1481 	}
1482 
1483 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1484 			ce_oom_recovery, CE_state);
1485 
1486 	/* update the htt_data attribute */
1487 	ce_mark_datapath(CE_state);
1488 	scn->ce_id_to_state[CE_id] = CE_state;
1489 
1490 	alloc_mem_ce_debug_history(scn, CE_id);
1491 
1492 	return (struct CE_handle *)CE_state;
1493 
1494 error_target_access:
1495 error_no_dma_mem:
1496 	ce_fini((struct CE_handle *)CE_state);
1497 	return NULL;
1498 }
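/*
 * Illustrative sketch of a ce_init()/ce_fini() round trip (hypothetical
 * attribute values, not taken from any host_ce_config table):
 *
 *	struct CE_attr attr = {
 *		.flags = CE_ATTR_ENABLE_POLL,
 *		.src_nentries = 0,	(dest-only, i.e. receive, engine)
 *		.dest_nentries = 512,	(normalized by roundup_pwr2())
 *		.src_sz_max = 2048,
 *	};
 *	struct CE_handle *copyeng = ce_init(scn, CE_id, &attr);
 *
 *	if (copyeng)
 *		ce_fini(copyeng);
 *
 * Calling ce_init() again with attr == NULL for an already-initialized
 * CE simply returns the existing handle.
 */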
1499 
1500 #ifdef WLAN_FEATURE_FASTPATH
1501 /**
1502  * hif_enable_fastpath() - Update that we have enabled fastpath mode
1503  * @hif_ctx: HIF context
1504  *
1505  * For use in data path
1506  *
1507  * Return: void
1508  */
1509 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1510 {
1511 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1512 
1513 	if (ce_srng_based(scn)) {
1514 		HIF_INFO("%s, srng rings do not support fastpath", __func__);
1515 		return;
1516 	}
1517 	HIF_DBG("%s, Enabling fastpath mode", __func__);
1518 	scn->fastpath_mode_on = true;
1519 }
1520 
1521 /**
1522  * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
1523  * @hif_ctx: HIF Context
1524  *
1525  * For use in data path to skip HTC
1526  *
1527  * Return: bool
1528  */
1529 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1530 {
1531 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1532 
1533 	return scn->fastpath_mode_on;
1534 }
1535 
1536 /**
1537  * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
1538  * @hif_ctx: HIF Context
1539  *
1540  * API to check if polling is enabled on all CEs. Returns true when polling
1541  * is enabled on all CEs.
1542  *
1543  * Return: bool
1544  */
1545 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1546 {
1547 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1548 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1549 	struct CE_attr *attr;
1550 	int id;
1551 
1552 	for (id = 0; id < scn->ce_count; id++) {
1553 		attr = &hif_state->host_ce_config[id];
1554 		if (attr && (attr->dest_nentries) &&
1555 		    !(attr->flags & CE_ATTR_ENABLE_POLL))
1556 			return false;
1557 	}
1558 	return true;
1559 }
1560 qdf_export_symbol(hif_is_polled_mode_enabled);
1561 
1562 /**
1563  * hif_get_ce_handle - API to get CE handle for FastPath mode
1564  * @hif_ctx: HIF Context
1565  * @id: CopyEngine Id
1566  *
1567  * API to return CE handle for fastpath mode
1568  *
1569  * Return: CE handle (void *) for the given id
1570  */
1571 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1572 {
1573 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1574 
1575 	return scn->ce_id_to_state[id];
1576 }
1577 
1578 /**
1579  * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup.
1580  * No processing is required inside this function.
1581  * @ce_hdl: Copy engine handle
1582  * Using an assert, this function makes sure that
1583  * the TX CE has been processed completely.
1584  *
1585  * This is called while dismantling CE structures. No other thread
1586  * should be using these structures while dismantling is occurring,
1587  * therefore no locking is needed.
1588  *
1589  * Return: none
1590  */
1591 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1592 {
1593 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1594 	struct CE_ring_state *src_ring = ce_state->src_ring;
1595 	struct hif_softc *sc = ce_state->scn;
1596 	uint32_t sw_index, write_index;
1597 
1598 	if (hif_is_nss_wifi_enabled(sc))
1599 		return;
1600 
1601 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
1602 		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
1603 			 __func__, __LINE__);
1604 		sw_index = src_ring->sw_index;
1605 		write_index = src_ring->sw_index;
1606 
1607 		/* At this point Tx CE should be clean */
1608 		qdf_assert_always(sw_index == write_index);
1609 	}
1610 }
1611 
1612 /**
1613  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1614  * @ce_hdl: Handle to CE
1615  *
1616  * These buffers are never allocated on the fly, but
1617  * are allocated only once during HIF start and freed
1618  * only once during HIF stop.
1619  * NOTE:
1620  * The assumption here is there is no in-flight DMA in progress
1621  * currently, so that buffers can be freed up safely.
1622  *
1623  * Return: NONE
1624  */
1625 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1626 {
1627 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1628 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
1629 	qdf_nbuf_t nbuf;
1630 	int i;
1631 
1632 	if (ce_state->scn->fastpath_mode_on == false)
1633 		return;
1634 
1635 	if (!ce_state->htt_rx_data)
1636 		return;
1637 
1638 	/*
1639 	 * When fastpath_mode is on, datapath CEs are completely full:
1640 	 * unlike other CEs, they do not leave one blank space to
1641 	 * distinguish an empty queue from a full queue. So free all the
1642 	 * entries.
1643 	 */
1644 	for (i = 0; i < dst_ring->nentries; i++) {
1645 		nbuf = dst_ring->per_transfer_context[i];
1646 
1647 		/*
1648 		 * The reasons for doing this check are:
1649 		 * 1) Protect against calling cleanup before allocating buffers
1650 		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1651 		 *    could have a partially filled ring, because of a memory
1652 		 *    allocation failure in the middle of allocating ring.
1653 		 *    This check accounts for that case, checking
1654 		 *    fastpath_mode_on flag or started flag would not have
1655 		 *    covered that case. This is not in performance path,
1656 		 *    so OK to do this.
1657 		 */
1658 		if (nbuf) {
1659 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1660 					      QDF_DMA_FROM_DEVICE);
1661 			qdf_nbuf_free(nbuf);
1662 		}
1663 	}
1664 }
1665 
1666 /**
1667  * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1668  * @scn: HIF handle
1669  *
1670  * Datapath Rx CEs are special case, where we reuse all the message buffers.
1671  * Hence we have to post all the entries in the pipe, even, in the beginning
1672  * unlike for other CE pipes where one less than dest_nentries are filled in
1673  * the beginning.
1674  *
1675  * Return: None
1676  */
1677 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1678 {
1679 	int pipe_num;
1680 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1681 
1682 	if (scn->fastpath_mode_on == false)
1683 		return;
1684 
1685 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1686 		struct HIF_CE_pipe_info *pipe_info =
1687 			&hif_state->pipe_info[pipe_num];
1688 		struct CE_state *ce_state =
1689 			scn->ce_id_to_state[pipe_info->pipe_num];
1690 
1691 		if (ce_state->htt_rx_data)
1692 			atomic_inc(&pipe_info->recv_bufs_needed);
1693 	}
1694 }
1695 #else
1696 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1697 {
1698 }
1699 
1700 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
1701 {
1702 	return false;
1703 }
1704 
1705 static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
1706 {
1707 	return false;
1708 }
1709 #endif /* WLAN_FEATURE_FASTPATH */
1710 
1711 void ce_fini(struct CE_handle *copyeng)
1712 {
1713 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1714 	unsigned int CE_id = CE_state->id;
1715 	struct hif_softc *scn = CE_state->scn;
1716 	uint32_t desc_size;
1717 
1718 	bool inited = CE_state->timer_inited;
1719 	CE_state->state = CE_UNUSED;
1720 	scn->ce_id_to_state[CE_id] = NULL;
1721 	/* Set the flag to false first to stop processing in ce_poll_timeout */
1722 	ce_disable_polling(CE_state);
1723 
1724 	qdf_lro_deinit(CE_state->lro_data);
1725 
1726 	if (CE_state->src_ring) {
1727 		/* Cleanup the datapath Tx ring */
1728 		ce_h2t_tx_ce_cleanup(copyeng);
1729 
1730 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
1731 		if (CE_state->src_ring->shadow_base_unaligned)
1732 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
1733 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
1734 			ce_free_desc_ring(scn, CE_state->id,
1735 					  CE_state->src_ring,
1736 					  desc_size);
1737 		qdf_mem_free(CE_state->src_ring);
1738 	}
1739 	if (CE_state->dest_ring) {
1740 		/* Cleanup the datapath Rx ring */
1741 		ce_t2h_msg_ce_cleanup(copyeng);
1742 
1743 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
1744 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
1745 			ce_free_desc_ring(scn, CE_state->id,
1746 					  CE_state->dest_ring,
1747 					  desc_size);
1748 		qdf_mem_free(CE_state->dest_ring);
1749 
1750 		/* epping */
1751 		if (inited) {
1752 			qdf_timer_free(&CE_state->poll_timer);
1753 		}
1754 	}
1755 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
1756 		/* Cleanup the datapath Tx ring */
1757 		ce_h2t_tx_ce_cleanup(copyeng);
1758 
1759 		if (CE_state->status_ring->shadow_base_unaligned)
1760 			qdf_mem_free(
1761 				CE_state->status_ring->shadow_base_unaligned);
1762 
1763 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
1764 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
1765 			ce_free_desc_ring(scn, CE_state->id,
1766 					  CE_state->status_ring,
1767 					  desc_size);
1768 		qdf_mem_free(CE_state->status_ring);
1769 	}
1770 
1771 	free_mem_ce_debug_history(scn, CE_id);
1772 	reset_ce_debug_history(scn);
1773 	ce_deinit_ce_desc_event_log(scn, CE_id);
1774 
1775 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
1776 	qdf_mem_free(CE_state);
1777 }
1778 
1779 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
1780 {
1781 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1782 
1783 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
1784 		  sizeof(hif_state->msg_callbacks_pending));
1785 	qdf_mem_zero(&hif_state->msg_callbacks_current,
1786 		  sizeof(hif_state->msg_callbacks_current));
1787 }
1788 
1789 /* Send the first nbytes bytes of the buffer */
1790 QDF_STATUS
1791 hif_send_head(struct hif_opaque_softc *hif_ctx,
1792 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
1793 	      qdf_nbuf_t nbuf, unsigned int data_attr)
1794 {
1795 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1796 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1797 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1798 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
1799 	int bytes = nbytes, nfrags = 0;
1800 	struct ce_sendlist sendlist;
1801 	int status, i = 0;
1802 	unsigned int mux_id = 0;
1803 
1804 	if (nbytes > qdf_nbuf_len(nbuf)) {
1805 		HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
1806 			  (uint32_t)qdf_nbuf_len(nbuf));
1807 		QDF_ASSERT(0);
1808 	}
1809 
1810 	transfer_id =
1811 		(mux_id & MUX_ID_MASK) |
1812 		(transfer_id & TRANSACTION_ID_MASK);
1813 	data_attr &= DESC_DATA_FLAG_MASK;
1814 	/*
1815 	 * The common case involves sending multiple fragments within a
1816 	 * single download (the tx descriptor and the tx frame header).
1817 	 * So, optimize for the case of multiple fragments by not even
1818 	 * checking whether it's necessary to use a sendlist.
1819 	 * The overhead of using a sendlist for a single buffer download
1820 	 * is not a big deal, since it happens rarely (for WMI messages).
1821 	 */
1822 	ce_sendlist_init(&sendlist);
1823 	do {
1824 		qdf_dma_addr_t frag_paddr;
1825 		int frag_bytes;
1826 
1827 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
1828 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
1829 		/*
1830 		 * Clear the packet offset for all but the first CE desc.
1831 		 */
1832 		if (i++ > 0)
1833 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
1834 
1835 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
1836 				    frag_bytes >
1837 				    bytes ? bytes : frag_bytes,
1838 				    qdf_nbuf_get_frag_is_wordstream
1839 				    (nbuf,
1840 				    nfrags) ? 0 :
1841 				    CE_SEND_FLAG_SWAP_DISABLE,
1842 				    data_attr);
1843 		if (status != QDF_STATUS_SUCCESS) {
1844 			HIF_ERROR("%s: error, frag_num %d larger than limit",
1845 				__func__, nfrags);
1846 			return status;
1847 		}
1848 		bytes -= frag_bytes;
1849 		nfrags++;
1850 	} while (bytes > 0);
1851 
1852 	/* Make sure we have resources to handle this request */
1853 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
1854 	if (pipe_info->num_sends_allowed < nfrags) {
1855 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1856 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
1857 		return QDF_STATUS_E_RESOURCES;
1858 	}
1859 	pipe_info->num_sends_allowed -= nfrags;
1860 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1861 
1862 	if (qdf_unlikely(ce_hdl == NULL)) {
1863 		HIF_ERROR("%s: error CE handle is null", __func__);
1864 		return A_ERROR;
1865 	}
1866 
1867 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
1868 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
1869 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
1870 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
1871 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
1872 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
1873 
1874 	return status;
1875 }
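
/*
 * Illustrative sketch (not part of the driver): a typical caller maps the
 * frame for DMA and then hands it to hif_send_head() for a given pipe. The
 * pipe_id, transfer_id and nbuf below are hypothetical values chosen only
 * to show the call shape; real callers (e.g. HTC) obtain them from their
 * own bookkeeping.
 *
 *	qdf_nbuf_map_single(scn->qdf_dev, nbuf, QDF_DMA_TO_DEVICE);
 *	if (hif_send_head(hif_ctx, pipe_id, transfer_id,
 *			  qdf_nbuf_len(nbuf), nbuf, 0) != QDF_STATUS_SUCCESS)
 *		qdf_nbuf_unmap_single(scn->qdf_dev, nbuf, QDF_DMA_TO_DEVICE);
 */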
1876 
1877 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1878 								int force)
1879 {
1880 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1881 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1882 
1883 	if (!force) {
1884 		int resources;
1885 		/*
1886 		 * Decide whether to actually poll for completions, or just
1887 		 * wait for a later chance. If there seem to be plenty of
1888 		 * resources left, then just wait, since checking involves
1889 		 * reading a CE register, which is a relatively expensive
1890 		 * operation.
1891 		 */
1892 		resources = hif_get_free_queue_number(hif_ctx, pipe);
1893 		/*
1894 		 * If at least 50% of the total resources are still available,
1895 		 * don't bother checking again yet.
1896 		 */
1897 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
1898 									 1))
1899 			return;
1900 	}
1901 #if ATH_11AC_TXCOMPACT
1902 	ce_per_engine_servicereap(scn, pipe);
1903 #else
1904 	ce_per_engine_service(scn, pipe);
1905 #endif
1906 }
1907 
1908 uint16_t
1909 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
1910 {
1911 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1912 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1913 	uint16_t rv;
1914 
1915 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
1916 	rv = pipe_info->num_sends_allowed;
1917 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1918 	return rv;
1919 }
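
/*
 * Note: hif_send_head() performs the same num_sends_allowed check under
 * completion_freeq_lock before enqueueing its fragments, so callers using
 * the count returned here for flow control should treat it only as a hint;
 * the authoritative check happens at send time.
 */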
1920 
1921 /* Called by lower (CE) layer when a send to Target completes. */
1922 static void
1923 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
1924 		     void *transfer_context, qdf_dma_addr_t CE_data,
1925 		     unsigned int nbytes, unsigned int transfer_id,
1926 		     unsigned int sw_index, unsigned int hw_index,
1927 		     unsigned int toeplitz_hash_result)
1928 {
1929 	struct HIF_CE_pipe_info *pipe_info =
1930 		(struct HIF_CE_pipe_info *)ce_context;
1931 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
1932 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1933 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
1934 	struct hif_msg_callbacks *msg_callbacks =
1935 		&pipe_info->pipe_callbacks;
1936 
1937 	do {
1938 		/*
1939 		 * The upper layer callback will be triggered
1940 		 * when the last fragment is completed.
1941 		 */
1942 		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
1943 			if (scn->target_status == TARGET_STATUS_RESET) {
1944 
1945 				qdf_nbuf_unmap_single(scn->qdf_dev,
1946 						      transfer_context,
1947 						      QDF_DMA_TO_DEVICE);
1948 				qdf_nbuf_free(transfer_context);
1949 			} else
1950 				msg_callbacks->txCompletionHandler(
1951 					msg_callbacks->Context,
1952 					transfer_context, transfer_id,
1953 					toeplitz_hash_result);
1954 		}
1955 
1956 		qdf_spin_lock(&pipe_info->completion_freeq_lock);
1957 		pipe_info->num_sends_allowed++;
1958 		qdf_spin_unlock(&pipe_info->completion_freeq_lock);
1959 	} while (ce_completed_send_next(copyeng,
1960 			&ce_context, &transfer_context,
1961 			&CE_data, &nbytes, &transfer_id,
1962 			&sw_idx, &hw_idx,
1963 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
1964 }
1965 
1966 /**
1967  * hif_ce_do_recv(): send message from copy engine to upper layers
1968  * @msg_callbacks: structure containing callback and callback context
1969  * @netbuff: skb containing message
1970  * @nbytes: number of bytes in the message
1971  * @pipe_info: used for the pipe_number info
1972  *
1973  * Checks the packet length, configures the length in the netbuff,
1974  * and calls the upper layer callback.
1975  *
1976  * return: None
1977  */
1978 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
1979 		qdf_nbuf_t netbuf, int nbytes,
1980 		struct HIF_CE_pipe_info *pipe_info) {
1981 	if (nbytes <= pipe_info->buf_sz) {
1982 		qdf_nbuf_set_pktlen(netbuf, nbytes);
1983 		msg_callbacks->
1984 			rxCompletionHandler(msg_callbacks->Context,
1985 					netbuf, pipe_info->pipe_num);
1986 	} else {
1987 		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
1988 				__func__, netbuf, nbytes);
1989 
1990 		qdf_nbuf_free(netbuf);
1991 	}
1992 }
1993 
1994 /* Called by lower (CE) layer when data is received from the Target. */
1995 static void
1996 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
1997 		     void *transfer_context, qdf_dma_addr_t CE_data,
1998 		     unsigned int nbytes, unsigned int transfer_id,
1999 		     unsigned int flags)
2000 {
2001 	struct HIF_CE_pipe_info *pipe_info =
2002 		(struct HIF_CE_pipe_info *)ce_context;
2003 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
2004 	struct CE_state *ce_state = (struct CE_state *) copyeng;
2005 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2006 #ifdef HIF_PCI
2007 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
2008 #endif
2009 	struct hif_msg_callbacks *msg_callbacks =
2010 		 &pipe_info->pipe_callbacks;
2011 
2012 	do {
2013 #ifdef HIF_PCI
2014 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2015 #endif
2016 		qdf_nbuf_unmap_single(scn->qdf_dev,
2017 				      (qdf_nbuf_t) transfer_context,
2018 				      QDF_DMA_FROM_DEVICE);
2019 
2020 		atomic_inc(&pipe_info->recv_bufs_needed);
2021 		hif_post_recv_buffers_for_pipe(pipe_info);
2022 		if (scn->target_status == TARGET_STATUS_RESET)
2023 			qdf_nbuf_free(transfer_context);
2024 		else
2025 			hif_ce_do_recv(msg_callbacks, transfer_context,
2026 				nbytes, pipe_info);
2027 
2028 		/* Set up force_break flag if num of receives reaches
2029 		 * MAX_NUM_OF_RECEIVES
2030 		 */
2031 		ce_state->receive_count++;
2032 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
2033 			ce_state->force_break = 1;
2034 			break;
2035 		}
2036 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
2037 					&CE_data, &nbytes, &transfer_id,
2038 					&flags) == QDF_STATUS_SUCCESS);
2039 
2040 }
2041 
2042 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
2043 
2044 void
2045 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
2046 	      struct hif_msg_callbacks *callbacks)
2047 {
2048 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2049 
2050 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2051 	spin_lock_init(&pcie_access_log_lock);
2052 #endif
2053 	/* Save callbacks for later installation */
2054 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
2055 		 sizeof(hif_state->msg_callbacks_pending));
2056 
2057 }
2058 
2059 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
2060 {
2061 	struct CE_handle *ce_diag = hif_state->ce_diag;
2062 	int pipe_num;
2063 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2064 	struct hif_msg_callbacks *hif_msg_callbacks =
2065 		&hif_state->msg_callbacks_current;
2066 
2067 	/* daemonize("hif_compl_thread"); */
2068 
2069 	if (scn->ce_count == 0) {
2070 		HIF_ERROR("%s: Invalid ce_count", __func__);
2071 		return -EINVAL;
2072 	}
2073 
2074 	if (!hif_msg_callbacks ||
2075 			!hif_msg_callbacks->rxCompletionHandler ||
2076 			!hif_msg_callbacks->txCompletionHandler) {
2077 		HIF_ERROR("%s: no completion handler registered", __func__);
2078 		return -EFAULT;
2079 	}
2080 
2081 	A_TARGET_ACCESS_LIKELY(scn);
2082 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2083 		struct CE_attr attr;
2084 		struct HIF_CE_pipe_info *pipe_info;
2085 
2086 		pipe_info = &hif_state->pipe_info[pipe_num];
2087 		if (pipe_info->ce_hdl == ce_diag)
2088 			continue;       /* Handle Diagnostic CE specially */
2089 		attr = hif_state->host_ce_config[pipe_num];
2090 		if (attr.src_nentries) {
2091 			/* pipe used to send to target */
2092 			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
2093 					 __func__, pipe_num, pipe_info);
2094 			ce_send_cb_register(pipe_info->ce_hdl,
2095 					    hif_pci_ce_send_done, pipe_info,
2096 					    attr.flags & CE_ATTR_DISABLE_INTR);
2097 			pipe_info->num_sends_allowed = attr.src_nentries - 1;
2098 		}
2099 		if (attr.dest_nentries) {
2100 			/* pipe used to receive from target */
2101 			ce_recv_cb_register(pipe_info->ce_hdl,
2102 					    hif_pci_ce_recv_data, pipe_info,
2103 					    attr.flags & CE_ATTR_DISABLE_INTR);
2104 		}
2105 
2106 		if (attr.src_nentries)
2107 			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
2108 
2109 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
2110 					sizeof(pipe_info->pipe_callbacks));
2111 	}
2112 
2113 	A_TARGET_ACCESS_UNLIKELY(scn);
2114 	return 0;
2115 }
2116 
2117 /*
2118  * Install pending msg callbacks.
2119  *
2120  * TBDXXX: This hack is needed because upper layers install msg callbacks
2121  * for use with HTC before BMI is done; yet this HIF implementation
2122  * needs to continue to use BMI msg callbacks. Really, upper layers
2123  * should not register HTC callbacks until AFTER BMI phase.
2124  */
2125 static void hif_msg_callbacks_install(struct hif_softc *scn)
2126 {
2127 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2128 
2129 	qdf_mem_copy(&hif_state->msg_callbacks_current,
2130 		 &hif_state->msg_callbacks_pending,
2131 		 sizeof(hif_state->msg_callbacks_pending));
2132 }
2133 
2134 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2135 							uint8_t *DLPipe)
2136 {
2137 	int ul_is_polled, dl_is_polled;
2138 
2139 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
2140 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2141 }
2142 
2143 /**
2144  * hif_dump_pipe_debug_count() - Log error count
2145  * @scn: hif_softc pointer.
2146  *
2147  * Output the pipe error counts of each pipe to log file
2148  *
2149  * Return: N/A
2150  */
2151 void hif_dump_pipe_debug_count(struct hif_softc *scn)
2152 {
2153 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2154 	int pipe_num;
2155 
2156 	if (hif_state == NULL) {
2157 		HIF_ERROR("%s hif_state is NULL", __func__);
2158 		return;
2159 	}
2160 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2161 		struct HIF_CE_pipe_info *pipe_info;
2162 
2163 		pipe_info = &hif_state->pipe_info[pipe_num];
2164 
2165 		if (pipe_info->nbuf_alloc_err_count > 0 ||
2166 		    pipe_info->nbuf_dma_err_count > 0 ||
2167 		    pipe_info->nbuf_ce_enqueue_err_count)
2168 			HIF_ERROR(
2169 				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
2170 				__func__, pipe_info->pipe_num,
2171 				atomic_read(&pipe_info->recv_bufs_needed),
2172 				pipe_info->nbuf_alloc_err_count,
2173 				pipe_info->nbuf_dma_err_count,
2174 				pipe_info->nbuf_ce_enqueue_err_count);
2175 	}
2176 }
2177 
2178 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2179 					  void *nbuf, uint32_t *error_cnt,
2180 					  enum hif_ce_event_type failure_type,
2181 					  const char *failure_type_string)
2182 {
2183 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2184 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2185 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2186 	int ce_id = CE_state->id;
2187 	uint32_t error_cnt_tmp;
2188 
2189 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2190 	error_cnt_tmp = ++(*error_cnt);
2191 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2192 	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
2193 		  __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2194 		  failure_type_string);
2195 	hif_record_ce_desc_event(scn, ce_id, failure_type,
2196 				 NULL, nbuf, bufs_needed_tmp, 0);
2197 	/* if we fail to allocate the last buffer for an rx pipe,
2198 	 * there is no trigger to refill the ce and we will
2199 	 * eventually crash
2200 	 */
2201 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
2202 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
2203 
2205 }
2206 
2209 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
2210 {
2211 	struct CE_handle *ce_hdl;
2212 	qdf_size_t buf_sz;
2213 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2214 	QDF_STATUS status;
2215 	uint32_t bufs_posted = 0;
2216 
2217 	buf_sz = pipe_info->buf_sz;
2218 	if (buf_sz == 0) {
2219 		/* Unused Copy Engine */
2220 		return QDF_STATUS_SUCCESS;
2221 	}
2222 
2223 	ce_hdl = pipe_info->ce_hdl;
2224 
2225 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2226 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
2227 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
2228 		qdf_nbuf_t nbuf;
2229 
2230 		atomic_dec(&pipe_info->recv_bufs_needed);
2231 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2232 
2233 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
2234 		if (!nbuf) {
2235 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2236 					&pipe_info->nbuf_alloc_err_count,
2237 					 HIF_RX_NBUF_ALLOC_FAILURE,
2238 					"HIF_RX_NBUF_ALLOC_FAILURE");
2239 			return QDF_STATUS_E_NOMEM;
2240 		}
2241 
2242 		/*
2243 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
2244 		 * CE_data = dma_map_single(dev, data, buf_sz,
2245 		 *			    DMA_FROM_DEVICE);
2246 		 */
2247 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
2248 					    QDF_DMA_FROM_DEVICE);
2249 
2250 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2251 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2252 					&pipe_info->nbuf_dma_err_count,
2253 					 HIF_RX_NBUF_MAP_FAILURE,
2254 					"HIF_RX_NBUF_MAP_FAILURE");
2255 			qdf_nbuf_free(nbuf);
2256 			return status;
2257 		}
2258 
2259 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
2260 
2261 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
2262 					       buf_sz, DMA_FROM_DEVICE);
2263 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
2264 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2265 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2266 					&pipe_info->nbuf_ce_enqueue_err_count,
2267 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
2268 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
2269 
2270 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2271 						QDF_DMA_FROM_DEVICE);
2272 			qdf_nbuf_free(nbuf);
2273 			return status;
2274 		}
2275 
2276 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2277 		bufs_posted++;
2278 	}
2279 	pipe_info->nbuf_alloc_err_count =
2280 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
2281 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2282 	pipe_info->nbuf_dma_err_count =
2283 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
2284 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2285 	pipe_info->nbuf_ce_enqueue_err_count =
2286 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
2287 		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
2288 
2289 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2290 
2291 	return QDF_STATUS_SUCCESS;
2292 }
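
/*
 * The error counters above decay by the number of buffers successfully
 * posted in this call. For example, if nbuf_alloc_err_count was 5 and
 * 3 buffers were posted, it becomes 2; if 5 or more were posted, it
 * resets to 0. (The numbers are illustrative only.)
 */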
2293 
2294 /*
2295  * Try to post all desired receive buffers for all pipes.
2296  * Returns QDF_STATUS_SUCCESS for non-fastpath Rx copy engines, since
2297  * oom_allocation_work will be scheduled to recover any failures, and an
2298  * error status if we are unable to completely replenish receive buffers
2299  * for a fastpath Rx copy engine.
2300  */
2301 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
2302 {
2303 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2304 	int pipe_num;
2305 	struct CE_state *ce_state = NULL;
2306 	QDF_STATUS qdf_status;
2307 
2308 	A_TARGET_ACCESS_LIKELY(scn);
2309 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2310 		struct HIF_CE_pipe_info *pipe_info;
2311 
2312 		ce_state = scn->ce_id_to_state[pipe_num];
2313 		pipe_info = &hif_state->pipe_info[pipe_num];
2314 
2315 		if (hif_is_nss_wifi_enabled(scn) &&
2316 		    ce_state && (ce_state->htt_rx_data))
2317 			continue;
2318 
2319 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
2320 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
2321 			ce_state->htt_rx_data &&
2322 			scn->fastpath_mode_on) {
2323 			A_TARGET_ACCESS_UNLIKELY(scn);
2324 			return qdf_status;
2325 		}
2326 	}
2327 
2328 	A_TARGET_ACCESS_UNLIKELY(scn);
2329 
2330 	return QDF_STATUS_SUCCESS;
2331 }
2332 
2333 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
2334 {
2335 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2336 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2337 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
2338 
2339 	hif_update_fastpath_recv_bufs_cnt(scn);
2340 
2341 	hif_msg_callbacks_install(scn);
2342 
2343 	if (hif_completion_thread_startup(hif_state))
2344 		return QDF_STATUS_E_FAILURE;
2345 
2346 	/* enable buffer cleanup */
2347 	hif_state->started = true;
2348 
2349 	/* Post buffers once to start things off. */
2350 	qdf_status = hif_post_recv_buffers(scn);
2351 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2352 		/* cleanup is done in hif_ce_disable */
2353 		HIF_ERROR("%s:failed to post buffers", __func__);
2354 		return qdf_status;
2355 	}
2356 
2357 	return qdf_status;
2358 }
2359 
2360 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2361 {
2362 	struct hif_softc *scn;
2363 	struct CE_handle *ce_hdl;
2364 	uint32_t buf_sz;
2365 	struct HIF_CE_state *hif_state;
2366 	qdf_nbuf_t netbuf;
2367 	qdf_dma_addr_t CE_data;
2368 	void *per_CE_context;
2369 
2370 	buf_sz = pipe_info->buf_sz;
2371 	/* Unused Copy Engine */
2372 	if (buf_sz == 0)
2373 		return;
2374 
2375 
2376 	hif_state = pipe_info->HIF_CE_state;
2377 	if (!hif_state->started)
2378 		return;
2379 
2380 	scn = HIF_GET_SOFTC(hif_state);
2381 	ce_hdl = pipe_info->ce_hdl;
2382 
2383 	if (scn->qdf_dev == NULL)
2384 		return;
2385 	while (ce_revoke_recv_next
2386 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
2387 			&CE_data) == QDF_STATUS_SUCCESS) {
2388 		if (netbuf) {
2389 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2390 					      QDF_DMA_FROM_DEVICE);
2391 			qdf_nbuf_free(netbuf);
2392 		}
2393 	}
2394 }
2395 
2396 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2397 {
2398 	struct CE_handle *ce_hdl;
2399 	struct HIF_CE_state *hif_state;
2400 	struct hif_softc *scn;
2401 	qdf_nbuf_t netbuf;
2402 	void *per_CE_context;
2403 	qdf_dma_addr_t CE_data;
2404 	unsigned int nbytes;
2405 	unsigned int id;
2406 	uint32_t buf_sz;
2407 	uint32_t toeplitz_hash_result;
2408 
2409 	buf_sz = pipe_info->buf_sz;
2410 	if (buf_sz == 0) {
2411 		/* Unused Copy Engine */
2412 		return;
2413 	}
2414 
2415 	hif_state = pipe_info->HIF_CE_state;
2416 	if (!hif_state->started) {
2417 		return;
2418 	}
2419 
2420 	scn = HIF_GET_SOFTC(hif_state);
2421 
2422 	ce_hdl = pipe_info->ce_hdl;
2423 
2424 	while (ce_cancel_send_next
2425 		       (ce_hdl, &per_CE_context,
2426 		       (void **)&netbuf, &CE_data, &nbytes,
2427 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2428 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2429 			/*
2430 			 * Packets enqueued by htt_h2t_ver_req_msg() and
2431 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2432 			 * freed in htt_htc_misc_pkt_pool_free() in
2433 			 * wlantl_close(), so do not free them here again.
2434 			 * Check the endpoint on which they were queued to
2435 			 * identify such packets.
2436 			 */
2437 			if (id == scn->htc_htt_tx_endpoint)
2438 				return;
2439 			/* Indicate the completion to higher
2440 			 * layer to free the buffer
2441 			 */
2442 			if (pipe_info->pipe_callbacks.txCompletionHandler)
2443 				pipe_info->pipe_callbacks.
2444 				    txCompletionHandler(pipe_info->
2445 					    pipe_callbacks.Context,
2446 					    netbuf, id, toeplitz_hash_result);
2447 		}
2448 	}
2449 }
2450 
2451 /*
2452  * Cleanup residual buffers for device shutdown:
2453  *    buffers that were enqueued for receive
2454  *    buffers that were to be sent
2455  * Note: Buffers that had completed but which were
2456  * not yet processed are on a completion queue. They
2457  * are handled when the completion thread shuts down.
2458  */
2459 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
2460 {
2461 	int pipe_num;
2462 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2463 	struct CE_state *ce_state;
2464 
2465 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2466 		struct HIF_CE_pipe_info *pipe_info;
2467 
2468 		ce_state = scn->ce_id_to_state[pipe_num];
2469 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2470 				((ce_state->htt_tx_data) ||
2471 				 (ce_state->htt_rx_data))) {
2472 			continue;
2473 		}
2474 
2475 		pipe_info = &hif_state->pipe_info[pipe_num];
2476 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
2477 		hif_send_buffer_cleanup_on_pipe(pipe_info);
2478 	}
2479 }
2480 
2481 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
2482 {
2483 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2484 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2485 
2486 	hif_buffer_cleanup(hif_state);
2487 }
2488 
2489 static void hif_destroy_oom_work(struct hif_softc *scn)
2490 {
2491 	struct CE_state *ce_state;
2492 	int ce_id;
2493 
2494 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2495 		ce_state = scn->ce_id_to_state[ce_id];
2496 		if (ce_state)
2497 			qdf_destroy_work(scn->qdf_dev,
2498 					 &ce_state->oom_allocation_work);
2499 	}
2500 }
2501 
2502 void hif_ce_stop(struct hif_softc *scn)
2503 {
2504 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2505 	int pipe_num;
2506 
2507 	/*
2508 	 * before cleaning up any memory, ensure irq &
2509 	 * bottom half contexts will not be re-entered
2510 	 */
2511 	hif_disable_isr(&scn->osc);
2512 	hif_destroy_oom_work(scn);
2513 	scn->hif_init_done = false;
2514 
2515 	/*
2516 	 * At this point, asynchronous threads are stopped,
2517 	 * The Target should not DMA nor interrupt, Host code may
2518 	 * not initiate anything more.  So we just need to clean
2519 	 * up Host-side state.
2520 	 */
2521 
2522 	if (scn->athdiag_procfs_inited) {
2523 		athdiag_procfs_remove();
2524 		scn->athdiag_procfs_inited = false;
2525 	}
2526 
2527 	hif_buffer_cleanup(hif_state);
2528 
2529 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2530 		struct HIF_CE_pipe_info *pipe_info;
2531 		struct CE_attr attr;
2532 		struct CE_handle *ce_diag = hif_state->ce_diag;
2533 
2534 		pipe_info = &hif_state->pipe_info[pipe_num];
2535 		if (pipe_info->ce_hdl) {
2536 			if (pipe_info->ce_hdl != ce_diag) {
2537 				attr = hif_state->host_ce_config[pipe_num];
2538 				if (attr.src_nentries)
2539 					qdf_spinlock_destroy(&pipe_info->
2540 							completion_freeq_lock);
2541 			}
2542 			ce_fini(pipe_info->ce_hdl);
2543 			pipe_info->ce_hdl = NULL;
2544 			pipe_info->buf_sz = 0;
2545 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2546 		}
2547 	}
2548 
2549 	if (hif_state->sleep_timer_init) {
2550 		qdf_timer_stop(&hif_state->sleep_timer);
2551 		qdf_timer_free(&hif_state->sleep_timer);
2552 		hif_state->sleep_timer_init = false;
2553 	}
2554 
2555 	hif_state->started = false;
2556 }
2557 
2558 #ifdef QCN7605_SUPPORT
2559 static inline
2560 void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg
2561 				    **target_shadow_reg_cfg_ret,
2562 				    uint32_t *shadow_cfg_sz_ret)
2563 {
2564 	if (target_shadow_reg_cfg_ret)
2565 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg_map_qcn7605;
2566 	if (shadow_cfg_sz_ret)
2567 		*shadow_cfg_sz_ret = sizeof(target_shadow_reg_cfg_map_qcn7605);
2568 }
2569 #else
2570 static inline
2571 void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg
2572 				    **target_shadow_reg_cfg_ret,
2573 				    uint32_t *shadow_cfg_sz_ret)
2574 {
2575 	HIF_ERROR("QCN7605 not supported");
2576 }
2577 #endif
2578 
2579 static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
2580 				   struct shadow_reg_cfg
2581 				   **target_shadow_reg_cfg_ret,
2582 				   uint32_t *shadow_cfg_sz_ret)
2583 {
2584 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2585 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
2586 
2587 	switch (tgt_info->target_type) {
2588 	case TARGET_TYPE_QCN7605:
2589 		hif_get_shadow_reg_cfg_qcn7605(target_shadow_reg_cfg_ret,
2590 					       shadow_cfg_sz_ret);
2591 		break;
2592 	default:
2593 		if (target_shadow_reg_cfg_ret)
2594 			*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2595 		if (shadow_cfg_sz_ret)
2596 			*shadow_cfg_sz_ret = shadow_cfg_sz;
2597 	}
2598 }
2599 
2600 /**
2601  * hif_get_target_ce_config() - get copy engine configuration
2602  * @target_ce_config_ret: basic copy engine configuration
2603  * @target_ce_config_sz_ret: size of the basic configuration in bytes
2604  * @target_service_to_ce_map_ret: service mapping for the copy engines
2605  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2606  * @target_shadow_reg_cfg_ret: shadow register configuration
2607  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2608  *
2609  * Provides an accessor to these values outside of this file.
2610  * Currently these are stored in static pointers to const sections.
2611  * There are multiple configurations that are selected from at compile time.
2612  * Runtime selection would need to consider mode, target type and bus type.
2613  *
2614  * Return: return by parameter.
2615  */
2616 void hif_get_target_ce_config(struct hif_softc *scn,
2617 		struct CE_pipe_config **target_ce_config_ret,
2618 		uint32_t *target_ce_config_sz_ret,
2619 		struct service_to_pipe **target_service_to_ce_map_ret,
2620 		uint32_t *target_service_to_ce_map_sz_ret,
2621 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
2622 		uint32_t *shadow_cfg_sz_ret)
2623 {
2624 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2625 
2626 	*target_ce_config_ret = hif_state->target_ce_config;
2627 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
2628 
2629 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2630 				       target_service_to_ce_map_sz_ret);
2631 	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
2632 			       shadow_cfg_sz_ret);
2633 }
2634 
2635 #ifdef CONFIG_SHADOW_V2
2636 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2637 {
2638 	int i;
2639 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2640 		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
2641 
2642 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2643 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2644 		     "%s: i %d, val %x", __func__, i,
2645 		     cfg->shadow_reg_v2_cfg[i].addr);
2646 	}
2647 }
2648 
2649 #else
2650 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2651 {
2652 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2653 		  "%s: CONFIG_SHADOW_V2 not defined", __func__);
2654 }
2655 #endif
2656 
2657 /**
2658  * hif_wlan_enable(): call the platform driver to enable wlan
2659  * @scn: HIF Context
2660  *
2661  * This function passes the con_mode and CE configuration to
2662  * platform driver to enable wlan.
2663  *
2664  * Return: linux error code
2665  */
2666 int hif_wlan_enable(struct hif_softc *scn)
2667 {
2668 	struct pld_wlan_enable_cfg cfg;
2669 	enum pld_driver_mode mode;
2670 	uint32_t con_mode = hif_get_conparam(scn);
2671 
2672 	hif_get_target_ce_config(scn,
2673 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
2674 			&cfg.num_ce_tgt_cfg,
2675 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
2676 			&cfg.num_ce_svc_pipe_cfg,
2677 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
2678 			&cfg.num_shadow_reg_cfg);
2679 
2680 	/* translate from structure size to array size */
2681 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
2682 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
2683 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
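
	/*
	 * For example, num_ce_tgt_cfg starts out as the size in bytes of the
	 * target CE config array (as returned by hif_get_target_ce_config())
	 * and, after the division above, holds the number of CE pipe
	 * configuration entries.
	 */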
2684 
2685 	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
2686 			      &cfg.num_shadow_reg_v2_cfg);
2687 
2688 	hif_print_hal_shadow_register_cfg(&cfg);
2689 
2690 	if (QDF_GLOBAL_FTM_MODE == con_mode)
2691 		mode = PLD_FTM;
2692 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
2693 		mode = PLD_COLDBOOT_CALIBRATION;
2694 	else if (QDF_IS_EPPING_ENABLED(con_mode))
2695 		mode = PLD_EPPING;
2696 	else
2697 		mode = PLD_MISSION;
2698 
2699 	if (BYPASS_QMI)
2700 		return 0;
2701 	else
2702 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
2703 				       mode, QWLAN_VERSIONSTR);
2704 }
2705 
2706 #ifdef WLAN_FEATURE_EPPING
2707 
2708 #define CE_EPPING_USES_IRQ true
2709 
2710 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
2711 {
2712 	if (CE_EPPING_USES_IRQ)
2713 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
2714 	else
2715 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
2716 	hif_state->target_ce_config = target_ce_config_wlan_epping;
2717 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
2718 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
2719 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
2720 }
2721 #endif
2722 
2723 #ifdef QCN7605_SUPPORT
2724 static inline
2725 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
2726 			       struct HIF_CE_state *hif_state)
2727 {
2728 	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
2729 	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
2730 	hif_state->target_ce_config_sz =
2731 				 sizeof(target_ce_config_wlan_qcn7605);
2732 	scn->ce_count = QCN7605_CE_COUNT;
2733 }
2734 #else
2735 static inline
2736 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
2737 			       struct HIF_CE_state *hif_state)
2738 {
2739 	HIF_ERROR("QCN7605 not supported");
2740 }
2741 #endif
2742 
2743 /**
2744  * hif_ce_prepare_config() - load the correct static tables.
2745  * @scn: hif context
2746  *
2747  * Epping uses different static attribute tables than mission mode.
2748  */
2749 void hif_ce_prepare_config(struct hif_softc *scn)
2750 {
2751 	uint32_t mode = hif_get_conparam(scn);
2752 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2753 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
2754 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2755 
2756 	hif_state->ce_services = ce_services_attach(scn);
2757 
2758 	scn->ce_count = HOST_CE_COUNT;
2759 	/* if epping is enabled we need to use the epping configuration. */
2760 	if (QDF_IS_EPPING_ENABLED(mode)) {
2761 		hif_ce_prepare_epping_config(hif_state);
2762 	}
2763 
2764 	switch (tgt_info->target_type) {
2765 	default:
2766 		hif_state->host_ce_config = host_ce_config_wlan;
2767 		hif_state->target_ce_config = target_ce_config_wlan;
2768 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
2769 		break;
2770 	case TARGET_TYPE_QCN7605:
2771 		hif_set_ce_config_qcn7605(scn, hif_state);
2772 		break;
2773 	case TARGET_TYPE_AR900B:
2774 	case TARGET_TYPE_QCA9984:
2775 	case TARGET_TYPE_IPQ4019:
2776 	case TARGET_TYPE_QCA9888:
2777 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
2778 			hif_state->host_ce_config =
2779 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
2780 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2781 			hif_state->host_ce_config =
2782 				host_lowdesc_ce_cfg_wlan_ar900b;
2783 		} else {
2784 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
2785 		}
2786 
2787 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
2788 		hif_state->target_ce_config_sz =
2789 				sizeof(target_ce_config_wlan_ar900b);
2790 
2791 		break;
2792 
2793 	case TARGET_TYPE_AR9888:
2794 	case TARGET_TYPE_AR9888V2:
2795 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
2796 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
2797 		} else {
2798 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
2799 		}
2800 
2801 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
2802 		hif_state->target_ce_config_sz =
2803 					sizeof(target_ce_config_wlan_ar9888);
2804 
2805 		break;
2806 
2807 	case TARGET_TYPE_QCA8074:
2808 	case TARGET_TYPE_QCA8074V2:
2809 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
2810 			hif_state->host_ce_config =
2811 					host_ce_config_wlan_qca8074_pci;
2812 			hif_state->target_ce_config =
2813 				target_ce_config_wlan_qca8074_pci;
2814 			hif_state->target_ce_config_sz =
2815 				sizeof(target_ce_config_wlan_qca8074_pci);
2816 		} else {
2817 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
2818 			hif_state->target_ce_config =
2819 					target_ce_config_wlan_qca8074;
2820 			hif_state->target_ce_config_sz =
2821 				sizeof(target_ce_config_wlan_qca8074);
2822 		}
2823 		break;
2824 	case TARGET_TYPE_QCA6290:
2825 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
2826 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
2827 		hif_state->target_ce_config_sz =
2828 					sizeof(target_ce_config_wlan_qca6290);
2829 
2830 		scn->ce_count = QCA_6290_CE_COUNT;
2831 		break;
2832 	case TARGET_TYPE_QCA6390:
2833 		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
2834 		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
2835 		hif_state->target_ce_config_sz =
2836 					sizeof(target_ce_config_wlan_qca6390);
2837 
2838 		scn->ce_count = QCA_6390_CE_COUNT;
2839 		break;
2840 	}
2841 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
2842 }
2843 
2844 /**
2845  * hif_ce_open() - do ce specific allocations
2846  * @hif_sc: pointer to hif context
2847  *
2848  * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_NOMEM
2849  */
2850 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
2851 {
2852 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2853 
2854 	qdf_spinlock_create(&hif_state->irq_reg_lock);
2855 	qdf_spinlock_create(&hif_state->keep_awake_lock);
2856 	return QDF_STATUS_SUCCESS;
2857 }
2858 
2859 /**
2860  * hif_ce_close() - do ce specific free
2861  * @hif_sc: pointer to hif context
2862  */
2863 void hif_ce_close(struct hif_softc *hif_sc)
2864 {
2865 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2866 
2867 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
2868 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
2869 }
2870 
2871 /**
2872  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
2873  * @hif_sc: hif context
2874  *
2875  * uses state variables to support cleaning up when hif_config_ce fails.
2876  */
2877 void hif_unconfig_ce(struct hif_softc *hif_sc)
2878 {
2879 	int pipe_num;
2880 	struct HIF_CE_pipe_info *pipe_info;
2881 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2882 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
2883 
2884 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
2885 		pipe_info = &hif_state->pipe_info[pipe_num];
2886 		if (pipe_info->ce_hdl) {
2887 			ce_unregister_irq(hif_state, (1 << pipe_num));
2888 		}
2889 	}
2890 	deinit_tasklet_workers(hif_hdl);
2891 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
2892 		pipe_info = &hif_state->pipe_info[pipe_num];
2893 		if (pipe_info->ce_hdl) {
2894 			ce_fini(pipe_info->ce_hdl);
2895 			pipe_info->ce_hdl = NULL;
2896 			pipe_info->buf_sz = 0;
2897 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2898 		}
2899 	}
2900 	if (hif_sc->athdiag_procfs_inited) {
2901 		athdiag_procfs_remove();
2902 		hif_sc->athdiag_procfs_inited = false;
2903 	}
2904 }
2905 
2906 #ifdef CONFIG_BYPASS_QMI
2907 #define FW_SHARED_MEM (2 * 1024 * 1024)
2908 
2909 /**
2910  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
2911  * @scn: pointer to HIF structure
2912  *
2913  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
2914  *
2915  * Return: void
2916  */
2917 static void hif_post_static_buf_to_target(struct hif_softc *scn)
2918 {
2919 	void *target_va;
2920 	phys_addr_t target_pa;
2921 
2922 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2923 				FW_SHARED_MEM, &target_pa);
2924 	if (NULL == target_va) {
2925 		HIF_TRACE("Memory allocation failed, could not post target buf");
2926 		return;
2927 	}
2928 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
2929 	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
2930 }
2931 #else
2932 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
2933 {
2934 }
2935 #endif
2936 
2937 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
2938 				bool wait_for_it)
2939 {
2940 	/* todo */
2941 	return 0;
2942 }
2943 
2944 /**
2945  * hif_config_ce() - configure copy engines
2946  * @scn: hif context
2947  *
2948  * Prepares fw, copy engine hardware and host sw according
2949  * to the attributes selected by hif_ce_prepare_config.
2950  *
2951  * Also calls athdiag_procfs_init().
2952  *
2953  * Return: 0 for success, nonzero for failure.
2954  */
2955 int hif_config_ce(struct hif_softc *scn)
2956 {
2957 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2958 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2959 	struct HIF_CE_pipe_info *pipe_info;
2960 	int pipe_num;
2961 	struct CE_state *ce_state = NULL;
2962 
2963 #ifdef ADRASTEA_SHADOW_REGISTERS
2964 	int i;
2965 #endif
2966 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
2967 
2968 	scn->notice_send = true;
2969 	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
2970 
2971 	hif_post_static_buf_to_target(scn);
2972 
2973 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
2974 
2975 	hif_config_rri_on_ddr(scn);
2976 
2977 	if (ce_srng_based(scn))
2978 		scn->bus_ops.hif_target_sleep_state_adjust =
2979 			&hif_srng_sleep_state_adjust;
2980 
2981 	/* Initialise the CE debug history sysfs interface inputs (ce_id and
2982 	 * index) and disable data storing.
2983 	 */
2984 	reset_ce_debug_history(scn);
2985 
2986 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2987 		struct CE_attr *attr;
2988 
2989 		pipe_info = &hif_state->pipe_info[pipe_num];
2990 		pipe_info->pipe_num = pipe_num;
2991 		pipe_info->HIF_CE_state = hif_state;
2992 		attr = &hif_state->host_ce_config[pipe_num];
2993 
2994 		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
2995 		ce_state = scn->ce_id_to_state[pipe_num];
2996 		if (!ce_state) {
2997 			A_TARGET_ACCESS_UNLIKELY(scn);
2998 			goto err;
2999 		}
3000 		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
3001 		QDF_ASSERT(pipe_info->ce_hdl != NULL);
3002 		if (pipe_info->ce_hdl == NULL) {
3003 			rv = QDF_STATUS_E_FAILURE;
3004 			A_TARGET_ACCESS_UNLIKELY(scn);
3005 			goto err;
3006 		}
3007 
3008 		ce_state->lro_data = qdf_lro_init();
3009 
3010 		if (attr->flags & CE_ATTR_DIAG) {
3011 			/* Reserve the ultimate CE for
3012 			 * Diagnostic Window support
3013 			 */
3014 			hif_state->ce_diag = pipe_info->ce_hdl;
3015 			continue;
3016 		}
3017 
3018 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3019 				(ce_state->htt_rx_data))
3020 			continue;
3021 
3022 		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
3023 		if (attr->dest_nentries > 0) {
3024 			atomic_set(&pipe_info->recv_bufs_needed,
3025 				   init_buffer_count(attr->dest_nentries - 1));
3026 			/* SRNG based CE has one entry less */
3027 			if (ce_srng_based(scn))
3028 				atomic_dec(&pipe_info->recv_bufs_needed);
3029 		} else {
3030 			atomic_set(&pipe_info->recv_bufs_needed, 0);
3031 		}
3032 		ce_tasklet_init(hif_state, (1 << pipe_num));
3033 		ce_register_irq(hif_state, (1 << pipe_num));
3034 	}
3035 
3036 	if (athdiag_procfs_init(scn) != 0) {
3037 		A_TARGET_ACCESS_UNLIKELY(scn);
3038 		goto err;
3039 	}
3040 	scn->athdiag_procfs_inited = true;
3041 
3042 	HIF_DBG("%s: ce_init done", __func__);
3043 
3044 	init_tasklet_workers(hif_hdl);
3045 
3046 	HIF_DBG("%s: X, ret = %d", __func__, rv);
3047 
3048 #ifdef ADRASTEA_SHADOW_REGISTERS
3049 	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
3050 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
3051 		HIF_DBG("%s Shadow Register%d is mapped to address %x",
3052 			  __func__, i,
3053 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
3054 	}
3055 #endif
3056 
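	/*
	 * Convert the QDF status to the 0-for-success convention documented
	 * above: this evaluates to 0 when rv == QDF_STATUS_SUCCESS and to a
	 * nonzero value otherwise.
	 */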
3057 	return rv != QDF_STATUS_SUCCESS;
3058 
3059 err:
3060 	/* Failure, so clean up */
3061 	hif_unconfig_ce(scn);
3062 	HIF_TRACE("%s: X, ret = %d", __func__, rv);
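	/*
	 * QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE is always true, so the
	 * error path deliberately returns a nonzero (failure) value.
	 */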
3063 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
3064 }
3065 
3066 #ifdef WLAN_FEATURE_FASTPATH
3067 /**
3068  * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
3069  * @handler: Callback function
3070  * @context: handle for callback function
3071  *
3072  * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
3073  */
3074 int hif_ce_fastpath_cb_register(struct hif_opaque_softc *hif_ctx,
3075 				fastpath_msg_handler handler,
3076 				void *context)
3077 {
3078 	struct CE_state *ce_state;
3079 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3080 	int i;
3081 
3082 	if (!scn) {
3083 		HIF_ERROR("%s: scn is NULL", __func__);
3084 		QDF_ASSERT(0);
3085 		return QDF_STATUS_E_FAILURE;
3086 	}
3087 
3088 	if (!scn->fastpath_mode_on) {
3089 		HIF_WARN("%s: Fastpath mode disabled", __func__);
3090 		return QDF_STATUS_E_FAILURE;
3091 	}
3092 
3093 	for (i = 0; i < scn->ce_count; i++) {
3094 		ce_state = scn->ce_id_to_state[i];
3095 		if (ce_state->htt_rx_data) {
3096 			ce_state->fastpath_handler = handler;
3097 			ce_state->context = context;
3098 		}
3099 	}
3100 
3101 	return QDF_STATUS_SUCCESS;
3102 }
3103 qdf_export_symbol(hif_ce_fastpath_cb_register);
3104 #endif
3105 
3106 #ifdef IPA_OFFLOAD
3107 /**
3108  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
3109  * @scn: bus context
3110  * @ce_sr: copyengine source ring resource info
3111  * @ce_sr_ring_size: copyengine source ring size
3112  * @ce_reg_paddr: copyengine register physical address
3113  *
3114  * When the IPA micro controller data path offload feature is enabled,
3115  * HIF should release the copy engine related resource information to the
3116  * IPA UC, which will access the hardware resources using that information.
3117  *
3118  * Return: None
3119  */
3120 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
3121 			     qdf_shared_mem_t **ce_sr,
3122 			     uint32_t *ce_sr_ring_size,
3123 			     qdf_dma_addr_t *ce_reg_paddr)
3124 {
3125 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3126 	struct HIF_CE_pipe_info *pipe_info =
3127 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
3128 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3129 
3130 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
3131 			    ce_reg_paddr);
3132 }
3133 #endif /* IPA_OFFLOAD */
3134 
3135 
3136 #ifdef ADRASTEA_SHADOW_REGISTERS
3137 
3138 /*
3139  * Current shadow register config
3140  *
3141  * -----------------------------------------------------------
3142  * Shadow Register      |     CE   |    src/dst write index
3143  * -----------------------------------------------------------
3144  *         0            |     0    |           src
3145  *         1     No Config - Doesn't point to anything
3146  *         2     No Config - Doesn't point to anything
3147  *         3            |     3    |           src
3148  *         4            |     4    |           src
3149  *         5            |     5    |           src
3150  *         6     No Config - Doesn't point to anything
3151  *         7            |     7    |           src
3152  *         8     No Config - Doesn't point to anything
3153  *         9     No Config - Doesn't point to anything
3154  *         10    No Config - Doesn't point to anything
3155  *         11    No Config - Doesn't point to anything
3156  * -----------------------------------------------------------
3157  *         12    No Config - Doesn't point to anything
3158  *         13           |     1    |           dst
3159  *         14           |     2    |           dst
3160  *         15    No Config - Doesn't point to anything
3161  *         16    No Config - Doesn't point to anything
3162  *         17    No Config - Doesn't point to anything
3163  *         18    No Config - Doesn't point to anything
3164  *         19           |     7    |           dst
3165  *         20           |     8    |           dst
3166  *         21    No Config - Doesn't point to anything
3167  *         22    No Config - Doesn't point to anything
3168  *         23    No Config - Doesn't point to anything
3169  * -----------------------------------------------------------
3170  *
3171  *
3172  * ToDo - Move shadow register config to following in the future
3173  * This helps free up a block of shadow registers towards the end.
3174  * Can be used for other purposes
3175  *
3176  * -----------------------------------------------------------
3177  * Shadow Register      |     CE   |    src/dst write index
3178  * -----------------------------------------------------------
3179  *      0            |     0    |           src
3180  *      1            |     3    |           src
3181  *      2            |     4    |           src
3182  *      3            |     5    |           src
3183  *      4            |     7    |           src
3184  * -----------------------------------------------------------
3185  *      5            |     1    |           dst
3186  *      6            |     2    |           dst
3187  *      7            |     7    |           dst
3188  *      8            |     8    |           dst
3189  * -----------------------------------------------------------
3190  *      9     No Config - Doesn't point to anything
3191  *      12    No Config - Doesn't point to anything
3192  *      13    No Config - Doesn't point to anything
3193  *      14    No Config - Doesn't point to anything
3194  *      15    No Config - Doesn't point to anything
3195  *      16    No Config - Doesn't point to anything
3196  *      17    No Config - Doesn't point to anything
3197  *      18    No Config - Doesn't point to anything
3198  *      19    No Config - Doesn't point to anything
3199  *      20    No Config - Doesn't point to anything
3200  *      21    No Config - Doesn't point to anything
3201  *      22    No Config - Doesn't point to anything
3202  *      23    No Config - Doesn't point to anything
3203  * -----------------------------------------------------------
3204 */
3205 #ifndef QCN7605_SUPPORT
3206 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3207 {
3208 	u32 addr = 0;
3209 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3210 
3211 	switch (ce) {
3212 	case 0:
3213 		addr = SHADOW_VALUE0;
3214 		break;
3215 	case 3:
3216 		addr = SHADOW_VALUE3;
3217 		break;
3218 	case 4:
3219 		addr = SHADOW_VALUE4;
3220 		break;
3221 	case 5:
3222 		addr = SHADOW_VALUE5;
3223 		break;
3224 	case 7:
3225 		addr = SHADOW_VALUE7;
3226 		break;
3227 	default:
3228 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3229 		QDF_ASSERT(0);
3230 	}
3231 	return addr;
3232 
3233 }
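
/*
 * For example, a ctrl_addr belonging to CE 4 maps to SHADOW_VALUE4, matching
 * the row in the table above where shadow register 4 holds CE 4's source
 * write index.
 */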
3234 
3235 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3236 {
3237 	u32 addr = 0;
3238 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3239 
3240 	switch (ce) {
3241 	case 1:
3242 		addr = SHADOW_VALUE13;
3243 		break;
3244 	case 2:
3245 		addr = SHADOW_VALUE14;
3246 		break;
3247 	case 5:
3248 		addr = SHADOW_VALUE17;
3249 		break;
3250 	case 7:
3251 		addr = SHADOW_VALUE19;
3252 		break;
3253 	case 8:
3254 		addr = SHADOW_VALUE20;
3255 		break;
3256 	case 9:
3257 		addr = SHADOW_VALUE21;
3258 		break;
3259 	case 10:
3260 		addr = SHADOW_VALUE22;
3261 		break;
3262 	case 11:
3263 		addr = SHADOW_VALUE23;
3264 		break;
3265 	default:
3266 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3267 		QDF_ASSERT(0);
3268 	}
3269 
3270 	return addr;
3271 
3272 }
3273 #else
3274 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3275 {
3276 	u32 addr = 0;
3277 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3278 
3279 	switch (ce) {
3280 	case 0:
3281 		addr = SHADOW_VALUE0;
3282 		break;
3283 	case 4:
3284 		addr = SHADOW_VALUE4;
3285 		break;
3286 	case 5:
3287 		addr = SHADOW_VALUE5;
3288 		break;
3289 	default:
3290 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3291 		QDF_ASSERT(0);
3292 	}
3293 	return addr;
3294 }
3295 
3296 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3297 {
3298 	u32 addr = 0;
3299 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3300 
3301 	switch (ce) {
3302 	case 1:
3303 		addr = SHADOW_VALUE13;
3304 		break;
3305 	case 2:
3306 		addr = SHADOW_VALUE14;
3307 		break;
3308 	case 3:
3309 		addr = SHADOW_VALUE15;
3310 		break;
3311 	case 5:
3312 		addr = SHADOW_VALUE17;
3313 		break;
3314 	case 7:
3315 		addr = SHADOW_VALUE19;
3316 		break;
3317 	case 8:
3318 		addr = SHADOW_VALUE20;
3319 		break;
3320 	case 9:
3321 		addr = SHADOW_VALUE21;
3322 		break;
3323 	case 10:
3324 		addr = SHADOW_VALUE22;
3325 		break;
3326 	case 11:
3327 		addr = SHADOW_VALUE23;
3328 		break;
3329 	default:
3330 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3331 		QDF_ASSERT(0);
3332 	}
3333 
3334 	return addr;
3335 }
3336 #endif
3337 #endif
3338 
3339 #if defined(FEATURE_LRO)
3340 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
3341 {
3342 	struct CE_state *ce_state;
3343 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3344 
3345 	ce_state = scn->ce_id_to_state[ctx_id];
3346 
3347 	return ce_state->lro_data;
3348 }
3349 #endif
3350 
3351 /**
3352  * hif_map_service_to_pipe() - returns the ce ids pertaining to
3353  * this service
3354  * @hif_hdl: hif_opaque_softc pointer.
3355  * @svc_id: Service ID for which the mapping is needed.
3356  * @ul_pipe: address of the container in which ul pipe is returned.
3357  * @dl_pipe: address of the container in which dl pipe is returned.
3358  * @ul_is_polled: address of the container in which a bool
3359  *			indicating if the UL CE for this service
3360  *			is polled is returned.
3361  * @dl_is_polled: address of the container in which a bool
3362  *			indicating if the DL CE for this service
3363  *			is polled is returned.
3364  *
3365  * Return: Indicates whether the service has been found in the table.
3366  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
3367  *         There will be debug logs if either leg has not been updated
3368  *         because its entry is missing from the table (but this is not an error).
3369  */
3370 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
3371 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
3372 			int *dl_is_polled)
3373 {
3374 	int status = QDF_STATUS_E_INVAL;
3375 	unsigned int i;
3376 	struct service_to_pipe element;
3377 	struct service_to_pipe *tgt_svc_map_to_use;
3378 	uint32_t sz_tgt_svc_map_to_use;
3379 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3380 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3381 	bool dl_updated = false;
3382 	bool ul_updated = false;
3383 
3384 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
3385 				       &sz_tgt_svc_map_to_use);
3386 
3387 	*dl_is_polled = 0;  /* polling for received messages not supported */
3388 
3389 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
3390 
3391 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
3392 		if (element.service_id == svc_id) {
3393 			if (element.pipedir == PIPEDIR_OUT) {
3394 				*ul_pipe = element.pipenum;
3395 				*ul_is_polled =
3396 					(hif_state->host_ce_config[*ul_pipe].flags &
3397 					 CE_ATTR_DISABLE_INTR) != 0;
3398 				ul_updated = true;
3399 			} else if (element.pipedir == PIPEDIR_IN) {
3400 				*dl_pipe = element.pipenum;
3401 				dl_updated = true;
3402 			}
3403 			status = QDF_STATUS_SUCCESS;
3404 		}
3405 	}
3406 	if (ul_updated == false)
3407 		HIF_DBG("ul pipe is NOT updated for service %d", svc_id);
3408 	if (dl_updated == false)
3409 		HIF_DBG("dl pipe is NOT updated for service %d", svc_id);
3410 
3411 	return status;
3412 }
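
/*
 * Illustrative sketch (not part of the driver): looking up the control
 * service pipes, much as hif_get_default_pipe() does above. On success,
 * ul_pipe and dl_pipe hold the CE ids mapped to the service.
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *	int ret;
 *
 *	ret = hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
 *				      &ul_pipe, &dl_pipe,
 *				      &ul_polled, &dl_polled);
 */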
3413 
3414 #ifdef SHADOW_REG_DEBUG
3415 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
3416 		uint32_t CE_ctrl_addr)
3417 {
3418 	uint32_t read_from_hw, srri_from_ddr = 0;
3419 
3420 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
3421 
3422 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3423 
3424 	if (read_from_hw != srri_from_ddr) {
3425 		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3426 		       __func__, srri_from_ddr, read_from_hw,
3427 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3428 		QDF_ASSERT(0);
3429 	}
3430 	return srri_from_ddr;
3431 }
3432 
3433 
3434 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
3435 		uint32_t CE_ctrl_addr)
3436 {
3437 	uint32_t read_from_hw, drri_from_ddr = 0;
3438 
3439 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
3440 
3441 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3442 
3443 	if (read_from_hw != drri_from_ddr) {
3444 		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3445 		       drri_from_ddr, read_from_hw,
3446 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3447 		QDF_ASSERT(0);
3448 	}
3449 	return drri_from_ddr;
3450 }
3451 
3452 #endif
3453 
3454 #ifdef ADRASTEA_RRI_ON_DDR
3455 /**
3456  * hif_get_src_ring_read_index(): Called to get the SRRI
3457  *
3458  * @scn: hif_softc pointer
3459  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3460  *
3461  * This function returns the SRRI to the caller. For CEs that
3462  * don't have interrupts enabled, we look at the DDR-based SRRI
3463  *
3464  * Return: SRRI
3465  */
3466 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
3467 		uint32_t CE_ctrl_addr)
3468 {
3469 	struct CE_attr attr;
3470 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3471 
3472 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3473 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3474 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3475 	} else {
3476 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3477 			return A_TARGET_READ(scn,
3478 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
3479 		else
3480 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
3481 					CE_ctrl_addr);
3482 	}
3483 }
3484 
3485 /**
3486  * hif_get_dst_ring_read_index(): Called to get the DRRI
3487  *
3488  * @scn: hif_softc pointer
3489  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
3490  *
3491  * This function returns the DRRI to the caller. For CEs that
3492  * don't have interrupts enabled, we look at the DDR-based DRRI
3493  *
3494  * Return: DRRI
3495  */
3496 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
3497 		uint32_t CE_ctrl_addr)
3498 {
3499 	struct CE_attr attr;
3500 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3501 
3502 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
3503 
3504 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
3505 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
3506 	} else {
3507 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
3508 			return A_TARGET_READ(scn,
3509 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
3510 		else
3511 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
3512 					CE_ctrl_addr);
3513 	}
3514 }
3515 
3516 /**
3517  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3518  *
3519  * @scn: hif_softc pointer
3520  *
3521  * This function allocates non-cached memory on DDR and sends
3522  * the physical address of this memory to the CE hardware. The
3523  * hardware updates the RRI at this location.
3524  *
3525  * Return: None
3526  */
3527 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3528 {
3529 	unsigned int i;
3530 	qdf_dma_addr_t paddr_rri_on_ddr;
3531 	uint32_t high_paddr, low_paddr;
3532 
3533 	scn->vaddr_rri_on_ddr =
3534 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
3535 		scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)),
3536 		&paddr_rri_on_ddr);
	if (!scn->vaddr_rri_on_ddr) {
		/* bail out early; the zeroing below would dereference NULL */
		HIF_ERROR("%s: RRI-on-DDR allocation failed", __func__);
		return;
	}
3537 
3538 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
3539 	low_paddr  = BITS0_TO_31(paddr_rri_on_ddr);
3540 	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
3541 
3542 	HIF_DBG("%s using srri and drri from DDR", __func__);
3543 
3544 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
3545 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
3546 
3547 	for (i = 0; i < CE_COUNT; i++)
3548 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
3549 
3550 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
3551 
3552 }
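/*
 * Layout sketch (an assumption about the RRI-on-DDR consumers, not a
 * contract): the allocation above is CE_COUNT consecutive uint32_t words,
 * one per copy engine, and the SRRI/DRRI_FROM_DDR_ADDR() macros unpack the
 * source and destination read indices from the word belonging to a CE. A
 * hypothetical debug helper built on that assumption could look like:
 *
 *	static void hif_dbg_print_rri_word(struct hif_softc *scn, int ce_id)
 *	{
 *		uint32_t word = scn->vaddr_rri_on_ddr[ce_id];
 *
 *		HIF_DBG("CE%d rri word 0x%08x", ce_id, word);
 *	}
 */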
3553 #else
3554 
3555 /**
3556  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
3557  *
3558  * @scn: hif_softc pointer
3559  *
3560  * This is a dummy implementation for platforms that don't
3561  * support this functionality.
3562  *
3563  * Return: None
3564  */
3565 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
3566 {
3567 }
3568 #endif
3569 
3570 /**
3571  * hif_dump_ce_registers() - dump ce registers
3572  * @scn: hif_softc pointer.
3573  *
3574  * Output the copy engine registers
3575  *
3576  * Return: 0 for success or error code
3577  */
3578 int hif_dump_ce_registers(struct hif_softc *scn)
3579 {
3580 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3581 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
3582 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
3583 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
3584 	uint16_t i;
3585 	QDF_STATUS status;
3586 
3587 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
3588 		if (scn->ce_id_to_state[i] == NULL) {
3589 			HIF_DBG("CE%d not used.", i);
3590 			continue;
3591 		}
3592 
3593 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
3594 					   (uint8_t *) &ce_reg_values[0],
3595 					   ce_reg_word_size * sizeof(uint32_t));
3596 
3597 		if (status != QDF_STATUS_SUCCESS) {
3598 			HIF_ERROR("Dumping CE register failed!");
3599 			return -EACCES;
3600 		}
3601 		HIF_ERROR("CE%d=>\n", i);
3602 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
3603 				   (uint8_t *) &ce_reg_values[0],
3604 				   ce_reg_word_size * sizeof(uint32_t));
3605 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
3606 				+ SR_WR_INDEX_ADDRESS),
3607 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
3608 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
3609 				+ CURRENT_SRRI_ADDRESS),
3610 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
3611 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
3612 				+ DST_WR_INDEX_ADDRESS),
3613 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
3614 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
3615 				+ CURRENT_DRRI_ADDRESS),
3616 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
3617 		qdf_print("---");
3618 	}
3619 	return 0;
3620 }
3621 qdf_export_symbol(hif_dump_ce_registers);
3622 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
3623 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
3624 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
3625 {
3626 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3627 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3628 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
3629 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3630 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3631 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
3632 	struct CE_ring_state *src_ring = ce_state->src_ring;
3633 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
3634 
3635 	if (src_ring) {
3636 		hif_info->ul_pipe.nentries = src_ring->nentries;
3637 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
3638 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
3639 		hif_info->ul_pipe.write_index = src_ring->write_index;
3640 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
3641 		hif_info->ul_pipe.base_addr_CE_space =
3642 			src_ring->base_addr_CE_space;
3643 		hif_info->ul_pipe.base_addr_owner_space =
3644 			src_ring->base_addr_owner_space;
3645 	}
3646 
3647 
3648 	if (dest_ring) {
3649 		hif_info->dl_pipe.nentries = dest_ring->nentries;
3650 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
3651 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
3652 		hif_info->dl_pipe.write_index = dest_ring->write_index;
3653 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
3654 		hif_info->dl_pipe.base_addr_CE_space =
3655 			dest_ring->base_addr_CE_space;
3656 		hif_info->dl_pipe.base_addr_owner_space =
3657 			dest_ring->base_addr_owner_space;
3658 	}
3659 
3660 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
3661 	hif_info->ctrl_addr = ce_state->ctrl_addr;
3662 
3663 	return hif_info;
3664 }
3665 qdf_export_symbol(hif_get_addl_pipe_info);
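/*
 * Usage sketch for hif_get_addl_pipe_info() (hypothetical caller): offload
 * glue that needs the ring geometry of a pipe fills a caller-owned
 * structure:
 *
 *	struct hif_pipe_addl_info info = { 0 };
 *
 *	hif_get_addl_pipe_info(osc, &info, pipe_num);
 *
 * The function returns the pointer it was given, so the call can also be
 * used inline as an argument to a consumer.
 */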
3666 
3667 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
3668 {
3669 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3670 
3671 	scn->nss_wifi_ol_mode = mode;
3672 	return 0;
3673 }
3674 qdf_export_symbol(hif_set_nss_wifiol_mode);
3675 #endif
3676 
3677 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
3678 {
3679 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3680 	scn->hif_attribute = hif_attrib;
3681 }
3682 
3683 
3684 /* disable interrupts (only applicable for legacy copy engines currently) */
3685 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
3686 {
3687 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3688 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
3689 	uint32_t ctrl_addr = CE_state->ctrl_addr;
3690 
3691 	Q_TARGET_ACCESS_BEGIN(scn);
3692 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
3693 	Q_TARGET_ACCESS_END(scn);
3694 }
3695 qdf_export_symbol(hif_disable_interrupt);
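/*
 * Usage sketch for hif_disable_interrupt() (hypothetical call site): a
 * client that wants to poll a legacy copy engine instead of taking copy
 * complete interrupts can call
 *
 *	hif_disable_interrupt(hif_ctx, ce_id);
 *
 * where ce_id is the pipe/copy-engine number. Setting CE_ATTR_DISABLE_INTR
 * in the host CE configuration is the configuration-time counterpart of
 * this runtime call.
 */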
3696 
3697 /**
3698  * hif_fw_event_handler() - hif fw event handler
3699  * @hif_state: pointer to hif ce state structure
3700  *
3701  * Raise the HTC fw event callback so that pending fw events get processed.
3702  *
3703  * Return: none
3704  */
3705 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
3706 {
3707 	struct hif_msg_callbacks *msg_callbacks =
3708 		&hif_state->msg_callbacks_current;
3709 
3710 	if (!msg_callbacks->fwEventHandler)
3711 		return;
3712 
3713 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
3714 			QDF_STATUS_E_FAILURE);
3715 }
3716 
3717 #ifndef QCA_WIFI_3_0
3718 /**
3719  * hif_fw_interrupt_handler() - FW interrupt handler
3720  * @irq: irq number
3721  * @arg: the user pointer
3722  *
3723  * Called from the PCI interrupt handler when the Target raises a
3724  * firmware-generated interrupt to the Host.
3725  *
3726  * Only registered for legacy CE devices.
3727  *
3728  * Return: status of handled irq
3729  */
3730 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3731 {
3732 	struct hif_softc *scn = arg;
3733 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3734 	uint32_t fw_indicator_address, fw_indicator;
3735 
3736 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
3737 		return ATH_ISR_NOSCHED;
3738 
3739 	fw_indicator_address = hif_state->fw_indicator_address;
3740 	/* For sudden unplug this will return ~0 */
3741 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
3742 
3743 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
3744 		/* ACK: clear Target-side pending event */
3745 		A_TARGET_WRITE(scn, fw_indicator_address,
3746 			       fw_indicator & ~FW_IND_EVENT_PENDING);
3747 		if (Q_TARGET_ACCESS_END(scn) < 0)
3748 			return ATH_ISR_SCHED;
3749 
3750 		if (hif_state->started) {
3751 			hif_fw_event_handler(hif_state);
3752 		} else {
3753 			/*
3754 			 * Probable Target failure before we're prepared
3755 			 * to handle it.  Generally unexpected.
3756 			 * fw_indicator used as bitmap, and defined as below:
3757 			 *     FW_IND_EVENT_PENDING    0x1
3758 			 *     FW_IND_INITIALIZED      0x2
3759 			 *     FW_IND_NEEDRECOVER      0x4
3760 			 */
3761 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
3762 				("%s: Early firmware event indicated 0x%x\n",
3763 				 __func__, fw_indicator));
3764 		}
3765 	} else {
3766 		if (Q_TARGET_ACCESS_END(scn) < 0)
3767 			return ATH_ISR_SCHED;
3768 	}
3769 
3770 	return ATH_ISR_SCHED;
3771 }
3772 #else
3773 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3774 {
3775 	return ATH_ISR_SCHED;
3776 }
3777 #endif /* #ifndef QCA_WIFI_3_0 */
3778 
3779 
3780 /**
3781  * hif_wlan_disable(): call the platform driver to disable wlan
3782  * @scn: HIF Context
3783  *
3784  * This function passes the con_mode to the platform driver to
3785  * disable wlan.
3786  *
3787  * Return: void
3788  */
3789 void hif_wlan_disable(struct hif_softc *scn)
3790 {
3791 	enum pld_driver_mode mode;
3792 	uint32_t con_mode = hif_get_conparam(scn);
3793 
3794 	if (scn->target_status == TARGET_STATUS_RESET)
3795 		return;
3796 
3797 	if (QDF_GLOBAL_FTM_MODE == con_mode)
3798 		mode = PLD_FTM;
3799 	else if (QDF_IS_EPPING_ENABLED(con_mode))
3800 		mode = PLD_EPPING;
3801 	else
3802 		mode = PLD_MISSION;
3803 
3804 	pld_wlan_disable(scn->qdf_dev->dev, mode);
3805 }
3806 
3807 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
3808 {
3809 	QDF_STATUS status;
3810 	uint8_t ul_pipe, dl_pipe;
3811 	int ul_is_polled, dl_is_polled;
3812 
3813 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
3814 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
3815 					 HTC_CTRL_RSVD_SVC,
3816 					 &ul_pipe, &dl_pipe,
3817 					 &ul_is_polled, &dl_is_polled);
3818 	if (status) {
3819 		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
3820 		return qdf_status_to_os_return(status);
3821 	}
3822 
3823 	*ce_id = dl_pipe;
3824 
3825 	return 0;
3826 }
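/*
 * Usage sketch for hif_get_wake_ce_id() (hypothetical caller):
 * interrupt-registration code that needs to know which copy engine doubles
 * as the wake-interrupt source can do
 *
 *	uint8_t wake_ce_id;
 *
 *	if (hif_get_wake_ce_id(scn, &wake_ce_id))
 *		return -EINVAL;
 *
 * and then mark that CE's IRQ as wakeup capable.
 */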
3827