xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision ad85c389289a03e320cd08dea21861f9857892fc)
1 /*
2  * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "ce_api.h"
33 #include "qdf_trace.h"
34 #include "pld_common.h"
35 #include "hif_debug.h"
36 #include "ce_internal.h"
37 #include "ce_reg.h"
38 #include "ce_assignment.h"
39 #include "ce_tasklet.h"
40 #ifndef CONFIG_WIN
41 #include "qwlan_version.h"
42 #endif
43 #include "qdf_module.h"
44 
45 #define CE_POLL_TIMEOUT 10      /* ms */
46 
47 #define AGC_DUMP         1
48 #define CHANINFO_DUMP    2
49 #define BB_WATCHDOG_DUMP 3
50 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
51 #define PCIE_ACCESS_DUMP 4
52 #endif
53 #include "mp_dev.h"
54 
55 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290)) && \
56 	!defined(QCA_WIFI_SUPPORT_SRNG)
57 #define QCA_WIFI_SUPPORT_SRNG
58 #endif
59 
60 /* Forward references */
61 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
62 
63 /*
64  * Fix EV118783: poll to check whether a BMI response has arrived,
65  * rather than only waiting for the interrupt, which may be lost.
66  */
67 /* #define BMI_RSP_POLLING */
68 #define BMI_RSP_TO_MILLISEC  1000
69 
70 #ifdef CONFIG_BYPASS_QMI
71 #define BYPASS_QMI 1
72 #else
73 #define BYPASS_QMI 0
74 #endif
75 
76 #ifdef CONFIG_WIN
77 #if ENABLE_10_4_FW_HDR
78 #define WDI_IPA_SERVICE_GROUP 5
79 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
80 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
81 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
82 #endif /* ENABLE_10_4_FW_HDR */
83 #endif
84 
85 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
86 static void hif_config_rri_on_ddr(struct hif_softc *scn);
87 
88 /**
89  * hif_target_access_log_dump() - dump access log
90  *
91  * dump access log
92  *
93  * Return: n/a
94  */
95 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
96 static void hif_target_access_log_dump(void)
97 {
98 	hif_target_dump_access_log();
99 }
100 #endif
101 
102 
103 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
104 		      uint8_t cmd_id, bool start)
105 {
106 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
107 
108 	switch (cmd_id) {
109 	case AGC_DUMP:
110 		if (start)
111 			priv_start_agc(scn);
112 		else
113 			priv_dump_agc(scn);
114 		break;
115 	case CHANINFO_DUMP:
116 		if (start)
117 			priv_start_cap_chaninfo(scn);
118 		else
119 			priv_dump_chaninfo(scn);
120 		break;
121 	case BB_WATCHDOG_DUMP:
122 		priv_dump_bbwatchdog(scn);
123 		break;
124 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
125 	case PCIE_ACCESS_DUMP:
126 		hif_target_access_log_dump();
127 		break;
128 #endif
129 	default:
130 		HIF_ERROR("%s: Invalid htc dump command", __func__);
131 		break;
132 	}
133 }
134 
135 static void ce_poll_timeout(void *arg)
136 {
137 	struct CE_state *CE_state = (struct CE_state *)arg;
138 
139 	if (CE_state->timer_inited) {
140 		ce_per_engine_service(CE_state->scn, CE_state->id);
141 		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
142 	}
143 }
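
/*
 * Usage note (editorial sketch, not additional driver logic): CEs created
 * with CE_ATTR_ENABLE_POLL in their attributes are serviced from this timer
 * rather than relying solely on interrupts.  ce_init() arms the timer, and
 * ce_enable_polling()/ce_disable_polling() (later in this file) gate whether
 * ce_poll_timeout() keeps re-arming it every CE_POLL_TIMEOUT ms:
 *
 *   ce_enable_polling(CE_state);
 *   qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
 *   ...
 *   ce_disable_polling(CE_state);   // timer_inited = false, stops re-arming
 */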
144 
145 static unsigned int roundup_pwr2(unsigned int n)
146 {
147 	int i;
148 	unsigned int test_pwr2;
149 
150 	if (!(n & (n - 1)))
151 		return n; /* already a power of 2 */
152 
153 	test_pwr2 = 4;
154 	for (i = 0; i < 29; i++) {
155 		if (test_pwr2 > n)
156 			return test_pwr2;
157 		test_pwr2 = test_pwr2 << 1;
158 	}
159 
160 	QDF_ASSERT(0); /* n too large */
161 	return 0;
162 }
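
/*
 * Worked example (editorial note): roundup_pwr2() is used below to round CE
 * ring sizes up to a power of two so that nentries_mask = nentries - 1 can
 * be used for cheap index wrap-around, e.g.:
 *
 *   roundup_pwr2(100) == 128
 *   roundup_pwr2(512) == 512   (already a power of two)
 */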
163 
164 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
165 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
166 
167 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
168 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
169 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
170 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
171 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
172 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
173 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
174 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
175 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
176 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
177 #ifdef QCA_WIFI_3_0_ADRASTEA
178 	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
179 	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
180 	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
181 #endif
182 };
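
/*
 * Editorial note: each shadow_reg_cfg entry pairs a CE id with the offset of
 * its write-index shadow register; source-ring offsets are used for
 * host->target pipes and destination-ring offsets for target->host pipes.
 * The 0x3C/0x40 offsets above are specific to Adrastea-class targets.
 */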
183 
184 #ifdef QCN7605_SUPPORT
185 static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
186 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
187 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
188 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
189 	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
190 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
191 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
192 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
193 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
194 };
195 #endif
196 
197 #ifdef WLAN_FEATURE_EPPING
198 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
199 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
200 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
201 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
202 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
203 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
204 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
205 	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
206 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
207 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
208 };
209 #endif
210 
211 /* CE_PCI TABLE */
212 /*
213  * NOTE: the table below is out of date, though still a useful reference.
214  * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
215  * mapping of HTC services to HIF pipes.
216  */
217 /*
218  * The table below documents the Copy Engine configuration and the mapping
219  * of services/endpoints to CEs.  A subset of this information is passed to
220  * the Target during startup as a prerequisite to entering the BMI phase.
221  * See:
222  *    target_service_to_ce_map - Target-side mapping
223  *    hif_map_service_to_pipe  - Host-side mapping
224  *    target_ce_config         - Target-side configuration
225  *    host_ce_config           - Host-side configuration
226    ============================================================================
227    Purpose    | Service / Endpoint   | CE   | Dir  | Xfer     | Xfer
228  |                      |      |      | Size     | Frequency
229  |                      |      |      |          |
230    ============================================================================
231    tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
232    descriptor |                      |      |      | O(100B)  | and regular
233    download   |                      |      |      |          |
234    ----------------------------------------------------------------------------
235    rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
236    indication |                      |      |      | O(10B)   | regular
237    upload     |                      |      |      |          |
238    ----------------------------------------------------------------------------
239    MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
240    upload     |                      |      |      | O(1000B) | (frequent
241    e.g. noise |                      |      |      |          | during IP1.0
242    packets    |                      |      |      |          | testing)
243    ----------------------------------------------------------------------------
244    MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
245    download   |                      |      |      | O(1000B) | (frequent
246    e.g.       |                      |      |      |          | during IP1.0
247    misdirecte |                      |      |      |          | testing)
248    d EAPOL    |                      |      |      |          |
249    packets    |                      |      |      |          |
250    ----------------------------------------------------------------------------
251    n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
252  | DATA_VO (uplink)     |      |      |          |
253    ----------------------------------------------------------------------------
254    n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
255  | DATA_VO (downlink)   |      |      |          |
256    ----------------------------------------------------------------------------
257    WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
258  |                      |      |      | O(100B)  |
259    ----------------------------------------------------------------------------
260    WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
261    messages   | (downlink)           |      |      | O(100B)  |
262  |                      |      |      |          |
263    ----------------------------------------------------------------------------
264    n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
265  | HTC_RAW_STREAMS      |      |      |          |
266  | (uplink)             |      |      |          |
267    ----------------------------------------------------------------------------
268    n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
269  | HTC_RAW_STREAMS      |      |      |          |
270  | (downlink)           |      |      |          |
271    ----------------------------------------------------------------------------
272    diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
273  |                      |      |      |          | infrequent
274    ============================================================================
275  */
276 
277 /*
278  * Map from service/endpoint to Copy Engine.
279  * This table is derived from the CE_PCI TABLE, above.
280  * It is passed to the Target at startup for use by firmware.
281  */
282 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
283 	{
284 		WMI_DATA_VO_SVC,
285 		PIPEDIR_OUT,    /* out = UL = host -> target */
286 		3,
287 	},
288 	{
289 		WMI_DATA_VO_SVC,
290 		PIPEDIR_IN,     /* in = DL = target -> host */
291 		2,
292 	},
293 	{
294 		WMI_DATA_BK_SVC,
295 		PIPEDIR_OUT,    /* out = UL = host -> target */
296 		3,
297 	},
298 	{
299 		WMI_DATA_BK_SVC,
300 		PIPEDIR_IN,     /* in = DL = target -> host */
301 		2,
302 	},
303 	{
304 		WMI_DATA_BE_SVC,
305 		PIPEDIR_OUT,    /* out = UL = host -> target */
306 		3,
307 	},
308 	{
309 		WMI_DATA_BE_SVC,
310 		PIPEDIR_IN,     /* in = DL = target -> host */
311 		2,
312 	},
313 	{
314 		WMI_DATA_VI_SVC,
315 		PIPEDIR_OUT,    /* out = UL = host -> target */
316 		3,
317 	},
318 	{
319 		WMI_DATA_VI_SVC,
320 		PIPEDIR_IN,     /* in = DL = target -> host */
321 		2,
322 	},
323 	{
324 		WMI_CONTROL_SVC,
325 		PIPEDIR_OUT,    /* out = UL = host -> target */
326 		3,
327 	},
328 	{
329 		WMI_CONTROL_SVC,
330 		PIPEDIR_IN,     /* in = DL = target -> host */
331 		2,
332 	},
333 	{
334 		HTC_CTRL_RSVD_SVC,
335 		PIPEDIR_OUT,    /* out = UL = host -> target */
336 		0,              /* could be moved to 3 (share with WMI) */
337 	},
338 	{
339 		HTC_CTRL_RSVD_SVC,
340 		PIPEDIR_IN,     /* in = DL = target -> host */
341 		2,
342 	},
343 	{
344 		HTC_RAW_STREAMS_SVC, /* not currently used */
345 		PIPEDIR_OUT,    /* out = UL = host -> target */
346 		0,
347 	},
348 	{
349 		HTC_RAW_STREAMS_SVC, /* not currently used */
350 		PIPEDIR_IN,     /* in = DL = target -> host */
351 		2,
352 	},
353 	{
354 		HTT_DATA_MSG_SVC,
355 		PIPEDIR_OUT,    /* out = UL = host -> target */
356 		4,
357 	},
358 	{
359 		HTT_DATA_MSG_SVC,
360 		PIPEDIR_IN,     /* in = DL = target -> host */
361 		1,
362 	},
363 	{
364 		WDI_IPA_TX_SVC,
365 		PIPEDIR_OUT,    /* out = UL = host -> target */
366 		5,
367 	},
368 #if defined(QCA_WIFI_3_0_ADRASTEA)
369 	{
370 		HTT_DATA2_MSG_SVC,
371 		PIPEDIR_IN,    /* in = DL = target -> host */
372 		9,
373 	},
374 	{
375 		HTT_DATA3_MSG_SVC,
376 		PIPEDIR_IN,    /* in = DL = target -> host */
377 		10,
378 	},
379 	{
380 		PACKET_LOG_SVC,
381 		PIPEDIR_IN,    /* in = DL = target -> host */
382 		11,
383 	},
384 #endif
385 	/* (Additions here) */
386 
387 	{                       /* Must be last */
388 		0,
389 		0,
390 		0,
391 	},
392 };
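
/*
 * Editorial note: each entry above reads as { service id, direction, CE id }.
 * For example, { WMI_CONTROL_SVC, PIPEDIR_OUT, 3 } routes host->target WMI
 * control messages over CE 3, and { HTT_DATA_MSG_SVC, PIPEDIR_IN, 1 } routes
 * target->host HTT data indications over CE 1.  The all-zero entry marks the
 * end of the table and must remain last.
 */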
393 
394 /* PIPEDIR_OUT = HOST to Target */
395 /* PIPEDIR_IN  = TARGET to HOST */
396 #if (defined(QCA_WIFI_QCA8074))
397 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
398 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
399 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
400 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
401 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
402 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
403 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
404 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
405 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
406 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
407 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
408 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
409 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
410 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
411 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
412 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
413 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
414 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
415 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
416 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
417 	/* (Additions here) */
418 	{ 0, 0, 0, },
419 };
420 #else
421 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
422 };
423 #endif
424 
425 #if (defined(QCA_WIFI_QCA8074V2))
426 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
427 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
428 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
429 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
430 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
431 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
432 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
433 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
434 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
435 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
436 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
437 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
438 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
439 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
440 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
441 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
442 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
443 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
444 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
445 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
446 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
447 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
448 	/* (Additions here) */
449 	{ 0, 0, 0, },
450 };
451 #else
452 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
453 };
454 #endif
455 
456 #if (defined(QCA_WIFI_QCA6018))
457 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
458 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
459 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
460 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
461 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
462 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
463 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
464 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
465 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
466 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
467 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
468 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
469 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
470 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
471 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
472 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
473 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
474 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
475 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
476 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
477 	/* (Additions here) */
478 	{ 0, 0, 0, },
479 };
480 #else
481 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
482 };
483 #endif
484 
485 /* PIPEDIR_OUT = HOST to Target */
486 /* PIPEDIR_IN  = TARGET to HOST */
487 #ifdef QCN7605_SUPPORT
488 static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
489 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
490 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
491 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
492 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
493 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
494 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
495 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
496 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
497 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
498 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
499 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
500 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
501 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
502 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
503 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
504 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
505 	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
506 #ifdef IPA_OFFLOAD
507 	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
508 #else
509 	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
510 #endif
511 	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
512 	/* (Additions here) */
513 	{ 0, 0, 0, },
514 };
515 #endif
516 
517 #if (defined(QCA_WIFI_QCA6290))
518 #ifdef CONFIG_WIN
519 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
520 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
521 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
522 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
523 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
524 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
525 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
526 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
527 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
528 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
529 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
530 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
531 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
532 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
533 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
534 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
535 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
536 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
537 	/* (Additions here) */
538 	{ 0, 0, 0, },
539 };
540 #else
541 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
542 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
543 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
544 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
545 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
546 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
547 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
548 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
549 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
550 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
551 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
552 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
553 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
554 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
555 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
556 	/* (Additions here) */
557 	{ 0, 0, 0, },
558 };
559 #endif
560 #else
561 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
562 };
563 #endif
564 
565 #if (defined(QCA_WIFI_QCA6390))
566 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
567 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
568 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
569 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
570 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
571 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
572 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
573 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
574 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
575 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
576 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
577 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
578 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
579 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
580 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
581 	/* (Additions here) */
582 	{ 0, 0, 0, },
583 };
584 #else
585 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
586 };
587 #endif
588 
589 static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
590 	{
591 		WMI_DATA_VO_SVC,
592 		PIPEDIR_OUT,    /* out = UL = host -> target */
593 		3,
594 	},
595 	{
596 		WMI_DATA_VO_SVC,
597 		PIPEDIR_IN,     /* in = DL = target -> host */
598 		2,
599 	},
600 	{
601 		WMI_DATA_BK_SVC,
602 		PIPEDIR_OUT,    /* out = UL = host -> target */
603 		3,
604 	},
605 	{
606 		WMI_DATA_BK_SVC,
607 		PIPEDIR_IN,     /* in = DL = target -> host */
608 		2,
609 	},
610 	{
611 		WMI_DATA_BE_SVC,
612 		PIPEDIR_OUT,    /* out = UL = host -> target */
613 		3,
614 	},
615 	{
616 		WMI_DATA_BE_SVC,
617 		PIPEDIR_IN,     /* in = DL = target -> host */
618 		2,
619 	},
620 	{
621 		WMI_DATA_VI_SVC,
622 		PIPEDIR_OUT,    /* out = UL = host -> target */
623 		3,
624 	},
625 	{
626 		WMI_DATA_VI_SVC,
627 		PIPEDIR_IN,     /* in = DL = target -> host */
628 		2,
629 	},
630 	{
631 		WMI_CONTROL_SVC,
632 		PIPEDIR_OUT,    /* out = UL = host -> target */
633 		3,
634 	},
635 	{
636 		WMI_CONTROL_SVC,
637 		PIPEDIR_IN,     /* in = DL = target -> host */
638 		2,
639 	},
640 	{
641 		HTC_CTRL_RSVD_SVC,
642 		PIPEDIR_OUT,    /* out = UL = host -> target */
643 		0,              /* could be moved to 3 (share with WMI) */
644 	},
645 	{
646 		HTC_CTRL_RSVD_SVC,
647 		PIPEDIR_IN,     /* in = DL = target -> host */
648 		1,
649 	},
650 	{
651 		HTC_RAW_STREAMS_SVC, /* not currently used */
652 		PIPEDIR_OUT,    /* out = UL = host -> target */
653 		0,
654 	},
655 	{
656 		HTC_RAW_STREAMS_SVC, /* not currently used */
657 		PIPEDIR_IN,     /* in = DL = target -> host */
658 		1,
659 	},
660 	{
661 		HTT_DATA_MSG_SVC,
662 		PIPEDIR_OUT,    /* out = UL = host -> target */
663 		4,
664 	},
665 #ifdef WLAN_FEATURE_FASTPATH
666 	{
667 		HTT_DATA_MSG_SVC,
668 		PIPEDIR_IN,     /* in = DL = target -> host */
669 		5,
670 	},
671 #else /* WLAN_FEATURE_FASTPATH */
672 	{
673 		HTT_DATA_MSG_SVC,
674 		PIPEDIR_IN,  /* in = DL = target -> host */
675 		1,
676 	},
677 #endif /* WLAN_FEATURE_FASTPATH */
678 
679 	/* (Additions here) */
680 
681 	{                       /* Must be last */
682 		0,
683 		0,
684 		0,
685 	},
686 };
687 
688 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
689 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
690 
691 #ifdef WLAN_FEATURE_EPPING
692 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
693 	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
694 	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
695 	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
696 	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
697 	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
698 	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
699 	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
700 	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
701 	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
702 	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
703 	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
704 	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
705 	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
706 	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
707 	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
708 	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
709 	{0, 0, 0,},             /* Must be last */
710 };
711 
712 void hif_select_epping_service_to_pipe_map(struct service_to_pipe
713 					   **tgt_svc_map_to_use,
714 					   uint32_t *sz_tgt_svc_map_to_use)
715 {
716 	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
717 	*sz_tgt_svc_map_to_use =
718 			sizeof(target_service_to_ce_map_wlan_epping);
719 }
720 #endif
721 
722 #ifdef QCN7605_SUPPORT
723 static inline
724 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
725 			       uint32_t *sz_tgt_svc_map_to_use)
726 {
727 	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
728 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
729 }
730 #else
731 static inline
732 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
733 			       uint32_t *sz_tgt_svc_map_to_use)
734 {
735 	HIF_ERROR("%s: QCN7605 not supported", __func__);
736 }
737 #endif
738 
739 static void hif_select_service_to_pipe_map(struct hif_softc *scn,
740 				    struct service_to_pipe **tgt_svc_map_to_use,
741 				    uint32_t *sz_tgt_svc_map_to_use)
742 {
743 	uint32_t mode = hif_get_conparam(scn);
744 	struct hif_target_info *tgt_info = &scn->target_info;
745 
746 	if (QDF_IS_EPPING_ENABLED(mode)) {
747 		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
748 						      sz_tgt_svc_map_to_use);
749 	} else {
750 		switch (tgt_info->target_type) {
751 		default:
752 			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
753 			*sz_tgt_svc_map_to_use =
754 				sizeof(target_service_to_ce_map_wlan);
755 			break;
756 		case TARGET_TYPE_QCN7605:
757 			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
758 						  sz_tgt_svc_map_to_use);
759 			break;
760 		case TARGET_TYPE_AR900B:
761 		case TARGET_TYPE_QCA9984:
762 		case TARGET_TYPE_IPQ4019:
763 		case TARGET_TYPE_QCA9888:
764 		case TARGET_TYPE_AR9888:
765 		case TARGET_TYPE_AR9888V2:
766 			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
767 			*sz_tgt_svc_map_to_use =
768 				sizeof(target_service_to_ce_map_ar900b);
769 			break;
770 		case TARGET_TYPE_QCA6290:
771 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
772 			*sz_tgt_svc_map_to_use =
773 				sizeof(target_service_to_ce_map_qca6290);
774 			break;
775 		case TARGET_TYPE_QCA6390:
776 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
777 			*sz_tgt_svc_map_to_use =
778 				sizeof(target_service_to_ce_map_qca6390);
779 			break;
780 		case TARGET_TYPE_QCA8074:
781 			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
782 			*sz_tgt_svc_map_to_use =
783 				sizeof(target_service_to_ce_map_qca8074);
784 			break;
785 		case TARGET_TYPE_QCA8074V2:
786 			*tgt_svc_map_to_use =
787 				target_service_to_ce_map_qca8074_v2;
788 			*sz_tgt_svc_map_to_use =
789 				sizeof(target_service_to_ce_map_qca8074_v2);
790 			break;
791 		case TARGET_TYPE_QCA6018:
792 			*tgt_svc_map_to_use =
793 				target_service_to_ce_map_qca6018;
794 			*sz_tgt_svc_map_to_use =
795 				sizeof(target_service_to_ce_map_qca6018);
796 			break;
797 		}
798 	}
799 }
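
/*
 * Usage sketch (editorial): callers retrieve the per-target map and derive
 * the entry count from the returned byte size, as ce_mark_datapath() does
 * below:
 *
 *   struct service_to_pipe *svc_map;
 *   uint32_t map_sz;
 *
 *   hif_select_service_to_pipe_map(scn, &svc_map, &map_sz);
 *   map_len = map_sz / sizeof(struct service_to_pipe);
 */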
800 
801 /**
802  * ce_mark_datapath() - mark a CE that serves an HTT DATA service
803  * @ce_state: pointer to the state context of the CE
804  *
805  * Description:
806  *   Sets the htt_rx_data (target->host) or htt_tx_data (host->target)
807  *   attribute of the state structure if the CE serves one of the
808  *   HTT DATA services.
809  *
810  * Return:
811  *  true if the CE serves an HTT DATA service, false otherwise
812  */
813 static bool ce_mark_datapath(struct CE_state *ce_state)
814 {
815 	struct service_to_pipe *svc_map;
816 	uint32_t map_sz, map_len;
817 	int    i;
818 	bool   rc = false;
819 
820 	if (ce_state != NULL) {
821 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
822 					       &map_sz);
823 
824 		map_len = map_sz / sizeof(struct service_to_pipe);
825 		for (i = 0; i < map_len; i++) {
826 			if ((svc_map[i].pipenum == ce_state->id) &&
827 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
828 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
829 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
830 				/* HTT CEs are unidirectional */
831 				if (svc_map[i].pipedir == PIPEDIR_IN)
832 					ce_state->htt_rx_data = true;
833 				else
834 					ce_state->htt_tx_data = true;
835 				rc = true;
836 			}
837 		}
838 	}
839 	return rc;
840 }
841 
842 /**
843  * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
844  * @ce_id: ce in question
845  * @ring: ring state being examined
846  * @type: "src_ring" or "dest_ring" string for identifying the ring
847  *
848  * Warns on non-zero index values.
849  * Causes a kernel panic if the ring is not empty during initialization.
850  */
851 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
852 					 char *type)
853 {
854 	if (ring->write_index != 0 || ring->sw_index != 0)
855 		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
856 			  ce_id, type, ring->sw_index, ring->write_index);
857 	if (ring->write_index != ring->sw_index)
858 		QDF_BUG(0);
859 }
860 
861 #ifdef IPA_OFFLOAD
862 /**
863  * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
864  * @scn: softc instance
865  * @CE_id: CE in question
866  * @base_addr: pointer to copyengine ring base address
867  * @ce_ring: copyengine instance
868  * @nentries: number of entries to be allocated
869  * @desc_size: ce desc size
870  *
871  * Return: QDF_STATUS_SUCCESS - for success
872  */
873 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
874 				     qdf_dma_addr_t *base_addr,
875 				     struct CE_ring_state *ce_ring,
876 				     unsigned int nentries, uint32_t desc_size)
877 {
878 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
879 	    !ce_srng_based(scn)) {
880 		if (!scn->ipa_ce_ring) {
881 			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
882 				scn->qdf_dev,
883 				nentries * desc_size + CE_DESC_RING_ALIGN);
884 			if (!scn->ipa_ce_ring) {
885 				HIF_ERROR(
886 				"%s: Failed to allocate memory for IPA ce ring",
887 				__func__);
888 				return QDF_STATUS_E_NOMEM;
889 			}
890 		}
891 		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
892 						&scn->ipa_ce_ring->mem_info);
893 		ce_ring->base_addr_owner_space_unaligned =
894 						scn->ipa_ce_ring->vaddr;
895 	} else {
896 		ce_ring->base_addr_owner_space_unaligned =
897 			qdf_mem_alloc_consistent(scn->qdf_dev,
898 						 scn->qdf_dev->dev,
899 						 (nentries * desc_size +
900 						 CE_DESC_RING_ALIGN),
901 						 base_addr);
902 		if (!ce_ring->base_addr_owner_space_unaligned) {
903 			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
904 				  __func__, CE_id);
905 			return QDF_STATUS_E_NOMEM;
906 		}
907 	}
908 	return QDF_STATUS_SUCCESS;
909 }
910 
911 /**
912  * ce_free_desc_ring() - Frees copyengine descriptor ring
913  * @scn: softc instance
914  * @CE_id: CE in question
915  * @ce_ring: copyengine instance
916  * @desc_size: ce desc size
917  *
918  * Return: None
919  */
920 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
921 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
922 {
923 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
924 	    !ce_srng_based(scn)) {
925 		if (scn->ipa_ce_ring) {
926 			qdf_mem_shared_mem_free(scn->qdf_dev,
927 						scn->ipa_ce_ring);
928 			scn->ipa_ce_ring = NULL;
929 		}
930 		ce_ring->base_addr_owner_space_unaligned = NULL;
931 	} else {
932 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
933 			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
934 			ce_ring->base_addr_owner_space_unaligned,
935 			ce_ring->base_addr_CE_space, 0);
936 		ce_ring->base_addr_owner_space_unaligned = NULL;
937 	}
938 }
939 #else
940 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
941 				     qdf_dma_addr_t *base_addr,
942 				     struct CE_ring_state *ce_ring,
943 				     unsigned int nentries, uint32_t desc_size)
944 {
945 	ce_ring->base_addr_owner_space_unaligned =
946 		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
947 					 (nentries * desc_size +
948 					 CE_DESC_RING_ALIGN), base_addr);
949 	if (!ce_ring->base_addr_owner_space_unaligned) {
950 		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
951 			  __func__, CE_id);
952 		return QDF_STATUS_E_NOMEM;
953 	}
954 	return QDF_STATUS_SUCCESS;
955 }
956 
957 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
958 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
959 {
960 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
961 		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
962 		ce_ring->base_addr_owner_space_unaligned,
963 		ce_ring->base_addr_CE_space, 0);
964 	ce_ring->base_addr_owner_space_unaligned = NULL;
965 }
966 #endif /* IPA_OFFLOAD */
967 
968 /*
969  * TODO: Need to explore the possibility of having this as part of a
970  * target context instead of a global array.
971  */
972 static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);
973 
974 void ce_service_register_module(enum ce_target_type target_type,
975 				struct ce_ops* (*ce_attach)(void))
976 {
977 	if (target_type < CE_MAX_TARGET_TYPE)
978 		ce_attach_register[target_type] = ce_attach;
979 }
980 
981 qdf_export_symbol(ce_service_register_module);
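
/*
 * Illustrative registration (editorial sketch; the exact init function and
 * constructor names live in the legacy/SRNG CE service implementations and
 * are assumptions here):
 *
 *   ce_service_register_module(CE_SVC_LEGACY, ce_services_legacy);
 *   ce_service_register_module(CE_SVC_SRNG, ce_services_srng);
 *
 * ce_services_attach() below then picks the registered constructor that
 * matches the target (SRNG based or legacy).
 */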
982 
983 /**
984  * ce_srng_based() - Does this target use srng
985  * @scn: pointer to the hif context
986  *
987  * Description:
988  *   Returns true if the target is SRNG based.
989  *
990  * Return:
991  *  true if the target uses SRNG-based copy engines,
992  *  false otherwise
993  */
994 bool ce_srng_based(struct hif_softc *scn)
995 {
996 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
997 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
998 
999 	switch (tgt_info->target_type) {
1000 	case TARGET_TYPE_QCA8074:
1001 	case TARGET_TYPE_QCA8074V2:
1002 	case TARGET_TYPE_QCA6290:
1003 	case TARGET_TYPE_QCA6390:
1004 	case TARGET_TYPE_QCA6018:
1005 		return true;
1006 	default:
1007 		return false;
1008 	}
1009 	return false;
1010 }
1011 qdf_export_symbol(ce_srng_based);
1012 
1013 #ifdef QCA_WIFI_SUPPORT_SRNG
1014 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1015 {
1016 	struct ce_ops *ops = NULL;
1017 
1018 	if (ce_srng_based(scn)) {
1019 		if (ce_attach_register[CE_SVC_SRNG])
1020 			ops = ce_attach_register[CE_SVC_SRNG]();
1021 	} else if (ce_attach_register[CE_SVC_LEGACY]) {
1022 		ops = ce_attach_register[CE_SVC_LEGACY]();
1023 	}
1024 
1025 	return ops;
1026 }
1027 
1028 
1029 #else	/* QCA_WIFI_SUPPORT_SRNG */
1030 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1031 {
1032 	if (ce_attach_register[CE_SVC_LEGACY])
1033 		return ce_attach_register[CE_SVC_LEGACY]();
1034 
1035 	return NULL;
1036 }
1037 #endif /* QCA_WIFI_SUPPORT_SRNG */
1038 
1039 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
1040 		struct pld_shadow_reg_v2_cfg **shadow_config,
1041 		int *num_shadow_registers_configured) {
1042 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1043 
1044 	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
1045 			scn, shadow_config, num_shadow_registers_configured);
1046 }
1047 
1048 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
1049 						uint8_t ring_type)
1050 {
1051 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1052 
1053 	return hif_state->ce_services->ce_get_desc_size(ring_type);
1054 }
1055 
1056 
1057 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
1058 		uint8_t ring_type, uint32_t nentries)
1059 {
1060 	uint32_t ce_nbytes;
1061 	char *ptr;
1062 	qdf_dma_addr_t base_addr;
1063 	struct CE_ring_state *ce_ring;
1064 	uint32_t desc_size;
1065 	struct hif_softc *scn = CE_state->scn;
1066 
1067 	ce_nbytes = sizeof(struct CE_ring_state)
1068 		+ (nentries * sizeof(void *));
1069 	ptr = qdf_mem_malloc(ce_nbytes);
1070 	if (!ptr)
1071 		return NULL;
1072 
1073 	ce_ring = (struct CE_ring_state *)ptr;
1074 	ptr += sizeof(struct CE_ring_state);
1075 	ce_ring->nentries = nentries;
1076 	ce_ring->nentries_mask = nentries - 1;
1077 
1078 	ce_ring->low_water_mark_nentries = 0;
1079 	ce_ring->high_water_mark_nentries = nentries;
1080 	ce_ring->per_transfer_context = (void **)ptr;
1081 
1082 	desc_size = ce_get_desc_size(scn, ring_type);
1083 
1084 	/* Legacy platforms that do not support cache
1085 	 * coherent DMA are unsupported
1086 	 */
1087 	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
1088 			       ce_ring, nentries,
1089 			       desc_size) !=
1090 	    QDF_STATUS_SUCCESS) {
1091 		HIF_ERROR("%s: ring has no DMA mem",
1092 				__func__);
1093 		qdf_mem_free(ce_ring);
1094 		return NULL;
1095 	}
1096 	ce_ring->base_addr_CE_space_unaligned = base_addr;
1097 
1098 	/* Correctly initialize memory to 0 to
1099 	 * prevent garbage data from crashing the system
1100 	 * when downloading firmware
1101 	 */
1102 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
1103 			nentries * desc_size +
1104 			CE_DESC_RING_ALIGN);
1105 
1106 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
1107 
1108 		ce_ring->base_addr_CE_space =
1109 			(ce_ring->base_addr_CE_space_unaligned +
1110 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
1111 
1112 		ce_ring->base_addr_owner_space = (void *)
1113 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
1114 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
1115 	} else {
1116 		ce_ring->base_addr_CE_space =
1117 				ce_ring->base_addr_CE_space_unaligned;
1118 		ce_ring->base_addr_owner_space =
1119 				ce_ring->base_addr_owner_space_unaligned;
1120 	}
1121 
1122 	return ce_ring;
1123 }
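
/*
 * Alignment note (editorial, assuming CE_DESC_RING_ALIGN is a power of two,
 * e.g. 8): both the DMA-visible base and the CPU-visible base are rounded up
 * to the next aligned boundary with the usual mask trick, for example:
 *
 *   unaligned = 0x1004, align = 8
 *   aligned   = (0x1004 + 8 - 1) & ~(8 - 1) = 0x1008
 */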
1124 
1125 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
1126 			uint32_t ce_id, struct CE_ring_state *ring,
1127 			struct CE_attr *attr)
1128 {
1129 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1130 
1131 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
1132 					      ring, attr);
1133 }
1134 
1135 int hif_ce_bus_early_suspend(struct hif_softc *scn)
1136 {
1137 	uint8_t ul_pipe, dl_pipe;
1138 	int ce_id, status, ul_is_polled, dl_is_polled;
1139 	struct CE_state *ce_state;
1140 
1141 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
1142 					 &ul_pipe, &dl_pipe,
1143 					 &ul_is_polled, &dl_is_polled);
1144 	if (status) {
1145 		HIF_ERROR("%s: pipe_mapping failure", __func__);
1146 		return status;
1147 	}
1148 
1149 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1150 		if (ce_id == ul_pipe)
1151 			continue;
1152 		if (ce_id == dl_pipe)
1153 			continue;
1154 
1155 		ce_state = scn->ce_id_to_state[ce_id];
1156 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1157 		if (ce_state->state == CE_RUNNING)
1158 			ce_state->state = CE_PAUSED;
1159 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1160 	}
1161 
1162 	return status;
1163 }
1164 
1165 int hif_ce_bus_late_resume(struct hif_softc *scn)
1166 {
1167 	int ce_id;
1168 	struct CE_state *ce_state;
1169 	int write_index = 0;
1170 	bool index_updated;
1171 
1172 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1173 		ce_state = scn->ce_id_to_state[ce_id];
1174 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1175 		if (ce_state->state == CE_PENDING) {
1176 			write_index = ce_state->src_ring->write_index;
1177 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
1178 					write_index);
1179 			ce_state->state = CE_RUNNING;
1180 			index_updated = true;
1181 		} else {
1182 			index_updated = false;
1183 		}
1184 
1185 		if (ce_state->state == CE_PAUSED)
1186 			ce_state->state = CE_RUNNING;
1187 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1188 
1189 		if (index_updated)
1190 			hif_record_ce_desc_event(scn, ce_id,
1191 				RESUME_WRITE_INDEX_UPDATE,
1192 				NULL, NULL, write_index, 0);
1193 	}
1194 
1195 	return 0;
1196 }
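
/*
 * Editorial note: hif_ce_bus_early_suspend() above pauses every CE except the
 * WMI control pipes, while hif_ce_bus_late_resume() pushes any write index
 * recorded while a CE was in the CE_PENDING state to the hardware and returns
 * paused CEs to CE_RUNNING.
 */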
1197 
1198 /**
1199  * ce_oom_recovery() - try to recover rx ce from oom condition
1200  * @context: CE_state of the CE with oom rx ring
1201  *
1202  * the executing work Will continue to be rescheduled until
1203  * at least 1 descriptor is successfully posted to the rx ring.
1204  *
1205  * return: none
1206  */
1207 static void ce_oom_recovery(void *context)
1208 {
1209 	struct CE_state *ce_state = context;
1210 	struct hif_softc *scn = ce_state->scn;
1211 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
1212 	struct HIF_CE_pipe_info *pipe_info =
1213 		&ce_softc->pipe_info[ce_state->id];
1214 
1215 	hif_post_recv_buffers_for_pipe(pipe_info);
1216 }
1217 
1218 #ifdef HIF_CE_DEBUG_DATA_BUF
1219 /**
1220  * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed to by
1221  * the CE descriptors.
1222  * Allocate HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each.
1223  * @scn: hif scn handle
1224  * @ce_id: Copy Engine Id
1225  *
1226  * Return: QDF_STATUS
1227  */
1228 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1229 {
1230 	struct hif_ce_desc_event *event = NULL;
1231 	struct hif_ce_desc_event *hist_ev = NULL;
1232 	uint32_t index = 0;
1233 
1234 	hist_ev =
1235 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1236 
1237 	if (!hist_ev)
1238 		return QDF_STATUS_E_NOMEM;
1239 
1240 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1241 		event = &hist_ev[index];
1242 		event->data =
1243 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
1244 		if (event->data == NULL)
1245 			return QDF_STATUS_E_NOMEM;
1246 	}
1247 	return QDF_STATUS_SUCCESS;
1248 }
1249 
1250 /**
1251  * free_mem_ce_debug_hist_data() - Free mem of the data pointed to by
1252  * the CE descriptors.
1253  * @scn: hif scn handle
1254  * @ce_id: Copy Engine Id
1255  *
1256  * Return: None
1257  */
1258 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1259 {
1260 	struct hif_ce_desc_event *event = NULL;
1261 	struct hif_ce_desc_event *hist_ev = NULL;
1262 	uint32_t index = 0;
1263 
1264 	hist_ev =
1265 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1266 
1267 	if (!hist_ev)
1268 		return;
1269 
1270 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1271 		event = &hist_ev[index];
1272 		if (event->data != NULL)
1273 			qdf_mem_free(event->data);
1274 		event->data = NULL;
1275 		event = NULL;
1276 	}
1277 }
1278 #endif /* HIF_CE_DEBUG_DATA_BUF */
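
/*
 * Editorial note: alloc_mem_ce_debug_hist_data() allocates HIF_CE_HISTORY_MAX
 * buffers of CE_DEBUG_MAX_DATA_BUF_SIZE bytes each for one CE, and
 * free_mem_ce_debug_hist_data() NULL-checks every entry, so it can release a
 * partially populated history as well as a fully allocated one.
 */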
1279 
1280 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) /* MCL */
1281 struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];
1282 
1283 /**
1284  * alloc_mem_ce_debug_history() - Allocate CE descriptor history
1285  * @scn: hif scn handle
1286  * @ce_id: Copy Engine Id
1287  *
1288  * Return: QDF_STATUS
1289  */
1290 static QDF_STATUS
1291 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
1292 {
1293 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1294 
1295 	ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
1296 	ce_hist->enable[ce_id] = 1;
1297 
1298 	return QDF_STATUS_SUCCESS;
1299 }
1300 
1301 /**
1302  * free_mem_ce_debug_history() - Free CE descriptor history
1303  * @scn: hif scn handle
1304  * @ce_id: Copy Engine Id
1305  *
1306  * Return: None
1307  */
1308 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
1309 {
1310 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1311 
1312 	ce_hist->enable[ce_id] = 0;
1313 	ce_hist->hist_ev[ce_id] = NULL;
1314 }
1315 
1316 #elif defined(HIF_CE_DEBUG_DATA_BUF) /* WIN */
1317 
1318 static QDF_STATUS
1319 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
1320 {
1321 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
1322 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
1323 
1324 	if (scn->hif_ce_desc_hist.hist_ev[CE_id] == NULL) {
1325 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
1326 		return QDF_STATUS_E_NOMEM;
1327 	} else {
1328 		scn->hif_ce_desc_hist.enable[CE_id] = 1;
1329 		return QDF_STATUS_SUCCESS;
1330 	}
1331 }
1332 
1333 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
1334 {
1335 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1336 	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];
1337 
1338 	if (!hist_ev)
1339 		return;
1340 
1341 	if (ce_hist->data_enable[CE_id] == 1) {
1342 		ce_hist->data_enable[CE_id] = 0;
1343 		free_mem_ce_debug_hist_data(scn, CE_id);
1344 	}
1345 
1346 	ce_hist->enable[CE_id] = 0;
1347 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
1348 	ce_hist->hist_ev[CE_id] = NULL;
1349 }
1350 
1351 #else /* Disabled */
1352 
1353 static inline QDF_STATUS
1354 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
1355 {
1356 	return QDF_STATUS_SUCCESS;
1357 }
1358 
1359 static inline void
1360 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
1361 #endif
1362 
1363 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1364 /**
1365  * reset_ce_debug_history() - reset the index and ce id used for dumping the
1366  * CE records on the console using sysfs.
1367  * @scn: hif scn handle
1368  *
1369  * Return:
1370  */
1371 static inline void reset_ce_debug_history(struct hif_softc *scn)
1372 {
1373 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1374 	/* Initialise the CE debug history sysfs interface inputs ce_id and
1375 	 * index. Disable data storing
1376 	 */
1377 	ce_hist->hist_index = 0;
1378 	ce_hist->hist_id = 0;
1379 }
1380 #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1381 static inline void reset_ce_debug_history(struct hif_softc *scn) { }
1382 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1383 
1384 void ce_enable_polling(void *cestate)
1385 {
1386 	struct CE_state *CE_state = (struct CE_state *)cestate;
1387 
1388 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1389 		CE_state->timer_inited = true;
1390 }
1391 
1392 void ce_disable_polling(void *cestate)
1393 {
1394 	struct CE_state *CE_state = (struct CE_state *)cestate;
1395 
1396 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1397 		CE_state->timer_inited = false;
1398 }
1399 
1400 /*
1401  * Initialize a Copy Engine based on caller-supplied attributes.
1402  * This may be called once to initialize both source and destination
1403  * rings or it may be called twice for separate source and destination
1404  * initialization. It may be that only one side or the other is
1405  * initialized by software/firmware.
1406  *
1407  * This should be called during the initialization sequence before
1408  * interrupts are enabled, so we don't have to worry about thread safety.
1409  */
1410 struct CE_handle *ce_init(struct hif_softc *scn,
1411 			  unsigned int CE_id, struct CE_attr *attr)
1412 {
1413 	struct CE_state *CE_state;
1414 	uint32_t ctrl_addr;
1415 	unsigned int nentries;
1416 	bool malloc_CE_state = false;
1417 	bool malloc_src_ring = false;
1418 	int status;
1419 
1420 	QDF_ASSERT(CE_id < scn->ce_count);
1421 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
1422 	CE_state = scn->ce_id_to_state[CE_id];
1423 
1424 	if (!CE_state) {
1425 		CE_state =
1426 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
1427 		if (!CE_state) {
1428 			HIF_ERROR("%s: CE_state has no mem", __func__);
1429 			return NULL;
1430 		}
1431 		malloc_CE_state = true;
1432 		qdf_spinlock_create(&CE_state->ce_index_lock);
1433 
1434 		CE_state->id = CE_id;
1435 		CE_state->ctrl_addr = ctrl_addr;
1436 		CE_state->state = CE_RUNNING;
1437 		CE_state->attr_flags = attr->flags;
1438 	}
1439 	CE_state->scn = scn;
1440 	CE_state->service = ce_engine_service_reg;
1441 
1442 	qdf_atomic_init(&CE_state->rx_pending);
1443 	if (attr == NULL) {
1444 		/* Already initialized; caller wants the handle */
1445 		return (struct CE_handle *)CE_state;
1446 	}
1447 
1448 	if (CE_state->src_sz_max)
1449 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
1450 	else
1451 		CE_state->src_sz_max = attr->src_sz_max;
1452 
1453 	ce_init_ce_desc_event_log(scn, CE_id,
1454 				  attr->src_nentries + attr->dest_nentries);
1455 
1456 	/* source ring setup */
1457 	nentries = attr->src_nentries;
1458 	if (nentries) {
1459 		struct CE_ring_state *src_ring;
1460 
1461 		nentries = roundup_pwr2(nentries);
1462 		if (CE_state->src_ring) {
1463 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
1464 		} else {
1465 			src_ring = CE_state->src_ring =
1466 				ce_alloc_ring_state(CE_state,
1467 						CE_RING_SRC,
1468 						nentries);
1469 			if (!src_ring) {
1470 				/* cannot allocate src ring. If the
1471 				 * CE_state is allocated locally free
1472 				 * CE_State and return error.
1473 				 */
1474 				HIF_ERROR("%s: src ring has no mem", __func__);
1475 				if (malloc_CE_state) {
1476 					/* allocated CE_state locally */
1477 					qdf_mem_free(CE_state);
1478 					malloc_CE_state = false;
1479 				}
1480 				return NULL;
1481 			}
1482 			/* we can allocate src ring. Mark that the src ring is
1483 			 * allocated locally
1484 			 */
1485 			malloc_src_ring = true;
1486 
1487 			/*
1488 			 * Also allocate a shadow src ring in
1489 			 * regular mem to use for faster access.
1490 			 */
1491 			src_ring->shadow_base_unaligned =
1492 				qdf_mem_malloc(nentries *
1493 					       sizeof(struct CE_src_desc) +
1494 					       CE_DESC_RING_ALIGN);
1495 			if (src_ring->shadow_base_unaligned == NULL) {
1496 				HIF_ERROR("%s: src ring no shadow_base mem",
1497 					  __func__);
1498 				goto error_no_dma_mem;
1499 			}
1500 			src_ring->shadow_base = (struct CE_src_desc *)
1501 				(((size_t) src_ring->shadow_base_unaligned +
1502 				CE_DESC_RING_ALIGN - 1) &
1503 				 ~(CE_DESC_RING_ALIGN - 1));
1504 
1505 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
1506 					       src_ring, attr);
1507 			if (status < 0)
1508 				goto error_target_access;
1509 
1510 			ce_ring_test_initial_indexes(CE_id, src_ring,
1511 						     "src_ring");
1512 		}
1513 	}
1514 
1515 	/* destination ring setup */
1516 	nentries = attr->dest_nentries;
1517 	if (nentries) {
1518 		struct CE_ring_state *dest_ring;
1519 
1520 		nentries = roundup_pwr2(nentries);
1521 		if (CE_state->dest_ring) {
1522 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
1523 		} else {
1524 			dest_ring = CE_state->dest_ring =
1525 				ce_alloc_ring_state(CE_state,
1526 						CE_RING_DEST,
1527 						nentries);
1528 			if (!dest_ring) {
1529 				/* cannot allocate dst ring. If the CE_state
1530 				 * or src ring is allocated locally free
1531 				 * CE_State and src ring and return error.
1532 				 */
1533 				HIF_ERROR("%s: dest ring has no mem",
1534 					  __func__);
1535 				goto error_no_dma_mem;
1536 			}
1537 
1538 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
1539 				      dest_ring, attr);
1540 			if (status < 0)
1541 				goto error_target_access;
1542 
1543 			ce_ring_test_initial_indexes(CE_id, dest_ring,
1544 						     "dest_ring");
1545 
1546 			/* For srng based target, init status ring here */
1547 			if (ce_srng_based(CE_state->scn)) {
1548 				CE_state->status_ring =
1549 					ce_alloc_ring_state(CE_state,
1550 							CE_RING_STATUS,
1551 							nentries);
1552 				if (CE_state->status_ring == NULL) {
1553 					/*Allocation failed. Cleanup*/
1554 					qdf_mem_free(CE_state->dest_ring);
1555 					if (malloc_src_ring) {
1556 						qdf_mem_free
1557 							(CE_state->src_ring);
1558 						CE_state->src_ring = NULL;
1559 						malloc_src_ring = false;
1560 					}
1561 					if (malloc_CE_state) {
1562 						/* allocated CE_state locally */
1563 						scn->ce_id_to_state[CE_id] =
1564 							NULL;
1565 						qdf_mem_free(CE_state);
1566 						malloc_CE_state = false;
1567 					}
1568 
1569 					return NULL;
1570 				}
1571 
1572 				status = ce_ring_setup(scn, CE_RING_STATUS,
1573 					       CE_id, CE_state->status_ring,
1574 					       attr);
1575 				if (status < 0)
1576 					goto error_target_access;
1577 
1578 			}
1579 
1580 			/* epping */
1581 			/* poll timer */
1582 			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
1583 				qdf_timer_init(scn->qdf_dev,
1584 						&CE_state->poll_timer,
1585 						ce_poll_timeout,
1586 						CE_state,
1587 						QDF_TIMER_TYPE_WAKE_APPS);
1588 				ce_enable_polling(CE_state);
1589 				qdf_timer_mod(&CE_state->poll_timer,
1590 						      CE_POLL_TIMEOUT);
1591 			}
1592 		}
1593 	}
1594 
1595 	if (!ce_srng_based(scn)) {
1596 		/* Enable CE error interrupts */
1597 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1598 			goto error_target_access;
1599 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1600 		if (Q_TARGET_ACCESS_END(scn) < 0)
1601 			goto error_target_access;
1602 	}
1603 
1604 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1605 			ce_oom_recovery, CE_state);
1606 
1607 	/* update the htt_data attribute */
1608 	ce_mark_datapath(CE_state);
1609 	scn->ce_id_to_state[CE_id] = CE_state;
1610 
1611 	alloc_mem_ce_debug_history(scn, CE_id);
1612 
1613 	return (struct CE_handle *)CE_state;
1614 
1615 error_target_access:
1616 error_no_dma_mem:
1617 	ce_fini((struct CE_handle *)CE_state);
1618 	return NULL;
1619 }
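
/*
 * Usage sketch (editorial): ce_init() is driven from the CE configuration
 * tables, one call per copy engine, roughly:
 *
 *   struct CE_attr *attr = &hif_state->host_ce_config[CE_id];
 *   struct CE_handle *ce_hdl = ce_init(scn, CE_id, attr);
 *
 *   if (!ce_hdl)
 *           goto err;   // unwind any previously initialized CEs
 */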
1620 
1621 /**
1622  * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
1623  * @hif_ctx: HIF Context
1624  *
1625  * API to check if polling is enabled on all CEs. Returns true when polling
1626  * is enabled on all CEs.
1627  *
1628  * Return: bool
1629  */
1630 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1631 {
1632 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1633 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1634 	struct CE_attr *attr;
1635 	int id;
1636 
1637 	for (id = 0; id < scn->ce_count; id++) {
1638 		attr = &hif_state->host_ce_config[id];
1639 		if (attr && (attr->dest_nentries) &&
1640 		    !(attr->flags & CE_ATTR_ENABLE_POLL))
1641 			return false;
1642 	}
1643 	return true;
1644 }
1645 qdf_export_symbol(hif_is_polled_mode_enabled);
1646 
1647 #ifdef WLAN_FEATURE_FASTPATH
1648 /**
1649  * hif_enable_fastpath() - Update that we have enabled fastpath mode
1650  * @hif_ctx: HIF context
1651  *
1652  * For use in data path
1653  *
1654  * Return: void
1655  */
1656 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1657 {
1658 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1659 
1660 	if (ce_srng_based(scn)) {
1661 		HIF_INFO("%s, srng rings do not support fastpath", __func__);
1662 		return;
1663 	}
1664 	HIF_DBG("%s, Enabling fastpath mode", __func__);
1665 	scn->fastpath_mode_on = true;
1666 }
1667 
1668 /**
1669  * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
1670  * @hif_ctx: HIF Context
1671  *
1672  * For use in data path to skip HTC
1673  *
1674  * Return: bool
1675  */
1676 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1677 {
1678 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1679 
1680 	return scn->fastpath_mode_on;
1681 }
1682 
1683 /**
1684  * hif_get_ce_handle - API to get CE handle for FastPath mode
1685  * @hif_ctx: HIF Context
1686  * @id: CopyEngine Id
1687  *
1688  * API to return CE handle for fastpath mode
1689  *
1690  * Return: opaque CE handle (struct CE_state *) for the given CE id
1691  */
1692 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1693 {
1694 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1695 
1696 	return scn->ce_id_to_state[id];
1697 }
1698 qdf_export_symbol(hif_get_ce_handle);
1699 
1700 /**
1701  * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
1702  * No processing is required inside this function.
1703  * @ce_hdl: Copy engine handle
1704  * Using an assert, this function makes sure that
1705  * the TX CE has been processed completely.
1706  *
1707  * This is called while dismantling CE structures. No other thread
1708  * should be using these structures while dismantling is occurring,
1709  * therefore no locking is needed.
1710  *
1711  * Return: none
1712  */
1713 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1714 {
1715 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1716 	struct CE_ring_state *src_ring = ce_state->src_ring;
1717 	struct hif_softc *sc = ce_state->scn;
1718 	uint32_t sw_index, write_index;
1719 
1720 	if (hif_is_nss_wifi_enabled(sc))
1721 		return;
1722 
1723 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
1724 		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
1725 			 __func__, __LINE__);
1726 		sw_index = src_ring->sw_index;
1727 		write_index = src_ring->write_index;
1728 
1729 		/* At this point Tx CE should be clean */
1730 		qdf_assert_always(sw_index == write_index);
1731 	}
1732 }
1733 
1734 /**
1735  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1736  * @ce_hdl: Handle to CE
1737  *
1738  * These buffers are never allocated on the fly, but
1739  * are allocated only once during HIF start and freed
1740  * only once during HIF stop.
1741  * NOTE:
1742  * The assumption here is there is no in-flight DMA in progress
1743  * currently, so that buffers can be freed up safely.
1744  *
1745  * Return: NONE
1746  */
1747 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1748 {
1749 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1750 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
1751 	qdf_nbuf_t nbuf;
1752 	int i;
1753 
1754 	if (ce_state->scn->fastpath_mode_on == false)
1755 		return;
1756 
1757 	if (!ce_state->htt_rx_data)
1758 		return;
1759 
1760 	/*
1761 	 * When fastpath mode is on, the datapath CEs are kept completely
1762 	 * full: unlike other CEs, no blank entry is left to distinguish an
1763 	 * empty queue from a full queue. So all the entries need to be
1764 	 * freed.
1765 	 */
1766 	for (i = 0; i < dst_ring->nentries; i++) {
1767 		nbuf = dst_ring->per_transfer_context[i];
1768 
1769 		/*
1770 		 * The reasons for doing this check are:
1771 		 * 1) Protect against calling cleanup before allocating buffers
1772 		 * 2) In a corner case, FASTPATH_mode_on may be set, but we
1773 		 *    could have a partially filled ring, because of a memory
1774 		 *    allocation failure in the middle of allocating ring.
1775 		 *    This check accounts for that case, checking
1776 		 *    fastpath_mode_on flag or started flag would not have
1777 		 *    covered that case. This is not in performance path,
1778 		 *    so OK to do this.
1779 		 */
1780 		if (nbuf) {
1781 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1782 					      QDF_DMA_FROM_DEVICE);
1783 			qdf_nbuf_free(nbuf);
1784 		}
1785 	}
1786 }
1787 
1788 /**
1789  * hif_update_fastpath_recv_bufs_cnt() - Increments the Rx buf count by 1
1790  * @scn: HIF handle
1791  *
1792  * Datapath Rx CEs are a special case, where all the message buffers are
1793  * reused. Hence all the entries in the pipe have to be posted right from
1794  * the beginning, unlike other CE pipes where one less than dest_nentries
1795  * is filled in the beginning.
1796  *
1797  * Return: None
1798  */
1799 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1800 {
1801 	int pipe_num;
1802 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1803 
1804 	if (scn->fastpath_mode_on == false)
1805 		return;
1806 
1807 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1808 		struct HIF_CE_pipe_info *pipe_info =
1809 			&hif_state->pipe_info[pipe_num];
1810 		struct CE_state *ce_state =
1811 			scn->ce_id_to_state[pipe_info->pipe_num];
1812 
1813 		if (ce_state->htt_rx_data)
1814 			atomic_inc(&pipe_info->recv_bufs_needed);
1815 	}
1816 }
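
/*
 * Worked example (illustrative): for a dest ring with dest_nentries = 512,
 * a normal pipe initially posts 512 - 1 = 511 receive buffers, keeping one
 * slot empty so a full ring can be told apart from an empty one.  A
 * fastpath HTT Rx pipe reuses its buffers and is kept completely full, so
 * the extra increment above raises the count to be posted to 512.
 */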
1817 #else
1818 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1819 {
1820 }
1821 
1822 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
1823 {
1824 	return false;
1825 }
1826 #endif /* WLAN_FEATURE_FASTPATH */
1827 
1828 void ce_fini(struct CE_handle *copyeng)
1829 {
1830 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1831 	unsigned int CE_id = CE_state->id;
1832 	struct hif_softc *scn = CE_state->scn;
1833 	uint32_t desc_size;
1834 
1835 	bool inited = CE_state->timer_inited;
1836 	CE_state->state = CE_UNUSED;
1837 	scn->ce_id_to_state[CE_id] = NULL;
1838 	/* Set the flag to false first to stop processing in ce_poll_timeout */
1839 	ce_disable_polling(CE_state);
1840 
1841 	qdf_lro_deinit(CE_state->lro_data);
1842 
1843 	if (CE_state->src_ring) {
1844 		/* Cleanup the datapath Tx ring */
1845 		ce_h2t_tx_ce_cleanup(copyeng);
1846 
1847 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
1848 		if (CE_state->src_ring->shadow_base_unaligned)
1849 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
1850 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
1851 			ce_free_desc_ring(scn, CE_state->id,
1852 					  CE_state->src_ring,
1853 					  desc_size);
1854 		qdf_mem_free(CE_state->src_ring);
1855 	}
1856 	if (CE_state->dest_ring) {
1857 		/* Cleanup the datapath Rx ring */
1858 		ce_t2h_msg_ce_cleanup(copyeng);
1859 
1860 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
1861 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
1862 			ce_free_desc_ring(scn, CE_state->id,
1863 					  CE_state->dest_ring,
1864 					  desc_size);
1865 		qdf_mem_free(CE_state->dest_ring);
1866 
1867 		/* epping */
1868 		if (inited) {
1869 			qdf_timer_free(&CE_state->poll_timer);
1870 		}
1871 	}
1872 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
1873 		/* Cleanup the datapath Tx ring */
1874 		ce_h2t_tx_ce_cleanup(copyeng);
1875 
1876 		if (CE_state->status_ring->shadow_base_unaligned)
1877 			qdf_mem_free(
1878 				CE_state->status_ring->shadow_base_unaligned);
1879 
1880 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
1881 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
1882 			ce_free_desc_ring(scn, CE_state->id,
1883 					  CE_state->status_ring,
1884 					  desc_size);
1885 		qdf_mem_free(CE_state->status_ring);
1886 	}
1887 
1888 	free_mem_ce_debug_history(scn, CE_id);
1889 	reset_ce_debug_history(scn);
1890 	ce_deinit_ce_desc_event_log(scn, CE_id);
1891 
1892 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
1893 	qdf_mem_free(CE_state);
1894 }
1895 
1896 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
1897 {
1898 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1899 
1900 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
1901 		  sizeof(hif_state->msg_callbacks_pending));
1902 	qdf_mem_zero(&hif_state->msg_callbacks_current,
1903 		  sizeof(hif_state->msg_callbacks_current));
1904 }
1905 
1906 /* Send the first nbytes bytes of the buffer */
1907 QDF_STATUS
1908 hif_send_head(struct hif_opaque_softc *hif_ctx,
1909 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
1910 	      qdf_nbuf_t nbuf, unsigned int data_attr)
1911 {
1912 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1913 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1914 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1915 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
1916 	int bytes = nbytes, nfrags = 0;
1917 	struct ce_sendlist sendlist;
1918 	int status, i = 0;
1919 	unsigned int mux_id = 0;
1920 
1921 	if (nbytes > qdf_nbuf_len(nbuf)) {
1922 		HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
1923 			  (uint32_t)qdf_nbuf_len(nbuf));
1924 		QDF_ASSERT(0);
1925 	}
1926 
1927 	transfer_id =
1928 		(mux_id & MUX_ID_MASK) |
1929 		(transfer_id & TRANSACTION_ID_MASK);
1930 	data_attr &= DESC_DATA_FLAG_MASK;
1931 	/*
1932 	 * The common case involves sending multiple fragments within a
1933 	 * single download (the tx descriptor and the tx frame header).
1934 	 * So, optimize for the case of multiple fragments by not even
1935 	 * checking whether it's necessary to use a sendlist.
1936 	 * The overhead of using a sendlist for a single buffer download
1937 	 * is not a big deal, since it happens rarely (for WMI messages).
1938 	 */
1939 	ce_sendlist_init(&sendlist);
1940 	do {
1941 		qdf_dma_addr_t frag_paddr;
1942 		int frag_bytes;
1943 
1944 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
1945 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
1946 		/*
1947 		 * Clear the packet offset for all but the first CE desc.
1948 		 */
1949 		if (i++ > 0)
1950 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
1951 
1952 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
1953 				    frag_bytes >
1954 				    bytes ? bytes : frag_bytes,
1955 				    qdf_nbuf_get_frag_is_wordstream
1956 				    (nbuf,
1957 				    nfrags) ? 0 :
1958 				    CE_SEND_FLAG_SWAP_DISABLE,
1959 				    data_attr);
1960 		if (status != QDF_STATUS_SUCCESS) {
1961 			HIF_ERROR("%s: error, frag_num %d larger than limit",
1962 				__func__, nfrags);
1963 			return status;
1964 		}
1965 		bytes -= frag_bytes;
1966 		nfrags++;
1967 	} while (bytes > 0);
1968 
1969 	/* Make sure we have resources to handle this request */
1970 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
1971 	if (pipe_info->num_sends_allowed < nfrags) {
1972 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1973 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
1974 		return QDF_STATUS_E_RESOURCES;
1975 	}
1976 	pipe_info->num_sends_allowed -= nfrags;
1977 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1978 
1979 	if (qdf_unlikely(ce_hdl == NULL)) {
1980 		HIF_ERROR("%s: error CE handle is null", __func__);
1981 		return A_ERROR;
1982 	}
1983 
1984 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
1985 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
1986 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
1987 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
1988 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
1989 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
1990 
1991 	return status;
1992 }
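
/*
 * Illustrative sketch (not part of the driver): a typical HTT data download
 * hands hif_send_head() an nbuf with two mapped fragments (the tx descriptor
 * and the frame header), so the loop above effectively builds, assuming
 * non-wordstream fragments:
 *
 *	ce_sendlist_init(&sendlist);
 *	ce_sendlist_buf_add(&sendlist, desc_paddr, desc_len,
 *			    CE_SEND_FLAG_SWAP_DISABLE, data_attr);
 *	ce_sendlist_buf_add(&sendlist, hdr_paddr, hdr_len,
 *			    CE_SEND_FLAG_SWAP_DISABLE,
 *			    data_attr & ~QDF_CE_TX_PKT_OFFSET_BIT_M);
 *	ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
 *
 * desc_paddr/desc_len and hdr_paddr/hdr_len are placeholders for the values
 * returned by qdf_nbuf_get_frag_paddr()/qdf_nbuf_get_frag_len(); error
 * handling is omitted for brevity.
 */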
1993 
1994 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1995 								int force)
1996 {
1997 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1998 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1999 
2000 	if (!force) {
2001 		int resources;
2002 		/*
2003 		 * Decide whether to actually poll for completions, or just
2004 		 * wait for a later chance. If there seem to be plenty of
2005 		 * resources left, then just wait, since checking involves
2006 		 * reading a CE register, which is a relatively expensive
2007 		 * operation.
2008 		 */
2009 		resources = hif_get_free_queue_number(hif_ctx, pipe);
2010 		/*
2011 		 * If at least 50% of the total resources are still available,
2012 		 * don't bother checking again yet.
2013 		 */
2014 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
2015 									 1))
2016 			return;
2017 	}
2018 #if ATH_11AC_TXCOMPACT
2019 	ce_per_engine_servicereap(scn, pipe);
2020 #else
2021 	ce_per_engine_service(scn, pipe);
2022 #endif
2023 }
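
/*
 * Worked example (illustrative): with src_nentries = 32 for the pipe, the
 * shift above gives a threshold of 16; as long as more than 16 send slots
 * remain free, the relatively expensive CE register read is skipped and
 * completion reaping is deferred to a later call (or to a forced check).
 */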
2024 
2025 uint16_t
2026 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2027 {
2028 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2029 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2030 	uint16_t rv;
2031 
2032 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2033 	rv = pipe_info->num_sends_allowed;
2034 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2035 	return rv;
2036 }
2037 
2038 /* Called by lower (CE) layer when a send to Target completes. */
2039 static void
2040 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
2041 		     void *transfer_context, qdf_dma_addr_t CE_data,
2042 		     unsigned int nbytes, unsigned int transfer_id,
2043 		     unsigned int sw_index, unsigned int hw_index,
2044 		     unsigned int toeplitz_hash_result)
2045 {
2046 	struct HIF_CE_pipe_info *pipe_info =
2047 		(struct HIF_CE_pipe_info *)ce_context;
2048 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
2049 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2050 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
2051 	struct hif_msg_callbacks *msg_callbacks =
2052 		&pipe_info->pipe_callbacks;
2053 
2054 	do {
2055 		/*
2056 		 * The upper layer callback will be triggered
2057 		 * when the last fragment is completed.
2058 		 */
2059 		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
2060 			if (scn->target_status == TARGET_STATUS_RESET) {
2061 
2062 				qdf_nbuf_unmap_single(scn->qdf_dev,
2063 						      transfer_context,
2064 						      QDF_DMA_TO_DEVICE);
2065 				qdf_nbuf_free(transfer_context);
2066 			} else
2067 				msg_callbacks->txCompletionHandler(
2068 					msg_callbacks->Context,
2069 					transfer_context, transfer_id,
2070 					toeplitz_hash_result);
2071 		}
2072 
2073 		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2074 		pipe_info->num_sends_allowed++;
2075 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2076 	} while (ce_completed_send_next(copyeng,
2077 			&ce_context, &transfer_context,
2078 			&CE_data, &nbytes, &transfer_id,
2079 			&sw_idx, &hw_idx,
2080 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
2081 }
2082 
2083 /**
2084  * hif_ce_do_recv(): send message from copy engine to upper layers
2085  * @msg_callbacks: structure containing callback and callback context
2086  * @netbuff: skb containing message
2087  * @nbytes: number of bytes in the message
2088  * @pipe_info: used for the pipe_number info
2089  *
2090  * Checks the packet length, configures the length in the netbuff,
2091  * and calls the upper layer callback.
2092  *
2093  * Return: None
2094  */
2095 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
2096 		qdf_nbuf_t netbuf, int nbytes,
2097 		struct HIF_CE_pipe_info *pipe_info) {
2098 	if (nbytes <= pipe_info->buf_sz) {
2099 		qdf_nbuf_set_pktlen(netbuf, nbytes);
2100 		msg_callbacks->
2101 			rxCompletionHandler(msg_callbacks->Context,
2102 					netbuf, pipe_info->pipe_num);
2103 	} else {
2104 		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
2105 				__func__, netbuf, nbytes);
2106 
2107 		qdf_nbuf_free(netbuf);
2108 	}
2109 }
2110 
2111 /* Called by lower (CE) layer when data is received from the Target. */
2112 static void
2113 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
2114 		     void *transfer_context, qdf_dma_addr_t CE_data,
2115 		     unsigned int nbytes, unsigned int transfer_id,
2116 		     unsigned int flags)
2117 {
2118 	struct HIF_CE_pipe_info *pipe_info =
2119 		(struct HIF_CE_pipe_info *)ce_context;
2120 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
2121 	struct CE_state *ce_state = (struct CE_state *) copyeng;
2122 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2123 #ifdef HIF_PCI
2124 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
2125 #endif
2126 	struct hif_msg_callbacks *msg_callbacks =
2127 		 &pipe_info->pipe_callbacks;
2128 
2129 	do {
2130 #ifdef HIF_PCI
2131 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2132 #endif
2133 		qdf_nbuf_unmap_single(scn->qdf_dev,
2134 				      (qdf_nbuf_t) transfer_context,
2135 				      QDF_DMA_FROM_DEVICE);
2136 
2137 		atomic_inc(&pipe_info->recv_bufs_needed);
2138 		hif_post_recv_buffers_for_pipe(pipe_info);
2139 		if (scn->target_status == TARGET_STATUS_RESET)
2140 			qdf_nbuf_free(transfer_context);
2141 		else
2142 			hif_ce_do_recv(msg_callbacks, transfer_context,
2143 				nbytes, pipe_info);
2144 
2145 		/* Set up force_break flag if num of receives reaches
2146 		 * MAX_NUM_OF_RECEIVES
2147 		 */
2148 		ce_state->receive_count++;
2149 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
2150 			ce_state->force_break = 1;
2151 			break;
2152 		}
2153 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
2154 					&CE_data, &nbytes, &transfer_id,
2155 					&flags) == QDF_STATUS_SUCCESS);
2156 
2157 }
2158 
2159 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
2160 
2161 void
2162 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
2163 	      struct hif_msg_callbacks *callbacks)
2164 {
2165 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2166 
2167 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2168 	spin_lock_init(&pcie_access_log_lock);
2169 #endif
2170 	/* Save callbacks for later installation */
2171 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
2172 		 sizeof(hif_state->msg_callbacks_pending));
2173 
2174 }
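
/*
 * Illustrative sketch (not part of the driver): an upper layer such as HTC
 * registers its handlers before HIF is started, e.g.:
 *
 *	struct hif_msg_callbacks cbs = {
 *		.Context = htc_ctx,
 *		.txCompletionHandler = htc_tx_done,
 *		.rxCompletionHandler = htc_rx_done,
 *	};
 *
 *	hif_post_init(hif_ctx, NULL, &cbs);
 *	...
 *	hif_start(hif_ctx);
 *
 * htc_ctx, htc_tx_done and htc_rx_done are placeholders for the caller's
 * own context and callback functions; the pending callbacks are installed
 * as the current ones inside hif_start() via hif_msg_callbacks_install().
 */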
2175 
2176 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
2177 {
2178 	struct CE_handle *ce_diag = hif_state->ce_diag;
2179 	int pipe_num;
2180 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2181 	struct hif_msg_callbacks *hif_msg_callbacks =
2182 		&hif_state->msg_callbacks_current;
2183 
2184 	/* daemonize("hif_compl_thread"); */
2185 
2186 	if (scn->ce_count == 0) {
2187 		HIF_ERROR("%s: Invalid ce_count", __func__);
2188 		return -EINVAL;
2189 	}
2190 
2191 	if (!hif_msg_callbacks ||
2192 			!hif_msg_callbacks->rxCompletionHandler ||
2193 			!hif_msg_callbacks->txCompletionHandler) {
2194 		HIF_ERROR("%s: no completion handler registered", __func__);
2195 		return -EFAULT;
2196 	}
2197 
2198 	A_TARGET_ACCESS_LIKELY(scn);
2199 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2200 		struct CE_attr attr;
2201 		struct HIF_CE_pipe_info *pipe_info;
2202 
2203 		pipe_info = &hif_state->pipe_info[pipe_num];
2204 		if (pipe_info->ce_hdl == ce_diag)
2205 			continue;       /* Handle Diagnostic CE specially */
2206 		attr = hif_state->host_ce_config[pipe_num];
2207 		if (attr.src_nentries) {
2208 			/* pipe used to send to target */
2209 			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
2210 					 __func__, pipe_num, pipe_info);
2211 			ce_send_cb_register(pipe_info->ce_hdl,
2212 					    hif_pci_ce_send_done, pipe_info,
2213 					    attr.flags & CE_ATTR_DISABLE_INTR);
2214 			pipe_info->num_sends_allowed = attr.src_nentries - 1;
2215 		}
2216 		if (attr.dest_nentries) {
2217 			/* pipe used to receive from target */
2218 			ce_recv_cb_register(pipe_info->ce_hdl,
2219 					    hif_pci_ce_recv_data, pipe_info,
2220 					    attr.flags & CE_ATTR_DISABLE_INTR);
2221 		}
2222 
2223 		if (attr.src_nentries)
2224 			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
2225 
2226 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
2227 					sizeof(pipe_info->pipe_callbacks));
2228 	}
2229 
2230 	A_TARGET_ACCESS_UNLIKELY(scn);
2231 	return 0;
2232 }
2233 
2234 /*
2235  * Install pending msg callbacks.
2236  *
2237  * TBDXXX: This hack is needed because upper layers install msg callbacks
2238  * for use with HTC before BMI is done; yet this HIF implementation
2239  * needs to continue to use BMI msg callbacks. Really, upper layers
2240  * should not register HTC callbacks until AFTER BMI phase.
2241  */
2242 static void hif_msg_callbacks_install(struct hif_softc *scn)
2243 {
2244 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2245 
2246 	qdf_mem_copy(&hif_state->msg_callbacks_current,
2247 		 &hif_state->msg_callbacks_pending,
2248 		 sizeof(hif_state->msg_callbacks_pending));
2249 }
2250 
2251 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2252 							uint8_t *DLPipe)
2253 {
2254 	int ul_is_polled, dl_is_polled;
2255 
2256 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
2257 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2258 }
2259 
2260 /**
2261  * hif_dump_pipe_debug_count() - Log error count
2262  * @scn: hif_softc pointer.
2263  *
2264  * Output the error counts of each pipe to the log file
2265  *
2266  * Return: N/A
2267  */
2268 void hif_dump_pipe_debug_count(struct hif_softc *scn)
2269 {
2270 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2271 	int pipe_num;
2272 
2273 	if (hif_state == NULL) {
2274 		HIF_ERROR("%s hif_state is NULL", __func__);
2275 		return;
2276 	}
2277 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2278 		struct HIF_CE_pipe_info *pipe_info;
2279 
2280 		pipe_info = &hif_state->pipe_info[pipe_num];
2281 
2282 		if (pipe_info->nbuf_alloc_err_count > 0 ||
2283 		    pipe_info->nbuf_dma_err_count > 0 ||
2284 		    pipe_info->nbuf_ce_enqueue_err_count)
2285 			HIF_ERROR(
2286 				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
2287 				__func__, pipe_info->pipe_num,
2288 				atomic_read(&pipe_info->recv_bufs_needed),
2289 				pipe_info->nbuf_alloc_err_count,
2290 				pipe_info->nbuf_dma_err_count,
2291 				pipe_info->nbuf_ce_enqueue_err_count);
2292 	}
2293 }
2294 
2295 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2296 					  void *nbuf, uint32_t *error_cnt,
2297 					  enum hif_ce_event_type failure_type,
2298 					  const char *failure_type_string)
2299 {
2300 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2301 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2302 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2303 	int ce_id = CE_state->id;
2304 	uint32_t error_cnt_tmp;
2305 
2306 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2307 	error_cnt_tmp = ++(*error_cnt);
2308 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2309 	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
2310 		  __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2311 		  failure_type_string);
2312 	hif_record_ce_desc_event(scn, ce_id, failure_type,
2313 				 NULL, nbuf, bufs_needed_tmp, 0);
2314 	/* If we fail to allocate the last buffer for an rx pipe,
2315 	 * there is no trigger to refill the CE and we will
2316 	 * eventually crash.
2317 	 */
2318 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
2319 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
2320 
2321 }
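
/*
 * Worked example (illustrative): on a pipe whose dest ring has 512 entries,
 * refills are normally triggered from the Rx completion path.  If allocation
 * failures leave recv_bufs_needed at 511 (every postable entry outstanding),
 * no further completions can arrive to trigger a refill, so the check above
 * schedules oom_allocation_work as the recovery path instead.
 */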
2322 
2323 
2324 
2325 
2326 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
2327 {
2328 	struct CE_handle *ce_hdl;
2329 	qdf_size_t buf_sz;
2330 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2331 	QDF_STATUS status;
2332 	uint32_t bufs_posted = 0;
2333 
2334 	buf_sz = pipe_info->buf_sz;
2335 	if (buf_sz == 0) {
2336 		/* Unused Copy Engine */
2337 		return QDF_STATUS_SUCCESS;
2338 	}
2339 
2340 	ce_hdl = pipe_info->ce_hdl;
2341 
2342 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2343 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
2344 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
2345 		qdf_nbuf_t nbuf;
2346 
2347 		atomic_dec(&pipe_info->recv_bufs_needed);
2348 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2349 
2350 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
2351 		if (!nbuf) {
2352 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2353 					&pipe_info->nbuf_alloc_err_count,
2354 					 HIF_RX_NBUF_ALLOC_FAILURE,
2355 					"HIF_RX_NBUF_ALLOC_FAILURE");
2356 			return QDF_STATUS_E_NOMEM;
2357 		}
2358 
2359 		/*
2360 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
2361 		 * CE_data = dma_map_single(dev, data, buf_sz,
2362 		 *			    DMA_FROM_DEVICE);
2363 		 */
2364 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
2365 					    QDF_DMA_FROM_DEVICE);
2366 
2367 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2368 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2369 					&pipe_info->nbuf_dma_err_count,
2370 					 HIF_RX_NBUF_MAP_FAILURE,
2371 					"HIF_RX_NBUF_MAP_FAILURE");
2372 			qdf_nbuf_free(nbuf);
2373 			return status;
2374 		}
2375 
2376 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
2377 
2378 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
2379 					       buf_sz, DMA_FROM_DEVICE);
2380 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
2381 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2382 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2383 					&pipe_info->nbuf_ce_enqueue_err_count,
2384 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
2385 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
2386 
2387 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2388 						QDF_DMA_FROM_DEVICE);
2389 			qdf_nbuf_free(nbuf);
2390 			return status;
2391 		}
2392 
2393 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2394 		bufs_posted++;
2395 	}
2396 	pipe_info->nbuf_alloc_err_count =
2397 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
2398 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2399 	pipe_info->nbuf_dma_err_count =
2400 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
2401 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2402 	pipe_info->nbuf_ce_enqueue_err_count =
2403 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
2404 	pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
2405 
2406 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2407 
2408 	return QDF_STATUS_SUCCESS;
2409 }
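
/*
 * Worked example (illustrative): if nbuf_alloc_err_count was 5 and this call
 * posted 3 buffers, the counter above is reduced to 2; once 5 or more buffers
 * are posted it is cleared to 0.  The error counters therefore only track
 * failures that have not yet been compensated by successful postings.
 */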
2410 
2411 /*
2412  * Try to post all desired receive buffers for all pipes.
2413  * Returns QDF_STATUS_SUCCESS for non-fastpath Rx copy engines, since
2414  * oom_allocation_work will be scheduled to recover any failures;
2415  * returns an error status if receive buffers cannot be completely
2416  * replenished for a fastpath Rx copy engine.
2417  */
2418 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
2419 {
2420 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2421 	int pipe_num;
2422 	struct CE_state *ce_state = NULL;
2423 	QDF_STATUS qdf_status;
2424 
2425 	A_TARGET_ACCESS_LIKELY(scn);
2426 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2427 		struct HIF_CE_pipe_info *pipe_info;
2428 
2429 		ce_state = scn->ce_id_to_state[pipe_num];
2430 		pipe_info = &hif_state->pipe_info[pipe_num];
2431 
2432 		if (hif_is_nss_wifi_enabled(scn) &&
2433 		    ce_state && (ce_state->htt_rx_data))
2434 			continue;
2435 
2436 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
2437 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
2438 			ce_state->htt_rx_data &&
2439 			scn->fastpath_mode_on) {
2440 			A_TARGET_ACCESS_UNLIKELY(scn);
2441 			return qdf_status;
2442 		}
2443 	}
2444 
2445 	A_TARGET_ACCESS_UNLIKELY(scn);
2446 
2447 	return QDF_STATUS_SUCCESS;
2448 }
2449 
2450 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
2451 {
2452 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2453 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2454 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
2455 
2456 	hif_update_fastpath_recv_bufs_cnt(scn);
2457 
2458 	hif_msg_callbacks_install(scn);
2459 
2460 	if (hif_completion_thread_startup(hif_state))
2461 		return QDF_STATUS_E_FAILURE;
2462 
2463 	/* enable buffer cleanup */
2464 	hif_state->started = true;
2465 
2466 	/* Post buffers once to start things off. */
2467 	qdf_status = hif_post_recv_buffers(scn);
2468 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2469 		/* cleanup is done in hif_ce_disable */
2470 		HIF_ERROR("%s:failed to post buffers", __func__);
2471 		return qdf_status;
2472 	}
2473 
2474 	return qdf_status;
2475 }
2476 
2477 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2478 {
2479 	struct hif_softc *scn;
2480 	struct CE_handle *ce_hdl;
2481 	uint32_t buf_sz;
2482 	struct HIF_CE_state *hif_state;
2483 	qdf_nbuf_t netbuf;
2484 	qdf_dma_addr_t CE_data;
2485 	void *per_CE_context;
2486 
2487 	buf_sz = pipe_info->buf_sz;
2488 	/* Unused Copy Engine */
2489 	if (buf_sz == 0)
2490 		return;
2491 
2492 
2493 	hif_state = pipe_info->HIF_CE_state;
2494 	if (!hif_state->started)
2495 		return;
2496 
2497 	scn = HIF_GET_SOFTC(hif_state);
2498 	ce_hdl = pipe_info->ce_hdl;
2499 
2500 	if (scn->qdf_dev == NULL)
2501 		return;
2502 	while (ce_revoke_recv_next
2503 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
2504 			&CE_data) == QDF_STATUS_SUCCESS) {
2505 		if (netbuf) {
2506 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2507 					      QDF_DMA_FROM_DEVICE);
2508 			qdf_nbuf_free(netbuf);
2509 		}
2510 	}
2511 }
2512 
2513 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2514 {
2515 	struct CE_handle *ce_hdl;
2516 	struct HIF_CE_state *hif_state;
2517 	struct hif_softc *scn;
2518 	qdf_nbuf_t netbuf;
2519 	void *per_CE_context;
2520 	qdf_dma_addr_t CE_data;
2521 	unsigned int nbytes;
2522 	unsigned int id;
2523 	uint32_t buf_sz;
2524 	uint32_t toeplitz_hash_result;
2525 
2526 	buf_sz = pipe_info->buf_sz;
2527 	if (buf_sz == 0) {
2528 		/* Unused Copy Engine */
2529 		return;
2530 	}
2531 
2532 	hif_state = pipe_info->HIF_CE_state;
2533 	if (!hif_state->started) {
2534 		return;
2535 	}
2536 
2537 	scn = HIF_GET_SOFTC(hif_state);
2538 
2539 	ce_hdl = pipe_info->ce_hdl;
2540 
2541 	while (ce_cancel_send_next
2542 		       (ce_hdl, &per_CE_context,
2543 		       (void **)&netbuf, &CE_data, &nbytes,
2544 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2545 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2546 			/*
2547 			 * Packets enqueued by htt_h2t_ver_req_msg() and
2548 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2549 			 * freed in htt_htc_misc_pkt_pool_free() in
2550 			 * wlantl_close(), so do not free them here again
2551 			 * by checking whether it's the endpoint
2552 			 * which they are queued in.
2553 			 */
2554 			if (id == scn->htc_htt_tx_endpoint)
2555 				return;
2556 			/* Indicate the completion to higher
2557 			 * layer to free the buffer
2558 			 */
2559 			if (pipe_info->pipe_callbacks.txCompletionHandler)
2560 				pipe_info->pipe_callbacks.
2561 				    txCompletionHandler(pipe_info->
2562 					    pipe_callbacks.Context,
2563 					    netbuf, id, toeplitz_hash_result);
2564 		}
2565 	}
2566 }
2567 
2568 /*
2569  * Cleanup residual buffers for device shutdown:
2570  *    buffers that were enqueued for receive
2571  *    buffers that were to be sent
2572  * Note: Buffers that had completed but which were
2573  * not yet processed are on a completion queue. They
2574  * are handled when the completion thread shuts down.
2575  */
2576 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
2577 {
2578 	int pipe_num;
2579 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2580 	struct CE_state *ce_state;
2581 
2582 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2583 		struct HIF_CE_pipe_info *pipe_info;
2584 
2585 		ce_state = scn->ce_id_to_state[pipe_num];
2586 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2587 				((ce_state->htt_tx_data) ||
2588 				 (ce_state->htt_rx_data))) {
2589 			continue;
2590 		}
2591 
2592 		pipe_info = &hif_state->pipe_info[pipe_num];
2593 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
2594 		hif_send_buffer_cleanup_on_pipe(pipe_info);
2595 	}
2596 }
2597 
2598 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
2599 {
2600 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2601 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2602 
2603 	hif_buffer_cleanup(hif_state);
2604 }
2605 
2606 static void hif_destroy_oom_work(struct hif_softc *scn)
2607 {
2608 	struct CE_state *ce_state;
2609 	int ce_id;
2610 
2611 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2612 		ce_state = scn->ce_id_to_state[ce_id];
2613 		if (ce_state)
2614 			qdf_destroy_work(scn->qdf_dev,
2615 					 &ce_state->oom_allocation_work);
2616 	}
2617 }
2618 
2619 void hif_ce_stop(struct hif_softc *scn)
2620 {
2621 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2622 	int pipe_num;
2623 
2624 	/*
2625 	 * before cleaning up any memory, ensure irq &
2626 	 * bottom half contexts will not be re-entered
2627 	 */
2628 	hif_disable_isr(&scn->osc);
2629 	hif_destroy_oom_work(scn);
2630 	scn->hif_init_done = false;
2631 
2632 	/*
2633 	 * At this point, asynchronous threads are stopped,
2634 	 * The Target should not DMA nor interrupt, Host code may
2635 	 * not initiate anything more.  So we just need to clean
2636 	 * up Host-side state.
2637 	 */
2638 
2639 	if (scn->athdiag_procfs_inited) {
2640 		athdiag_procfs_remove();
2641 		scn->athdiag_procfs_inited = false;
2642 	}
2643 
2644 	hif_buffer_cleanup(hif_state);
2645 
2646 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2647 		struct HIF_CE_pipe_info *pipe_info;
2648 		struct CE_attr attr;
2649 		struct CE_handle *ce_diag = hif_state->ce_diag;
2650 
2651 		pipe_info = &hif_state->pipe_info[pipe_num];
2652 		if (pipe_info->ce_hdl) {
2653 			if (pipe_info->ce_hdl != ce_diag) {
2654 				attr = hif_state->host_ce_config[pipe_num];
2655 				if (attr.src_nentries)
2656 					qdf_spinlock_destroy(&pipe_info->
2657 							completion_freeq_lock);
2658 			}
2659 			ce_fini(pipe_info->ce_hdl);
2660 			pipe_info->ce_hdl = NULL;
2661 			pipe_info->buf_sz = 0;
2662 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2663 		}
2664 	}
2665 
2666 	if (hif_state->sleep_timer_init) {
2667 		qdf_timer_stop(&hif_state->sleep_timer);
2668 		qdf_timer_free(&hif_state->sleep_timer);
2669 		hif_state->sleep_timer_init = false;
2670 	}
2671 
2672 	hif_state->started = false;
2673 }
2674 
2675 #ifdef QCN7605_SUPPORT
2676 static inline
2677 void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg
2678 				    **target_shadow_reg_cfg_ret,
2679 				    uint32_t *shadow_cfg_sz_ret)
2680 {
2681 	if (target_shadow_reg_cfg_ret)
2682 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg_map_qcn7605;
2683 	if (shadow_cfg_sz_ret)
2684 		*shadow_cfg_sz_ret = sizeof(target_shadow_reg_cfg_map_qcn7605);
2685 }
2686 #else
2687 static inline
2688 void hif_get_shadow_reg_cfg_qcn7605(struct shadow_reg_cfg
2689 				    **target_shadow_reg_cfg_ret,
2690 				    uint32_t *shadow_cfg_sz_ret)
2691 {
2692 	HIF_ERROR("QCN7605 not supported");
2693 }
2694 #endif
2695 
2696 static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
2697 				   struct shadow_reg_cfg
2698 				   **target_shadow_reg_cfg_ret,
2699 				   uint32_t *shadow_cfg_sz_ret)
2700 {
2701 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2702 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
2703 
2704 	switch (tgt_info->target_type) {
2705 	case TARGET_TYPE_QCN7605:
2706 		hif_get_shadow_reg_cfg_qcn7605(target_shadow_reg_cfg_ret,
2707 					       shadow_cfg_sz_ret);
2708 		break;
2709 	default:
2710 		if (target_shadow_reg_cfg_ret)
2711 			*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2712 		if (shadow_cfg_sz_ret)
2713 			*shadow_cfg_sz_ret = shadow_cfg_sz;
2714 	}
2715 }
2716 
2717 /**
2718  * hif_get_target_ce_config() - get copy engine configuration
2719  * @scn: hif context
2720  * @target_ce_config_ret: basic copy engine configuration
2721  * @target_ce_config_sz_ret: size of the basic configuration in bytes
2722  * @target_service_to_ce_map_ret: service mapping for the copy engines
2723  * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
2724  * @target_shadow_reg_cfg_ret: shadow register configuration
2725  * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
2726  *
2727  * Provides an accessor to these values outside of this file. Currently
2728  * these are stored in static pointers to const sections, and there are
2729  * multiple configurations that are selected from at compile time.
2730  * Runtime selection would need to consider mode, target type and bus type.
2731  * Return: return by parameter.
2732  */
2733 void hif_get_target_ce_config(struct hif_softc *scn,
2734 		struct CE_pipe_config **target_ce_config_ret,
2735 		uint32_t *target_ce_config_sz_ret,
2736 		struct service_to_pipe **target_service_to_ce_map_ret,
2737 		uint32_t *target_service_to_ce_map_sz_ret,
2738 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
2739 		uint32_t *shadow_cfg_sz_ret)
2740 {
2741 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2742 
2743 	*target_ce_config_ret = hif_state->target_ce_config;
2744 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
2745 
2746 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2747 				       target_service_to_ce_map_sz_ret);
2748 	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
2749 			       shadow_cfg_sz_ret);
2750 }
2751 
2752 #ifdef CONFIG_SHADOW_V2
2753 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2754 {
2755 	int i;
2756 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2757 		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
2758 
2759 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2760 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2761 		     "%s: i %d, val %x", __func__, i,
2762 		     cfg->shadow_reg_v2_cfg[i].addr);
2763 	}
2764 }
2765 
2766 #else
2767 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2768 {
2769 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2770 		  "%s: CONFIG_SHADOW_V2 not defined", __func__);
2771 }
2772 #endif
2773 
2774 #ifdef ADRASTEA_RRI_ON_DDR
2775 /**
2776  * hif_get_src_ring_read_index(): Called to get the SRRI
2777  *
2778  * @scn: hif_softc pointer
2779  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2780  *
2781  * This function returns the SRRI to the caller. For CEs that
2782  * don't have interrupts enabled, we look at the DDR based SRRI.
2783  *
2784  * Return: SRRI
2785  */
2786 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
2787 		uint32_t CE_ctrl_addr)
2788 {
2789 	struct CE_attr attr;
2790 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2791 
2792 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2793 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
2794 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2795 	} else {
2796 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2797 			return A_TARGET_READ(scn,
2798 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2799 		else
2800 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
2801 					CE_ctrl_addr);
2802 	}
2803 }
2804 
2805 /**
2806  * hif_get_dst_ring_read_index(): Called to get the DRRI
2807  *
2808  * @scn: hif_softc pointer
2809  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2810  *
2811  * This function returns the DRRI to the caller. For CEs that
2812  * don't have interrupts enabled, we look at the DDR based DRRI.
2813  *
2814  * Return: DRRI
2815  */
2816 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
2817 		uint32_t CE_ctrl_addr)
2818 {
2819 	struct CE_attr attr;
2820 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2821 
2822 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2823 
2824 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
2825 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2826 	} else {
2827 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2828 			return A_TARGET_READ(scn,
2829 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2830 		else
2831 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
2832 					CE_ctrl_addr);
2833 	}
2834 }
2835 
2836 /**
2837  * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
2838  * @scn: hif_softc pointer
2839  *
2840  * Return: qdf status
2841  */
2842 static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
2843 {
2844 	qdf_dma_addr_t paddr_rri_on_ddr = 0;
2845 
2846 	scn->vaddr_rri_on_ddr =
2847 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
2848 		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
2849 		&paddr_rri_on_ddr);
2850 
2851 	if (!scn->vaddr_rri_on_ddr) {
2852 		hif_err("dmaable page alloc fail");
2853 		return QDF_STATUS_E_NOMEM;
2854 	}
2855 
2856 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
2857 
2858 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
2859 
2860 	return QDF_STATUS_SUCCESS;
2861 }
2862 #endif
2863 
2864 #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
2865 /**
2866  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2867  *
2868  * @scn: hif_softc pointer
2869  *
2870  * This function allocates non-cached memory on DDR and sends
2871  * the physical address of this memory to the CE hardware. The
2872  * hardware updates the RRI at this particular location.
2873  *
2874  * Return: None
2875  */
2876 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
2877 {
2878 	unsigned int i;
2879 	uint32_t high_paddr, low_paddr;
2880 
2881 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
2882 		return;
2883 
2884 	low_paddr  = BITS0_TO_31(scn->paddr_rri_on_ddr);
2885 	high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);
2886 
2887 	HIF_DBG("%s using srri and drri from DDR", __func__);
2888 
2889 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
2890 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
2891 
2892 	for (i = 0; i < CE_COUNT; i++)
2893 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
2894 }
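
/*
 * Worked example (illustrative): for paddr_rri_on_ddr = 0x840001000, a
 * 36-bit DMA address, BITS0_TO_31() yields 0x40001000 for the LOW register
 * and BITS32_TO_35() yields 0x8 for the HIGH register, letting the CE
 * hardware reassemble the full address of the RRI array in DDR.
 */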
2895 #else
2896 /**
2897  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2898  *
2899  * @scn: hif_softc pointer
2900  *
2901  * This is a dummy implementation for platforms that don't
2902  * support this functionality.
2903  *
2904  * Return: None
2905  */
2906 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
2907 {
2908 }
2909 #endif
2910 
2911 /**
2912  * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
2913  *                                    QMI command
2914  * @scn: hif context
2915  * @cfg: wlan enable config
2916  *
2917  * In case of Genoa, rri_over_ddr memory configuration is passed
2918  * to firmware through QMI configure command.
2919  */
2920 #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
2921 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
2922 					   struct pld_wlan_enable_cfg *cfg)
2923 {
2924 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
2925 		return;
2926 
2927 	cfg->rri_over_ddr_cfg_valid = true;
2928 	cfg->rri_over_ddr_cfg.base_addr_low =
2929 		 BITS0_TO_31(scn->paddr_rri_on_ddr);
2930 	cfg->rri_over_ddr_cfg.base_addr_high =
2931 		 BITS32_TO_35(scn->paddr_rri_on_ddr);
2932 }
2933 #else
2934 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
2935 					   struct pld_wlan_enable_cfg *cfg)
2936 {
2937 }
2938 #endif
2939 
2940 /**
2941  * hif_wlan_enable(): call the platform driver to enable wlan
2942  * @scn: HIF Context
2943  *
2944  * This function passes the con_mode and CE configuration to
2945  * platform driver to enable wlan.
2946  *
2947  * Return: Linux error code
2948  */
2949 int hif_wlan_enable(struct hif_softc *scn)
2950 {
2951 	struct pld_wlan_enable_cfg cfg;
2952 	enum pld_driver_mode mode;
2953 	uint32_t con_mode = hif_get_conparam(scn);
2954 
2955 	hif_get_target_ce_config(scn,
2956 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
2957 			&cfg.num_ce_tgt_cfg,
2958 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
2959 			&cfg.num_ce_svc_pipe_cfg,
2960 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
2961 			&cfg.num_shadow_reg_cfg);
2962 
2963 	/* translate from structure size to array size */
2964 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
2965 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
2966 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
2967 
2968 	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
2969 			      &cfg.num_shadow_reg_v2_cfg);
2970 
2971 	hif_print_hal_shadow_register_cfg(&cfg);
2972 
2973 	hif_update_rri_over_ddr_config(scn, &cfg);
2974 
2975 	if (QDF_GLOBAL_FTM_MODE == con_mode)
2976 		mode = PLD_FTM;
2977 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
2978 		mode = PLD_COLDBOOT_CALIBRATION;
2979 	else if (QDF_IS_EPPING_ENABLED(con_mode))
2980 		mode = PLD_EPPING;
2981 	else
2982 		mode = PLD_MISSION;
2983 
2984 	if (BYPASS_QMI)
2985 		return 0;
2986 	else
2987 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
2988 				       mode, QWLAN_VERSIONSTR);
2989 }
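
/*
 * Worked example (illustrative): hif_get_target_ce_config() reports sizes in
 * bytes, e.g. num_ce_tgt_cfg = 12 * sizeof(struct CE_pipe_config); the
 * divisions above convert them to the element counts (here 12) that the
 * platform driver expects in struct pld_wlan_enable_cfg.
 */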
2990 
2991 #ifdef WLAN_FEATURE_EPPING
2992 
2993 #define CE_EPPING_USES_IRQ true
2994 
2995 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
2996 {
2997 	if (CE_EPPING_USES_IRQ)
2998 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
2999 	else
3000 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
3001 	hif_state->target_ce_config = target_ce_config_wlan_epping;
3002 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
3003 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
3004 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
3005 }
3006 #endif
3007 
3008 #ifdef QCN7605_SUPPORT
3009 static inline
3010 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3011 			       struct HIF_CE_state *hif_state)
3012 {
3013 	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
3014 	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
3015 	hif_state->target_ce_config_sz =
3016 				 sizeof(target_ce_config_wlan_qcn7605);
3017 	scn->ce_count = QCN7605_CE_COUNT;
3018 }
3019 #else
3020 static inline
3021 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3022 			       struct HIF_CE_state *hif_state)
3023 {
3024 	HIF_ERROR("QCN7605 not supported");
3025 }
3026 #endif
3027 
3028 #ifdef CE_SVC_CMN_INIT
3029 #ifdef QCA_WIFI_SUPPORT_SRNG
3030 static inline void hif_ce_service_init(void)
3031 {
3032 	ce_service_srng_init();
3033 }
3034 #else
3035 static inline void hif_ce_service_init(void)
3036 {
3037 	ce_service_legacy_init();
3038 }
3039 #endif
3040 #else
3041 static inline void hif_ce_service_init(void)
3042 {
3043 }
3044 #endif
3045 
3046 
3047 /**
3048  * hif_ce_prepare_config() - load the correct static tables.
3049  * @scn: hif context
3050  *
3051  * Epping uses different static attribute tables than mission mode.
3052  */
3053 void hif_ce_prepare_config(struct hif_softc *scn)
3054 {
3055 	uint32_t mode = hif_get_conparam(scn);
3056 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3057 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
3058 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3059 
3060 	hif_ce_service_init();
3061 	hif_state->ce_services = ce_services_attach(scn);
3062 
3063 	scn->ce_count = HOST_CE_COUNT;
3064 	/* if epping is enabled we need to use the epping configuration. */
3065 	if (QDF_IS_EPPING_ENABLED(mode)) {
3066 		hif_ce_prepare_epping_config(hif_state);
3067 	}
3068 
3069 	switch (tgt_info->target_type) {
3070 	default:
3071 		hif_state->host_ce_config = host_ce_config_wlan;
3072 		hif_state->target_ce_config = target_ce_config_wlan;
3073 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
3074 		break;
3075 	case TARGET_TYPE_QCN7605:
3076 		hif_set_ce_config_qcn7605(scn, hif_state);
3077 		break;
3078 	case TARGET_TYPE_AR900B:
3079 	case TARGET_TYPE_QCA9984:
3080 	case TARGET_TYPE_IPQ4019:
3081 	case TARGET_TYPE_QCA9888:
3082 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3083 			hif_state->host_ce_config =
3084 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
3085 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3086 			hif_state->host_ce_config =
3087 				host_lowdesc_ce_cfg_wlan_ar900b;
3088 		} else {
3089 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
3090 		}
3091 
3092 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
3093 		hif_state->target_ce_config_sz =
3094 				sizeof(target_ce_config_wlan_ar900b);
3095 
3096 		break;
3097 
3098 	case TARGET_TYPE_AR9888:
3099 	case TARGET_TYPE_AR9888V2:
3100 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3101 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
3102 		} else {
3103 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
3104 		}
3105 
3106 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
3107 		hif_state->target_ce_config_sz =
3108 					sizeof(target_ce_config_wlan_ar9888);
3109 
3110 		break;
3111 
3112 	case TARGET_TYPE_QCA8074:
3113 	case TARGET_TYPE_QCA8074V2:
3114 	case TARGET_TYPE_QCA6018:
3115 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
3116 			hif_state->host_ce_config =
3117 					host_ce_config_wlan_qca8074_pci;
3118 			hif_state->target_ce_config =
3119 				target_ce_config_wlan_qca8074_pci;
3120 			hif_state->target_ce_config_sz =
3121 				sizeof(target_ce_config_wlan_qca8074_pci);
3122 		} else {
3123 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
3124 			hif_state->target_ce_config =
3125 					target_ce_config_wlan_qca8074;
3126 			hif_state->target_ce_config_sz =
3127 				sizeof(target_ce_config_wlan_qca8074);
3128 		}
3129 		break;
3130 	case TARGET_TYPE_QCA6290:
3131 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
3132 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
3133 		hif_state->target_ce_config_sz =
3134 					sizeof(target_ce_config_wlan_qca6290);
3135 
3136 		scn->ce_count = QCA_6290_CE_COUNT;
3137 		break;
3138 	case TARGET_TYPE_QCA6390:
3139 		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
3140 		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
3141 		hif_state->target_ce_config_sz =
3142 					sizeof(target_ce_config_wlan_qca6390);
3143 
3144 		scn->ce_count = QCA_6390_CE_COUNT;
3145 		break;
3146 	}
3147 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
3148 }
3149 
3150 /**
3151  * hif_ce_open() - do ce specific allocations
3152  * @hif_sc: pointer to hif context
3153  *
3154  * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_NOMEM
3155  */
3156 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
3157 {
3158 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3159 
3160 	qdf_spinlock_create(&hif_state->irq_reg_lock);
3161 	qdf_spinlock_create(&hif_state->keep_awake_lock);
3162 	return QDF_STATUS_SUCCESS;
3163 }
3164 
3165 /**
3166  * hif_ce_close() - do ce specific free
3167  * @hif_sc: pointer to hif context
3168  */
3169 void hif_ce_close(struct hif_softc *hif_sc)
3170 {
3171 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3172 
3173 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
3174 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
3175 }
3176 
3177 /**
3178  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
3179  * @hif_sc: hif context
3180  *
3181  * uses state variables to support cleaning up when hif_config_ce fails.
3182  */
3183 void hif_unconfig_ce(struct hif_softc *hif_sc)
3184 {
3185 	int pipe_num;
3186 	struct HIF_CE_pipe_info *pipe_info;
3187 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3188 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
3189 
3190 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3191 		pipe_info = &hif_state->pipe_info[pipe_num];
3192 		if (pipe_info->ce_hdl) {
3193 			ce_unregister_irq(hif_state, (1 << pipe_num));
3194 		}
3195 	}
3196 	deinit_tasklet_workers(hif_hdl);
3197 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3198 		pipe_info = &hif_state->pipe_info[pipe_num];
3199 		if (pipe_info->ce_hdl) {
3200 			ce_fini(pipe_info->ce_hdl);
3201 			pipe_info->ce_hdl = NULL;
3202 			pipe_info->buf_sz = 0;
3203 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
3204 		}
3205 	}
3206 	if (hif_sc->athdiag_procfs_inited) {
3207 		athdiag_procfs_remove();
3208 		hif_sc->athdiag_procfs_inited = false;
3209 	}
3210 }
3211 
3212 #ifdef CONFIG_BYPASS_QMI
3213 #ifdef QCN7605_SUPPORT
3214 /**
3215  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3216  * @scn: pointer to HIF structure
3217  *
3218  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3219  *
3220  * Return: void
3221  */
3222 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3223 {
3224 	void *target_va;
3225 	phys_addr_t target_pa;
3226 	struct ce_info *ce_info_ptr;
3227 	uint32_t msi_data_start;
3228 	uint32_t msi_data_count;
3229 	uint32_t msi_irq_start;
3230 	uint32_t i = 0;
3231 	int ret;
3232 
3233 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev,
3234 					     scn->qdf_dev->dev,
3235 					     FW_SHARED_MEM +
3236 					     sizeof(struct ce_info),
3237 					     &target_pa);
3238 	if (!target_va)
3239 		return;
3240 
3241 	ce_info_ptr = (struct ce_info *)target_va;
3242 
3243 	if (scn->vaddr_rri_on_ddr) {
3244 		ce_info_ptr->rri_over_ddr_low_paddr  =
3245 			 BITS0_TO_31(scn->paddr_rri_on_ddr);
3246 		ce_info_ptr->rri_over_ddr_high_paddr =
3247 			 BITS32_TO_35(scn->paddr_rri_on_ddr);
3248 	}
3249 
3250 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3251 					  &msi_data_count, &msi_data_start,
3252 					  &msi_irq_start);
3253 	if (ret) {
3254 		hif_err("Failed to get CE msi config");
3255 		return;
3256 	}
3257 
3258 	for (i = 0; i < CE_COUNT_MAX; i++) {
3259 		ce_info_ptr->cfg[i].ce_id = i;
3260 		ce_info_ptr->cfg[i].msi_vector =
3261 			 (i % msi_data_count) + msi_irq_start;
3262 	}
3263 
3264 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3265 	hif_info("target va %pK target pa %pa", target_va, &target_pa);
3266 }
3267 #else
3268 /**
3269  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3270  * @scn: pointer to HIF structure
3271  *
3272  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3273  *
3274  * Return: void
3275  */
3276 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3277 {
3278 	void *target_va;
3279 	phys_addr_t target_pa;
3280 
3281 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
3282 				FW_SHARED_MEM, &target_pa);
3283 	if (NULL == target_va) {
3284 		HIF_TRACE("Memory allocation failed could not post target buf");
3285 		return;
3286 	}
3287 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3288 	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
3289 }
3290 #endif
3291 
3292 #else
3293 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
3294 {
3295 }
3296 #endif
3297 
3298 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
3299 				bool wait_for_it)
3300 {
3301 	/* todo */
3302 	return 0;
3303 }
3304 
3305 /**
3306  * hif_config_ce() - configure copy engines
3307  * @scn: hif context
3308  *
3309  * Prepares fw, copy engine hardware and host sw according
3310  * to the attributes selected by hif_ce_prepare_config.
3311  *
3312  * also calls athdiag_procfs_init
3313  *
3314  * Return: 0 for success, nonzero for failure.
3315  */
3316 int hif_config_ce(struct hif_softc *scn)
3317 {
3318 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3319 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3320 	struct HIF_CE_pipe_info *pipe_info;
3321 	int pipe_num;
3322 	struct CE_state *ce_state = NULL;
3323 
3324 #ifdef ADRASTEA_SHADOW_REGISTERS
3325 	int i;
3326 #endif
3327 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
3328 
3329 	scn->notice_send = true;
3330 	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
3331 
3332 	hif_post_static_buf_to_target(scn);
3333 
3334 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
3335 
3336 	hif_config_rri_on_ddr(scn);
3337 
3338 	if (ce_srng_based(scn))
3339 		scn->bus_ops.hif_target_sleep_state_adjust =
3340 			&hif_srng_sleep_state_adjust;
3341 
3342 	/* Initialise the CE debug history sysfs interface inputs ce_id and
3343 	 * index. Disable data storing
3344 	 */
3345 	reset_ce_debug_history(scn);
3346 
3347 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3348 		struct CE_attr *attr;
3349 
3350 		pipe_info = &hif_state->pipe_info[pipe_num];
3351 		pipe_info->pipe_num = pipe_num;
3352 		pipe_info->HIF_CE_state = hif_state;
3353 		attr = &hif_state->host_ce_config[pipe_num];
3354 
3355 		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
3356 		ce_state = scn->ce_id_to_state[pipe_num];
3357 		if (!ce_state) {
3358 			A_TARGET_ACCESS_UNLIKELY(scn);
3359 			goto err;
3360 		}
3361 		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
3362 		QDF_ASSERT(pipe_info->ce_hdl != NULL);
3363 		if (pipe_info->ce_hdl == NULL) {
3364 			rv = QDF_STATUS_E_FAILURE;
3365 			A_TARGET_ACCESS_UNLIKELY(scn);
3366 			goto err;
3367 		}
3368 
3369 		ce_state->lro_data = qdf_lro_init();
3370 
3371 		if (attr->flags & CE_ATTR_DIAG) {
3372 			/* Reserve the ultimate CE for
3373 			 * Diagnostic Window support
3374 			 */
3375 			hif_state->ce_diag = pipe_info->ce_hdl;
3376 			continue;
3377 		}
3378 
3379 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3380 				(ce_state->htt_rx_data))
3381 			continue;
3382 
3383 		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
3384 		if (attr->dest_nentries > 0) {
3385 			atomic_set(&pipe_info->recv_bufs_needed,
3386 				   init_buffer_count(attr->dest_nentries - 1));
3387 			/*SRNG based CE has one entry less */
3388 			if (ce_srng_based(scn))
3389 				atomic_dec(&pipe_info->recv_bufs_needed);
3390 		} else {
3391 			atomic_set(&pipe_info->recv_bufs_needed, 0);
3392 		}
3393 		ce_tasklet_init(hif_state, (1 << pipe_num));
3394 		ce_register_irq(hif_state, (1 << pipe_num));
3395 	}
3396 
3397 	if (athdiag_procfs_init(scn) != 0) {
3398 		A_TARGET_ACCESS_UNLIKELY(scn);
3399 		goto err;
3400 	}
3401 	scn->athdiag_procfs_inited = true;
3402 
3403 	HIF_DBG("%s: ce_init done", __func__);
3404 
3405 	init_tasklet_workers(hif_hdl);
3406 
3407 	HIF_DBG("%s: X, ret = %d", __func__, rv);
3408 
3409 #ifdef ADRASTEA_SHADOW_REGISTERS
3410 	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
3411 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
3412 		HIF_DBG("%s Shadow Register%d is mapped to address %x",
3413 			  __func__, i,
3414 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
3415 	}
3416 #endif
3417 
3418 	return rv != QDF_STATUS_SUCCESS;
3419 
3420 err:
3421 	/* Failure, so clean up */
3422 	hif_unconfig_ce(scn);
3423 	HIF_TRACE("%s: X, ret = %d", __func__, rv);
3424 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
3425 }
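
/*
 * Worked example (illustrative): a legacy CE with dest_nentries = 512 gets
 * at most 512 - 1 = 511 posted receive buffers (one slot stays empty so a
 * full ring can be told apart from an empty one); an SRNG based CE reserves
 * one further entry, so its initial recv_bufs_needed is decremented once
 * more before hif_start() posts the buffers.
 */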
3426 
3427 #ifdef IPA_OFFLOAD
3428 /**
3429  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
3430  * @scn: bus context
3431  * @ce_sr: copyengine source ring shared memory info
3432  * @ce_sr_ring_size: copyengine source ring size
3433  * @ce_reg_paddr: copyengine register physical address
3434  *
3435  * When the IPA micro controller data path offload feature is enabled,
3436  * HIF should release copy engine related resource information to the
3437  * IPA uC, which will access the hardware resources using that information.
3438  *
3439  * Return: None
3440  */
3441 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
3442 			     qdf_shared_mem_t **ce_sr,
3443 			     uint32_t *ce_sr_ring_size,
3444 			     qdf_dma_addr_t *ce_reg_paddr)
3445 {
3446 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3447 	struct HIF_CE_pipe_info *pipe_info =
3448 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
3449 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3450 
3451 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
3452 			    ce_reg_paddr);
3453 }
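
/*
 * Illustrative sketch (not compiled in; the caller and the local variable
 * names below are assumptions, only hif_ce_ipa_get_ce_resource() comes
 * from this file): how an IPA offload consumer might fetch the exported
 * CE resources.
 *
 *	qdf_shared_mem_t *ce_sr;
 *	uint32_t ce_sr_ring_size;
 *	qdf_dma_addr_t ce_reg_paddr;
 *
 *	hif_ce_ipa_get_ce_resource(scn, &ce_sr, &ce_sr_ring_size,
 *				   &ce_reg_paddr);
 *	// ce_sr, ce_sr_ring_size and ce_reg_paddr can now be handed to
 *	// the IPA micro controller setup path.
 */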
3454 #endif /* IPA_OFFLOAD */
3455 
3456 
3457 #ifdef ADRASTEA_SHADOW_REGISTERS
3458 
3459 /*
3460  * Current shadow register config
3461  *
3462  * -----------------------------------------------------------
3463  * Shadow Register      |     CE   |    src/dst write index
3464  * -----------------------------------------------------------
3465  *         0            |     0    |           src
3466  *         1     No Config - Doesn't point to anything
3467  *         2     No Config - Doesn't point to anything
3468  *         3            |     3    |           src
3469  *         4            |     4    |           src
3470  *         5            |     5    |           src
3471  *         6     No Config - Doesn't point to anything
3472  *         7            |     7    |           src
3473  *         8     No Config - Doesn't point to anything
3474  *         9     No Config - Doesn't point to anything
3475  *         10    No Config - Doesn't point to anything
3476  *         11    No Config - Doesn't point to anything
3477  * -----------------------------------------------------------
3478  *         12    No Config - Doesn't point to anything
3479  *         13           |     1    |           dst
3480  *         14           |     2    |           dst
3481  *         15    No Config - Doesn't point to anything
3482  *         16    No Config - Doesn't point to anything
3483  *         17    No Config - Doesn't point to anything
3484  *         18    No Config - Doesn't point to anything
3485  *         19           |     7    |           dst
3486  *         20           |     8    |           dst
3487  *         21    No Config - Doesn't point to anything
3488  *         22    No Config - Doesn't point to anything
3489  *         23    No Config - Doesn't point to anything
3490  * -----------------------------------------------------------
3491  *
3492  *
3493  * ToDo - Move the shadow register config to the following layout in
3494  * the future. This frees up a block of shadow registers towards the
3495  * end, which can be used for other purposes.
3496  *
3497  * -----------------------------------------------------------
3498  * Shadow Register      |     CE   |    src/dst write index
3499  * -----------------------------------------------------------
3500  *      0            |     0    |           src
3501  *      1            |     3    |           src
3502  *      2            |     4    |           src
3503  *      3            |     5    |           src
3504  *      4            |     7    |           src
3505  * -----------------------------------------------------------
3506  *      5            |     1    |           dst
3507  *      6            |     2    |           dst
3508  *      7            |     7    |           dst
3509  *      8            |     8    |           dst
3510  * -----------------------------------------------------------
3511  *      9     No Config - Doesn't point to anything
3512  *      12    No Config - Doesn't point to anything
3513  *      13    No Config - Doesn't point to anything
3514  *      14    No Config - Doesn't point to anything
3515  *      15    No Config - Doesn't point to anything
3516  *      16    No Config - Doesn't point to anything
3517  *      17    No Config - Doesn't point to anything
3518  *      18    No Config - Doesn't point to anything
3519  *      19    No Config - Doesn't point to anything
3520  *      20    No Config - Doesn't point to anything
3521  *      21    No Config - Doesn't point to anything
3522  *      22    No Config - Doesn't point to anything
3523  *      23    No Config - Doesn't point to anything
3524  * -----------------------------------------------------------
3525  */
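
/*
 * Illustrative sketch (assumption about how the mapping above is
 * consumed; the actual wiring lives in the CE register access macros):
 * a write-index update through the shadow registers would translate the
 * CE control address into the shadow address returned by the helpers
 * below and write there instead of the CE register itself, e.g.
 *
 *	A_TARGET_WRITE(scn, shadow_sr_wr_ind_addr(scn, ctrl_addr),
 *		       write_index);
 */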
3526 #ifndef QCN7605_SUPPORT
3527 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3528 {
3529 	u32 addr = 0;
3530 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3531 
3532 	switch (ce) {
3533 	case 0:
3534 		addr = SHADOW_VALUE0;
3535 		break;
3536 	case 3:
3537 		addr = SHADOW_VALUE3;
3538 		break;
3539 	case 4:
3540 		addr = SHADOW_VALUE4;
3541 		break;
3542 	case 5:
3543 		addr = SHADOW_VALUE5;
3544 		break;
3545 	case 7:
3546 		addr = SHADOW_VALUE7;
3547 		break;
3548 	default:
3549 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3550 		QDF_ASSERT(0);
3551 	}
3552 	return addr;
3553 
3554 }
3555 
3556 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3557 {
3558 	u32 addr = 0;
3559 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3560 
3561 	switch (ce) {
3562 	case 1:
3563 		addr = SHADOW_VALUE13;
3564 		break;
3565 	case 2:
3566 		addr = SHADOW_VALUE14;
3567 		break;
3568 	case 5:
3569 		addr = SHADOW_VALUE17;
3570 		break;
3571 	case 7:
3572 		addr = SHADOW_VALUE19;
3573 		break;
3574 	case 8:
3575 		addr = SHADOW_VALUE20;
3576 		break;
3577 	case 9:
3578 		addr = SHADOW_VALUE21;
3579 		break;
3580 	case 10:
3581 		addr = SHADOW_VALUE22;
3582 		break;
3583 	case 11:
3584 		addr = SHADOW_VALUE23;
3585 		break;
3586 	default:
3587 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3588 		QDF_ASSERT(0);
3589 	}
3590 
3591 	return addr;
3592 
3593 }
3594 #else
3595 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3596 {
3597 	u32 addr = 0;
3598 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3599 
3600 	switch (ce) {
3601 	case 0:
3602 		addr = SHADOW_VALUE0;
3603 		break;
3604 	case 4:
3605 		addr = SHADOW_VALUE4;
3606 		break;
3607 	case 5:
3608 		addr = SHADOW_VALUE5;
3609 		break;
3610 	default:
3611 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3612 		QDF_ASSERT(0);
3613 	}
3614 	return addr;
3615 }
3616 
3617 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3618 {
3619 	u32 addr = 0;
3620 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3621 
3622 	switch (ce) {
3623 	case 1:
3624 		addr = SHADOW_VALUE13;
3625 		break;
3626 	case 2:
3627 		addr = SHADOW_VALUE14;
3628 		break;
3629 	case 3:
3630 		addr = SHADOW_VALUE15;
3631 		break;
3632 	case 5:
3633 		addr = SHADOW_VALUE17;
3634 		break;
3635 	case 7:
3636 		addr = SHADOW_VALUE19;
3637 		break;
3638 	case 8:
3639 		addr = SHADOW_VALUE20;
3640 		break;
3641 	case 9:
3642 		addr = SHADOW_VALUE21;
3643 		break;
3644 	case 10:
3645 		addr = SHADOW_VALUE22;
3646 		break;
3647 	case 11:
3648 		addr = SHADOW_VALUE23;
3649 		break;
3650 	default:
3651 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3652 		QDF_ASSERT(0);
3653 	}
3654 
3655 	return addr;
3656 }
3657 #endif
3658 #endif
3659 
3660 #if defined(FEATURE_LRO)
3661 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
3662 {
3663 	struct CE_state *ce_state;
3664 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3665 
3666 	ce_state = scn->ce_id_to_state[ctx_id];
3667 
3668 	return ce_state->lro_data;
3669 }
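
/*
 * Illustrative sketch (hypothetical caller; only hif_ce_get_lro_ctx()
 * is from this file): the per-CE LRO context initialised via
 * qdf_lro_init() during CE configuration is looked up by CE id.
 *
 *	void *lro_ctx = hif_ce_get_lro_ctx(hif_hdl, ce_id);
 */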
3670 #endif
3671 
3672 /**
3673  * hif_map_service_to_pipe() - returns the ce ids pertaining to
3674  * this service
3675  * @hif_hdl: hif_opaque_softc pointer.
3676  * @svc_id: Service ID for which the mapping is needed.
3677  * @ul_pipe: address of the container in which ul pipe is returned.
3678  * @dl_pipe: address of the container in which dl pipe is returned.
3679  * @ul_is_polled: address of the container in which a bool
3680  *			indicating if the UL CE for this service
3681  *			is polled is returned.
3682  * @dl_is_polled: address of the container in which a bool
3683  *			indicating if the DL CE for this service
3684  *			is polled is returned.
3685  *
3686  * Return: Indicates whether the service has been found in the table.
3687  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
3688  *         There will be warning logs if either leg has not been updated
3689  *         because it missed the entry in the table (but this is not an error).
3690  */
3691 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
3692 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
3693 			int *dl_is_polled)
3694 {
3695 	int status = QDF_STATUS_E_INVAL;
3696 	unsigned int i;
3697 	struct service_to_pipe element;
3698 	struct service_to_pipe *tgt_svc_map_to_use;
3699 	uint32_t sz_tgt_svc_map_to_use;
3700 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3701 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3702 	bool dl_updated = false;
3703 	bool ul_updated = false;
3704 
3705 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
3706 				       &sz_tgt_svc_map_to_use);
3707 
3708 	*dl_is_polled = 0;  /* polling for received messages not supported */
3709 
3710 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
3711 
3712 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
3713 		if (element.service_id == svc_id) {
3714 			if (element.pipedir == PIPEDIR_OUT) {
3715 				*ul_pipe = element.pipenum;
3716 				*ul_is_polled =
3717 					(hif_state->host_ce_config[*ul_pipe].flags &
3718 					 CE_ATTR_DISABLE_INTR) != 0;
3719 				ul_updated = true;
3720 			} else if (element.pipedir == PIPEDIR_IN) {
3721 				*dl_pipe = element.pipenum;
3722 				dl_updated = true;
3723 			}
3724 			status = QDF_STATUS_SUCCESS;
3725 		}
3726 	}
3727 	if (!ul_updated)
3728 		HIF_DBG("ul pipe is NOT updated for service %d", svc_id);
3729 	if (!dl_updated)
3730 		HIF_DBG("dl pipe is NOT updated for service %d", svc_id);
3731 
3732 	return status;
3733 }
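
/*
 * Illustrative sketch (hypothetical caller; the pattern mirrors
 * hif_get_wake_ce_id() further below; configure_pipes() is an assumed
 * helper used only for illustration):
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_is_polled, dl_is_polled;
 *	int status;
 *
 *	status = hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
 *					 &ul_pipe, &dl_pipe,
 *					 &ul_is_polled, &dl_is_polled);
 *	if (status == QDF_STATUS_SUCCESS)
 *		configure_pipes(ul_pipe, dl_pipe);
 */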
3734 
3735 #ifdef SHADOW_REG_DEBUG
3736 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
3737 		uint32_t CE_ctrl_addr)
3738 {
3739 	uint32_t read_from_hw, srri_from_ddr = 0;
3740 
3741 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
3742 
3743 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3744 
3745 	if (read_from_hw != srri_from_ddr) {
3746 		HIF_ERROR("%s: error: read from ddr = %d, actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3747 		       __func__, srri_from_ddr, read_from_hw,
3748 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3749 		QDF_ASSERT(0);
3750 	}
3751 	return srri_from_ddr;
3752 }
3753 
3754 
3755 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
3756 		uint32_t CE_ctrl_addr)
3757 {
3758 	uint32_t read_from_hw, drri_from_ddr = 0;
3759 
3760 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
3761 
3762 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3763 
3764 	if (read_from_hw != drri_from_ddr) {
3765 		HIF_ERROR("error: read from ddr = %d, actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3766 		       drri_from_ddr, read_from_hw,
3767 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3768 		QDF_ASSERT(0);
3769 	}
3770 	return drri_from_ddr;
3771 }
3772 
3773 #endif
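
/*
 * Note (assumption about the SHADOW_REG_DEBUG wiring, not verified
 * against ce_reg.h here): the DEBUG_CE_*_RING_READ_IDX_GET() helpers
 * above are meant to stand in for the regular read-index accessors so
 * that every read cross-checks the SRRI/DRRI value cached in DDR against
 * the live CE register and asserts on a mismatch, e.g.
 *
 *	sw_read_idx = DEBUG_CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
 */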
3774 
3775 /**
3776  * hif_dump_ce_registers() - dump ce registers
3777  * @scn: hif_softc pointer.
3778  *
3779  * Output the copy engine registers
3780  *
3781  * Return: 0 for success or error code
3782  */
3783 int hif_dump_ce_registers(struct hif_softc *scn)
3784 {
3785 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3786 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
3787 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
3788 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
3789 	uint16_t i;
3790 	QDF_STATUS status;
3791 
3792 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
3793 		if (scn->ce_id_to_state[i] == NULL) {
3794 			HIF_DBG("CE%d not used.", i);
3795 			continue;
3796 		}
3797 
3798 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
3799 					   (uint8_t *) &ce_reg_values[0],
3800 					   ce_reg_word_size * sizeof(uint32_t));
3801 
3802 		if (status != QDF_STATUS_SUCCESS) {
3803 			HIF_ERROR("Dumping CE registers failed!");
3804 			return -EACCES;
3805 		}
3806 		HIF_ERROR("CE%d=>\n", i);
3807 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
3808 				   (uint8_t *) &ce_reg_values[0],
3809 				   ce_reg_word_size * sizeof(uint32_t));
3810 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
3811 				+ SR_WR_INDEX_ADDRESS),
3812 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
3813 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
3814 				+ CURRENT_SRRI_ADDRESS),
3815 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
3816 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
3817 				+ DST_WR_INDEX_ADDRESS),
3818 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
3819 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
3820 				+ CURRENT_DRRI_ADDRESS),
3821 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
3822 		qdf_print("---");
3823 	}
3824 	return 0;
3825 }
3826 qdf_export_symbol(hif_dump_ce_registers);
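
/*
 * Illustrative sketch (hypothetical debug caller): dump every active CE
 * when a pipe appears stuck.  hif_dump_ce_registers() returns 0 on
 * success or -EACCES if the diag read of the register block fails.
 *
 *	if (hif_dump_ce_registers(scn))
 *		HIF_ERROR("CE register dump failed");
 */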
3827 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
3828 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
3829 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
3830 {
3831 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3832 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3833 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
3834 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3835 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3836 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
3837 	struct CE_ring_state *src_ring = ce_state->src_ring;
3838 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
3839 
3840 	if (src_ring) {
3841 		hif_info->ul_pipe.nentries = src_ring->nentries;
3842 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
3843 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
3844 		hif_info->ul_pipe.write_index = src_ring->write_index;
3845 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
3846 		hif_info->ul_pipe.base_addr_CE_space =
3847 			src_ring->base_addr_CE_space;
3848 		hif_info->ul_pipe.base_addr_owner_space =
3849 			src_ring->base_addr_owner_space;
3850 	}
3851 
3852 
3853 	if (dest_ring) {
3854 		hif_info->dl_pipe.nentries = dest_ring->nentries;
3855 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
3856 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
3857 		hif_info->dl_pipe.write_index = dest_ring->write_index;
3858 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
3859 		hif_info->dl_pipe.base_addr_CE_space =
3860 			dest_ring->base_addr_CE_space;
3861 		hif_info->dl_pipe.base_addr_owner_space =
3862 			dest_ring->base_addr_owner_space;
3863 	}
3864 
3865 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
3866 	hif_info->ctrl_addr = ce_state->ctrl_addr;
3867 
3868 	return hif_info;
3869 }
3870 qdf_export_symbol(hif_get_addl_pipe_info);
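
/*
 * Illustrative sketch (hypothetical NSS offload caller; the local
 * variable is an assumption): snapshot the ring state of one pipe.
 *
 *	struct hif_pipe_addl_info info = { 0 };
 *
 *	hif_get_addl_pipe_info(osc, &info, pipe_num);
 *	// info.ul_pipe/info.dl_pipe now carry the src/dest ring indices
 *	// and base addresses; info.ctrl_addr is the CE control address.
 */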
3871 
3872 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
3873 {
3874 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3875 
3876 	scn->nss_wifi_ol_mode = mode;
3877 	return 0;
3878 }
3879 qdf_export_symbol(hif_set_nss_wifiol_mode);
3880 #endif
3881 
3882 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
3883 {
3884 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3885 	scn->hif_attribute = hif_attrib;
3886 }
3887 
3888 
3889 /* disable interrupts (only applicable for legacy copy engine currently) */
3890 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
3891 {
3892 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3893 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
3894 	uint32_t ctrl_addr = CE_state->ctrl_addr;
3895 
3896 	Q_TARGET_ACCESS_BEGIN(scn);
3897 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
3898 	Q_TARGET_ACCESS_END(scn);
3899 }
3900 qdf_export_symbol(hif_disable_interrupt);
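
/*
 * Illustrative sketch (hypothetical caller): mask copy-complete
 * interrupts on a legacy CE pipe before servicing it in polled mode.
 *
 *	hif_disable_interrupt(osc, pipe_num);
 */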
3901 
3902 /**
3903  * hif_fw_event_handler() - hif fw event handler
3904  * @hif_state: pointer to hif ce state structure
3905  *
3906  * Process fw events and raise HTC callback to process fw events.
3907  *
3908  * Return: none
3909  */
3910 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
3911 {
3912 	struct hif_msg_callbacks *msg_callbacks =
3913 		&hif_state->msg_callbacks_current;
3914 
3915 	if (!msg_callbacks->fwEventHandler)
3916 		return;
3917 
3918 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
3919 			QDF_STATUS_E_FAILURE);
3920 }
3921 
3922 #ifndef QCA_WIFI_3_0
3923 /**
3924  * hif_fw_interrupt_handler() - FW interrupt handler
3925  * @irq: irq number
3926  * @arg: the user pointer
3927  *
3928  * Called from the PCI interrupt handler when the firmware raises
3929  * an interrupt to the Host.
3930  *
3931  * Only registered for legacy CE devices.
3932  *
3933  * Return: status of handled irq
3934  */
3935 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3936 {
3937 	struct hif_softc *scn = arg;
3938 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3939 	uint32_t fw_indicator_address, fw_indicator;
3940 
3941 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
3942 		return ATH_ISR_NOSCHED;
3943 
3944 	fw_indicator_address = hif_state->fw_indicator_address;
3945 	/* For sudden unplug this will return ~0 */
3946 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
3947 
3948 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
3949 		/* ACK: clear Target-side pending event */
3950 		A_TARGET_WRITE(scn, fw_indicator_address,
3951 			       fw_indicator & ~FW_IND_EVENT_PENDING);
3952 		if (Q_TARGET_ACCESS_END(scn) < 0)
3953 			return ATH_ISR_SCHED;
3954 
3955 		if (hif_state->started) {
3956 			hif_fw_event_handler(hif_state);
3957 		} else {
3958 			/*
3959 			 * Probable Target failure before we're prepared
3960 			 * to handle it.  Generally unexpected.
3961 			 * fw_indicator used as bitmap, and defined as below:
3962 			 *     FW_IND_EVENT_PENDING    0x1
3963 			 *     FW_IND_INITIALIZED      0x2
3964 			 *     FW_IND_NEEDRECOVER      0x4
3965 			 */
3966 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
3967 				("%s: Early firmware event indicated 0x%x\n",
3968 				 __func__, fw_indicator));
3969 		}
3970 	} else {
3971 		if (Q_TARGET_ACCESS_END(scn) < 0)
3972 			return ATH_ISR_SCHED;
3973 	}
3974 
3975 	return ATH_ISR_SCHED;
3976 }
3977 #else
3978 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3979 {
3980 	return ATH_ISR_SCHED;
3981 }
3982 #endif /* #ifndef QCA_WIFI_3_0 */
3983 
3984 
3985 /**
3986  * hif_wlan_disable(): call the platform driver to disable wlan
3987  * @scn: HIF Context
3988  *
3989  * This function passes the con_mode to platform driver to disable
3990  * wlan.
3991  *
3992  * Return: void
3993  */
3994 void hif_wlan_disable(struct hif_softc *scn)
3995 {
3996 	enum pld_driver_mode mode;
3997 	uint32_t con_mode = hif_get_conparam(scn);
3998 
3999 	if (scn->target_status == TARGET_STATUS_RESET)
4000 		return;
4001 
4002 	if (QDF_GLOBAL_FTM_MODE == con_mode)
4003 		mode = PLD_FTM;
4004 	else if (QDF_IS_EPPING_ENABLED(con_mode))
4005 		mode = PLD_EPPING;
4006 	else
4007 		mode = PLD_MISSION;
4008 
4009 	pld_wlan_disable(scn->qdf_dev->dev, mode);
4010 }
4011 
4012 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
4013 {
4014 	QDF_STATUS status;
4015 	uint8_t ul_pipe, dl_pipe;
4016 	int ul_is_polled, dl_is_polled;
4017 
4018 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
4019 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
4020 					 HTC_CTRL_RSVD_SVC,
4021 					 &ul_pipe, &dl_pipe,
4022 					 &ul_is_polled, &dl_is_polled);
4023 	if (status) {
4024 		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
4025 		return qdf_status_to_os_return(status);
4026 	}
4027 
4028 	*ce_id = dl_pipe;
4029 
4030 	return 0;
4031 }
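
/*
 * Illustrative sketch (hypothetical wake-interrupt setup path; only
 * hif_get_wake_ce_id() is from this file, enable_wake_irq_for_ce() is
 * an assumed helper used for illustration):
 *
 *	uint8_t wake_ce_id;
 *
 *	if (!hif_get_wake_ce_id(scn, &wake_ce_id))
 *		enable_wake_irq_for_ce(wake_ce_id);
 */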
4032