xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ce/ce_main.c (revision dd4dc88b837a295134aa9869114a2efee0f4894b)
1 /*
2  * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "targcfg.h"
20 #include "qdf_lock.h"
21 #include "qdf_status.h"
22 #include "qdf_status.h"
23 #include <qdf_atomic.h>         /* qdf_atomic_read */
24 #include <targaddrs.h>
25 #include "hif_io32.h"
26 #include <hif.h>
27 #include <target_type.h>
28 #include "regtable.h"
29 #define ATH_MODULE_NAME hif
30 #include <a_debug.h>
31 #include "hif_main.h"
32 #include "ce_api.h"
33 #include "qdf_trace.h"
34 #include "pld_common.h"
35 #include "hif_debug.h"
36 #include "ce_internal.h"
37 #include "ce_reg.h"
38 #include "ce_assignment.h"
39 #include "ce_tasklet.h"
40 #ifndef CONFIG_WIN
41 #include "qwlan_version.h"
42 #endif
43 #include "qdf_module.h"
44 
45 #define CE_POLL_TIMEOUT 10      /* ms */
46 
47 #define AGC_DUMP         1
48 #define CHANINFO_DUMP    2
49 #define BB_WATCHDOG_DUMP 3
50 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
51 #define PCIE_ACCESS_DUMP 4
52 #endif
53 #include "mp_dev.h"
54 
55 #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6290) || \
56 	defined(QCA_WIFI_QCA6018)) && !defined(QCA_WIFI_SUPPORT_SRNG)
57 #define QCA_WIFI_SUPPORT_SRNG
58 #endif
59 
60 /* Forward references */
61 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
62 
63 /*
64  * Fix for EV118783: poll to check whether a BMI response has arrived,
65  * rather than waiting for an interrupt, which may be lost.
66  */
67 /* #define BMI_RSP_POLLING */
68 #define BMI_RSP_TO_MILLISEC  1000
69 
70 #ifdef CONFIG_BYPASS_QMI
71 #define BYPASS_QMI 1
72 #else
73 #define BYPASS_QMI 0
74 #endif
75 
76 #ifdef ENABLE_10_4_FW_HDR
77 #if (ENABLE_10_4_FW_HDR == 1)
78 #define WDI_IPA_SERVICE_GROUP 5
79 #define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP, 0)
80 #define HTT_DATA2_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 1)
81 #define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
82 #endif /* ENABLE_10_4_FW_HDR == 1 */
83 #endif /* ENABLE_10_4_FW_HDR */
84 
85 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
86 static void hif_config_rri_on_ddr(struct hif_softc *scn);
87 
88 /**
89  * hif_target_access_log_dump() - dump access log
90  *
91  * dump access log
92  *
93  * Return: n/a
94  */
95 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
96 static void hif_target_access_log_dump(void)
97 {
98 	hif_target_dump_access_log();
99 }
100 #endif
101 
102 
103 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
104 		      uint8_t cmd_id, bool start)
105 {
106 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
107 
108 	switch (cmd_id) {
109 	case AGC_DUMP:
110 		if (start)
111 			priv_start_agc(scn);
112 		else
113 			priv_dump_agc(scn);
114 		break;
115 	case CHANINFO_DUMP:
116 		if (start)
117 			priv_start_cap_chaninfo(scn);
118 		else
119 			priv_dump_chaninfo(scn);
120 		break;
121 	case BB_WATCHDOG_DUMP:
122 		priv_dump_bbwatchdog(scn);
123 		break;
124 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
125 	case PCIE_ACCESS_DUMP:
126 		hif_target_access_log_dump();
127 		break;
128 #endif
129 	default:
130 		HIF_ERROR("%s: Invalid htc dump command", __func__);
131 		break;
132 	}
133 }
134 
135 static void ce_poll_timeout(void *arg)
136 {
137 	struct CE_state *CE_state = (struct CE_state *)arg;
138 
139 	if (CE_state->timer_inited) {
140 		ce_per_engine_service(CE_state->scn, CE_state->id);
141 		qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
142 	}
143 }
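
/*
 * Illustrative sketch (not compiled into the driver): how the self-rearming
 * poll timer serviced by ce_poll_timeout() above gets armed. This mirrors the
 * qdf_timer_init()/qdf_timer_mod() sequence performed later in ce_init() for
 * CEs created with CE_ATTR_ENABLE_POLL; the helper name is hypothetical.
 */
#if 0
static void example_arm_ce_poll_timer(struct hif_softc *scn,
				       struct CE_state *CE_state)
{
	qdf_timer_init(scn->qdf_dev, &CE_state->poll_timer,
		       ce_poll_timeout, CE_state, QDF_TIMER_TYPE_WAKE_APPS);
	/* timer_inited lets ce_poll_timeout() keep re-arming itself */
	CE_state->timer_inited = true;
	qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
}
#endif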
144 
145 static unsigned int roundup_pwr2(unsigned int n)
146 {
147 	int i;
148 	unsigned int test_pwr2;
149 
150 	if (!(n & (n - 1)))
151 		return n; /* already a power of 2 */
152 
153 	test_pwr2 = 4;
154 	for (i = 0; i < 29; i++) {
155 		if (test_pwr2 > n)
156 			return test_pwr2;
157 		test_pwr2 = test_pwr2 << 1;
158 	}
159 
160 	QDF_ASSERT(0); /* n too large */
161 	return 0;
162 }
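
/*
 * Illustrative sketch (not compiled into the driver): the rounding behaviour
 * of roundup_pwr2() for a few representative ring sizes. QDF_ASSERT() is the
 * same assert macro used elsewhere in this file.
 */
#if 0
static void roundup_pwr2_example(void)
{
	/* 100 is not a power of 2; the next power of 2 above it is 128 */
	QDF_ASSERT(roundup_pwr2(100) == 128);
	/* exact powers of 2 are returned unchanged */
	QDF_ASSERT(roundup_pwr2(64) == 64);
	/* ce_init() uses this to round attr->src_nentries/dest_nentries */
	QDF_ASSERT(roundup_pwr2(1) == 1);
}
#endif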
163 
164 #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
165 #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
166 
167 static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
168 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
169 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
170 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
171 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
172 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
173 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
174 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
175 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
176 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
177 #ifdef QCA_WIFI_3_0_ADRASTEA
178 	{ 9, ADRASTEA_DST_WR_INDEX_OFFSET},
179 	{ 10, ADRASTEA_DST_WR_INDEX_OFFSET},
180 	{ 11, ADRASTEA_DST_WR_INDEX_OFFSET},
181 #endif
182 };
183 
184 #ifdef QCN7605_SUPPORT
185 static struct shadow_reg_cfg target_shadow_reg_cfg_map_qcn7605[] = {
186 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
187 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
188 	{ 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
189 	{ 3, ADRASTEA_DST_WR_INDEX_OFFSET},
190 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
191 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
192 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
193 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
194 };
195 #endif
196 
197 #ifdef WLAN_FEATURE_EPPING
198 static struct shadow_reg_cfg target_shadow_reg_cfg_epping[] = {
199 	{ 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
200 	{ 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
201 	{ 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
202 	{ 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
203 	{ 1, ADRASTEA_DST_WR_INDEX_OFFSET},
204 	{ 2, ADRASTEA_DST_WR_INDEX_OFFSET},
205 	{ 5, ADRASTEA_DST_WR_INDEX_OFFSET},
206 	{ 7, ADRASTEA_DST_WR_INDEX_OFFSET},
207 	{ 8, ADRASTEA_DST_WR_INDEX_OFFSET},
208 };
209 #endif
210 
211 /* CE_PCI TABLE */
212 /*
213  * NOTE: the table below is out of date, though still a useful reference.
214  * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
215  * mapping of HTC services to HIF pipes.
216  */
217 /*
218  * This authoritative table defines Copy Engine configuration and the mapping
219  * of services/endpoints to CEs.  A subset of this information is passed to
220  * the Target during startup as a prerequisite to entering BMI phase.
221  * See:
222  *    target_service_to_ce_map - Target-side mapping
223  *    hif_map_service_to_pipe      - Host-side mapping
224  *    target_ce_config         - Target-side configuration
225  *    host_ce_config           - Host-side configuration
226    ============================================================================
227    Purpose    | Service / Endpoint   | CE   | Dire | Xfer     | Xfer
228  |                      |      | ctio | Size     | Frequency
229  |                      |      | n    |          |
230    ============================================================================
231    tx         | HTT_DATA (downlink)  | CE 0 | h->t | medium - | very frequent
232    descriptor |                      |      |      | O(100B)  | and regular
233    download   |                      |      |      |          |
234    ----------------------------------------------------------------------------
235    rx         | HTT_DATA (uplink)    | CE 1 | t->h | small -  | frequent and
236    indication |                      |      |      | O(10B)   | regular
237    upload     |                      |      |      |          |
238    ----------------------------------------------------------------------------
239    MSDU       | DATA_BK (uplink)     | CE 2 | t->h | large -  | rare
240    upload     |                      |      |      | O(1000B) | (frequent
241    e.g. noise |                      |      |      |          | during IP1.0
242    packets    |                      |      |      |          | testing)
243    ----------------------------------------------------------------------------
244    MSDU       | DATA_BK (downlink)   | CE 3 | h->t | large -  | very rare
245    download   |                      |      |      | O(1000B) | (frequent
246    e.g.       |                      |      |      |          | during IP1.0
247    misdirecte |                      |      |      |          | testing)
248    d EAPOL    |                      |      |      |          |
249    packets    |                      |      |      |          |
250    ----------------------------------------------------------------------------
251    n/a        | DATA_BE, DATA_VI     | CE 2 | t->h |          | never(?)
252  | DATA_VO (uplink)     |      |      |          |
253    ----------------------------------------------------------------------------
254    n/a        | DATA_BE, DATA_VI     | CE 3 | h->t |          | never(?)
255  | DATA_VO (downlink)   |      |      |          |
256    ----------------------------------------------------------------------------
257    WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
258  |                      |      |      | O(100B)  |
259    ----------------------------------------------------------------------------
260    WMI        | WMI_CONTROL          | CE 5 | h->t | medium - | infrequent
261    messages   | (downlink)           |      |      | O(100B)  |
262  |                      |      |      |          |
263    ----------------------------------------------------------------------------
264    n/a        | HTC_CTRL_RSVD,       | CE 1 | t->h |          | never(?)
265  | HTC_RAW_STREAMS      |      |      |          |
266  | (uplink)             |      |      |          |
267    ----------------------------------------------------------------------------
268    n/a        | HTC_CTRL_RSVD,       | CE 0 | h->t |          | never(?)
269  | HTC_RAW_STREAMS      |      |      |          |
270  | (downlink)           |      |      |          |
271    ----------------------------------------------------------------------------
272    diag       | none (raw CE)        | CE 7 | t<>h |    4     | Diag Window
273  |                      |      |      |          | infrequent
274    ============================================================================
275  */
276 
277 /*
278  * Map from service/endpoint to Copy Engine.
279  * This table is derived from the CE_PCI TABLE, above.
280  * It is passed to the Target at startup for use by firmware.
281  */
282 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
283 	{
284 		WMI_DATA_VO_SVC,
285 		PIPEDIR_OUT,    /* out = UL = host -> target */
286 		3,
287 	},
288 	{
289 		WMI_DATA_VO_SVC,
290 		PIPEDIR_IN,     /* in = DL = target -> host */
291 		2,
292 	},
293 	{
294 		WMI_DATA_BK_SVC,
295 		PIPEDIR_OUT,    /* out = UL = host -> target */
296 		3,
297 	},
298 	{
299 		WMI_DATA_BK_SVC,
300 		PIPEDIR_IN,     /* in = DL = target -> host */
301 		2,
302 	},
303 	{
304 		WMI_DATA_BE_SVC,
305 		PIPEDIR_OUT,    /* out = UL = host -> target */
306 		3,
307 	},
308 	{
309 		WMI_DATA_BE_SVC,
310 		PIPEDIR_IN,     /* in = DL = target -> host */
311 		2,
312 	},
313 	{
314 		WMI_DATA_VI_SVC,
315 		PIPEDIR_OUT,    /* out = UL = host -> target */
316 		3,
317 	},
318 	{
319 		WMI_DATA_VI_SVC,
320 		PIPEDIR_IN,     /* in = DL = target -> host */
321 		2,
322 	},
323 	{
324 		WMI_CONTROL_SVC,
325 		PIPEDIR_OUT,    /* out = UL = host -> target */
326 		3,
327 	},
328 	{
329 		WMI_CONTROL_SVC,
330 		PIPEDIR_IN,     /* in = DL = target -> host */
331 		2,
332 	},
333 	{
334 		HTC_CTRL_RSVD_SVC,
335 		PIPEDIR_OUT,    /* out = UL = host -> target */
336 		0,              /* could be moved to 3 (share with WMI) */
337 	},
338 	{
339 		HTC_CTRL_RSVD_SVC,
340 		PIPEDIR_IN,     /* in = DL = target -> host */
341 		2,
342 	},
343 	{
344 		HTC_RAW_STREAMS_SVC, /* not currently used */
345 		PIPEDIR_OUT,    /* out = UL = host -> target */
346 		0,
347 	},
348 	{
349 		HTC_RAW_STREAMS_SVC, /* not currently used */
350 		PIPEDIR_IN,     /* in = DL = target -> host */
351 		2,
352 	},
353 	{
354 		HTT_DATA_MSG_SVC,
355 		PIPEDIR_OUT,    /* out = UL = host -> target */
356 		4,
357 	},
358 	{
359 		HTT_DATA_MSG_SVC,
360 		PIPEDIR_IN,     /* in = DL = target -> host */
361 		1,
362 	},
363 	{
364 		WDI_IPA_TX_SVC,
365 		PIPEDIR_OUT,    /* out = UL = host -> target */
366 		5,
367 	},
368 #if defined(QCA_WIFI_3_0_ADRASTEA)
369 	{
370 		HTT_DATA2_MSG_SVC,
371 		PIPEDIR_IN,    /* in = DL = target -> host */
372 		9,
373 	},
374 	{
375 		HTT_DATA3_MSG_SVC,
376 		PIPEDIR_IN,    /* in = DL = target -> host */
377 		10,
378 	},
379 	{
380 		PACKET_LOG_SVC,
381 		PIPEDIR_IN,    /* in = DL = target -> host */
382 		11,
383 	},
384 #endif
385 	/* (Additions here) */
386 
387 	{                       /* Must be last */
388 		0,
389 		0,
390 		0,
391 	},
392 };
393 
394 /* PIPEDIR_OUT = HOST to Target */
395 /* PIPEDIR_IN  = TARGET to HOST */
396 #if (defined(QCA_WIFI_QCA8074))
397 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
398 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
399 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
400 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
401 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
402 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
403 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
404 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
405 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
406 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
407 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
408 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
409 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
410 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
411 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
412 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
413 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
414 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
415 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
416 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
417 	/* (Additions here) */
418 	{ 0, 0, 0, },
419 };
420 #else
421 static struct service_to_pipe target_service_to_ce_map_qca8074[] = {
422 };
423 #endif
424 
425 #if (defined(QCA_WIFI_QCA8074V2))
426 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
427 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
428 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
429 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
430 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
431 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
432 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
433 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
434 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
435 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
436 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
437 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
438 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
439 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_OUT, 9},
440 	{ WMI_CONTROL_SVC_WMAC2, PIPEDIR_IN, 2},
441 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
442 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
443 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
444 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
445 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
446 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
447 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
448 	/* (Additions here) */
449 	{ 0, 0, 0, },
450 };
451 #else
452 static struct service_to_pipe target_service_to_ce_map_qca8074_v2[] = {
453 };
454 #endif
455 
456 #if (defined(QCA_WIFI_QCA6018))
457 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
458 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
459 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
460 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
461 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
462 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
463 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
464 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
465 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
466 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
467 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
468 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
469 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
470 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
471 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 1, },
472 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0},
473 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 1 },
474 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
475 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
476 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
477 	/* (Additions here) */
478 	{ 0, 0, 0, },
479 };
480 #else
481 static struct service_to_pipe target_service_to_ce_map_qca6018[] = {
482 };
483 #endif
484 
485 /* PIPEDIR_OUT = HOST to Target */
486 /* PIPEDIR_IN  = TARGET to HOST */
487 #ifdef QCN7605_SUPPORT
488 static struct service_to_pipe target_service_to_ce_map_qcn7605[] = {
489 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 0, },
490 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
491 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 0, },
492 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
493 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 0, },
494 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
495 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 0, },
496 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
497 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 0, },
498 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
499 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
500 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
501 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0, },
502 	{ HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2, },
503 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
504 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
505 	{ HTT_DATA2_MSG_SVC, PIPEDIR_IN, 3, },
506 #ifdef IPA_OFFLOAD
507 	{ WDI_IPA_TX_SVC, PIPEDIR_OUT, 5, },
508 #else
509 	{ HTT_DATA3_MSG_SVC, PIPEDIR_IN, 8, },
510 #endif
511 	{ PACKET_LOG_SVC, PIPEDIR_IN, 7, },
512 	/* (Additions here) */
513 	{ 0, 0, 0, },
514 };
515 #endif
516 
517 #if (defined(QCA_WIFI_QCA6290))
518 #ifdef QCA_6290_AP_MODE
519 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
520 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
521 	{ WMI_DATA_VO_SVC, PIPEDIR_IN , 2, },
522 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
523 	{ WMI_DATA_BK_SVC, PIPEDIR_IN , 2, },
524 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
525 	{ WMI_DATA_BE_SVC, PIPEDIR_IN , 2, },
526 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
527 	{ WMI_DATA_VI_SVC, PIPEDIR_IN , 2, },
528 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
529 	{ WMI_CONTROL_SVC, PIPEDIR_IN , 2, },
530 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
531 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN , 2, },
532 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
533 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN , 1, },
534 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_OUT, 7},
535 	{ WMI_CONTROL_SVC_WMAC1, PIPEDIR_IN, 2},
536 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
537 	/* (Additions here) */
538 	{ 0, 0, 0, },
539 };
540 #else
541 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
542 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
543 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
544 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
545 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
546 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
547 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
548 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
549 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
550 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
551 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
552 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
553 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
554 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
555 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
556 	/* (Additions here) */
557 	{ 0, 0, 0, },
558 };
559 #endif
560 #else
561 static struct service_to_pipe target_service_to_ce_map_qca6290[] = {
562 };
563 #endif
564 
565 #if (defined(QCA_WIFI_QCA6390))
566 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
567 	{ WMI_DATA_VO_SVC, PIPEDIR_OUT, 3, },
568 	{ WMI_DATA_VO_SVC, PIPEDIR_IN, 2, },
569 	{ WMI_DATA_BK_SVC, PIPEDIR_OUT, 3, },
570 	{ WMI_DATA_BK_SVC, PIPEDIR_IN, 2, },
571 	{ WMI_DATA_BE_SVC, PIPEDIR_OUT, 3, },
572 	{ WMI_DATA_BE_SVC, PIPEDIR_IN, 2, },
573 	{ WMI_DATA_VI_SVC, PIPEDIR_OUT, 3, },
574 	{ WMI_DATA_VI_SVC, PIPEDIR_IN, 2, },
575 	{ WMI_CONTROL_SVC, PIPEDIR_OUT, 3, },
576 	{ WMI_CONTROL_SVC, PIPEDIR_IN, 2, },
577 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0, },
578 	{ HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2, },
579 	{ HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4, },
580 	{ HTT_DATA_MSG_SVC, PIPEDIR_IN, 1, },
581 	{ PACKET_LOG_SVC, PIPEDIR_IN, 5, },
582 	/* (Additions here) */
583 	{ 0, 0, 0, },
584 };
585 #else
586 static struct service_to_pipe target_service_to_ce_map_qca6390[] = {
587 };
588 #endif
589 
590 static struct service_to_pipe target_service_to_ce_map_ar900b[] = {
591 	{
592 		WMI_DATA_VO_SVC,
593 		PIPEDIR_OUT,    /* out = UL = host -> target */
594 		3,
595 	},
596 	{
597 		WMI_DATA_VO_SVC,
598 		PIPEDIR_IN,     /* in = DL = target -> host */
599 		2,
600 	},
601 	{
602 		WMI_DATA_BK_SVC,
603 		PIPEDIR_OUT,    /* out = UL = host -> target */
604 		3,
605 	},
606 	{
607 		WMI_DATA_BK_SVC,
608 		PIPEDIR_IN,     /* in = DL = target -> host */
609 		2,
610 	},
611 	{
612 		WMI_DATA_BE_SVC,
613 		PIPEDIR_OUT,    /* out = UL = host -> target */
614 		3,
615 	},
616 	{
617 		WMI_DATA_BE_SVC,
618 		PIPEDIR_IN,     /* in = DL = target -> host */
619 		2,
620 	},
621 	{
622 		WMI_DATA_VI_SVC,
623 		PIPEDIR_OUT,    /* out = UL = host -> target */
624 		3,
625 	},
626 	{
627 		WMI_DATA_VI_SVC,
628 		PIPEDIR_IN,     /* in = DL = target -> host */
629 		2,
630 	},
631 	{
632 		WMI_CONTROL_SVC,
633 		PIPEDIR_OUT,    /* out = UL = host -> target */
634 		3,
635 	},
636 	{
637 		WMI_CONTROL_SVC,
638 		PIPEDIR_IN,     /* in = DL = target -> host */
639 		2,
640 	},
641 	{
642 		HTC_CTRL_RSVD_SVC,
643 		PIPEDIR_OUT,    /* out = UL = host -> target */
644 		0,              /* could be moved to 3 (share with WMI) */
645 	},
646 	{
647 		HTC_CTRL_RSVD_SVC,
648 		PIPEDIR_IN,     /* in = DL = target -> host */
649 		1,
650 	},
651 	{
652 		HTC_RAW_STREAMS_SVC, /* not currently used */
653 		PIPEDIR_OUT,    /* out = UL = host -> target */
654 		0,
655 	},
656 	{
657 		HTC_RAW_STREAMS_SVC, /* not currently used */
658 		PIPEDIR_IN,     /* in = DL = target -> host */
659 		1,
660 	},
661 	{
662 		HTT_DATA_MSG_SVC,
663 		PIPEDIR_OUT,    /* out = UL = host -> target */
664 		4,
665 	},
666 #ifdef WLAN_FEATURE_FASTPATH
667 	{
668 		HTT_DATA_MSG_SVC,
669 		PIPEDIR_IN,     /* in = DL = target -> host */
670 		5,
671 	},
672 #else /* WLAN_FEATURE_FASTPATH */
673 	{
674 		HTT_DATA_MSG_SVC,
675 		PIPEDIR_IN,  /* in = DL = target -> host */
676 		1,
677 	},
678 #endif /* WLAN_FEATURE_FASTPATH */
679 
680 	/* (Additions here) */
681 
682 	{                       /* Must be last */
683 		0,
684 		0,
685 		0,
686 	},
687 };
688 
689 static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
690 static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
691 
692 #ifdef WLAN_FEATURE_EPPING
693 static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
694 	{WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
695 	{WMI_DATA_VO_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
696 	{WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,},     /* out = UL = host -> target */
697 	{WMI_DATA_BK_SVC, PIPEDIR_IN, 1,},      /* in = DL = target -> host */
698 	{WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
699 	{WMI_DATA_BE_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
700 	{WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
701 	{WMI_DATA_VI_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
702 	{WMI_CONTROL_SVC, PIPEDIR_OUT, 3,},     /* out = UL = host -> target */
703 	{WMI_CONTROL_SVC, PIPEDIR_IN, 2,},      /* in = DL = target -> host */
704 	{HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,},   /* out = UL = host -> target */
705 	{HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,},    /* in = DL = target -> host */
706 	{HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
707 	{HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,},  /* in = DL = target -> host */
708 	{HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,},    /* out = UL = host -> target */
709 	{HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,},     /* in = DL = target -> host */
710 	{0, 0, 0,},             /* Must be last */
711 };
712 
713 void hif_select_epping_service_to_pipe_map(struct service_to_pipe
714 					   **tgt_svc_map_to_use,
715 					   uint32_t *sz_tgt_svc_map_to_use)
716 {
717 	*tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
718 	*sz_tgt_svc_map_to_use =
719 			sizeof(target_service_to_ce_map_wlan_epping);
720 }
721 #endif
722 
723 #ifdef QCN7605_SUPPORT
724 static inline
725 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
726 			       uint32_t *sz_tgt_svc_map_to_use)
727 {
728 	*tgt_svc_map_to_use = target_service_to_ce_map_qcn7605;
729 	*sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_qcn7605);
730 }
731 #else
732 static inline
733 void hif_select_ce_map_qcn7605(struct service_to_pipe **tgt_svc_map_to_use,
734 			       uint32_t *sz_tgt_svc_map_to_use)
735 {
736 	HIF_ERROR("%s: QCN7605 not supported", __func__);
737 }
738 #endif
739 
740 static void hif_select_service_to_pipe_map(struct hif_softc *scn,
741 				    struct service_to_pipe **tgt_svc_map_to_use,
742 				    uint32_t *sz_tgt_svc_map_to_use)
743 {
744 	uint32_t mode = hif_get_conparam(scn);
745 	struct hif_target_info *tgt_info = &scn->target_info;
746 
747 	if (QDF_IS_EPPING_ENABLED(mode)) {
748 		hif_select_epping_service_to_pipe_map(tgt_svc_map_to_use,
749 						      sz_tgt_svc_map_to_use);
750 	} else {
751 		switch (tgt_info->target_type) {
752 		default:
753 			*tgt_svc_map_to_use = target_service_to_ce_map_wlan;
754 			*sz_tgt_svc_map_to_use =
755 				sizeof(target_service_to_ce_map_wlan);
756 			break;
757 		case TARGET_TYPE_QCN7605:
758 			hif_select_ce_map_qcn7605(tgt_svc_map_to_use,
759 						  sz_tgt_svc_map_to_use);
760 			break;
761 		case TARGET_TYPE_AR900B:
762 		case TARGET_TYPE_QCA9984:
763 		case TARGET_TYPE_IPQ4019:
764 		case TARGET_TYPE_QCA9888:
765 		case TARGET_TYPE_AR9888:
766 		case TARGET_TYPE_AR9888V2:
767 			*tgt_svc_map_to_use = target_service_to_ce_map_ar900b;
768 			*sz_tgt_svc_map_to_use =
769 				sizeof(target_service_to_ce_map_ar900b);
770 			break;
771 		case TARGET_TYPE_QCA6290:
772 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6290;
773 			*sz_tgt_svc_map_to_use =
774 				sizeof(target_service_to_ce_map_qca6290);
775 			break;
776 		case TARGET_TYPE_QCA6390:
777 			*tgt_svc_map_to_use = target_service_to_ce_map_qca6390;
778 			*sz_tgt_svc_map_to_use =
779 				sizeof(target_service_to_ce_map_qca6390);
780 			break;
781 		case TARGET_TYPE_QCA8074:
782 			*tgt_svc_map_to_use = target_service_to_ce_map_qca8074;
783 			*sz_tgt_svc_map_to_use =
784 				sizeof(target_service_to_ce_map_qca8074);
785 			break;
786 		case TARGET_TYPE_QCA8074V2:
787 			*tgt_svc_map_to_use =
788 				target_service_to_ce_map_qca8074_v2;
789 			*sz_tgt_svc_map_to_use =
790 				sizeof(target_service_to_ce_map_qca8074_v2);
791 			break;
792 		case TARGET_TYPE_QCA6018:
793 			*tgt_svc_map_to_use =
794 				target_service_to_ce_map_qca6018;
795 			*sz_tgt_svc_map_to_use =
796 				sizeof(target_service_to_ce_map_qca6018);
797 			break;
798 		}
799 	}
800 }
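
/*
 * Illustrative sketch (not compiled into the driver): how a caller such as
 * hif_map_service_to_pipe() can walk the selected service-to-pipe table to
 * find the uplink (host -> target) CE for a given HTC service. The helper
 * name and the -1 "not found" convention are hypothetical.
 */
#if 0
static int example_find_ul_pipe(struct hif_softc *scn, uint32_t service_id)
{
	struct service_to_pipe *svc_map;
	uint32_t map_sz, map_len, i;

	hif_select_service_to_pipe_map(scn, &svc_map, &map_sz);
	map_len = map_sz / sizeof(struct service_to_pipe);

	for (i = 0; i < map_len; i++) {
		if (svc_map[i].service_id == service_id &&
		    svc_map[i].pipedir == PIPEDIR_OUT)
			return svc_map[i].pipenum;
	}

	return -1;	/* service not mapped to an uplink pipe */
}
#endif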
801 
802 /**
803  * ce_mark_datapath() - marks the ce_state->htt_rx_data accordingly
804  * @ce_state: pointer to the state context of the CE
805  *
806  * Description:
807  *   Sets the htt_rx_data attribute of the state structure if the
808  *   CE serves one of the HTT DATA services.
809  *
810  * Return:
811  *  true if the CE serves one of the HTT DATA services,
812  *  false otherwise
813  */
814 static bool ce_mark_datapath(struct CE_state *ce_state)
815 {
816 	struct service_to_pipe *svc_map;
817 	uint32_t map_sz, map_len;
818 	int    i;
819 	bool   rc = false;
820 
821 	if (ce_state) {
822 		hif_select_service_to_pipe_map(ce_state->scn, &svc_map,
823 					       &map_sz);
824 
825 		map_len = map_sz / sizeof(struct service_to_pipe);
826 		for (i = 0; i < map_len; i++) {
827 			if ((svc_map[i].pipenum == ce_state->id) &&
828 			    ((svc_map[i].service_id == HTT_DATA_MSG_SVC)  ||
829 			     (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
830 			     (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
831 				/* HTT CEs are unidirectional */
832 				if (svc_map[i].pipedir == PIPEDIR_IN)
833 					ce_state->htt_rx_data = true;
834 				else
835 					ce_state->htt_tx_data = true;
836 				rc = true;
837 			}
838 		}
839 	}
840 	return rc;
841 }
842 
843 /**
844  * ce_ring_test_initial_indexes() - tests the initial ce ring indexes
845  * @ce_id: ce in question
846  * @ring: ring state being examined
847  * @type: "src_ring" or "dest_ring" string for identifying the ring
848  *
849  * Warns on non-zero index values.
850  * Causes a kernel panic if the ring is not empty durring initialization.
851  */
852 static void ce_ring_test_initial_indexes(int ce_id, struct CE_ring_state *ring,
853 					 char *type)
854 {
855 	if (ring->write_index != 0 || ring->sw_index != 0)
856 		HIF_ERROR("ce %d, %s, initial sw_index = %d, initial write_index =%d",
857 			  ce_id, type, ring->sw_index, ring->write_index);
858 	if (ring->write_index != ring->sw_index)
859 		QDF_BUG(0);
860 }
861 
862 #ifdef IPA_OFFLOAD
863 /**
864  * ce_alloc_desc_ring() - Allocate copyengine descriptor ring
865  * @scn: softc instance
866  * @ce_id: ce in question
867  * @base_addr: pointer to copyengine ring base address
868  * @ce_ring: copyengine instance
869  * @nentries: number of entries to be allocated
870  * @desc_size: ce desc size
871  *
872  * Return: QDF_STATUS_SUCCESS - for success
873  */
874 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
875 				     qdf_dma_addr_t *base_addr,
876 				     struct CE_ring_state *ce_ring,
877 				     unsigned int nentries, uint32_t desc_size)
878 {
879 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
880 	    !ce_srng_based(scn)) {
881 		if (!scn->ipa_ce_ring) {
882 			scn->ipa_ce_ring = qdf_mem_shared_mem_alloc(
883 				scn->qdf_dev,
884 				nentries * desc_size + CE_DESC_RING_ALIGN);
885 			if (!scn->ipa_ce_ring) {
886 				HIF_ERROR(
887 				"%s: Failed to allocate memory for IPA ce ring",
888 				__func__);
889 				return QDF_STATUS_E_NOMEM;
890 			}
891 		}
892 		*base_addr = qdf_mem_get_dma_addr(scn->qdf_dev,
893 						&scn->ipa_ce_ring->mem_info);
894 		ce_ring->base_addr_owner_space_unaligned =
895 						scn->ipa_ce_ring->vaddr;
896 	} else {
897 		ce_ring->base_addr_owner_space_unaligned =
898 			qdf_mem_alloc_consistent(scn->qdf_dev,
899 						 scn->qdf_dev->dev,
900 						 (nentries * desc_size +
901 						 CE_DESC_RING_ALIGN),
902 						 base_addr);
903 		if (!ce_ring->base_addr_owner_space_unaligned) {
904 			HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
905 				  __func__, CE_id);
906 			return QDF_STATUS_E_NOMEM;
907 		}
908 	}
909 	return QDF_STATUS_SUCCESS;
910 }
911 
912 /**
913  * ce_free_desc_ring() - Frees copyengine descriptor ring
914  * @scn: softc instance
915  * @ce_id: ce in question
916  * @ce_ring: copyengine instance
917  * @desc_size: ce desc size
918  *
919  * Return: None
920  */
921 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
922 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
923 {
924 	if ((CE_id == HIF_PCI_IPA_UC_ASSIGNED_CE) &&
925 	    !ce_srng_based(scn)) {
926 		if (scn->ipa_ce_ring) {
927 			qdf_mem_shared_mem_free(scn->qdf_dev,
928 						scn->ipa_ce_ring);
929 			scn->ipa_ce_ring = NULL;
930 		}
931 		ce_ring->base_addr_owner_space_unaligned = NULL;
932 	} else {
933 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
934 			ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
935 			ce_ring->base_addr_owner_space_unaligned,
936 			ce_ring->base_addr_CE_space, 0);
937 		ce_ring->base_addr_owner_space_unaligned = NULL;
938 	}
939 }
940 #else
941 static QDF_STATUS ce_alloc_desc_ring(struct hif_softc *scn, unsigned int CE_id,
942 				     qdf_dma_addr_t *base_addr,
943 				     struct CE_ring_state *ce_ring,
944 				     unsigned int nentries, uint32_t desc_size)
945 {
946 	ce_ring->base_addr_owner_space_unaligned =
947 		qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
948 					 (nentries * desc_size +
949 					 CE_DESC_RING_ALIGN), base_addr);
950 	if (!ce_ring->base_addr_owner_space_unaligned) {
951 		HIF_ERROR("%s: Failed to allocate DMA memory for ce ring id : %u",
952 			  __func__, CE_id);
953 		return QDF_STATUS_E_NOMEM;
954 	}
955 	return QDF_STATUS_SUCCESS;
956 }
957 
958 static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
959 			      struct CE_ring_state *ce_ring, uint32_t desc_size)
960 {
961 	qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
962 		ce_ring->nentries * desc_size + CE_DESC_RING_ALIGN,
963 		ce_ring->base_addr_owner_space_unaligned,
964 		ce_ring->base_addr_CE_space, 0);
965 	ce_ring->base_addr_owner_space_unaligned = NULL;
966 }
967 #endif /* IPA_OFFLOAD */
968 
969 /*
970  * TODO: Need to explore the possibility of having this as part of a
971  * target context instead of a global array.
972  */
973 static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);
974 
975 void ce_service_register_module(enum ce_target_type target_type,
976 				struct ce_ops* (*ce_attach)(void))
977 {
978 	if (target_type < CE_MAX_TARGET_TYPE)
979 		ce_attach_register[target_type] = ce_attach;
980 }
981 
982 qdf_export_symbol(ce_service_register_module);
983 
984 /**
985  * ce_srng_based() - Does this target use srng
986  * @scn: pointer to the hif context
987  *
988  * Description:
989  *   returns true if the target is SRNG based
990  *
991  * Return:
992  *  true if the target is SRNG based,
993  *  false otherwise
994  */
995 bool ce_srng_based(struct hif_softc *scn)
996 {
997 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
998 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
999 
1000 	switch (tgt_info->target_type) {
1001 	case TARGET_TYPE_QCA8074:
1002 	case TARGET_TYPE_QCA8074V2:
1003 	case TARGET_TYPE_QCA6290:
1004 	case TARGET_TYPE_QCA6390:
1005 	case TARGET_TYPE_QCA6018:
1006 		return true;
1007 	default:
1008 		return false;
1009 	}
1010 	return false;
1011 }
1012 qdf_export_symbol(ce_srng_based);
1013 
1014 #ifdef QCA_WIFI_SUPPORT_SRNG
1015 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1016 {
1017 	struct ce_ops *ops = NULL;
1018 
1019 	if (ce_srng_based(scn)) {
1020 		if (ce_attach_register[CE_SVC_SRNG])
1021 			ops = ce_attach_register[CE_SVC_SRNG]();
1022 	} else if (ce_attach_register[CE_SVC_LEGACY]) {
1023 		ops = ce_attach_register[CE_SVC_LEGACY]();
1024 	}
1025 
1026 	return ops;
1027 }
1028 
1029 
1030 #else	/* QCA_LITHIUM */
1031 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
1032 {
1033 	if (ce_attach_register[CE_SVC_LEGACY])
1034 		return ce_attach_register[CE_SVC_LEGACY]();
1035 
1036 	return NULL;
1037 }
1038 #endif /* QCA_LITHIUM */
1039 
1040 static void hif_prepare_hal_shadow_register_cfg(struct hif_softc *scn,
1041 		struct pld_shadow_reg_v2_cfg **shadow_config,
1042 		int *num_shadow_registers_configured) {
1043 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1044 
1045 	return hif_state->ce_services->ce_prepare_shadow_register_v2_cfg(
1046 			scn, shadow_config, num_shadow_registers_configured);
1047 }
1048 
1049 static inline uint32_t ce_get_desc_size(struct hif_softc *scn,
1050 						uint8_t ring_type)
1051 {
1052 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1053 
1054 	return hif_state->ce_services->ce_get_desc_size(ring_type);
1055 }
1056 
1057 
1058 static struct CE_ring_state *ce_alloc_ring_state(struct CE_state *CE_state,
1059 		uint8_t ring_type, uint32_t nentries)
1060 {
1061 	uint32_t ce_nbytes;
1062 	char *ptr;
1063 	qdf_dma_addr_t base_addr;
1064 	struct CE_ring_state *ce_ring;
1065 	uint32_t desc_size;
1066 	struct hif_softc *scn = CE_state->scn;
1067 
1068 	ce_nbytes = sizeof(struct CE_ring_state)
1069 		+ (nentries * sizeof(void *));
1070 	ptr = qdf_mem_malloc(ce_nbytes);
1071 	if (!ptr)
1072 		return NULL;
1073 
1074 	ce_ring = (struct CE_ring_state *)ptr;
1075 	ptr += sizeof(struct CE_ring_state);
1076 	ce_ring->nentries = nentries;
1077 	ce_ring->nentries_mask = nentries - 1;
1078 
1079 	ce_ring->low_water_mark_nentries = 0;
1080 	ce_ring->high_water_mark_nentries = nentries;
1081 	ce_ring->per_transfer_context = (void **)ptr;
1082 
1083 	desc_size = ce_get_desc_size(scn, ring_type);
1084 
1085 	/* Legacy platforms that do not support cache
1086 	 * coherent DMA are unsupported
1087 	 */
1088 	if (ce_alloc_desc_ring(scn, CE_state->id, &base_addr,
1089 			       ce_ring, nentries,
1090 			       desc_size) !=
1091 	    QDF_STATUS_SUCCESS) {
1092 		HIF_ERROR("%s: ring has no DMA mem",
1093 				__func__);
1094 		qdf_mem_free(ce_ring);
1095 		return NULL;
1096 	}
1097 	ce_ring->base_addr_CE_space_unaligned = base_addr;
1098 
1099 	/* Initialize memory to 0 to prevent garbage
1100 	 * data from crashing the system when
1101 	 * downloading firmware
1102 	 */
1103 	qdf_mem_zero(ce_ring->base_addr_owner_space_unaligned,
1104 			nentries * desc_size +
1105 			CE_DESC_RING_ALIGN);
1106 
1107 	if (ce_ring->base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN - 1)) {
1108 
1109 		ce_ring->base_addr_CE_space =
1110 			(ce_ring->base_addr_CE_space_unaligned +
1111 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1);
1112 
1113 		ce_ring->base_addr_owner_space = (void *)
1114 			(((size_t) ce_ring->base_addr_owner_space_unaligned +
1115 			 CE_DESC_RING_ALIGN - 1) & ~(CE_DESC_RING_ALIGN - 1));
1116 	} else {
1117 		ce_ring->base_addr_CE_space =
1118 				ce_ring->base_addr_CE_space_unaligned;
1119 		ce_ring->base_addr_owner_space =
1120 				ce_ring->base_addr_owner_space_unaligned;
1121 	}
1122 
1123 	return ce_ring;
1124 }
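
/*
 * Illustrative sketch (not compiled into the driver): the power-of-2
 * alignment arithmetic applied above to the CE ring base addresses,
 * assuming CE_DESC_RING_ALIGN is a power of 2. For example, with an
 * alignment of 8, 0x1003 rounds up to 0x1008 and 0x1008 is unchanged.
 * The helper name is hypothetical.
 */
#if 0
static qdf_dma_addr_t example_align_ring_base(qdf_dma_addr_t unaligned)
{
	return (unaligned + CE_DESC_RING_ALIGN - 1) &
	       ~((qdf_dma_addr_t)CE_DESC_RING_ALIGN - 1);
}
#endif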
1125 
1126 static int ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
1127 			uint32_t ce_id, struct CE_ring_state *ring,
1128 			struct CE_attr *attr)
1129 {
1130 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1131 
1132 	return hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id,
1133 					      ring, attr);
1134 }
1135 
1136 int hif_ce_bus_early_suspend(struct hif_softc *scn)
1137 {
1138 	uint8_t ul_pipe, dl_pipe;
1139 	int ce_id, status, ul_is_polled, dl_is_polled;
1140 	struct CE_state *ce_state;
1141 
1142 	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
1143 					 &ul_pipe, &dl_pipe,
1144 					 &ul_is_polled, &dl_is_polled);
1145 	if (status) {
1146 		HIF_ERROR("%s: pipe_mapping failure", __func__);
1147 		return status;
1148 	}
1149 
1150 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1151 		if (ce_id == ul_pipe)
1152 			continue;
1153 		if (ce_id == dl_pipe)
1154 			continue;
1155 
1156 		ce_state = scn->ce_id_to_state[ce_id];
1157 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1158 		if (ce_state->state == CE_RUNNING)
1159 			ce_state->state = CE_PAUSED;
1160 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1161 	}
1162 
1163 	return status;
1164 }
1165 
1166 int hif_ce_bus_late_resume(struct hif_softc *scn)
1167 {
1168 	int ce_id;
1169 	struct CE_state *ce_state;
1170 	int write_index = 0;
1171 	bool index_updated;
1172 
1173 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1174 		ce_state = scn->ce_id_to_state[ce_id];
1175 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
1176 		if (ce_state->state == CE_PENDING) {
1177 			write_index = ce_state->src_ring->write_index;
1178 			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
1179 					write_index);
1180 			ce_state->state = CE_RUNNING;
1181 			index_updated = true;
1182 		} else {
1183 			index_updated = false;
1184 		}
1185 
1186 		if (ce_state->state == CE_PAUSED)
1187 			ce_state->state = CE_RUNNING;
1188 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1189 
1190 		if (index_updated)
1191 			hif_record_ce_desc_event(scn, ce_id,
1192 				RESUME_WRITE_INDEX_UPDATE,
1193 				NULL, NULL, write_index, 0);
1194 	}
1195 
1196 	return 0;
1197 }
1198 
1199 /**
1200  * ce_oom_recovery() - try to recover rx ce from oom condition
1201  * @context: CE_state of the CE with oom rx ring
1202  *
1203  * the executing work will continue to be rescheduled until
1204  * at least 1 descriptor is successfully posted to the rx ring.
1205  *
1206  * Return: none
1207  */
1208 static void ce_oom_recovery(void *context)
1209 {
1210 	struct CE_state *ce_state = context;
1211 	struct hif_softc *scn = ce_state->scn;
1212 	struct HIF_CE_state *ce_softc = HIF_GET_CE_STATE(scn);
1213 	struct HIF_CE_pipe_info *pipe_info =
1214 		&ce_softc->pipe_info[ce_state->id];
1215 
1216 	hif_post_recv_buffers_for_pipe(pipe_info);
1217 }
1218 
1219 #ifdef HIF_CE_DEBUG_DATA_BUF
1220 /**
1221  * alloc_mem_ce_debug_hist_data() - Allocate mem for the data pointed to by
1222  * the CE descriptors.
1223  * Allocate HIF_CE_HISTORY_MAX records of CE_DEBUG_MAX_DATA_BUF_SIZE each.
1224  * @scn: hif scn handle
1225  * @ce_id: Copy Engine Id
1226  *
1227  * Return: QDF_STATUS
1228  */
1229 QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1230 {
1231 	struct hif_ce_desc_event *event = NULL;
1232 	struct hif_ce_desc_event *hist_ev = NULL;
1233 	uint32_t index = 0;
1234 
1235 	hist_ev =
1236 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1237 
1238 	if (!hist_ev)
1239 		return QDF_STATUS_E_NOMEM;
1240 
1241 	scn->hif_ce_desc_hist.data_enable[ce_id] = true;
1242 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1243 		event = &hist_ev[index];
1244 		event->data =
1245 			(uint8_t *)qdf_mem_malloc(CE_DEBUG_MAX_DATA_BUF_SIZE);
1246 		if (!event->data)
1247 			return QDF_STATUS_E_NOMEM;
1248 	}
1249 	return QDF_STATUS_SUCCESS;
1250 }
1251 
1252 /**
1253  * free_mem_ce_debug_hist_data() - Free mem of the data pointed to by
1254  * the CE descriptors.
1255  * @scn: hif scn handle
1256  * @ce_id: Copy Engine Id
1257  *
1258  * Return: None
1259  */
1260 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
1261 {
1262 	struct hif_ce_desc_event *event = NULL;
1263 	struct hif_ce_desc_event *hist_ev = NULL;
1264 	uint32_t index = 0;
1265 
1266 	hist_ev =
1267 	(struct hif_ce_desc_event *)scn->hif_ce_desc_hist.hist_ev[ce_id];
1268 
1269 	if (!hist_ev)
1270 		return;
1271 
1272 	for (index = 0; index < HIF_CE_HISTORY_MAX; index++) {
1273 		event = &hist_ev[index];
1274 		if (event->data)
1275 			qdf_mem_free(event->data);
1276 		event->data = NULL;
1277 		event = NULL;
1278 	}
1279 }
1280 #endif /* HIF_CE_DEBUG_DATA_BUF */
1281 
1282 #if defined(CONFIG_MCL)
1283 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1284 struct hif_ce_desc_event hif_ce_desc_history[CE_COUNT_MAX][HIF_CE_HISTORY_MAX];
1285 
1286 /**
1287  * alloc_mem_ce_debug_history() - Allocate CE descriptor history
1288  * @scn: hif scn handle
1289  * @ce_id: Copy Engine Id
1290  * @src_nentries: source ce ring entries
1291  * Return: QDF_STATUS
1292  */
1293 static QDF_STATUS
1294 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id,
1295 			   uint32_t src_nentries)
1296 {
1297 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1298 
1299 	ce_hist->hist_ev[ce_id] = hif_ce_desc_history[ce_id];
1300 	ce_hist->enable[ce_id] = 1;
1301 
1302 	if (src_nentries)
1303 		alloc_mem_ce_debug_hist_data(scn, ce_id);
1304 	else
1305 		ce_hist->data_enable[ce_id] = false;
1306 
1307 	return QDF_STATUS_SUCCESS;
1308 }
1309 
1310 /**
1311  * free_mem_ce_debug_history() - Free CE descriptor history
1312  * @scn: hif scn handle
1313  * @ce_id: Copy Engine Id
1314  *
1315  * Return: None
1316  */
1317 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int ce_id)
1318 {
1319 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1320 
1321 	ce_hist->enable[ce_id] = 0;
1322 	ce_hist->hist_ev[ce_id] = NULL;
1323 	if (ce_hist->data_enable[ce_id]) {
1324 		ce_hist->data_enable[ce_id] = false;
1325 		free_mem_ce_debug_hist_data(scn, ce_id);
1326 	}
1327 }
1328 #else
1329 static inline QDF_STATUS
1330 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1331 			   uint32_t src_nentries)
1332 {
1333 	return QDF_STATUS_SUCCESS;
1334 }
1335 
1336 static inline void
1337 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
1338 #endif /* (HIF_CONFIG_SLUB_DEBUG_ON) || (HIF_CE_DEBUG_DATA_BUF) */
1339 #elif defined(CONFIG_WIN)
1340 #if defined(HIF_CE_DEBUG_DATA_BUF)
1341 
1342 static QDF_STATUS
1343 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1344 			   uint32_t src_nentries)
1345 {
1346 	scn->hif_ce_desc_hist.hist_ev[CE_id] = (struct hif_ce_desc_event *)
1347 	qdf_mem_malloc(HIF_CE_HISTORY_MAX * sizeof(struct hif_ce_desc_event));
1348 
1349 	if (!scn->hif_ce_desc_hist.hist_ev[CE_id]) {
1350 		scn->hif_ce_desc_hist.enable[CE_id] = 0;
1351 		return QDF_STATUS_E_NOMEM;
1352 	} else {
1353 		scn->hif_ce_desc_hist.enable[CE_id] = 1;
1354 		return QDF_STATUS_SUCCESS;
1355 	}
1356 }
1357 
1358 static void free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id)
1359 {
1360 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1361 	struct hif_ce_desc_event *hist_ev = ce_hist->hist_ev[CE_id];
1362 
1363 	if (!hist_ev)
1364 		return;
1365 
1366 	if (ce_hist->data_enable[CE_id]) {
1367 		ce_hist->data_enable[CE_id] = false;
1368 		free_mem_ce_debug_hist_data(scn, CE_id);
1369 	}
1370 
1371 	ce_hist->enable[CE_id] = 0;
1372 	qdf_mem_free(ce_hist->hist_ev[CE_id]);
1373 	ce_hist->hist_ev[CE_id] = NULL;
1374 }
1375 
1376 #else
1377 
1378 static inline QDF_STATUS
1379 alloc_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id,
1380 			   uint32_t src_nentries)
1381 {
1382 	return QDF_STATUS_SUCCESS;
1383 }
1384 
1385 static inline void
1386 free_mem_ce_debug_history(struct hif_softc *scn, unsigned int CE_id) { }
1387 #endif /* HIF_CE_DEBUG_DATA_BUF */
1388 #endif /* CONFIG_MCL */
1389 
1390 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
1391 /**
1392  * reset_ce_debug_history() - reset the index and ce id used for dumping the
1393  * CE records on the console using sysfs.
1394  * @scn: hif scn handle
1395  *
1396  * Return: None
1397  */
1398 static inline void reset_ce_debug_history(struct hif_softc *scn)
1399 {
1400 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
1401 	/* Initialise the CE debug history sysfs interface inputs ce_id and
1402 	 * index. Disable data storing
1403 	 */
1404 	ce_hist->hist_index = 0;
1405 	ce_hist->hist_id = 0;
1406 }
1407 #else /* defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1408 static inline void reset_ce_debug_history(struct hif_softc *scn) { }
1409 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1410 
1411 void ce_enable_polling(void *cestate)
1412 {
1413 	struct CE_state *CE_state = (struct CE_state *)cestate;
1414 
1415 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1416 		CE_state->timer_inited = true;
1417 }
1418 
1419 void ce_disable_polling(void *cestate)
1420 {
1421 	struct CE_state *CE_state = (struct CE_state *)cestate;
1422 
1423 	if (CE_state && CE_state->attr_flags & CE_ATTR_ENABLE_POLL)
1424 		CE_state->timer_inited = false;
1425 }
1426 
1427 /*
1428  * Initialize a Copy Engine based on caller-supplied attributes.
1429  * This may be called once to initialize both source and destination
1430  * rings or it may be called twice for separate source and destination
1431  * initialization. It may be that only one side or the other is
1432  * initialized by software/firmware.
1433  *
1434  * This should be called during the initialization sequence before
1435  * interrupts are enabled, so we don't have to worry about thread safety.
1436  */
1437 struct CE_handle *ce_init(struct hif_softc *scn,
1438 			  unsigned int CE_id, struct CE_attr *attr)
1439 {
1440 	struct CE_state *CE_state;
1441 	uint32_t ctrl_addr;
1442 	unsigned int nentries;
1443 	bool malloc_CE_state = false;
1444 	bool malloc_src_ring = false;
1445 	int status;
1446 
1447 	QDF_ASSERT(CE_id < scn->ce_count);
1448 	ctrl_addr = CE_BASE_ADDRESS(CE_id);
1449 	CE_state = scn->ce_id_to_state[CE_id];
1450 
1451 	if (!CE_state) {
1452 		CE_state =
1453 		    (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
1454 		if (!CE_state)
1455 			return NULL;
1456 
1457 		malloc_CE_state = true;
1458 		qdf_spinlock_create(&CE_state->ce_index_lock);
1459 
1460 		CE_state->id = CE_id;
1461 		CE_state->ctrl_addr = ctrl_addr;
1462 		CE_state->state = CE_RUNNING;
1463 		CE_state->attr_flags = attr->flags;
1464 	}
1465 	CE_state->scn = scn;
1466 	CE_state->service = ce_engine_service_reg;
1467 
1468 	qdf_atomic_init(&CE_state->rx_pending);
1469 	if (!attr) {
1470 		/* Already initialized; caller wants the handle */
1471 		return (struct CE_handle *)CE_state;
1472 	}
1473 
1474 	if (CE_state->src_sz_max)
1475 		QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
1476 	else
1477 		CE_state->src_sz_max = attr->src_sz_max;
1478 
1479 	ce_init_ce_desc_event_log(scn, CE_id,
1480 				  attr->src_nentries + attr->dest_nentries);
1481 
1482 	/* source ring setup */
1483 	nentries = attr->src_nentries;
1484 	if (nentries) {
1485 		struct CE_ring_state *src_ring;
1486 
1487 		nentries = roundup_pwr2(nentries);
1488 		if (CE_state->src_ring) {
1489 			QDF_ASSERT(CE_state->src_ring->nentries == nentries);
1490 		} else {
1491 			src_ring = CE_state->src_ring =
1492 				ce_alloc_ring_state(CE_state,
1493 						CE_RING_SRC,
1494 						nentries);
1495 			if (!src_ring) {
1496 				/* cannot allocate src ring. If the
1497 				 * CE_state is allocated locally free
1498 				 * CE_State and return error.
1499 				 */
1500 				HIF_ERROR("%s: src ring has no mem", __func__);
1501 				if (malloc_CE_state) {
1502 					/* allocated CE_state locally */
1503 					qdf_mem_free(CE_state);
1504 					malloc_CE_state = false;
1505 				}
1506 				return NULL;
1507 			}
1508 			/* we can allocate src ring. Mark that the src ring is
1509 			/* src ring allocated successfully; mark that it is
1510 			 * allocated locally
1511 			malloc_src_ring = true;
1512 
1513 			/*
1514 			 * Also allocate a shadow src ring in
1515 			 * regular mem to use for faster access.
1516 			 */
1517 			src_ring->shadow_base_unaligned =
1518 				qdf_mem_malloc(nentries *
1519 					       sizeof(struct CE_src_desc) +
1520 					       CE_DESC_RING_ALIGN);
1521 			if (!src_ring->shadow_base_unaligned)
1522 				goto error_no_dma_mem;
1523 
1524 			src_ring->shadow_base = (struct CE_src_desc *)
1525 				(((size_t) src_ring->shadow_base_unaligned +
1526 				CE_DESC_RING_ALIGN - 1) &
1527 				 ~(CE_DESC_RING_ALIGN - 1));
1528 
1529 			status = ce_ring_setup(scn, CE_RING_SRC, CE_id,
1530 					       src_ring, attr);
1531 			if (status < 0)
1532 				goto error_target_access;
1533 
1534 			ce_ring_test_initial_indexes(CE_id, src_ring,
1535 						     "src_ring");
1536 		}
1537 	}
1538 
1539 	/* destination ring setup */
1540 	nentries = attr->dest_nentries;
1541 	if (nentries) {
1542 		struct CE_ring_state *dest_ring;
1543 
1544 		nentries = roundup_pwr2(nentries);
1545 		if (CE_state->dest_ring) {
1546 			QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
1547 		} else {
1548 			dest_ring = CE_state->dest_ring =
1549 				ce_alloc_ring_state(CE_state,
1550 						CE_RING_DEST,
1551 						nentries);
1552 			if (!dest_ring) {
1553 				/* cannot allocate dst ring. If the CE_state
1554 				 * or src ring is allocated locally free
1555 				 * CE_State and src ring and return error.
1556 				 */
1557 				HIF_ERROR("%s: dest ring has no mem",
1558 					  __func__);
1559 				goto error_no_dma_mem;
1560 			}
1561 
1562 			status = ce_ring_setup(scn, CE_RING_DEST, CE_id,
1563 				      dest_ring, attr);
1564 			if (status < 0)
1565 				goto error_target_access;
1566 
1567 			ce_ring_test_initial_indexes(CE_id, dest_ring,
1568 						     "dest_ring");
1569 
1570 			/* For srng based target, init status ring here */
1571 			if (ce_srng_based(CE_state->scn)) {
1572 				CE_state->status_ring =
1573 					ce_alloc_ring_state(CE_state,
1574 							CE_RING_STATUS,
1575 							nentries);
1576 				if (!CE_state->status_ring) {
1577 					/*Allocation failed. Cleanup*/
1578 					qdf_mem_free(CE_state->dest_ring);
1579 					if (malloc_src_ring) {
1580 						qdf_mem_free
1581 							(CE_state->src_ring);
1582 						CE_state->src_ring = NULL;
1583 						malloc_src_ring = false;
1584 					}
1585 					if (malloc_CE_state) {
1586 						/* allocated CE_state locally */
1587 						scn->ce_id_to_state[CE_id] =
1588 							NULL;
1589 						qdf_mem_free(CE_state);
1590 						malloc_CE_state = false;
1591 					}
1592 
1593 					return NULL;
1594 				}
1595 
1596 				status = ce_ring_setup(scn, CE_RING_STATUS,
1597 					       CE_id, CE_state->status_ring,
1598 					       attr);
1599 				if (status < 0)
1600 					goto error_target_access;
1601 
1602 			}
1603 
1604 			/* epping */
1605 			/* poll timer */
1606 			if (CE_state->attr_flags & CE_ATTR_ENABLE_POLL) {
1607 				qdf_timer_init(scn->qdf_dev,
1608 						&CE_state->poll_timer,
1609 						ce_poll_timeout,
1610 						CE_state,
1611 						QDF_TIMER_TYPE_WAKE_APPS);
1612 				ce_enable_polling(CE_state);
1613 				qdf_timer_mod(&CE_state->poll_timer,
1614 						      CE_POLL_TIMEOUT);
1615 			}
1616 		}
1617 	}
1618 
1619 	if (!ce_srng_based(scn)) {
1620 		/* Enable CE error interrupts */
1621 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1622 			goto error_target_access;
1623 		CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
1624 		if (Q_TARGET_ACCESS_END(scn) < 0)
1625 			goto error_target_access;
1626 	}
1627 
1628 	qdf_create_work(scn->qdf_dev, &CE_state->oom_allocation_work,
1629 			ce_oom_recovery, CE_state);
1630 
1631 	/* update the htt_data attribute */
1632 	ce_mark_datapath(CE_state);
1633 	scn->ce_id_to_state[CE_id] = CE_state;
1634 
1635 	alloc_mem_ce_debug_history(scn, CE_id, attr->src_nentries);
1636 
1637 	return (struct CE_handle *)CE_state;
1638 
1639 error_target_access:
1640 error_no_dma_mem:
1641 	ce_fini((struct CE_handle *)CE_state);
1642 	return NULL;
1643 }
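
/*
 * Illustrative sketch (not compiled into the driver): a minimal ce_init()
 * call for a source-only (host -> target) pipe. The CE id and attribute
 * values here are hypothetical; the real per-target values come from the
 * host CE configuration tables pulled in via ce_assignment.h.
 */
#if 0
static struct CE_handle *example_init_tx_ce(struct hif_softc *scn)
{
	struct CE_attr attr;

	qdf_mem_zero(&attr, sizeof(attr));
	attr.flags = 0;
	attr.src_nentries = 32;		/* rounded up to a power of 2 */
	attr.src_sz_max = 2048;		/* max bytes per source transfer */
	attr.dest_nentries = 0;		/* no destination ring */

	/* CE 3 is the host->target WMI/data pipe in the default map */
	return ce_init(scn, 3, &attr);
}
#endif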
1644 
1645 /**
1646  * hif_is_polled_mode_enabled - API to query if polling is enabled on all CEs
1647  * @hif_ctx: HIF Context
1648  *
1649  * API to check if polling is enabled on all CEs. Returns true when polling
1650  * is enabled on all CEs.
1651  *
1652  * Return: bool
1653  */
1654 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx)
1655 {
1656 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1657 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1658 	struct CE_attr *attr;
1659 	int id;
1660 
1661 	for (id = 0; id < scn->ce_count; id++) {
1662 		attr = &hif_state->host_ce_config[id];
1663 		if (attr && (attr->dest_nentries) &&
1664 		    !(attr->flags & CE_ATTR_ENABLE_POLL))
1665 			return false;
1666 	}
1667 	return true;
1668 }
1669 qdf_export_symbol(hif_is_polled_mode_enabled);
1670 
1671 #ifdef WLAN_FEATURE_FASTPATH
1672 /**
1673  * hif_enable_fastpath() - update that we have enabled fastpath mode
1674  * @hif_ctx: HIF context
1675  *
1676  * For use in data path
1677  *
1678  * Return: void
1679  */
1680 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
1681 {
1682 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1683 
1684 	if (ce_srng_based(scn)) {
1685 		HIF_INFO("%s, srng rings do not support fastpath", __func__);
1686 		return;
1687 	}
1688 	HIF_DBG("%s, Enabling fastpath mode", __func__);
1689 	scn->fastpath_mode_on = true;
1690 }
1691 
1692 /**
1693  * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
1694  * @hif_ctx: HIF Context
1695  *
1696  * For use in data path to skip HTC
1697  *
1698  * Return: bool
1699  */
1700 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
1701 {
1702 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1703 
1704 	return scn->fastpath_mode_on;
1705 }
1706 
1707 /**
1708  * hif_get_ce_handle - API to get CE handle for FastPath mode
1709  * @hif_ctx: HIF Context
1710  * @id: CopyEngine Id
1711  *
1712  * API to return CE handle for fastpath mode
1713  *
1714  * Return: CE handle (struct CE_state *) for the given copy engine id
1715  */
1716 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
1717 {
1718 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1719 
1720 	return scn->ce_id_to_state[id];
1721 }
1722 qdf_export_symbol(hif_get_ce_handle);
1723 
1724 /**
1725  * ce_h2t_tx_ce_cleanup() - Placeholder function for H2T CE cleanup.
1726  * No processing is required inside this function.
1727  * @ce_hdl: Copy engine handle
1728  * Using an assert, this function makes sure that
1729  * the TX CE has been processed completely.
1730  *
1731  * This is called while dismantling CE structures. No other thread
1732  * should be using these structures while dismantling is occurring,
1733  * therefore no locking is needed.
1734  *
1735  * Return: none
1736  */
1737 void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
1738 {
1739 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1740 	struct CE_ring_state *src_ring = ce_state->src_ring;
1741 	struct hif_softc *sc = ce_state->scn;
1742 	uint32_t sw_index, write_index;
1743 
1744 	if (hif_is_nss_wifi_enabled(sc))
1745 		return;
1746 
1747 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
1748 		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
1749 			 __func__, __LINE__);
1750 		sw_index = src_ring->sw_index;
		write_index = src_ring->write_index;
1752 
1753 		/* At this point Tx CE should be clean */
1754 		qdf_assert_always(sw_index == write_index);
1755 	}
1756 }
1757 
1758 /**
1759  * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
1760  * @ce_hdl: Handle to CE
1761  *
1762  * These buffers are never allocated on the fly, but
1763  * are allocated only once during HIF start and freed
1764  * only once during HIF stop.
1765  * NOTE:
1766  * The assumption here is there is no in-flight DMA in progress
1767  * currently, so that buffers can be freed up safely.
1768  *
1769  * Return: NONE
1770  */
1771 void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
1772 {
1773 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
1774 	struct CE_ring_state *dst_ring = ce_state->dest_ring;
1775 	qdf_nbuf_t nbuf;
1776 	int i;
1777 
1778 	if (ce_state->scn->fastpath_mode_on == false)
1779 		return;
1780 
1781 	if (!ce_state->htt_rx_data)
1782 		return;
1783 
1784 	/*
	 * When fastpath_mode is on, datapath CEs are kept completely full:
	 * unlike other CEs, they do not leave one blank space to distinguish
	 * between an empty queue and a full queue. So free all the entries.
1789 	 */
1790 	for (i = 0; i < dst_ring->nentries; i++) {
1791 		nbuf = dst_ring->per_transfer_context[i];
1792 
1793 		/*
1794 		 * The reasons for doing this check are:
1795 		 * 1) Protect against calling cleanup before allocating buffers
		 * 2) In a corner case, fastpath_mode_on may be set, but we
		 *    could have a partially filled ring, because of a memory
		 *    allocation failure in the middle of allocating the ring.
1799 		 *    This check accounts for that case, checking
1800 		 *    fastpath_mode_on flag or started flag would not have
1801 		 *    covered that case. This is not in performance path,
1802 		 *    so OK to do this.
1803 		 */
1804 		if (nbuf) {
1805 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
1806 					      QDF_DMA_FROM_DEVICE);
1807 			qdf_nbuf_free(nbuf);
1808 		}
1809 	}
1810 }
1811 
1812 /**
 * hif_update_fastpath_recv_bufs_cnt() - increment Rx buf count of fastpath pipes
 * @scn: HIF handle
 *
 * Datapath Rx CEs are a special case where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe from the beginning,
 * unlike other CE pipes where one less than dest_nentries are filled in
 * the beginning.
1820  *
1821  * Return: None
1822  */
1823 static void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1824 {
1825 	int pipe_num;
1826 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1827 
1828 	if (scn->fastpath_mode_on == false)
1829 		return;
1830 
1831 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
1832 		struct HIF_CE_pipe_info *pipe_info =
1833 			&hif_state->pipe_info[pipe_num];
1834 		struct CE_state *ce_state =
1835 			scn->ce_id_to_state[pipe_info->pipe_num];
1836 
1837 		if (ce_state->htt_rx_data)
1838 			atomic_inc(&pipe_info->recv_bufs_needed);
1839 	}
1840 }
1841 #else
1842 static inline void hif_update_fastpath_recv_bufs_cnt(struct hif_softc *scn)
1843 {
1844 }
1845 
1846 static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
1847 {
1848 	return false;
1849 }
1850 #endif /* WLAN_FEATURE_FASTPATH */
1851 
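/**
 * ce_fini() - free the resources allocated for a copy engine
 * @copyeng: copy engine handle returned by ce_init()
 *
 * Disables polling, cleans up the source, destination and status
 * rings, and frees the CE state. Also used on the ce_init() error
 * path.
 *
 * Return: none
 */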
1852 void ce_fini(struct CE_handle *copyeng)
1853 {
1854 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1855 	unsigned int CE_id = CE_state->id;
1856 	struct hif_softc *scn = CE_state->scn;
	uint32_t desc_size;
	bool inited = CE_state->timer_inited;

	CE_state->state = CE_UNUSED;
1861 	scn->ce_id_to_state[CE_id] = NULL;
1862 	/* Set the flag to false first to stop processing in ce_poll_timeout */
1863 	ce_disable_polling(CE_state);
1864 
1865 	qdf_lro_deinit(CE_state->lro_data);
1866 
1867 	if (CE_state->src_ring) {
1868 		/* Cleanup the datapath Tx ring */
1869 		ce_h2t_tx_ce_cleanup(copyeng);
1870 
1871 		desc_size = ce_get_desc_size(scn, CE_RING_SRC);
1872 		if (CE_state->src_ring->shadow_base_unaligned)
1873 			qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
1874 		if (CE_state->src_ring->base_addr_owner_space_unaligned)
1875 			ce_free_desc_ring(scn, CE_state->id,
1876 					  CE_state->src_ring,
1877 					  desc_size);
1878 		qdf_mem_free(CE_state->src_ring);
1879 	}
1880 	if (CE_state->dest_ring) {
1881 		/* Cleanup the datapath Rx ring */
1882 		ce_t2h_msg_ce_cleanup(copyeng);
1883 
1884 		desc_size = ce_get_desc_size(scn, CE_RING_DEST);
1885 		if (CE_state->dest_ring->base_addr_owner_space_unaligned)
1886 			ce_free_desc_ring(scn, CE_state->id,
1887 					  CE_state->dest_ring,
1888 					  desc_size);
1889 		qdf_mem_free(CE_state->dest_ring);
1890 
		/* epping */
		if (inited)
			qdf_timer_free(&CE_state->poll_timer);
1895 	}
1896 	if ((ce_srng_based(CE_state->scn)) && (CE_state->status_ring)) {
1897 		/* Cleanup the datapath Tx ring */
1898 		ce_h2t_tx_ce_cleanup(copyeng);
1899 
1900 		if (CE_state->status_ring->shadow_base_unaligned)
1901 			qdf_mem_free(
1902 				CE_state->status_ring->shadow_base_unaligned);
1903 
1904 		desc_size = ce_get_desc_size(scn, CE_RING_STATUS);
1905 		if (CE_state->status_ring->base_addr_owner_space_unaligned)
1906 			ce_free_desc_ring(scn, CE_state->id,
1907 					  CE_state->status_ring,
1908 					  desc_size);
1909 		qdf_mem_free(CE_state->status_ring);
1910 	}
1911 
1912 	free_mem_ce_debug_history(scn, CE_id);
1913 	reset_ce_debug_history(scn);
1914 	ce_deinit_ce_desc_event_log(scn, CE_id);
1915 
1916 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
1917 	qdf_mem_free(CE_state);
1918 }
1919 
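/**
 * hif_detach_htc() - detach the HTC layer from HIF
 * @hif_ctx: HIF opaque context
 *
 * Clears both the pending and the currently installed message
 * callback tables.
 *
 * Return: none
 */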
1920 void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
1921 {
1922 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1923 
1924 	qdf_mem_zero(&hif_state->msg_callbacks_pending,
1925 		  sizeof(hif_state->msg_callbacks_pending));
1926 	qdf_mem_zero(&hif_state->msg_callbacks_current,
1927 		  sizeof(hif_state->msg_callbacks_current));
1928 }
1929 
/**
 * hif_send_head() - send the first nbytes bytes of a buffer
 * @hif_ctx: HIF opaque context
 * @pipe: pipe on which to send
 * @transfer_id: transfer id for this send
 * @nbytes: number of bytes to send
 * @nbuf: network buffer holding the data
 * @data_attr: descriptor data flags
 *
 * Builds a sendlist from the fragments of the buffer and queues it to
 * the copy engine of the pipe, provided enough send slots are available.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
1931 QDF_STATUS
1932 hif_send_head(struct hif_opaque_softc *hif_ctx,
1933 	      uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
1934 	      qdf_nbuf_t nbuf, unsigned int data_attr)
1935 {
1936 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1937 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1938 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
1939 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
1940 	int bytes = nbytes, nfrags = 0;
1941 	struct ce_sendlist sendlist;
1942 	int status, i = 0;
1943 	unsigned int mux_id = 0;
1944 
1945 	if (nbytes > qdf_nbuf_len(nbuf)) {
1946 		HIF_ERROR("%s: nbytes:%d nbuf_len:%d", __func__, nbytes,
1947 			  (uint32_t)qdf_nbuf_len(nbuf));
1948 		QDF_ASSERT(0);
1949 	}
1950 
1951 	transfer_id =
1952 		(mux_id & MUX_ID_MASK) |
1953 		(transfer_id & TRANSACTION_ID_MASK);
1954 	data_attr &= DESC_DATA_FLAG_MASK;
1955 	/*
1956 	 * The common case involves sending multiple fragments within a
1957 	 * single download (the tx descriptor and the tx frame header).
1958 	 * So, optimize for the case of multiple fragments by not even
1959 	 * checking whether it's necessary to use a sendlist.
1960 	 * The overhead of using a sendlist for a single buffer download
1961 	 * is not a big deal, since it happens rarely (for WMI messages).
1962 	 */
1963 	ce_sendlist_init(&sendlist);
1964 	do {
1965 		qdf_dma_addr_t frag_paddr;
1966 		int frag_bytes;
1967 
1968 		frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
1969 		frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
1970 		/*
1971 		 * Clear the packet offset for all but the first CE desc.
1972 		 */
1973 		if (i++ > 0)
1974 			data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
1975 
1976 		status = ce_sendlist_buf_add(&sendlist, frag_paddr,
1977 				    frag_bytes >
1978 				    bytes ? bytes : frag_bytes,
1979 				    qdf_nbuf_get_frag_is_wordstream
1980 				    (nbuf,
1981 				    nfrags) ? 0 :
1982 				    CE_SEND_FLAG_SWAP_DISABLE,
1983 				    data_attr);
1984 		if (status != QDF_STATUS_SUCCESS) {
1985 			HIF_ERROR("%s: error, frag_num %d larger than limit",
1986 				__func__, nfrags);
1987 			return status;
1988 		}
1989 		bytes -= frag_bytes;
1990 		nfrags++;
1991 	} while (bytes > 0);
1992 
1993 	/* Make sure we have resources to handle this request */
1994 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
1995 	if (pipe_info->num_sends_allowed < nfrags) {
1996 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
1997 		ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
1998 		return QDF_STATUS_E_RESOURCES;
1999 	}
2000 	pipe_info->num_sends_allowed -= nfrags;
2001 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2002 
2003 	if (qdf_unlikely(!ce_hdl)) {
2004 		HIF_ERROR("%s: error CE handle is null", __func__);
2005 		return A_ERROR;
2006 	}
2007 
2008 	QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
2009 	DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
2010 		QDF_TRACE_DEFAULT_PDEV_ID, qdf_nbuf_data_addr(nbuf),
2011 		sizeof(qdf_nbuf_data(nbuf)), QDF_TX));
2012 	status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
2013 	QDF_ASSERT(status == QDF_STATUS_SUCCESS);
2014 
2015 	return status;
2016 }
2017 
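/**
 * hif_send_complete_check() - reap completed sends on a pipe
 * @hif_ctx: HIF opaque context
 * @pipe: pipe to check
 * @force: when false, skip the check if at least half of the pipe's
 *	send resources are still available
 *
 * Return: none
 */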
2018 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
2019 								int force)
2020 {
2021 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2022 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2023 
2024 	if (!force) {
2025 		int resources;
2026 		/*
2027 		 * Decide whether to actually poll for completions, or just
2028 		 * wait for a later chance. If there seem to be plenty of
2029 		 * resources left, then just wait, since checking involves
2030 		 * reading a CE register, which is a relatively expensive
2031 		 * operation.
2032 		 */
2033 		resources = hif_get_free_queue_number(hif_ctx, pipe);
2034 		/*
2035 		 * If at least 50% of the total resources are still available,
2036 		 * don't bother checking again yet.
2037 		 */
2038 		if (resources > (hif_state->host_ce_config[pipe].src_nentries >>
2039 									 1))
2040 			return;
2041 	}
2042 #if ATH_11AC_TXCOMPACT
2043 	ce_per_engine_servicereap(scn, pipe);
2044 #else
2045 	ce_per_engine_service(scn, pipe);
2046 #endif
2047 }
2048 
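/**
 * hif_get_free_queue_number() - get number of free sends allowed on a pipe
 * @hif_ctx: HIF opaque context
 * @pipe: pipe number
 *
 * Return: the pipe's current num_sends_allowed count
 */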
2049 uint16_t
2050 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
2051 {
2052 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2053 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
2054 	uint16_t rv;
2055 
2056 	qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2057 	rv = pipe_info->num_sends_allowed;
2058 	qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2059 	return rv;
2060 }
2061 
2062 /* Called by lower (CE) layer when a send to Target completes. */
2063 static void
2064 hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
2065 		     void *transfer_context, qdf_dma_addr_t CE_data,
2066 		     unsigned int nbytes, unsigned int transfer_id,
2067 		     unsigned int sw_index, unsigned int hw_index,
2068 		     unsigned int toeplitz_hash_result)
2069 {
2070 	struct HIF_CE_pipe_info *pipe_info =
2071 		(struct HIF_CE_pipe_info *)ce_context;
2072 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
2073 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2074 	unsigned int sw_idx = sw_index, hw_idx = hw_index;
2075 	struct hif_msg_callbacks *msg_callbacks =
2076 		&pipe_info->pipe_callbacks;
2077 
2078 	do {
2079 		/*
2080 		 * The upper layer callback will be triggered
		 * when the last fragment is completed.
2082 		 */
2083 		if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
2084 			if (scn->target_status == TARGET_STATUS_RESET) {
2085 
2086 				qdf_nbuf_unmap_single(scn->qdf_dev,
2087 						      transfer_context,
2088 						      QDF_DMA_TO_DEVICE);
2089 				qdf_nbuf_free(transfer_context);
			} else {
				msg_callbacks->txCompletionHandler(
					msg_callbacks->Context,
					transfer_context, transfer_id,
					toeplitz_hash_result);
			}
2095 		}
2096 
2097 		qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
2098 		pipe_info->num_sends_allowed++;
2099 		qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
2100 	} while (ce_completed_send_next(copyeng,
2101 			&ce_context, &transfer_context,
2102 			&CE_data, &nbytes, &transfer_id,
2103 			&sw_idx, &hw_idx,
2104 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
2105 }
2106 
2107 /**
2108  * hif_ce_do_recv(): send message from copy engine to upper layers
2109  * @msg_callbacks: structure containing callback and callback context
2110  * @netbuff: skb containing message
2111  * @nbytes: number of bytes in the message
2112  * @pipe_info: used for the pipe_number info
2113  *
2114  * Checks the packet length, configures the length in the netbuff,
2115  * and calls the upper layer callback.
2116  *
2117  * return: None
2118  */
2119 static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
2120 		qdf_nbuf_t netbuf, int nbytes,
2121 		struct HIF_CE_pipe_info *pipe_info) {
2122 	if (nbytes <= pipe_info->buf_sz) {
2123 		qdf_nbuf_set_pktlen(netbuf, nbytes);
2124 		msg_callbacks->
2125 			rxCompletionHandler(msg_callbacks->Context,
2126 					netbuf, pipe_info->pipe_num);
2127 	} else {
2128 		HIF_ERROR("%s: Invalid Rx msg buf:%pK nbytes:%d",
2129 				__func__, netbuf, nbytes);
2130 
2131 		qdf_nbuf_free(netbuf);
2132 	}
2133 }
2134 
2135 /* Called by lower (CE) layer when data is received from the Target. */
2136 static void
2137 hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
2138 		     void *transfer_context, qdf_dma_addr_t CE_data,
2139 		     unsigned int nbytes, unsigned int transfer_id,
2140 		     unsigned int flags)
2141 {
2142 	struct HIF_CE_pipe_info *pipe_info =
2143 		(struct HIF_CE_pipe_info *)ce_context;
2144 	struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
2145 	struct CE_state *ce_state = (struct CE_state *) copyeng;
2146 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2147 #ifdef HIF_PCI
2148 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
2149 #endif
2150 	struct hif_msg_callbacks *msg_callbacks =
2151 		 &pipe_info->pipe_callbacks;
2152 
2153 	do {
2154 #ifdef HIF_PCI
2155 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2156 #endif
2157 		qdf_nbuf_unmap_single(scn->qdf_dev,
2158 				      (qdf_nbuf_t) transfer_context,
2159 				      QDF_DMA_FROM_DEVICE);
2160 
2161 		atomic_inc(&pipe_info->recv_bufs_needed);
2162 		hif_post_recv_buffers_for_pipe(pipe_info);
2163 		if (scn->target_status == TARGET_STATUS_RESET)
2164 			qdf_nbuf_free(transfer_context);
2165 		else
2166 			hif_ce_do_recv(msg_callbacks, transfer_context,
2167 				nbytes, pipe_info);
2168 
		/* Set up force_break flag if num of receives reaches
2170 		 * MAX_NUM_OF_RECEIVES
2171 		 */
2172 		ce_state->receive_count++;
2173 		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
2174 			ce_state->force_break = 1;
2175 			break;
2176 		}
2177 	} while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
2178 					&CE_data, &nbytes, &transfer_id,
2179 					&flags) == QDF_STATUS_SUCCESS);
2180 
2181 }
2182 
2183 /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
2184 
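/**
 * hif_post_init() - save the upper layer message callbacks
 * @hif_ctx: HIF opaque context
 * @unused: unused argument
 * @callbacks: message callbacks provided by the upper layer
 *
 * The callbacks are stored in msg_callbacks_pending and installed
 * later by hif_msg_callbacks_install().
 *
 * Return: none
 */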
2185 void
2186 hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
2187 	      struct hif_msg_callbacks *callbacks)
2188 {
2189 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
2190 
2191 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2192 	spin_lock_init(&pcie_access_log_lock);
2193 #endif
2194 	/* Save callbacks for later installation */
2195 	qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
2196 		 sizeof(hif_state->msg_callbacks_pending));
2197 
2198 }
2199 
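/**
 * hif_completion_thread_startup() - register CE completion callbacks
 * @hif_state: HIF CE state
 *
 * Registers the send and receive completion handlers with every
 * non-diagnostic copy engine and copies the message callbacks into
 * each pipe's pipe_callbacks.
 *
 * Return: 0 on success, -EINVAL or -EFAULT on invalid configuration
 */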
2200 static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
2201 {
2202 	struct CE_handle *ce_diag = hif_state->ce_diag;
2203 	int pipe_num;
2204 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2205 	struct hif_msg_callbacks *hif_msg_callbacks =
2206 		&hif_state->msg_callbacks_current;
2207 
2208 	/* daemonize("hif_compl_thread"); */
2209 
2210 	if (scn->ce_count == 0) {
2211 		HIF_ERROR("%s: Invalid ce_count", __func__);
2212 		return -EINVAL;
2213 	}
2214 
2215 	if (!hif_msg_callbacks ||
2216 			!hif_msg_callbacks->rxCompletionHandler ||
2217 			!hif_msg_callbacks->txCompletionHandler) {
2218 		HIF_ERROR("%s: no completion handler registered", __func__);
2219 		return -EFAULT;
2220 	}
2221 
2222 	A_TARGET_ACCESS_LIKELY(scn);
2223 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2224 		struct CE_attr attr;
2225 		struct HIF_CE_pipe_info *pipe_info;
2226 
2227 		pipe_info = &hif_state->pipe_info[pipe_num];
2228 		if (pipe_info->ce_hdl == ce_diag)
2229 			continue;       /* Handle Diagnostic CE specially */
2230 		attr = hif_state->host_ce_config[pipe_num];
2231 		if (attr.src_nentries) {
2232 			/* pipe used to send to target */
2233 			HIF_DBG("%s: pipe_num:%d pipe_info:0x%pK",
2234 					 __func__, pipe_num, pipe_info);
2235 			ce_send_cb_register(pipe_info->ce_hdl,
2236 					    hif_pci_ce_send_done, pipe_info,
2237 					    attr.flags & CE_ATTR_DISABLE_INTR);
2238 			pipe_info->num_sends_allowed = attr.src_nentries - 1;
2239 		}
2240 		if (attr.dest_nentries) {
2241 			/* pipe used to receive from target */
2242 			ce_recv_cb_register(pipe_info->ce_hdl,
2243 					    hif_pci_ce_recv_data, pipe_info,
2244 					    attr.flags & CE_ATTR_DISABLE_INTR);
2245 		}
2246 
2247 		if (attr.src_nentries)
2248 			qdf_spinlock_create(&pipe_info->completion_freeq_lock);
2249 
2250 		qdf_mem_copy(&pipe_info->pipe_callbacks, hif_msg_callbacks,
2251 					sizeof(pipe_info->pipe_callbacks));
2252 	}
2253 
2254 	A_TARGET_ACCESS_UNLIKELY(scn);
2255 	return 0;
2256 }
2257 
2258 /*
2259  * Install pending msg callbacks.
2260  *
2261  * TBDXXX: This hack is needed because upper layers install msg callbacks
2262  * for use with HTC before BMI is done; yet this HIF implementation
2263  * needs to continue to use BMI msg callbacks. Really, upper layers
2264  * should not register HTC callbacks until AFTER BMI phase.
2265  */
2266 static void hif_msg_callbacks_install(struct hif_softc *scn)
2267 {
2268 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2269 
2270 	qdf_mem_copy(&hif_state->msg_callbacks_current,
2271 		 &hif_state->msg_callbacks_pending,
2272 		 sizeof(hif_state->msg_callbacks_pending));
2273 }
2274 
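/**
 * hif_get_default_pipe() - get the default upload/download pipe pair
 * @hif_hdl: HIF opaque context
 * @ULPipe: filled with the upload (host to target) pipe number
 * @DLPipe: filled with the download (target to host) pipe number
 *
 * Maps the HTC_CTRL_RSVD_SVC service to its pipes.
 *
 * Return: none
 */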
2275 void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
2276 							uint8_t *DLPipe)
2277 {
2278 	int ul_is_polled, dl_is_polled;
2279 
2280 	(void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
2281 		ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
2282 }
2283 
2284 /**
2285  * hif_dump_pipe_debug_count() - Log error count
2286  * @scn: hif_softc pointer.
2287  *
2288  * Output the pipe error counts of each pipe to log file
2289  *
2290  * Return: N/A
2291  */
2292 void hif_dump_pipe_debug_count(struct hif_softc *scn)
2293 {
2294 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2295 	int pipe_num;
2296 
2297 	if (!hif_state) {
2298 		HIF_ERROR("%s hif_state is NULL", __func__);
2299 		return;
2300 	}
2301 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2302 		struct HIF_CE_pipe_info *pipe_info;
2303 
		pipe_info = &hif_state->pipe_info[pipe_num];

		if (pipe_info->nbuf_alloc_err_count > 0 ||
		    pipe_info->nbuf_dma_err_count > 0 ||
		    pipe_info->nbuf_ce_enqueue_err_count)
			HIF_ERROR(
				"%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
				__func__, pipe_info->pipe_num,
				atomic_read(&pipe_info->recv_bufs_needed),
				pipe_info->nbuf_alloc_err_count,
				pipe_info->nbuf_dma_err_count,
				pipe_info->nbuf_ce_enqueue_err_count);
2316 	}
2317 }
2318 
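/**
 * hif_post_recv_buffers_failure() - record a failure to post an Rx buffer
 * @pipe_info: pipe on which the failure occurred
 * @nbuf: network buffer involved in the failure
 * @error_cnt: per-pipe error counter to increment
 * @failure_type: CE descriptor event type to record
 * @failure_type_string: printable name of the failure
 *
 * Increments recv_bufs_needed and the error counter, records the CE
 * descriptor event and, when the ring has been completely drained,
 * schedules the OOM recovery work so the pipe is eventually refilled.
 *
 * Return: none
 */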
2319 static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info,
2320 					  void *nbuf, uint32_t *error_cnt,
2321 					  enum hif_ce_event_type failure_type,
2322 					  const char *failure_type_string)
2323 {
2324 	int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed);
2325 	struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl;
2326 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2327 	int ce_id = CE_state->id;
2328 	uint32_t error_cnt_tmp;
2329 
2330 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2331 	error_cnt_tmp = ++(*error_cnt);
2332 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2333 	HIF_DBG("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s",
2334 		  __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp,
2335 		  failure_type_string);
2336 	hif_record_ce_desc_event(scn, ce_id, failure_type,
2337 				 NULL, nbuf, bufs_needed_tmp, 0);
2338 	/* if we fail to allocate the last buffer for an rx pipe,
2339 	 *	there is no trigger to refill the ce and we will
2340 	 *	eventually crash
2341 	 */
2342 	if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1)
2343 		qdf_sched_work(scn->qdf_dev, &CE_state->oom_allocation_work);
2344 
2345 }
}
2350 QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
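/**
 * hif_post_recv_buffers_for_pipe() - post receive buffers to a pipe
 * @pipe_info: pipe to replenish
 *
 * Allocates, DMA maps and enqueues one receive buffer for every
 * outstanding recv_bufs_needed credit on the pipe.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on allocation,
 * mapping or enqueue failure
 */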
2351 {
2352 	struct CE_handle *ce_hdl;
2353 	qdf_size_t buf_sz;
2354 	struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
2355 	QDF_STATUS status;
2356 	uint32_t bufs_posted = 0;
2357 
2358 	buf_sz = pipe_info->buf_sz;
2359 	if (buf_sz == 0) {
2360 		/* Unused Copy Engine */
2361 		return QDF_STATUS_SUCCESS;
2362 	}
2363 
2364 	ce_hdl = pipe_info->ce_hdl;
2365 
2366 	qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2367 	while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
2368 		qdf_dma_addr_t CE_data;      /* CE space buffer address */
2369 		qdf_nbuf_t nbuf;
2370 
2371 		atomic_dec(&pipe_info->recv_bufs_needed);
2372 		qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2373 
2374 		nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
2375 		if (!nbuf) {
2376 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2377 					&pipe_info->nbuf_alloc_err_count,
2378 					 HIF_RX_NBUF_ALLOC_FAILURE,
2379 					"HIF_RX_NBUF_ALLOC_FAILURE");
2380 			return QDF_STATUS_E_NOMEM;
2381 		}
2382 
2383 		/*
2384 		 * qdf_nbuf_peek_header(nbuf, &data, &unused);
		 * CE_data = dma_map_single(dev, data, buf_sz,
		 *			    DMA_FROM_DEVICE);
2387 		 */
2388 		status = qdf_nbuf_map_single(scn->qdf_dev, nbuf,
2389 					    QDF_DMA_FROM_DEVICE);
2390 
2391 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2392 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2393 					&pipe_info->nbuf_dma_err_count,
2394 					 HIF_RX_NBUF_MAP_FAILURE,
2395 					"HIF_RX_NBUF_MAP_FAILURE");
2396 			qdf_nbuf_free(nbuf);
2397 			return status;
2398 		}
2399 
2400 		CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
2401 
2402 		qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
2403 					       buf_sz, DMA_FROM_DEVICE);
2404 		status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
2405 		if (qdf_unlikely(status != QDF_STATUS_SUCCESS)) {
2406 			hif_post_recv_buffers_failure(pipe_info, nbuf,
2407 					&pipe_info->nbuf_ce_enqueue_err_count,
2408 					 HIF_RX_NBUF_ENQUEUE_FAILURE,
2409 					"HIF_RX_NBUF_ENQUEUE_FAILURE");
2410 
2411 			qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
2412 						QDF_DMA_FROM_DEVICE);
2413 			qdf_nbuf_free(nbuf);
2414 			return status;
2415 		}
2416 
2417 		qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
2418 		bufs_posted++;
2419 	}
2420 	pipe_info->nbuf_alloc_err_count =
2421 		(pipe_info->nbuf_alloc_err_count > bufs_posted) ?
2422 		pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
2423 	pipe_info->nbuf_dma_err_count =
2424 		(pipe_info->nbuf_dma_err_count > bufs_posted) ?
2425 		pipe_info->nbuf_dma_err_count - bufs_posted : 0;
2426 	pipe_info->nbuf_ce_enqueue_err_count =
2427 		(pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
		pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
2429 
2430 	qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
2431 
2432 	return QDF_STATUS_SUCCESS;
2433 }
2434 
2435 /*
2436  * Try to post all desired receive buffers for all pipes.
 * Returns QDF_STATUS_SUCCESS for non-fastpath rx copy engines, as
 * oom_allocation_work will be scheduled to recover any failures;
 * returns an error status if unable to completely replenish the
 * receive buffers for a fastpath rx copy engine.
2441  */
2442 QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
2443 {
2444 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2445 	int pipe_num;
2446 	struct CE_state *ce_state = NULL;
2447 	QDF_STATUS qdf_status;
2448 
2449 	A_TARGET_ACCESS_LIKELY(scn);
2450 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2451 		struct HIF_CE_pipe_info *pipe_info;
2452 
2453 		ce_state = scn->ce_id_to_state[pipe_num];
2454 		pipe_info = &hif_state->pipe_info[pipe_num];
2455 
2456 		if (hif_is_nss_wifi_enabled(scn) &&
2457 		    ce_state && (ce_state->htt_rx_data))
2458 			continue;
2459 
2460 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
2461 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
2462 			ce_state->htt_rx_data &&
2463 			scn->fastpath_mode_on) {
2464 			A_TARGET_ACCESS_UNLIKELY(scn);
2465 			return qdf_status;
2466 		}
2467 	}
2468 
2469 	A_TARGET_ACCESS_UNLIKELY(scn);
2470 
2471 	return QDF_STATUS_SUCCESS;
2472 }
2473 
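/**
 * hif_start() - start the HIF copy engine layer
 * @hif_ctx: HIF opaque context
 *
 * Installs the pending message callbacks, registers the per-pipe CE
 * completion handlers and posts the initial set of receive buffers.
 *
 * Return: QDF_STATUS_SUCCESS on success
 */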
2474 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
2475 {
2476 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2477 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2478 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
2479 
2480 	hif_update_fastpath_recv_bufs_cnt(scn);
2481 
2482 	hif_msg_callbacks_install(scn);
2483 
2484 	if (hif_completion_thread_startup(hif_state))
2485 		return QDF_STATUS_E_FAILURE;
2486 
2487 	/* enable buffer cleanup */
2488 	hif_state->started = true;
2489 
2490 	/* Post buffers once to start things off. */
2491 	qdf_status = hif_post_recv_buffers(scn);
2492 	if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
2493 		/* cleanup is done in hif_ce_disable */
2494 		HIF_ERROR("%s:failed to post buffers", __func__);
2495 		return qdf_status;
2496 	}
2497 
2498 	return qdf_status;
2499 }
2500 
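/**
 * hif_recv_buffer_cleanup_on_pipe() - revoke and free posted Rx buffers
 * @pipe_info: pipe to clean up
 *
 * Return: none
 */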
2501 static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2502 {
2503 	struct hif_softc *scn;
2504 	struct CE_handle *ce_hdl;
2505 	uint32_t buf_sz;
2506 	struct HIF_CE_state *hif_state;
2507 	qdf_nbuf_t netbuf;
2508 	qdf_dma_addr_t CE_data;
2509 	void *per_CE_context;
2510 
2511 	buf_sz = pipe_info->buf_sz;
2512 	/* Unused Copy Engine */
2513 	if (buf_sz == 0)
		return;

	hif_state = pipe_info->HIF_CE_state;
2518 	if (!hif_state->started)
2519 		return;
2520 
2521 	scn = HIF_GET_SOFTC(hif_state);
2522 	ce_hdl = pipe_info->ce_hdl;
2523 
2524 	if (!scn->qdf_dev)
2525 		return;
2526 	while (ce_revoke_recv_next
2527 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
2528 			&CE_data) == QDF_STATUS_SUCCESS) {
2529 		if (netbuf) {
2530 			qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
2531 					      QDF_DMA_FROM_DEVICE);
2532 			qdf_nbuf_free(netbuf);
2533 		}
2534 	}
2535 }
2536 
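/**
 * hif_send_buffer_cleanup_on_pipe() - cancel and complete pending sends
 * @pipe_info: pipe to clean up
 *
 * Cancels any sends still queued on the copy engine and indicates the
 * completion to the upper layer so the buffers can be freed, except
 * for HTT packets which are freed by the HTT layer itself.
 *
 * Return: none
 */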
2537 static void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
2538 {
2539 	struct CE_handle *ce_hdl;
2540 	struct HIF_CE_state *hif_state;
2541 	struct hif_softc *scn;
2542 	qdf_nbuf_t netbuf;
2543 	void *per_CE_context;
2544 	qdf_dma_addr_t CE_data;
2545 	unsigned int nbytes;
2546 	unsigned int id;
2547 	uint32_t buf_sz;
2548 	uint32_t toeplitz_hash_result;
2549 
2550 	buf_sz = pipe_info->buf_sz;
2551 	if (buf_sz == 0) {
2552 		/* Unused Copy Engine */
2553 		return;
2554 	}
2555 
2556 	hif_state = pipe_info->HIF_CE_state;
2557 	if (!hif_state->started) {
2558 		return;
2559 	}
2560 
2561 	scn = HIF_GET_SOFTC(hif_state);
2562 
2563 	ce_hdl = pipe_info->ce_hdl;
2564 
2565 	while (ce_cancel_send_next
2566 		       (ce_hdl, &per_CE_context,
2567 		       (void **)&netbuf, &CE_data, &nbytes,
2568 		       &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
2569 		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
2570 			/*
2571 			 * Packets enqueued by htt_h2t_ver_req_msg() and
2572 			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
2573 			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again.
			 * The endpoint they were queued on is checked to
			 * skip them.
2577 			 */
2578 			if (id == scn->htc_htt_tx_endpoint)
2579 				return;
2580 			/* Indicate the completion to higher
2581 			 * layer to free the buffer
2582 			 */
2583 			if (pipe_info->pipe_callbacks.txCompletionHandler)
2584 				pipe_info->pipe_callbacks.
2585 				    txCompletionHandler(pipe_info->
2586 					    pipe_callbacks.Context,
2587 					    netbuf, id, toeplitz_hash_result);
2588 		}
2589 	}
2590 }
2591 
2592 /*
2593  * Cleanup residual buffers for device shutdown:
2594  *    buffers that were enqueued for receive
2595  *    buffers that were to be sent
2596  * Note: Buffers that had completed but which were
2597  * not yet processed are on a completion queue. They
2598  * are handled when the completion thread shuts down.
2599  */
2600 static void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
2601 {
2602 	int pipe_num;
2603 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
2604 	struct CE_state *ce_state;
2605 
2606 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2607 		struct HIF_CE_pipe_info *pipe_info;
2608 
2609 		ce_state = scn->ce_id_to_state[pipe_num];
2610 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
2611 				((ce_state->htt_tx_data) ||
2612 				 (ce_state->htt_rx_data))) {
2613 			continue;
2614 		}
2615 
2616 		pipe_info = &hif_state->pipe_info[pipe_num];
2617 		hif_recv_buffer_cleanup_on_pipe(pipe_info);
2618 		hif_send_buffer_cleanup_on_pipe(pipe_info);
2619 	}
2620 }
2621 
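/**
 * hif_flush_surprise_remove() - clean up residual buffers on surprise removal
 * @hif_ctx: HIF opaque context
 *
 * Return: none
 */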
2622 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
2623 {
2624 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2625 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2626 
2627 	hif_buffer_cleanup(hif_state);
2628 }
2629 
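/**
 * hif_destroy_oom_work() - destroy the OOM recovery work of every CE
 * @scn: HIF context
 *
 * Return: none
 */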
2630 static void hif_destroy_oom_work(struct hif_softc *scn)
2631 {
2632 	struct CE_state *ce_state;
2633 	int ce_id;
2634 
2635 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2636 		ce_state = scn->ce_id_to_state[ce_id];
2637 		if (ce_state)
2638 			qdf_destroy_work(scn->qdf_dev,
2639 					 &ce_state->oom_allocation_work);
2640 	}
2641 }
2642 
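/**
 * hif_ce_stop() - stop CE operation and free per-pipe resources
 * @scn: HIF context
 *
 * Disables interrupts, destroys the OOM recovery work, cleans up the
 * residual buffers on every pipe and finalizes each copy engine.
 *
 * Return: none
 */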
2643 void hif_ce_stop(struct hif_softc *scn)
2644 {
2645 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2646 	int pipe_num;
2647 
2648 	/*
2649 	 * before cleaning up any memory, ensure irq &
2650 	 * bottom half contexts will not be re-entered
2651 	 */
2652 	hif_disable_isr(&scn->osc);
2653 	hif_destroy_oom_work(scn);
2654 	scn->hif_init_done = false;
2655 
2656 	/*
2657 	 * At this point, asynchronous threads are stopped,
2658 	 * The Target should not DMA nor interrupt, Host code may
2659 	 * not initiate anything more.  So we just need to clean
2660 	 * up Host-side state.
2661 	 */
2662 
2663 	if (scn->athdiag_procfs_inited) {
2664 		athdiag_procfs_remove();
2665 		scn->athdiag_procfs_inited = false;
2666 	}
2667 
2668 	hif_buffer_cleanup(hif_state);
2669 
2670 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
2671 		struct HIF_CE_pipe_info *pipe_info;
2672 		struct CE_attr attr;
2673 		struct CE_handle *ce_diag = hif_state->ce_diag;
2674 
2675 		pipe_info = &hif_state->pipe_info[pipe_num];
2676 		if (pipe_info->ce_hdl) {
2677 			if (pipe_info->ce_hdl != ce_diag) {
2678 				attr = hif_state->host_ce_config[pipe_num];
2679 				if (attr.src_nentries)
2680 					qdf_spinlock_destroy(&pipe_info->
2681 							completion_freeq_lock);
2682 			}
2683 			ce_fini(pipe_info->ce_hdl);
2684 			pipe_info->ce_hdl = NULL;
2685 			pipe_info->buf_sz = 0;
2686 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
2687 		}
2688 	}
2689 
2690 	if (hif_state->sleep_timer_init) {
2691 		qdf_timer_stop(&hif_state->sleep_timer);
2692 		qdf_timer_free(&hif_state->sleep_timer);
2693 		hif_state->sleep_timer_init = false;
2694 	}
2695 
2696 	hif_state->started = false;
2697 }
2698 
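/**
 * hif_get_shadow_reg_cfg() - return the current shadow register configuration
 * @scn: HIF context
 * @target_shadow_reg_cfg_ret: filled with the shadow register table
 * @shadow_cfg_sz_ret: filled with the size of the table in bytes
 *
 * Return: return by parameter
 */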
2699 static void hif_get_shadow_reg_cfg(struct hif_softc *scn,
2700 				   struct shadow_reg_cfg
2701 				   **target_shadow_reg_cfg_ret,
2702 				   uint32_t *shadow_cfg_sz_ret)
2703 {
2704 	if (target_shadow_reg_cfg_ret)
2705 		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
2706 	if (shadow_cfg_sz_ret)
2707 		*shadow_cfg_sz_ret = shadow_cfg_sz;
2708 }
2709 
2710 /**
 * hif_get_target_ce_config() - get copy engine configuration
 * @scn: HIF context
 * @target_ce_config_ret: basic copy engine configuration
 * @target_ce_config_sz_ret: size of the basic configuration in bytes
 * @target_service_to_ce_map_ret: service mapping for the copy engines
 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
 * @target_shadow_reg_cfg_ret: shadow register configuration
 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
 *
 * Provides an accessor to these values outside of this file.
 * Currently these are stored in static pointers to const sections.
 * There are multiple configurations that are selected from at compile time.
2723  *
2724  * Return: return by parameter.
2725  */
2726 void hif_get_target_ce_config(struct hif_softc *scn,
2727 		struct CE_pipe_config **target_ce_config_ret,
2728 		uint32_t *target_ce_config_sz_ret,
2729 		struct service_to_pipe **target_service_to_ce_map_ret,
2730 		uint32_t *target_service_to_ce_map_sz_ret,
2731 		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
2732 		uint32_t *shadow_cfg_sz_ret)
2733 {
2734 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2735 
2736 	*target_ce_config_ret = hif_state->target_ce_config;
2737 	*target_ce_config_sz_ret = hif_state->target_ce_config_sz;
2738 
2739 	hif_select_service_to_pipe_map(scn, target_service_to_ce_map_ret,
2740 				       target_service_to_ce_map_sz_ret);
2741 	hif_get_shadow_reg_cfg(scn, target_shadow_reg_cfg_ret,
2742 			       shadow_cfg_sz_ret);
2743 }
2744 
2745 #ifdef CONFIG_SHADOW_V2
2746 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2747 {
2748 	int i;
2749 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2750 		  "%s: num_config %d", __func__, cfg->num_shadow_reg_v2_cfg);
2751 
2752 	for (i = 0; i < cfg->num_shadow_reg_v2_cfg; i++) {
2753 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
2754 		     "%s: i %d, val %x", __func__, i,
2755 		     cfg->shadow_reg_v2_cfg[i].addr);
2756 	}
2757 }
2758 
2759 #else
2760 static void hif_print_hal_shadow_register_cfg(struct pld_wlan_enable_cfg *cfg)
2761 {
2762 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2763 		  "%s: CONFIG_SHADOW_V2 not defined", __func__);
2764 }
2765 #endif
2766 
2767 #ifdef ADRASTEA_RRI_ON_DDR
2768 /**
2769  * hif_get_src_ring_read_index(): Called to get the SRRI
2770  *
2771  * @scn: hif_softc pointer
2772  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2773  *
2774  * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based SRRI
2776  *
2777  * Return: SRRI
2778  */
2779 inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
2780 		uint32_t CE_ctrl_addr)
2781 {
2782 	struct CE_attr attr;
2783 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2784 
2785 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2786 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
2787 		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2788 	} else {
2789 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2790 			return A_TARGET_READ(scn,
2791 					(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
2792 		else
2793 			return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
2794 					CE_ctrl_addr);
2795 	}
2796 }
2797 
2798 /**
2799  * hif_get_dst_ring_read_index(): Called to get the DRRI
2800  *
2801  * @scn: hif_softc pointer
2802  * @CE_ctrl_addr: base address of the CE whose RRI is to be read
2803  *
2804  * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI
2806  *
2807  * Return: DRRI
2808  */
2809 inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
2810 		uint32_t CE_ctrl_addr)
2811 {
2812 	struct CE_attr attr;
2813 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2814 
2815 	attr = hif_state->host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
2816 
2817 	if (attr.flags & CE_ATTR_DISABLE_INTR) {
2818 		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
2819 	} else {
2820 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn))
2821 			return A_TARGET_READ(scn,
2822 					(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
2823 		else
2824 			return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
2825 					CE_ctrl_addr);
2826 	}
2827 }
2828 
2829 /**
2830  * hif_alloc_rri_on_ddr() - Allocate memory for rri on ddr
2831  * @scn: hif_softc pointer
2832  *
2833  * Return: qdf status
2834  */
2835 static inline QDF_STATUS hif_alloc_rri_on_ddr(struct hif_softc *scn)
2836 {
2837 	qdf_dma_addr_t paddr_rri_on_ddr = 0;
2838 
2839 	scn->vaddr_rri_on_ddr =
2840 		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
2841 		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
2842 		&paddr_rri_on_ddr);
2843 
2844 	if (!scn->vaddr_rri_on_ddr) {
2845 		hif_err("dmaable page alloc fail");
2846 		return QDF_STATUS_E_NOMEM;
2847 	}
2848 
2849 	scn->paddr_rri_on_ddr = paddr_rri_on_ddr;
2850 
2851 	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));
2852 
2853 	return QDF_STATUS_SUCCESS;
2854 }
2855 #endif
2856 
2857 #if (!defined(QCN7605_SUPPORT)) && defined(ADRASTEA_RRI_ON_DDR)
2858 /**
2859  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2860  *
2861  * @scn: hif_softc pointer
2862  *
2863  * This function allocates non cached memory on ddr and sends
2864  * the physical address of this memory to the CE hardware. The
2865  * hardware updates the RRI on this particular location.
2866  *
2867  * Return: None
2868  */
2869 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
2870 {
2871 	unsigned int i;
2872 	uint32_t high_paddr, low_paddr;
2873 
2874 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
2875 		return;
2876 
2877 	low_paddr  = BITS0_TO_31(scn->paddr_rri_on_ddr);
2878 	high_paddr = BITS32_TO_35(scn->paddr_rri_on_ddr);
2879 
2880 	HIF_DBG("%s using srri and drri from DDR", __func__);
2881 
2882 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
2883 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
2884 
2885 	for (i = 0; i < CE_COUNT; i++)
2886 		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
2887 }
2888 #else
2889 /**
2890  * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
2891  *
2892  * @scn: hif_softc pointer
2893  *
2894  * This is a dummy implementation for platforms that don't
2895  * support this functionality.
2896  *
2897  * Return: None
2898  */
2899 static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
2900 {
2901 }
2902 #endif
2903 
2904 /**
2905  * hif_update_rri_over_ddr_config() - update rri_over_ddr config for
2906  *                                    QMI command
2907  * @scn: hif context
2908  * @cfg: wlan enable config
2909  *
 * In the case of Genoa, the rri_over_ddr memory configuration is passed
 * to firmware through the QMI configure command.
 *
 * Return: None
2912  */
2913 #if defined(QCN7605_SUPPORT) && defined(ADRASTEA_RRI_ON_DDR)
2914 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
2915 					   struct pld_wlan_enable_cfg *cfg)
2916 {
2917 	if (hif_alloc_rri_on_ddr(scn) != QDF_STATUS_SUCCESS)
2918 		return;
2919 
2920 	cfg->rri_over_ddr_cfg_valid = true;
2921 	cfg->rri_over_ddr_cfg.base_addr_low =
2922 		 BITS0_TO_31(scn->paddr_rri_on_ddr);
2923 	cfg->rri_over_ddr_cfg.base_addr_high =
2924 		 BITS32_TO_35(scn->paddr_rri_on_ddr);
2925 }
2926 #else
2927 static void hif_update_rri_over_ddr_config(struct hif_softc *scn,
2928 					   struct pld_wlan_enable_cfg *cfg)
2929 {
2930 }
2931 #endif
2932 
2933 /**
2934  * hif_wlan_enable(): call the platform driver to enable wlan
2935  * @scn: HIF Context
2936  *
2937  * This function passes the con_mode and CE configuration to
2938  * platform driver to enable wlan.
2939  *
2940  * Return: linux error code
2941  */
2942 int hif_wlan_enable(struct hif_softc *scn)
2943 {
2944 	struct pld_wlan_enable_cfg cfg;
2945 	enum pld_driver_mode mode;
2946 	uint32_t con_mode = hif_get_conparam(scn);
2947 
2948 	hif_get_target_ce_config(scn,
2949 			(struct CE_pipe_config **)&cfg.ce_tgt_cfg,
2950 			&cfg.num_ce_tgt_cfg,
2951 			(struct service_to_pipe **)&cfg.ce_svc_cfg,
2952 			&cfg.num_ce_svc_pipe_cfg,
2953 			(struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
2954 			&cfg.num_shadow_reg_cfg);
2955 
2956 	/* translate from structure size to array size */
2957 	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
2958 	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
2959 	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
2960 
2961 	hif_prepare_hal_shadow_register_cfg(scn, &cfg.shadow_reg_v2_cfg,
2962 			      &cfg.num_shadow_reg_v2_cfg);
2963 
2964 	hif_print_hal_shadow_register_cfg(&cfg);
2965 
2966 	hif_update_rri_over_ddr_config(scn, &cfg);
2967 
2968 	if (QDF_GLOBAL_FTM_MODE == con_mode)
2969 		mode = PLD_FTM;
2970 	else if (QDF_GLOBAL_COLDBOOT_CALIB_MODE == con_mode)
2971 		mode = PLD_COLDBOOT_CALIBRATION;
2972 	else if (QDF_IS_EPPING_ENABLED(con_mode))
2973 		mode = PLD_EPPING;
2974 	else
2975 		mode = PLD_MISSION;
2976 
2977 	if (BYPASS_QMI)
2978 		return 0;
2979 	else
2980 		return pld_wlan_enable(scn->qdf_dev->dev, &cfg,
2981 				       mode, QWLAN_VERSIONSTR);
2982 }
2983 
2984 #ifdef WLAN_FEATURE_EPPING
2985 
2986 #define CE_EPPING_USES_IRQ true
2987 
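/**
 * hif_ce_prepare_epping_config() - select the epping CE attribute tables
 * @hif_state: HIF CE state
 *
 * Return: none
 */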
2988 void hif_ce_prepare_epping_config(struct HIF_CE_state *hif_state)
2989 {
2990 	if (CE_EPPING_USES_IRQ)
2991 		hif_state->host_ce_config = host_ce_config_wlan_epping_irq;
2992 	else
2993 		hif_state->host_ce_config = host_ce_config_wlan_epping_poll;
2994 	hif_state->target_ce_config = target_ce_config_wlan_epping;
2995 	hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
2996 	target_shadow_reg_cfg = target_shadow_reg_cfg_epping;
2997 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_epping);
2998 }
2999 #endif
3000 
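/**
 * hif_set_ce_config_qcn7605() - set up the QCN7605 CE attribute tables
 * @scn: HIF context
 * @hif_state: HIF CE state
 *
 * Return: none
 */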
3001 #ifdef QCN7605_SUPPORT
3002 static inline
3003 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3004 			       struct HIF_CE_state *hif_state)
3005 {
3006 	hif_state->host_ce_config = host_ce_config_wlan_qcn7605;
3007 	hif_state->target_ce_config = target_ce_config_wlan_qcn7605;
3008 	hif_state->target_ce_config_sz =
3009 				 sizeof(target_ce_config_wlan_qcn7605);
3010 	target_shadow_reg_cfg = target_shadow_reg_cfg_map_qcn7605;
3011 	shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map_qcn7605);
3012 	scn->ce_count = QCN7605_CE_COUNT;
3013 }
3014 #else
3015 static inline
3016 void hif_set_ce_config_qcn7605(struct hif_softc *scn,
3017 			       struct HIF_CE_state *hif_state)
3018 {
3019 	HIF_ERROR("QCN7605 not supported");
3020 }
3021 #endif
3022 
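/**
 * hif_ce_service_init() - register the CE service implementation
 *
 * Registers either the SRNG based or the legacy CE service, depending
 * on the compile time target support.
 *
 * Return: none
 */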
3023 #ifdef CE_SVC_CMN_INIT
3024 #ifdef QCA_WIFI_SUPPORT_SRNG
3025 static inline void hif_ce_service_init(void)
3026 {
3027 	ce_service_srng_init();
3028 }
3029 #else
3030 static inline void hif_ce_service_init(void)
3031 {
3032 	ce_service_legacy_init();
3033 }
3034 #endif
3035 #else
3036 static inline void hif_ce_service_init(void)
3037 {
3038 }
3039 #endif
3040 
3041 
3042 /**
3043  * hif_ce_prepare_config() - load the correct static tables.
3044  * @scn: hif context
3045  *
3046  * Epping uses different static attribute tables than mission mode.
3047  */
3048 void hif_ce_prepare_config(struct hif_softc *scn)
3049 {
3050 	uint32_t mode = hif_get_conparam(scn);
3051 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3052 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
3053 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3054 
3055 	hif_ce_service_init();
3056 	hif_state->ce_services = ce_services_attach(scn);
3057 
3058 	scn->ce_count = HOST_CE_COUNT;
3059 	/* if epping is enabled we need to use the epping configuration. */
3060 	if (QDF_IS_EPPING_ENABLED(mode)) {
3061 		hif_ce_prepare_epping_config(hif_state);
3062 		return;
3063 	}
3064 
3065 	switch (tgt_info->target_type) {
3066 	default:
3067 		hif_state->host_ce_config = host_ce_config_wlan;
3068 		hif_state->target_ce_config = target_ce_config_wlan;
3069 		hif_state->target_ce_config_sz = sizeof(target_ce_config_wlan);
3070 		break;
3071 	case TARGET_TYPE_QCN7605:
3072 		hif_set_ce_config_qcn7605(scn, hif_state);
3073 		break;
3074 	case TARGET_TYPE_AR900B:
3075 	case TARGET_TYPE_QCA9984:
3076 	case TARGET_TYPE_IPQ4019:
3077 	case TARGET_TYPE_QCA9888:
3078 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG)) {
3079 			hif_state->host_ce_config =
3080 				host_lowdesc_ce_cfg_wlan_ar900b_nopktlog;
3081 		} else if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3082 			hif_state->host_ce_config =
3083 				host_lowdesc_ce_cfg_wlan_ar900b;
3084 		} else {
3085 			hif_state->host_ce_config = host_ce_config_wlan_ar900b;
3086 		}
3087 
3088 		hif_state->target_ce_config = target_ce_config_wlan_ar900b;
3089 		hif_state->target_ce_config_sz =
3090 				sizeof(target_ce_config_wlan_ar900b);
3091 
3092 		break;
3093 
3094 	case TARGET_TYPE_AR9888:
3095 	case TARGET_TYPE_AR9888V2:
3096 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_CFG)) {
3097 			hif_state->host_ce_config = host_lowdesc_ce_cfg_wlan_ar9888;
3098 		} else {
3099 			hif_state->host_ce_config = host_ce_config_wlan_ar9888;
3100 		}
3101 
3102 		hif_state->target_ce_config = target_ce_config_wlan_ar9888;
3103 		hif_state->target_ce_config_sz =
3104 					sizeof(target_ce_config_wlan_ar9888);
3105 
3106 		break;
3107 
3108 	case TARGET_TYPE_QCA8074:
3109 	case TARGET_TYPE_QCA8074V2:
3110 	case TARGET_TYPE_QCA6018:
3111 		if (scn->bus_type == QDF_BUS_TYPE_PCI) {
3112 			hif_state->host_ce_config =
3113 					host_ce_config_wlan_qca8074_pci;
3114 			hif_state->target_ce_config =
3115 				target_ce_config_wlan_qca8074_pci;
3116 			hif_state->target_ce_config_sz =
3117 				sizeof(target_ce_config_wlan_qca8074_pci);
3118 		} else {
3119 			hif_state->host_ce_config = host_ce_config_wlan_qca8074;
3120 			hif_state->target_ce_config =
3121 					target_ce_config_wlan_qca8074;
3122 			hif_state->target_ce_config_sz =
3123 				sizeof(target_ce_config_wlan_qca8074);
3124 		}
3125 		break;
3126 	case TARGET_TYPE_QCA6290:
3127 		hif_state->host_ce_config = host_ce_config_wlan_qca6290;
3128 		hif_state->target_ce_config = target_ce_config_wlan_qca6290;
3129 		hif_state->target_ce_config_sz =
3130 					sizeof(target_ce_config_wlan_qca6290);
3131 
3132 		scn->ce_count = QCA_6290_CE_COUNT;
3133 		break;
3134 	case TARGET_TYPE_QCA6390:
3135 		hif_state->host_ce_config = host_ce_config_wlan_qca6390;
3136 		hif_state->target_ce_config = target_ce_config_wlan_qca6390;
3137 		hif_state->target_ce_config_sz =
3138 					sizeof(target_ce_config_wlan_qca6390);
3139 
3140 		scn->ce_count = QCA_6390_CE_COUNT;
3141 		break;
3142 	case TARGET_TYPE_ADRASTEA:
3143 		if (hif_is_attribute_set(scn, HIF_LOWDESC_CE_NO_PKTLOG_CFG))
3144 			hif_state->host_ce_config =
3145 				host_lowdesc_ce_config_wlan_adrastea_nopktlog;
3146 		else
3147 			hif_state->host_ce_config =
3148 				host_ce_config_wlan_adrastea;
3149 
3150 		hif_state->target_ce_config = target_ce_config_wlan_adrastea;
3151 		hif_state->target_ce_config_sz =
3152 					sizeof(target_ce_config_wlan_adrastea);
3153 		break;
3154 
3155 	}
3156 	QDF_BUG(scn->ce_count <= CE_COUNT_MAX);
3157 }
3158 
3159 /**
3160  * hif_ce_open() - do ce specific allocations
3161  * @hif_sc: pointer to hif context
3162  *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_NOMEM
3164  */
3165 QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
3166 {
3167 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3168 
3169 	qdf_spinlock_create(&hif_state->irq_reg_lock);
3170 	qdf_spinlock_create(&hif_state->keep_awake_lock);
3171 	return QDF_STATUS_SUCCESS;
3172 }
3173 
3174 /**
3175  * hif_ce_close() - do ce specific free
3176  * @hif_sc: pointer to hif context
3177  */
3178 void hif_ce_close(struct hif_softc *hif_sc)
3179 {
3180 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3181 
3182 	qdf_spinlock_destroy(&hif_state->irq_reg_lock);
3183 	qdf_spinlock_destroy(&hif_state->keep_awake_lock);
3184 }
3185 
3186 /**
3187  * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
3188  * @hif_sc: hif context
3189  *
3190  * uses state variables to support cleaning up when hif_config_ce fails.
3191  */
3192 void hif_unconfig_ce(struct hif_softc *hif_sc)
3193 {
3194 	int pipe_num;
3195 	struct HIF_CE_pipe_info *pipe_info;
3196 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
3197 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(hif_sc);
3198 
3199 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3200 		pipe_info = &hif_state->pipe_info[pipe_num];
3201 		if (pipe_info->ce_hdl) {
3202 			ce_unregister_irq(hif_state, (1 << pipe_num));
3203 		}
3204 	}
3205 	deinit_tasklet_workers(hif_hdl);
3206 	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
3207 		pipe_info = &hif_state->pipe_info[pipe_num];
3208 		if (pipe_info->ce_hdl) {
3209 			ce_fini(pipe_info->ce_hdl);
3210 			pipe_info->ce_hdl = NULL;
3211 			pipe_info->buf_sz = 0;
3212 			qdf_spinlock_destroy(&pipe_info->recv_bufs_needed_lock);
3213 		}
3214 	}
3215 	if (hif_sc->athdiag_procfs_inited) {
3216 		athdiag_procfs_remove();
3217 		hif_sc->athdiag_procfs_inited = false;
3218 	}
3219 }
3220 
3221 #ifdef CONFIG_BYPASS_QMI
3222 #ifdef QCN7605_SUPPORT
3223 /**
3224  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3225  * @scn: pointer to HIF structure
3226  *
3227  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3228  *
3229  * Return: void
3230  */
3231 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3232 {
3233 	void *target_va;
3234 	phys_addr_t target_pa;
3235 	struct ce_info *ce_info_ptr;
3236 	uint32_t msi_data_start;
3237 	uint32_t msi_data_count;
3238 	uint32_t msi_irq_start;
3239 	uint32_t i = 0;
3240 	int ret;
3241 
3242 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev,
3243 					     scn->qdf_dev->dev,
3244 					     FW_SHARED_MEM +
3245 					     sizeof(struct ce_info),
3246 					     &target_pa);
3247 	if (!target_va)
3248 		return;
3249 
3250 	ce_info_ptr = (struct ce_info *)target_va;
3251 
3252 	if (scn->vaddr_rri_on_ddr) {
3253 		ce_info_ptr->rri_over_ddr_low_paddr  =
3254 			 BITS0_TO_31(scn->paddr_rri_on_ddr);
3255 		ce_info_ptr->rri_over_ddr_high_paddr =
3256 			 BITS32_TO_35(scn->paddr_rri_on_ddr);
3257 	}
3258 
3259 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3260 					  &msi_data_count, &msi_data_start,
3261 					  &msi_irq_start);
3262 	if (ret) {
3263 		hif_err("Failed to get CE msi config");
3264 		return;
3265 	}
3266 
3267 	for (i = 0; i < CE_COUNT_MAX; i++) {
3268 		ce_info_ptr->cfg[i].ce_id = i;
3269 		ce_info_ptr->cfg[i].msi_vector =
3270 			 (i % msi_data_count) + msi_irq_start;
3271 	}
3272 
3273 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3274 	hif_info("target va %pK target pa %pa", target_va, &target_pa);
3275 }
3276 #else
3277 /**
3278  * hif_post_static_buf_to_target() - post static buffer to WLAN FW
3279  * @scn: pointer to HIF structure
3280  *
3281  * WLAN FW needs 2MB memory from DDR when QMI is disabled.
3282  *
3283  * Return: void
3284  */
3285 static void hif_post_static_buf_to_target(struct hif_softc *scn)
3286 {
3287 	void *target_va;
3288 	phys_addr_t target_pa;
3289 
3290 	target_va = qdf_mem_alloc_consistent(scn->qdf_dev, scn->qdf_dev->dev,
3291 				FW_SHARED_MEM, &target_pa);
3292 	if (!target_va) {
3293 		HIF_TRACE("Memory allocation failed could not post target buf");
3294 		return;
3295 	}
3296 	hif_write32_mb(scn, scn->mem + BYPASS_QMI_TEMP_REGISTER, target_pa);
3297 	HIF_TRACE("target va %pK target pa %pa", target_va, &target_pa);
3298 }
3299 #endif
3300 
3301 #else
3302 static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
3303 {
3304 }
3305 #endif
3306 
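/**
 * hif_srng_sleep_state_adjust() - sleep state adjust for SRNG based targets
 * @scn: HIF context
 * @sleep_ok: whether the target is allowed to go to sleep
 * @wait_for_it: whether to wait for the state change to take effect
 *
 * Placeholder for SRNG based targets; currently a no-op.
 *
 * Return: 0
 */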
3307 static int hif_srng_sleep_state_adjust(struct hif_softc *scn, bool sleep_ok,
3308 				bool wait_for_it)
3309 {
3310 	/* todo */
3311 	return 0;
3312 }
3313 
3314 /**
3315  * hif_config_ce() - configure copy engines
3316  * @scn: hif context
3317  *
3318  * Prepares fw, copy engine hardware and host sw according
3319  * to the attributes selected by hif_ce_prepare_config.
3320  *
 * Also calls athdiag_procfs_init.
 *
 * Return: 0 for success, nonzero for failure.
3324  */
3325 int hif_config_ce(struct hif_softc *scn)
3326 {
3327 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3328 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3329 	struct HIF_CE_pipe_info *pipe_info;
3330 	int pipe_num;
3331 	struct CE_state *ce_state = NULL;
3332 
3333 #ifdef ADRASTEA_SHADOW_REGISTERS
3334 	int i;
3335 #endif
3336 	QDF_STATUS rv = QDF_STATUS_SUCCESS;
3337 
3338 	scn->notice_send = true;
3339 	scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
3340 
3341 	hif_post_static_buf_to_target(scn);
3342 
3343 	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
3344 
3345 	hif_config_rri_on_ddr(scn);
3346 
3347 	if (ce_srng_based(scn))
3348 		scn->bus_ops.hif_target_sleep_state_adjust =
3349 			&hif_srng_sleep_state_adjust;
3350 
3351 	/* Initialise the CE debug history sysfs interface inputs ce_id and
3352 	 * index. Disable data storing
3353 	 */
3354 	reset_ce_debug_history(scn);
3355 
3356 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
3357 		struct CE_attr *attr;
3358 
3359 		pipe_info = &hif_state->pipe_info[pipe_num];
3360 		pipe_info->pipe_num = pipe_num;
3361 		pipe_info->HIF_CE_state = hif_state;
3362 		attr = &hif_state->host_ce_config[pipe_num];
3363 
3364 		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
3365 		ce_state = scn->ce_id_to_state[pipe_num];
3366 		if (!ce_state) {
3367 			A_TARGET_ACCESS_UNLIKELY(scn);
3368 			goto err;
3369 		}
3370 		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
3371 		QDF_ASSERT(pipe_info->ce_hdl);
3372 		if (!pipe_info->ce_hdl) {
3373 			rv = QDF_STATUS_E_FAILURE;
3374 			A_TARGET_ACCESS_UNLIKELY(scn);
3375 			goto err;
3376 		}
3377 
3378 		ce_state->lro_data = qdf_lro_init();
3379 
3380 		if (attr->flags & CE_ATTR_DIAG) {
3381 			/* Reserve the ultimate CE for
3382 			 * Diagnostic Window support
3383 			 */
3384 			hif_state->ce_diag = pipe_info->ce_hdl;
3385 			continue;
3386 		}
3387 
3388 		if (hif_is_nss_wifi_enabled(scn) && ce_state &&
3389 				(ce_state->htt_rx_data))
3390 			continue;
3391 
3392 		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
3393 		if (attr->dest_nentries > 0) {
3394 			atomic_set(&pipe_info->recv_bufs_needed,
3395 				   init_buffer_count(attr->dest_nentries - 1));
3396 			/*SRNG based CE has one entry less */
3397 			if (ce_srng_based(scn))
3398 				atomic_dec(&pipe_info->recv_bufs_needed);
3399 		} else {
3400 			atomic_set(&pipe_info->recv_bufs_needed, 0);
3401 		}
3402 		ce_tasklet_init(hif_state, (1 << pipe_num));
3403 		ce_register_irq(hif_state, (1 << pipe_num));
3404 	}
3405 
3406 	if (athdiag_procfs_init(scn) != 0) {
3407 		A_TARGET_ACCESS_UNLIKELY(scn);
3408 		goto err;
3409 	}
3410 	scn->athdiag_procfs_inited = true;
3411 
3412 	HIF_DBG("%s: ce_init done", __func__);
3413 
3414 	init_tasklet_workers(hif_hdl);
3415 
3416 	HIF_DBG("%s: X, ret = %d", __func__, rv);
3417 
3418 #ifdef ADRASTEA_SHADOW_REGISTERS
3419 	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
3420 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
3421 		HIF_DBG("%s Shadow Register%d is mapped to address %x",
3422 			  __func__, i,
3423 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
3424 	}
3425 #endif
3426 
3427 	return rv != QDF_STATUS_SUCCESS;
3428 
3429 err:
3430 	/* Failure, so clean up */
3431 	hif_unconfig_ce(scn);
3432 	HIF_TRACE("%s: X, ret = %d", __func__, rv);
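	/*
	 * Always returns a constant nonzero value here (the comparison below
	 * is always true), i.e. failure is reported regardless of rv.
	 */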
3433 	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
3434 }
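
/*
 * Illustrative sketch (not part of the driver): a bus-layer caller is
 * expected to treat any nonzero return from hif_config_ce() as fatal and
 * abort its own bring-up, e.g.
 *
 *	if (hif_config_ce(scn)) {
 *		HIF_ERROR("%s: CE configuration failed", __func__);
 *		return QDF_STATUS_E_FAILURE;
 *	}
 *
 * hif_unconfig_ce() is already invoked on the internal error path, so the
 * caller does not need to undo CE state for this particular failure.
 */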
3435 
3436 #ifdef IPA_OFFLOAD
3437 /**
3438  * hif_ce_ipa_get_ce_resource() - get uc resource on hif
3439  * @scn: bus context
3440  * @ce_sr: copyengine source ring shared memory info
3441  * @ce_sr_ring_size: copyengine source ring size
3442  * @ce_reg_paddr: copyengine register physical address
3443  *
3444  * When the IPA microcontroller data path offload feature is enabled,
3445  * HIF should release copy engine related resource information to the IPA UC,
3446  * which will then access the hardware resources using that information.
3447  *
3448  * Return: None
3449  */
3450 void hif_ce_ipa_get_ce_resource(struct hif_softc *scn,
3451 			     qdf_shared_mem_t **ce_sr,
3452 			     uint32_t *ce_sr_ring_size,
3453 			     qdf_dma_addr_t *ce_reg_paddr)
3454 {
3455 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3456 	struct HIF_CE_pipe_info *pipe_info =
3457 		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
3458 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3459 
3460 	ce_ipa_get_resource(ce_hdl, ce_sr, ce_sr_ring_size,
3461 			    ce_reg_paddr);
3462 }
3463 #endif /* IPA_OFFLOAD */
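
/*
 * Illustrative sketch (hypothetical IPA-side caller, not part of this file):
 * the IPA driver is expected to query the CE resources once at setup time
 * and hand them to the IPA UC, roughly:
 *
 *	qdf_shared_mem_t *ce_sr;
 *	uint32_t ce_sr_ring_size;
 *	qdf_dma_addr_t ce_reg_paddr;
 *
 *	hif_ce_ipa_get_ce_resource(scn, &ce_sr, &ce_sr_ring_size,
 *				   &ce_reg_paddr);
 *	// ce_sr and ce_reg_paddr describe the HIF_PCI_IPA_UC_ASSIGNED_CE
 *	// ring so the IPA UC can drive it without host CPU involvement.
 */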
3464 
3465 
3466 #ifdef ADRASTEA_SHADOW_REGISTERS
3467 
3468 /*
3469  * Current shadow register config
3470  *
3471  * -----------------------------------------------------------
3472  * Shadow Register      |     CE   |    src/dst write index
3473  * -----------------------------------------------------------
3474  *         0            |     0    |           src
3475  *         1     No Config - Doesn't point to anything
3476  *         2     No Config - Doesn't point to anything
3477  *         3            |     3    |           src
3478  *         4            |     4    |           src
3479  *         5            |     5    |           src
3480  *         6     No Config - Doesn't point to anything
3481  *         7            |     7    |           src
3482  *         8     No Config - Doesn't point to anything
3483  *         9     No Config - Doesn't point to anything
3484  *         10    No Config - Doesn't point to anything
3485  *         11    No Config - Doesn't point to anything
3486  * -----------------------------------------------------------
3487  *         12    No Config - Doesn't point to anything
3488  *         13           |     1    |           dst
3489  *         14           |     2    |           dst
3490  *         15    No Config - Doesn't point to anything
3491  *         16    No Config - Doesn't point to anything
3492  *         17    No Config - Doesn't point to anything
3493  *         18    No Config - Doesn't point to anything
3494  *         19           |     7    |           dst
3495  *         20           |     8    |           dst
3496  *         21    No Config - Doesn't point to anything
3497  *         22    No Config - Doesn't point to anything
3498  *         23    No Config - Doesn't point to anything
3499  * -----------------------------------------------------------
3500  *
3501  *
3502  * ToDo - Move shadow register config to following in the future
3503  * This helps free up a block of shadow registers towards the end.
3504  * Can be used for other purposes
3505  *
3506  * -----------------------------------------------------------
3507  * Shadow Register      |     CE   |    src/dst write index
3508  * -----------------------------------------------------------
3509  *      0            |     0    |           src
3510  *      1            |     3    |           src
3511  *      2            |     4    |           src
3512  *      3            |     5    |           src
3513  *      4            |     7    |           src
3514  * -----------------------------------------------------------
3515  *      5            |     1    |           dst
3516  *      6            |     2    |           dst
3517  *      7            |     7    |           dst
3518  *      8            |     8    |           dst
3519  * -----------------------------------------------------------
3520  *      9     No Config - Doesn't point to anything
3521  *      12    No Config - Doesn't point to anything
3522  *      13    No Config - Doesn't point to anything
3523  *      14    No Config - Doesn't point to anything
3524  *      15    No Config - Doesn't point to anything
3525  *      16    No Config - Doesn't point to anything
3526  *      17    No Config - Doesn't point to anything
3527  *      18    No Config - Doesn't point to anything
3528  *      19    No Config - Doesn't point to anything
3529  *      20    No Config - Doesn't point to anything
3530  *      21    No Config - Doesn't point to anything
3531  *      22    No Config - Doesn't point to anything
3532  *      23    No Config - Doesn't point to anything
3533  * -----------------------------------------------------------
3534 */
3535 #ifndef QCN7605_SUPPORT
3536 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3537 {
3538 	u32 addr = 0;
3539 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3540 
3541 	switch (ce) {
3542 	case 0:
3543 		addr = SHADOW_VALUE0;
3544 		break;
3545 	case 3:
3546 		addr = SHADOW_VALUE3;
3547 		break;
3548 	case 4:
3549 		addr = SHADOW_VALUE4;
3550 		break;
3551 	case 5:
3552 		addr = SHADOW_VALUE5;
3553 		break;
3554 	case 7:
3555 		addr = SHADOW_VALUE7;
3556 		break;
3557 	default:
3558 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3559 		QDF_ASSERT(0);
3560 	}
3561 	return addr;
3562 
3563 }
3564 
3565 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3566 {
3567 	u32 addr = 0;
3568 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3569 
3570 	switch (ce) {
3571 	case 1:
3572 		addr = SHADOW_VALUE13;
3573 		break;
3574 	case 2:
3575 		addr = SHADOW_VALUE14;
3576 		break;
3577 	case 5:
3578 		addr = SHADOW_VALUE17;
3579 		break;
3580 	case 7:
3581 		addr = SHADOW_VALUE19;
3582 		break;
3583 	case 8:
3584 		addr = SHADOW_VALUE20;
3585 		break;
3586 	case 9:
3587 		addr = SHADOW_VALUE21;
3588 		break;
3589 	case 10:
3590 		addr = SHADOW_VALUE22;
3591 		break;
3592 	case 11:
3593 		addr = SHADOW_VALUE23;
3594 		break;
3595 	default:
3596 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3597 		QDF_ASSERT(0);
3598 	}
3599 
3600 	return addr;
3601 
3602 }
3603 #else
3604 u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3605 {
3606 	u32 addr = 0;
3607 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3608 
3609 	switch (ce) {
3610 	case 0:
3611 		addr = SHADOW_VALUE0;
3612 		break;
3613 	case 4:
3614 		addr = SHADOW_VALUE4;
3615 		break;
3616 	case 5:
3617 		addr = SHADOW_VALUE5;
3618 		break;
3619 	default:
3620 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3621 		QDF_ASSERT(0);
3622 	}
3623 	return addr;
3624 }
3625 
3626 u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
3627 {
3628 	u32 addr = 0;
3629 	u32 ce = COPY_ENGINE_ID(ctrl_addr);
3630 
3631 	switch (ce) {
3632 	case 1:
3633 		addr = SHADOW_VALUE13;
3634 		break;
3635 	case 2:
3636 		addr = SHADOW_VALUE14;
3637 		break;
3638 	case 3:
3639 		addr = SHADOW_VALUE15;
3640 		break;
3641 	case 5:
3642 		addr = SHADOW_VALUE17;
3643 		break;
3644 	case 7:
3645 		addr = SHADOW_VALUE19;
3646 		break;
3647 	case 8:
3648 		addr = SHADOW_VALUE20;
3649 		break;
3650 	case 9:
3651 		addr = SHADOW_VALUE21;
3652 		break;
3653 	case 10:
3654 		addr = SHADOW_VALUE22;
3655 		break;
3656 	case 11:
3657 		addr = SHADOW_VALUE23;
3658 		break;
3659 	default:
3660 		HIF_ERROR("invalid CE ctrl_addr (CE=%d)", ce);
3661 		QDF_ASSERT(0);
3662 	}
3663 
3664 	return addr;
3665 }
3666 #endif
3667 #endif
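
/*
 * Illustrative sketch (the actual consumer lives in the CE service code, not
 * here): with ADRASTEA_SHADOW_REGISTERS enabled, write-index updates are
 * expected to go through the shadow address returned by the helpers above
 * rather than the per-CE register, conceptually:
 *
 *	u32 addr = shadow_sr_wr_ind_addr(scn, ctrl_addr);	// src ring
 *
 *	A_TARGET_WRITE(scn, addr, write_index);
 *
 * i.e. these helpers encode the CE-to-shadow-register mapping documented in
 * the table above.
 */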
3668 
3669 #if defined(FEATURE_LRO)
3670 void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id)
3671 {
3672 	struct CE_state *ce_state;
3673 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3674 
3675 	ce_state = scn->ce_id_to_state[ctx_id];
3676 
3677 	return ce_state->lro_data;
3678 }
3679 #endif
3680 
3681 /**
3682  * hif_map_service_to_pipe() - returns the ce ids pertaining to
3683  * this service
3684  * @hif_hdl: hif_opaque_softc pointer.
3685  * @svc_id: Service ID for which the mapping is needed.
3686  * @ul_pipe: address of the container in which ul pipe is returned.
3687  * @dl_pipe: address of the container in which dl pipe is returned.
3688  * @ul_is_polled: address of the container in which a bool
3689  *			indicating if the UL CE for this service
3690  *			is polled is returned.
3691  * @dl_is_polled: address of the container in which a bool
3692  *			indicating if the DL CE for this service
3693  *			is polled is returned.
3694  *
3695  * Return: Indicates whether the service has been found in the table.
3696  *         Upon return, ul_is_polled is updated only if ul_pipe is updated.
3697  *         There will be debug logs if either leg has not been updated
3698  *         because it missed the entry in the table (but this is not an error).
3699  */
3700 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
3701 			uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
3702 			int *dl_is_polled)
3703 {
3704 	int status = QDF_STATUS_E_INVAL;
3705 	unsigned int i;
3706 	struct service_to_pipe element;
3707 	struct service_to_pipe *tgt_svc_map_to_use;
3708 	uint32_t sz_tgt_svc_map_to_use;
3709 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
3710 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3711 	bool dl_updated = false;
3712 	bool ul_updated = false;
3713 
3714 	hif_select_service_to_pipe_map(scn, &tgt_svc_map_to_use,
3715 				       &sz_tgt_svc_map_to_use);
3716 
3717 	*dl_is_polled = 0;  /* polling for received messages not supported */
3718 
3719 	for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
3720 
3721 		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
3722 		if (element.service_id == svc_id) {
3723 			if (element.pipedir == PIPEDIR_OUT) {
3724 				*ul_pipe = element.pipenum;
3725 				*ul_is_polled =
3726 					(hif_state->host_ce_config[*ul_pipe].flags &
3727 					 CE_ATTR_DISABLE_INTR) != 0;
3728 				ul_updated = true;
3729 			} else if (element.pipedir == PIPEDIR_IN) {
3730 				*dl_pipe = element.pipenum;
3731 				dl_updated = true;
3732 			}
3733 			status = QDF_STATUS_SUCCESS;
3734 		}
3735 	}
3736 	if (ul_updated == false)
3737 		HIF_DBG("ul pipe is NOT updated for service %d", svc_id);
3738 	if (dl_updated == false)
3739 		HIF_DBG("dl pipe is NOT updated for service %d", svc_id);
3740 
3741 	return status;
3742 }
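
/*
 * Illustrative sketch (hypothetical caller; HTT_DATA_MSG_SVC is just an
 * example service ID): HTC-style users resolve a service to its UL/DL pipes
 * before queueing traffic, e.g.
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC,
 *				    &ul_pipe, &dl_pipe,
 *				    &ul_polled, &dl_polled))
 *		return QDF_STATUS_E_INVAL;	// service not in the map
 *
 *	// ul_pipe: host-to-target CE, dl_pipe: target-to-host CE
 */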
3743 
3744 #ifdef SHADOW_REG_DEBUG
3745 inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
3746 		uint32_t CE_ctrl_addr)
3747 {
3748 	uint32_t read_from_hw, srri_from_ddr = 0;
3749 
3750 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
3751 
3752 	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3753 
3754 	if (read_from_hw != srri_from_ddr) {
3755 		HIF_ERROR("%s: error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3756 		       __func__, srri_from_ddr, read_from_hw,
3757 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3758 		QDF_ASSERT(0);
3759 	}
3760 	return srri_from_ddr;
3761 }
3762 
3763 
3764 inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
3765 		uint32_t CE_ctrl_addr)
3766 {
3767 	uint32_t read_from_hw, drri_from_ddr = 0;
3768 
3769 	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
3770 
3771 	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
3772 
3773 	if (read_from_hw != drri_from_ddr) {
3774 		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x",
3775 		       drri_from_ddr, read_from_hw,
3776 		       CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
3777 		QDF_ASSERT(0);
3778 	}
3779 	return drri_from_ddr;
3780 }
3781 
3782 #endif
3783 
3784 /**
3785  * hif_dump_ce_registers() - dump ce registers
3786  * @scn: hif_softc pointer.
3787  *
3788  * Output the copy engine registers
3789  *
3790  * Return: 0 for success or error code
3791  */
3792 int hif_dump_ce_registers(struct hif_softc *scn)
3793 {
3794 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
3795 	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
3796 	uint32_t ce_reg_values[CE_USEFUL_SIZE >> 2];
3797 	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
3798 	uint16_t i;
3799 	QDF_STATUS status;
3800 
3801 	for (i = 0; i < scn->ce_count; i++, ce_reg_address += CE_OFFSET) {
3802 		if (!scn->ce_id_to_state[i]) {
3803 			HIF_DBG("CE%d not used.", i);
3804 			continue;
3805 		}
3806 
3807 		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
3808 					   (uint8_t *) &ce_reg_values[0],
3809 					   ce_reg_word_size * sizeof(uint32_t));
3810 
3811 		if (status != QDF_STATUS_SUCCESS) {
3812 			HIF_ERROR("Dumping CE register failed!");
3813 			return -EACCES;
3814 		}
3815 		HIF_ERROR("CE%d=>\n", i);
3816 		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
3817 				   (uint8_t *) &ce_reg_values[0],
3818 				   ce_reg_word_size * sizeof(uint32_t));
3819 		qdf_print("ADDR:[0x%08X], SR_WR_INDEX:%d", (ce_reg_address
3820 				+ SR_WR_INDEX_ADDRESS),
3821 				ce_reg_values[SR_WR_INDEX_ADDRESS/4]);
3822 		qdf_print("ADDR:[0x%08X], CURRENT_SRRI:%d", (ce_reg_address
3823 				+ CURRENT_SRRI_ADDRESS),
3824 				ce_reg_values[CURRENT_SRRI_ADDRESS/4]);
3825 		qdf_print("ADDR:[0x%08X], DST_WR_INDEX:%d", (ce_reg_address
3826 				+ DST_WR_INDEX_ADDRESS),
3827 				ce_reg_values[DST_WR_INDEX_ADDRESS/4]);
3828 		qdf_print("ADDR:[0x%08X], CURRENT_DRRI:%d", (ce_reg_address
3829 				+ CURRENT_DRRI_ADDRESS),
3830 				ce_reg_values[CURRENT_DRRI_ADDRESS/4]);
3831 		qdf_print("---");
3832 	}
3833 	return 0;
3834 }
3835 qdf_export_symbol(hif_dump_ce_registers);
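
/*
 * Illustrative (hypothetical use): typically invoked from debug or
 * error-recovery paths when a copy engine appears stuck, e.g.
 *
 *	if (hif_dump_ce_registers(scn))
 *		HIF_ERROR("%s: CE register dump failed", __func__);
 *
 * The dump reads target memory via hif_diag_read_mem(), so the diagnostic
 * window must still be usable for it to succeed.
 */
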
3836 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
3837 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
3838 		struct hif_pipe_addl_info *hif_info, uint32_t pipe)
3839 {
3840 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3841 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3842 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(osc);
3843 	struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
3844 	struct CE_handle *ce_hdl = pipe_info->ce_hdl;
3845 	struct CE_state *ce_state = (struct CE_state *)ce_hdl;
3846 	struct CE_ring_state *src_ring = ce_state->src_ring;
3847 	struct CE_ring_state *dest_ring = ce_state->dest_ring;
3848 
3849 	if (src_ring) {
3850 		hif_info->ul_pipe.nentries = src_ring->nentries;
3851 		hif_info->ul_pipe.nentries_mask = src_ring->nentries_mask;
3852 		hif_info->ul_pipe.sw_index = src_ring->sw_index;
3853 		hif_info->ul_pipe.write_index = src_ring->write_index;
3854 		hif_info->ul_pipe.hw_index = src_ring->hw_index;
3855 		hif_info->ul_pipe.base_addr_CE_space =
3856 			src_ring->base_addr_CE_space;
3857 		hif_info->ul_pipe.base_addr_owner_space =
3858 			src_ring->base_addr_owner_space;
3859 	}
3860 
3861 
3862 	if (dest_ring) {
3863 		hif_info->dl_pipe.nentries = dest_ring->nentries;
3864 		hif_info->dl_pipe.nentries_mask = dest_ring->nentries_mask;
3865 		hif_info->dl_pipe.sw_index = dest_ring->sw_index;
3866 		hif_info->dl_pipe.write_index = dest_ring->write_index;
3867 		hif_info->dl_pipe.hw_index = dest_ring->hw_index;
3868 		hif_info->dl_pipe.base_addr_CE_space =
3869 			dest_ring->base_addr_CE_space;
3870 		hif_info->dl_pipe.base_addr_owner_space =
3871 			dest_ring->base_addr_owner_space;
3872 	}
3873 
3874 	hif_info->pci_mem = pci_resource_start(sc->pdev, 0);
3875 	hif_info->ctrl_addr = ce_state->ctrl_addr;
3876 
3877 	return hif_info;
3878 }
3879 qdf_export_symbol(hif_get_addl_pipe_info);
3880 
3881 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc, uint32_t mode)
3882 {
3883 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3884 
3885 	scn->nss_wifi_ol_mode = mode;
3886 	return 0;
3887 }
3888 qdf_export_symbol(hif_set_nss_wifiol_mode);
3889 #endif
3890 
3891 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib)
3892 {
3893 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3894 	scn->hif_attribute = hif_attrib;
3895 }
3896 
3897 
3898 /* disable interrupts (only applicable to legacy copy engines currently) */
3899 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num)
3900 {
3901 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
3902 	struct CE_state *CE_state = scn->ce_id_to_state[pipe_num];
3903 	uint32_t ctrl_addr = CE_state->ctrl_addr;
3904 
3905 	Q_TARGET_ACCESS_BEGIN(scn);
3906 	CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
3907 	Q_TARGET_ACCESS_END(scn);
3908 }
3909 qdf_export_symbol(hif_disable_interrupt);
3910 
3911 /**
3912  * hif_fw_event_handler() - hif fw event handler
3913  * @hif_state: pointer to hif ce state structure
3914  *
3915  * Process fw events by raising the HTC fw event callback.
3916  *
3917  * Return: none
3918  */
3919 static inline void hif_fw_event_handler(struct HIF_CE_state *hif_state)
3920 {
3921 	struct hif_msg_callbacks *msg_callbacks =
3922 		&hif_state->msg_callbacks_current;
3923 
3924 	if (!msg_callbacks->fwEventHandler)
3925 		return;
3926 
3927 	msg_callbacks->fwEventHandler(msg_callbacks->Context,
3928 			QDF_STATUS_E_FAILURE);
3929 }
3930 
3931 #ifndef QCA_WIFI_3_0
3932 /**
3933  * hif_fw_interrupt_handler() - FW interrupt handler
3934  * @irq: irq number
3935  * @arg: the user pointer
3936  *
3937  * Called from the PCI interrupt handler when the Target raises a
3938  * firmware-generated interrupt to the Host.
3939  *
3940  * Only registered for legacy CE devices.
3941  *
3942  * Return: status of handled irq
3943  */
3944 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3945 {
3946 	struct hif_softc *scn = arg;
3947 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3948 	uint32_t fw_indicator_address, fw_indicator;
3949 
3950 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
3951 		return ATH_ISR_NOSCHED;
3952 
3953 	fw_indicator_address = hif_state->fw_indicator_address;
3954 	/* For sudden unplug this will return ~0 */
3955 	fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
3956 
3957 	if ((fw_indicator != ~0) && (fw_indicator & FW_IND_EVENT_PENDING)) {
3958 		/* ACK: clear Target-side pending event */
3959 		A_TARGET_WRITE(scn, fw_indicator_address,
3960 			       fw_indicator & ~FW_IND_EVENT_PENDING);
3961 		if (Q_TARGET_ACCESS_END(scn) < 0)
3962 			return ATH_ISR_SCHED;
3963 
3964 		if (hif_state->started) {
3965 			hif_fw_event_handler(hif_state);
3966 		} else {
3967 			/*
3968 			 * Probable Target failure before we're prepared
3969 			 * to handle it.  Generally unexpected.
3970 			 * fw_indicator used as bitmap, and defined as below:
3971 			 *     FW_IND_EVENT_PENDING    0x1
3972 			 *     FW_IND_INITIALIZED      0x2
3973 			 *     FW_IND_NEEDRECOVER      0x4
3974 			 */
3975 			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
3976 				("%s: Early firmware event indicated 0x%x\n",
3977 				 __func__, fw_indicator));
3978 		}
3979 	} else {
3980 		if (Q_TARGET_ACCESS_END(scn) < 0)
3981 			return ATH_ISR_SCHED;
3982 	}
3983 
3984 	return ATH_ISR_SCHED;
3985 }
3986 #else
3987 irqreturn_t hif_fw_interrupt_handler(int irq, void *arg)
3988 {
3989 	return ATH_ISR_SCHED;
3990 }
3991 #endif /* #ifndef QCA_WIFI_3_0 */
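
/*
 * Illustrative sketch of the fw_indicator bitmap test performed in
 * hif_fw_interrupt_handler() above (the helper name is hypothetical and not
 * part of this driver):
 *
 *	static inline bool fw_event_pending(uint32_t fw_indicator)
 *	{
 *		// ~0 means the register read failed, e.g. sudden unplug
 *		return (fw_indicator != (uint32_t)~0) &&
 *		       (fw_indicator & FW_IND_EVENT_PENDING);
 *	}
 */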
3992 
3993 
3994 /**
3995  * hif_wlan_disable(): call the platform driver to disable wlan
3996  * @scn: HIF Context
3997  *
3998  * This function passes the con_mode to the platform driver to disable
3999  * wlan.
4000  *
4001  * Return: void
4002  */
4003 void hif_wlan_disable(struct hif_softc *scn)
4004 {
4005 	enum pld_driver_mode mode;
4006 	uint32_t con_mode = hif_get_conparam(scn);
4007 
4008 	if (scn->target_status == TARGET_STATUS_RESET)
4009 		return;
4010 
4011 	if (QDF_GLOBAL_FTM_MODE == con_mode)
4012 		mode = PLD_FTM;
4013 	else if (QDF_IS_EPPING_ENABLED(con_mode))
4014 		mode = PLD_EPPING;
4015 	else
4016 		mode = PLD_MISSION;
4017 
4018 	pld_wlan_disable(scn->qdf_dev->dev, mode);
4019 }
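
/*
 * Illustrative (hypothetical caller sketch): a bus-layer power-down path is
 * expected to call this once CE traffic has stopped, e.g.
 *
 *	hif_wlan_disable(scn);	// no-op if target_status == TARGET_STATUS_RESET
 */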
4020 
4021 int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id)
4022 {
4023 	QDF_STATUS status;
4024 	uint8_t ul_pipe, dl_pipe;
4025 	int ul_is_polled, dl_is_polled;
4026 
4027 	/* DL pipe for HTC_CTRL_RSVD_SVC should map to the wake CE */
4028 	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
4029 					 HTC_CTRL_RSVD_SVC,
4030 					 &ul_pipe, &dl_pipe,
4031 					 &ul_is_polled, &dl_is_polled);
4032 	if (status) {
4033 		HIF_ERROR("%s: failed to map pipe: %d", __func__, status);
4034 		return qdf_status_to_os_return(status);
4035 	}
4036 
4037 	*ce_id = dl_pipe;
4038 
4039 	return 0;
4040 }
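
/*
 * Illustrative (hypothetical caller): used by bus code to decide which CE
 * interrupt should be armed as a wakeup source, e.g.
 *
 *	uint8_t wake_ce_id;
 *
 *	if (hif_get_wake_ce_id(scn, &wake_ce_id) == 0)
 *		HIF_DBG("wake CE is %d", wake_ce_id);
 */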
4041