/*
 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include <linux/of_pci.h>
#ifdef CONFIG_PCI_MSM
#include <linux/msm_pcie.h>
#endif
#include <linux/version.h>
#include "hif_io32.h"
#include "if_pci.h"
#include "hif.h"
#include "target_type.h"
#include "hif_main.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_bmi.h"
#include "regtable.h"
#include "hif_hw_version.h"
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "qdf_status.h"
#include "qdf_atomic.h"
#include "qdf_platform.h"
#include "pld_common.h"
#include "mp_dev.h"
#include "hif_debug.h"

#include "if_pci_internal.h"
#include "ce_tasklet.h"
#include "targaddrs.h"
#include "hif_exec.h"

#include "pci_api.h"
#include "ahb_api.h"
#include "wlan_cfg.h"

/* Maximum ms timeout for host to wake up target */
#define PCIE_WAKE_TIMEOUT 1000
#define RAMDUMP_EVENT_TIMEOUT 2500

/* Setting SOC_GLOBAL_RESET during driver unload causes an intermittent
 * PCIe data bus error.
 * As a workaround for this issue, change the reset sequence to use a
 * Target CPU warm reset instead of SOC_GLOBAL_RESET.
 */
#define CPU_WARM_RESET_WAR
#define WLAN_CFG_MAX_PCIE_GROUPS 2
#define WLAN_CFG_MAX_CE_COUNT 12

const char *dp_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_INT_NUM_CONTEXTS] = {
	{
		"pci0_wlan_grp_dp_0",
		"pci0_wlan_grp_dp_1",
		"pci0_wlan_grp_dp_2",
		"pci0_wlan_grp_dp_3",
		"pci0_wlan_grp_dp_4",
		"pci0_wlan_grp_dp_5",
		"pci0_wlan_grp_dp_6",
#if !defined(WLAN_MAX_PDEVS)
		"pci0_wlan_grp_dp_7",
		"pci0_wlan_grp_dp_8",
		"pci0_wlan_grp_dp_9",
		"pci0_wlan_grp_dp_10",
#endif
	},
	{
		"pci1_wlan_grp_dp_0",
		"pci1_wlan_grp_dp_1",
		"pci1_wlan_grp_dp_2",
		"pci1_wlan_grp_dp_3",
		"pci1_wlan_grp_dp_4",
		"pci1_wlan_grp_dp_5",
		"pci1_wlan_grp_dp_6",
#if !defined(WLAN_MAX_PDEVS)
		"pci1_wlan_grp_dp_7",
		"pci1_wlan_grp_dp_8",
		"pci1_wlan_grp_dp_9",
		"pci1_wlan_grp_dp_10",
#endif
	}
};

const char *ce_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_MAX_CE_COUNT] = {
	{
		"pci0_wlan_ce_0",
		"pci0_wlan_ce_1",
		"pci0_wlan_ce_2",
		"pci0_wlan_ce_3",
		"pci0_wlan_ce_4",
		"pci0_wlan_ce_5",
		"pci0_wlan_ce_6",
		"pci0_wlan_ce_7",
		"pci0_wlan_ce_8",
		"pci0_wlan_ce_9",
		"pci0_wlan_ce_10",
		"pci0_wlan_ce_11",
	},
	{
		"pci1_wlan_ce_0",
		"pci1_wlan_ce_1",
		"pci1_wlan_ce_2",
		"pci1_wlan_ce_3",
		"pci1_wlan_ce_4",
		"pci1_wlan_ce_5",
		"pci1_wlan_ce_6",
		"pci1_wlan_ce_7",
		"pci1_wlan_ce_8",
		"pci1_wlan_ce_9",
		"pci1_wlan_ce_10",
		"pci1_wlan_ce_11",
	}
};

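/**
 * hif_get_pci_slot() - get the PCI slot/domain id of the attached device
 * @scn: hif context
 *
 * Return: 0 for single-pdev builds and for all targets other than QCN9000;
 *         otherwise the PCIe domain number reported by the PCI core or the
 *         device tree (0 again if that number is out of range).
 */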
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static inline int hif_get_pci_slot(struct hif_softc *scn)
{
	/*
	 * If WLAN_MAX_PDEVS is defined as 1, always return pci slot 0
	 * since there is only one pci device attached.
	 */
	return 0;
}
#else
static inline int hif_get_pci_slot(struct hif_softc *scn)
{
	int pci_id;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	uint32_t target_type = tgt_info->target_type;
	struct device_node *mhi_node;
	struct device_node *pcierp_node;
	struct device_node *pcie_node;

	switch (target_type) {
	case TARGET_TYPE_QCN9000:
		/* of_node stored in qdf_dev points to the mhi node */
		mhi_node = scn->qdf_dev->dev->of_node;
		/*
		 * The PCIe id is stored in the main pci node, which is
		 * the second parent of mhi_node.
		 */
		pcierp_node = mhi_node->parent;
		pcie_node = pcierp_node->parent;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0))
		pci_id = pci_bus_find_domain_nr(NULL, scn->qdf_dev->dev);
#else
		pci_id = of_get_pci_domain_nr(pcie_node);
#endif
		if (pci_id < 0 || pci_id >= WLAN_CFG_MAX_PCIE_GROUPS) {
			HIF_ERROR("pci_id:%d is invalid", pci_id);
			QDF_ASSERT(0);
			return 0;
		}
		return pci_id;
	default:
		/* Send pci_id 0 for all other targets */
		return 0;
	}
}
#endif

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
struct ce_irq_reg_table {
	uint32_t irq_enable;
	uint32_t irq_status;
};

#ifndef QCA_WIFI_3_0_ADRASTEA
static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
}
#else
static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	unsigned int target_enable0, target_enable1;
	unsigned int target_cause0, target_cause1;

	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);

	if ((target_enable0 & target_cause0) ||
	    (target_enable1 & target_cause1)) {
		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);

		if (scn->notice_send)
			pld_intr_notify_q6(sc->dev);
	}
}
#endif

/**
 * pci_dispatch_interrupt() - dispatch pending CE interrupts
 * @scn: hif context
 *
 * Reads the CE interrupt summary and dispatches each pending copy
 * engine interrupt to its tasklet; if nothing is pending, re-enables
 * the host group interrupts.
 *
 * Return: N/A
 */
static void pci_dispatch_interrupt(struct hif_softc *scn)
{
	uint32_t intr_summary;
	int id;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done != true)
		return;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(scn);

	if (intr_summary == 0) {
		if ((scn->target_status != TARGET_STATUS_RESET) &&
			(!qdf_atomic_read(&scn->link_suspended))) {

			hif_write32_mb(scn, scn->mem +
				(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				HOST_GROUP0_MASK);

			hif_read32_mb(scn, scn->mem +
					(SOC_CORE_BASE_ADDRESS |
					PCIE_INTR_ENABLE_ADDRESS));
		}
		Q_TARGET_ACCESS_END(scn);
		return;
	}
	Q_TARGET_ACCESS_END(scn);

	scn->ce_irq_summary = intr_summary;
	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
		if (intr_summary & (1 << id)) {
			intr_summary &= ~(1 << id);
			ce_dispatch_interrupt(id, &hif_state->tasklets[id]);
		}
	}
}

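/**
 * hif_pci_legacy_ce_interrupt_handler() - irq handler for legacy interrupts
 * @irq: irq number
 * @arg: hif pci context
 *
 * Top-level handler for PCI legacy (line) interrupts: acknowledges and
 * clears the interrupt, detects a pending firmware (SSR) event via the
 * FW indicator, and either schedules the wlan tasklet or dispatches the
 * copy engine interrupts directly.
 *
 * Return: IRQ_HANDLED
 */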
irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
{
	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);

	volatile int tmp;
	uint16_t val = 0;
	uint32_t bar0 = 0;
	uint32_t fw_indicator_address, fw_indicator;
	bool ssr_irq = false;
	unsigned int host_cause, host_enable;

	if (LEGACY_INTERRUPTS(sc)) {
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return IRQ_HANDLED;

		if (ADRASTEA_BU) {
			host_enable = hif_read32_mb(sc, sc->mem +
						    PCIE_INTR_ENABLE_ADDRESS);
			host_cause = hif_read32_mb(sc, sc->mem +
						   PCIE_INTR_CAUSE_ADDRESS);
			if (!(host_enable & host_cause)) {
				hif_pci_route_adrastea_interrupt(sc);
				return IRQ_HANDLED;
			}
		}

		/* Clear Legacy PCI line interrupts
		 * IMPORTANT: INTR_CLR register has to be set
		 * after INTR_ENABLE is set to 0,
		 * otherwise the interrupt cannot actually be cleared
		 */
		hif_write32_mb(sc, sc->mem +
			      (SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_ENABLE_ADDRESS), 0);

		hif_write32_mb(sc, sc->mem +
			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
			       ADRASTEA_BU ?
			       (host_enable & host_cause) :
			      HOST_GROUP0_MASK);

		if (ADRASTEA_BU)
			hif_write32_mb(sc, sc->mem + 0x2f100c,
				       (host_cause >> 1));

		/* IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer
		 */
		if (!ADRASTEA_BU) {
			tmp = hif_read32_mb(sc, sc->mem +
					    (SOC_CORE_BASE_ADDRESS |
					     PCIE_INTR_ENABLE_ADDRESS));

			if (tmp == 0xdeadbeef) {
				HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
					  __func__);

				pci_read_config_word(sc->pdev, PCI_VENDOR_ID,
						     &val);
				HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
					  __func__, val);

				pci_read_config_word(sc->pdev, PCI_DEVICE_ID,
						     &val);
				HIF_ERROR("%s: PCI Device ID = 0x%04x",
					  __func__, val);

				pci_read_config_word(sc->pdev, PCI_COMMAND,
						     &val);
				HIF_ERROR("%s: PCI Command = 0x%04x",
					  __func__, val);

				pci_read_config_word(sc->pdev, PCI_STATUS,
						     &val);
				HIF_ERROR("%s: PCI Status = 0x%04x",
					  __func__, val);

				pci_read_config_dword(sc->pdev,
						      PCI_BASE_ADDRESS_0,
						      &bar0);
				HIF_ERROR("%s: PCI BAR0 = 0x%08x",
					  __func__, bar0);

				HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem +
							PCIE_LOCAL_BASE_ADDRESS
							+ RTC_STATE_ADDRESS));
				HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem +
							PCIE_LOCAL_BASE_ADDRESS
							+ PCIE_SOC_WAKE_ADDRESS));
				HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem + 0x80008),
					  hif_read32_mb(sc, sc->mem + 0x8000c));
				HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem + 0x80010),
					  hif_read32_mb(sc, sc->mem + 0x80014));
				HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem + 0x80018),
					  hif_read32_mb(sc, sc->mem + 0x8001c));
				QDF_BUG(0);
			}

			PCI_CLR_CAUSE0_REGISTER(sc);
		}

		if (HAS_FW_INDICATOR) {
			fw_indicator_address = hif_state->fw_indicator_address;
			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
			if ((fw_indicator != ~0) &&
			   (fw_indicator & FW_IND_EVENT_PENDING))
				ssr_irq = true;
		}

		if (Q_TARGET_ACCESS_END(scn) < 0)
			return IRQ_HANDLED;
	}
	/* TBDXXX: Add support for WMAC */

	if (ssr_irq) {
		sc->irq_event = irq;
		qdf_atomic_set(&scn->tasklet_from_intr, 1);

		qdf_atomic_inc(&scn->active_tasklet_cnt);
		tasklet_schedule(&sc->intr_tq);
	} else {
		pci_dispatch_interrupt(scn);
	}

	return IRQ_HANDLED;
}

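/**
 * hif_pci_targ_is_present() - check whether the target is present
 * @scn: hif context
 * @mem: mapped device memory
 *
 * Return: currently always true (see the FIX THIS note below)
 */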
bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
{
	return true;            /* FIX THIS */
}

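/**
 * hif_get_irq_num() - copy the assigned irq numbers into a caller buffer
 * @scn: hif context
 * @irq: buffer to fill with irq numbers
 * @size: number of entries available in @irq
 *
 * Return: the number of irqs copied on success; -EINVAL if @irq or @size
 *         is invalid or the buffer is too small for all MSI interrupts.
 */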
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	int i = 0;

	if (!irq || !size)
		return -EINVAL;

	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
		irq[0] = sc->irq;
		return 1;
	}

	if (sc->num_msi_intrs > size) {
		qdf_print("Not enough space in irq buffer to return irqs");
		return -EINVAL;
	}

	for (i = 0; i < sc->num_msi_intrs; i++)
		irq[i] = sc->irq + i + MSI_ASSIGN_CE_INITIAL;

	return sc->num_msi_intrs;
}

/**
 * hif_pci_cancel_deferred_target_sleep() - cancel a deferred target sleep
 * @scn: hif_softc
 *
 * Return: void
 */
#if CONFIG_ATH_PCIE_MAX_PERF == 0
void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	A_target_id_t pci_addr = scn->mem;

	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	/*
	 * If the deferred sleep timer is running, cancel it
	 * and put the soc into sleep.
	 */
	if (hif_state->fake_sleep == true) {
		qdf_timer_stop(&hif_state->sleep_timer);
		if (hif_state->verified_awake == false) {
			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
				      PCIE_SOC_WAKE_ADDRESS,
				      PCIE_SOC_WAKE_RESET);
		}
		hif_state->fake_sleep = false;
	}
	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
#else
inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
}
#endif

#define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
	hif_read32_mb(sc, (char *)(mem) + \
	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))

#define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
	hif_write32_mb(sc, ((char *)(mem) + \
	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))

#ifdef QCA_WIFI_3_0
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @hif_ctx: hif context
 * @mem: mapped device memory
 *
 * Emulation never goes to sleep.
 *
 * Return: true if target is awake
 */
static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
{
	return true;
}
#else
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @scn: hif context
 * @mem: mapped device memory
 *
 * Return: true if the target's clocks are on
 */
static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
{
	uint32_t val;

	if (scn->recovery)
		return false;
	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
		+ RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
}
#endif

#define ATH_PCI_RESET_WAIT_MAX 10       /* ms */
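/**
 * hif_pci_device_reset() - perform a cold reset of the target
 * @sc: hif pci context
 *
 * Wakes the target, asserts SOC_GLOBAL_RESET (putting the target,
 * including PCIe, into reset), deasserts it again, and finally allows
 * the target to go back to sleep, polling RTC_STATE between steps.
 */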
static void hif_pci_device_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (!scn->hostdef)
		return;

	/* NB: Don't check resetok here.  This form of reset
	 * is integral to correct operation.
	 */

	if (!SOC_GLOBAL_RESET_ADDRESS)
		return;

	if (!mem)
		return;

	HIF_ERROR("%s: Reset Device", __func__);

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;

		qdf_mdelay(1);
	}

	/* Put Target, including PCIe, into RESET. */
	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;

		qdf_mdelay(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;

		qdf_mdelay(1);
	}

	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_RESET);
}

/* CPU warm reset function
 * Steps:
 * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
 * 2. Clear the FW_INDICATOR_ADDRESS - so the Target CPU initializes FW
 *    correctly on WARM reset
 * 3. Clear TARGET CPU LF timer interrupt
 * 4. Reset all CEs to clear any pending CE transactions
 * 5. Warm reset CPU
 */
static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	uint32_t fw_indicator;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	/* NB: Don't check resetok here.  This form of reset is
	 * integral to correct operation.
	 */

	if (!mem)
		return;

	HIF_INFO_MED("%s: Target Warm Reset", __func__);

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;
		qdf_mdelay(1);
	}

	/*
	 * Disable pending interrupts
	 */
	val = hif_read32_mb(sc, mem +
			    (SOC_CORE_BASE_ADDRESS |
			     PCIE_INTR_CAUSE_ADDRESS));
	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
		    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
	/* Target CPU Intr Cause */
	val = hif_read32_mb(sc, mem +
			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);

	val = hif_read32_mb(sc, mem +
			    (SOC_CORE_BASE_ADDRESS |
			     PCIE_INTR_ENABLE_ADDRESS));
	hif_write32_mb(sc, (mem +
		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
	hif_write32_mb(sc, (mem +
		       (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
		       HOST_GROUP0_MASK);

	qdf_mdelay(100);

	/* Clear FW_INDICATOR_ADDRESS */
	if (HAS_FW_INDICATOR) {
		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
	}

	/* Clear Target LF Timer interrupts */
	val = hif_read32_mb(sc, mem +
			    (RTC_SOC_BASE_ADDRESS +
			     SOC_LF_TIMER_CONTROL0_ADDRESS));
	HIF_INFO_MED("%s: addr 0x%x :  0x%x", __func__,
	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
	hif_write32_mb(sc, mem +
		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
		      val);

	/* Reset CE */
	val = hif_read32_mb(sc, mem +
			    (RTC_SOC_BASE_ADDRESS |
			     SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb(sc, (mem +
		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
		      val);
	val = hif_read32_mb(sc, mem +
			    (RTC_SOC_BASE_ADDRESS |
			     SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* CE unreset */
	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
		       SOC_RESET_CONTROL_ADDRESS), val);
	val = hif_read32_mb(sc, mem +
			    (RTC_SOC_BASE_ADDRESS |
			     SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* Read Target CPU Intr Cause */
	val = hif_read32_mb(sc, mem +
			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
		    __func__, val);

	/* CPU warm RESET */
	val = hif_read32_mb(sc, mem +
			    (RTC_SOC_BASE_ADDRESS |
			     SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
		       SOC_RESET_CONTROL_ADDRESS), val);
	val = hif_read32_mb(sc, mem +
			    (RTC_SOC_BASE_ADDRESS |
			     SOC_RESET_CONTROL_ADDRESS));
	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
		    __func__, val);

	qdf_mdelay(100);
	HIF_INFO_MED("%s: Target Warm reset complete", __func__);
}

#ifndef QCA_WIFI_3_0
/* only applicable to legacy ce */
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem = sc->mem;
	uint32_t val;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;
	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
	if (Q_TARGET_ACCESS_END(scn) < 0)
		return ATH_ISR_SCHED;

	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);

	if (val & FW_IND_HELPER)
		return 0;

	return 1;
}
#endif

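/**
 * hif_check_soc_status() - check whether the SoC is reachable over PCIe
 * @hif_ctx: hif context
 *
 * Verifies the PCIe link by reading the device ID from config space,
 * checks BAR/memory access via a PCIe local register read, and tries to
 * wake the target, polling for up to PCIE_WAKE_TIMEOUT ms.
 *
 * Return: 0 on success, -EACCES if the link is down or the target
 *         cannot be woken.
 */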
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint16_t device_id = 0;
	uint32_t val;
	uint16_t timeout_count = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	/* Check device ID from PCIe configuration space for link status */
	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != sc->devid) {
		HIF_ERROR("%s: device ID does not match (read 0x%x, expect 0x%x)",
			  __func__, device_id, sc->devid);
		return -EACCES;
	}

	/* Check PCIe local register for bar/memory access */
	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
			   RTC_STATE_ADDRESS);
	HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);

	/* Try to wake up the target if it sleeps */
	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
	HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
		hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		PCIE_SOC_WAKE_ADDRESS));

	/* Check if the target can be woken up */
	while (!hif_targ_is_awake(scn, sc->mem)) {
		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
			HIF_ERROR("%s: wake up timeout, %08x, %08x",
				__func__,
				hif_read32_mb(sc, sc->mem +
					     PCIE_LOCAL_BASE_ADDRESS +
					     RTC_STATE_ADDRESS),
				hif_read32_mb(sc, sc->mem +
					     PCIE_LOCAL_BASE_ADDRESS +
					PCIE_SOC_WAKE_ADDRESS));
			return -EACCES;
		}

		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);

		qdf_mdelay(100);
		timeout_count += 100;
	}

	/* Check Power register for SoC internal bus issues */
	val = hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
			    SOC_POWER_REG_OFFSET);
	HIF_INFO_MED("%s: Power register is %08x", __func__, val);

	return 0;
}

/**
 * __hif_pci_dump_registers(): dump other PCI debug registers
 * @scn: struct hif_softc
 *
 * This function dumps pci debug registers.  The parent function
 * dumps the copy engine registers before calling this function.
 *
 * Return: void
 */
static void __hif_pci_dump_registers(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem = sc->mem;
	uint32_t val, i, j;
	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
	uint32_t ce_base;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	/* DEBUG_INPUT_SEL_SRC = 0x6 */
	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    WLAN_DEBUG_INPUT_SEL_OFFSET);
	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);

	/* DEBUG_CONTROL_ENABLE = 0x1 */
	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			   WLAN_DEBUG_CONTROL_OFFSET);
	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
		      WLAN_DEBUG_CONTROL_OFFSET, val);

	HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    WLAN_DEBUG_INPUT_SEL_OFFSET),
	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    WLAN_DEBUG_CONTROL_OFFSET));

	HIF_INFO_MED("%s: Debug CE", __func__);
	/* Loop CE debug output */
	/* AMBA_DEBUG_BUS_SEL = 0xc */
	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    AMBA_DEBUG_BUS_OFFSET);
	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
		       val);

	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
				   CE_WRAPPER_DEBUG_OFFSET);
		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
			      CE_WRAPPER_DEBUG_OFFSET, val);

		HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
			    __func__, wrapper_idx[i],
			    hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				AMBA_DEBUG_BUS_OFFSET),
			    hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
				CE_WRAPPER_DEBUG_OFFSET));

		if (wrapper_idx[i] <= 7) {
			for (j = 0; j <= 5; j++) {
				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
				/* For (j=0~5) write CE_DEBUG_SEL = j */
				val = hif_read32_mb(sc, mem + ce_base +
						    CE_DEBUG_OFFSET);
				val &= ~CE_DEBUG_SEL_MASK;
				val |= CE_DEBUG_SEL_SET(j);
				hif_write32_mb(sc, mem + ce_base +
					       CE_DEBUG_OFFSET, val);

				/* read (@gpio_athr_wlan_reg)
				 * WLAN_DEBUG_OUT_DATA
				 */
				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
						    + WLAN_DEBUG_OUT_OFFSET);
				val = WLAN_DEBUG_OUT_DATA_GET(val);

				HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
					    __func__, j,
					    hif_read32_mb(sc, mem + ce_base +
						    CE_DEBUG_OFFSET), val);
			}
		} else {
			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
			val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
					    WLAN_DEBUG_OUT_OFFSET);
			val = WLAN_DEBUG_OUT_DATA_GET(val);

			HIF_INFO_MED("%s: out: %x", __func__, val);
		}
	}

	HIF_INFO_MED("%s: Debug PCIe:", __func__);
	/* Loop PCIe debug output */
	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    AMBA_DEBUG_BUS_OFFSET);
	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
		       AMBA_DEBUG_BUS_OFFSET, val);

	for (i = 0; i <= 8; i++) {
		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
		val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				    AMBA_DEBUG_BUS_OFFSET);
		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
			       AMBA_DEBUG_BUS_OFFSET, val);

		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
		val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				    WLAN_DEBUG_OUT_OFFSET);
		val = WLAN_DEBUG_OUT_DATA_GET(val);

		HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				    WLAN_DEBUG_OUT_OFFSET), val,
		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				    WLAN_DEBUG_OUT_OFFSET));
	}

	Q_TARGET_ACCESS_END(scn);
}

/**
 * hif_pci_dump_registers(): dump bus debug registers
 * @hif_ctx: struct hif_softc
 *
 * This function dumps hif bus debug registers
 *
 * Return: 0 for success or error code
 */
int hif_pci_dump_registers(struct hif_softc *hif_ctx)
{
	int status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	status = hif_dump_ce_registers(scn);

	if (status)
		HIF_ERROR("%s: Dump CE Registers Failed", __func__);

	/* dump non copy engine pci registers */
	__hif_pci_dump_registers(scn);

	return 0;
}

#ifdef HIF_CONFIG_SLUB_DEBUG_ON

/* worker thread to schedule wlan_tasklet in SLUB debug build */
static void reschedule_tasklet_work_handler(void *arg)
{
	struct hif_pci_softc *sc = arg;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (!scn) {
		HIF_ERROR("%s: hif_softc is NULL", __func__);
		return;
	}

	if (scn->hif_init_done == false) {
		HIF_ERROR("%s: wlan driver is unloaded", __func__);
		return;
	}

	tasklet_schedule(&sc->intr_tq);
}

/**
 * hif_init_reschedule_tasklet_work() - initialize the reschedule tasklet
 *	work
 * @sc: HIF PCI Context
 *
 * Return: void
 */
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
{
	qdf_create_work(0, &sc->reschedule_tasklet_work,
				reschedule_tasklet_work_handler, NULL);
}
#else
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
#endif /* HIF_CONFIG_SLUB_DEBUG_ON */

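/**
 * wlan_tasklet() - tasklet servicing firmware interrupts
 * @data: hif pci context
 *
 * Skips processing while the driver is still initializing or the link
 * is suspended; otherwise runs the firmware interrupt handler (on non
 * Adrastea parts) and clears the tasklet accounting on exit.
 */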
void wlan_tasklet(unsigned long data)
{
	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (scn->hif_init_done == false)
		goto end;

	if (qdf_atomic_read(&scn->link_suspended))
		goto end;

	if (!ADRASTEA_BU) {
		hif_fw_interrupt_handler(sc->irq_event, scn);
		if (scn->target_status == TARGET_STATUS_RESET)
			goto end;
	}

end:
	qdf_atomic_set(&scn->tasklet_from_intr, 0);
	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

/**
 * hif_disable_power_gating() - disable HW power gating
 * @hif_ctx: hif context
 *
 * Disables PCIe L1 power states.
 */
static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!scn) {
		HIF_ERROR("%s: Could not disable ASPM, scn is null",
		       __func__);
		return;
	}

	/* Disable ASPM when pkt log is enabled */
	pfrm_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
	pfrm_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
}

/**
 * hif_enable_power_gating() - enable HW power gating
 * @sc: hif pci context
 *
 * Enables PCIe L1 power states.
 */
static void hif_enable_power_gating(struct hif_pci_softc *sc)
{
	if (!sc) {
		HIF_ERROR("%s: Could not re-enable ASPM, sc is null",
		       __func__);
		return;
	}

	/* Re-enable ASPM after firmware/OTP download is complete */
	pfrm_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
}

/**
 * hif_pci_enable_power_management() - enable power management
 * @hif_sc: hif context
 * @is_packet_log_enabled: true if packet log is enabled
 *
 * Enables runtime pm, ASPM (PCIe, via hif_enable_power_gating) and
 * re-enables soc sleep after driver load (hif_pci_target_sleep_state_adjust).
 *
 * note: epping mode does not call this function as it does not
 *       care about saving power.
 */
void hif_pci_enable_power_management(struct hif_softc *hif_sc,
				 bool is_packet_log_enabled)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
	uint32_t mode;

	if (!pci_ctx) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}

	mode = hif_get_conparam(hif_sc);
	if (mode == QDF_GLOBAL_FTM_MODE) {
		HIF_INFO("%s: Enable power gating for FTM mode", __func__);
		hif_enable_power_gating(pci_ctx);
		return;
	}

	hif_pm_runtime_start(hif_sc);

	if (!is_packet_log_enabled)
		hif_enable_power_gating(pci_ctx);

	if (!CONFIG_ATH_PCIE_MAX_PERF &&
	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
	    !ce_srng_based(hif_sc)) {
		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
			HIF_ERROR("%s, failed to set target to sleep",
				  __func__);
	}
}

/**
 * hif_pci_disable_power_management() - disable power management
 * @hif_ctx: hif context
 *
 * Currently disables runtime pm. Should be updated to behave gracefully
 * if runtime pm is not started, and to take care of aspm and soc sleep
 * for driver load.
 */
void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!pci_ctx) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}

	hif_pm_runtime_stop(hif_ctx);
}

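/**
 * hif_pci_display_stats() - print copy engine and PCI bus statistics
 * @hif_ctx: hif context
 */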
void hif_pci_display_stats(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!pci_ctx) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}
	hif_display_ce_stats(hif_ctx);

	hif_print_pci_stats(pci_ctx);
}

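/**
 * hif_pci_clear_stats() - reset the copy engine statistics
 * @hif_ctx: hif context
 */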
void hif_pci_clear_stats(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!pci_ctx) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}
	hif_clear_ce_stats(&pci_ctx->ce_sc);
}

#define ATH_PCI_PROBE_RETRY_MAX 3
/**
 * hif_pci_open(): hif bus open
 * @hif_ctx: hif context
 * @bus_type: bus type
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	hif_ctx->bus_type = bus_type;
	hif_pm_runtime_open(hif_ctx);

	qdf_spinlock_create(&sc->irq_lock);

	return hif_ce_open(hif_ctx);
}

/**
 * hif_wake_target_cpu() - wake the target's cpu
 * @scn: hif context
 *
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static void hif_wake_target_cpu(struct hif_softc *scn)
{
	QDF_STATUS rv;
	uint32_t core_ctrl;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	rv = hif_diag_read_access(hif_hdl,
				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				  &core_ctrl);
	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	rv = hif_diag_write_access(hif_hdl,
				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				   core_ctrl);
	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
}

/**
 * soc_wake_reset() - allow the target to go to sleep
 * @scn: hif_softc
 *
 * Clear the force wake register.  This is done from
 * hif_sleep_entry and when a deferred timer sleep is cancelled.
 */
static void soc_wake_reset(struct hif_softc *scn)
{
	hif_write32_mb(scn, scn->mem +
		PCIE_LOCAL_BASE_ADDRESS +
		PCIE_SOC_WAKE_ADDRESS,
		PCIE_SOC_WAKE_RESET);
}

/**
 * hif_sleep_entry() - gate target sleep
 * @arg: hif context
 *
 * This function is the callback for the sleep timer.
 * Check if the last force awake critical section was at least
 * HIF_MIN_SLEEP_INACTIVITY_TIME_MS ago.  If it was,
 * allow the target to go to sleep and cancel the sleep timer.
 * Otherwise reschedule the sleep timer.
 */
static void hif_sleep_entry(void *arg)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	uint32_t idle_ms;

	if (scn->recovery)
		return;

	if (hif_is_driver_unloading(scn))
		return;

	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	if (hif_state->fake_sleep) {
		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
						    - hif_state->sleep_ticks);
		if (!hif_state->verified_awake &&
		    idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
			if (!qdf_atomic_read(&scn->link_suspended)) {
				soc_wake_reset(scn);
				hif_state->fake_sleep = false;
			}
		} else {
			qdf_timer_stop(&hif_state->sleep_timer);
			qdf_timer_start(&hif_state->sleep_timer,
					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
	}
	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}

#define HIF_HIA_MAX_POLL_LOOP    1000000
#define HIF_HIA_POLLING_DELAY_MS 10

#ifdef QCA_HIF_HIA_EXTND

static void hif_set_hia_extnd(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	uint32_t target_type = tgt_info->target_type;

	HIF_TRACE("%s: E", __func__);

	if ((target_type == TARGET_TYPE_AR900B) ||
			target_type == TARGET_TYPE_QCA9984 ||
			target_type == TARGET_TYPE_QCA9888) {
		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
		 * in RTC space
		 */
		tgt_info->target_revision
			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
					+ CHIP_ID_ADDRESS));
		qdf_print("chip_id 0x%x chip_revision 0x%x",
			  target_type, tgt_info->target_revision);
	}

	{
		uint32_t flag2_value = 0;
		uint32_t flag2_targ_addr =
			host_interest_item_address(target_type,
			offsetof(struct host_interest_s, hi_skip_clock_init));

		if ((ar900b_20_targ_clk != -1) &&
			(frac != -1) && (intval != -1)) {
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
				&flag2_value);
			qdf_print("\n Setting clk_override");
			flag2_value |= CLOCK_OVERRIDE;

			hif_diag_write_access(hif_hdl, flag2_targ_addr,
					flag2_value);
			qdf_print("\n CLOCK PLL val set %d", flag2_value);
		} else {
			qdf_print("\n CLOCK PLL skipped");
		}
	}

	if (target_type == TARGET_TYPE_AR900B
			|| target_type == TARGET_TYPE_QCA9984
			|| target_type == TARGET_TYPE_QCA9888) {

		/* for AR900B 2.0, a 300 MHz clock is used; right now we
		 * assume this would be supplied through module parameters;
		 * if not supplied, assume the default or the same behavior
		 * as 1.0. Assume the 1.0 clock can't be tuned, reset to
		 * defaults
		 */

		qdf_print(KERN_INFO
			  "%s: setting the target pll frac %x intval %x",
			  __func__, frac, intval);

		/* do not touch frac, and int val, let them be default -1,
		 * if desired, host can supply these through module params
		 */
		if (frac != -1 || intval != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr;

			flag2_targ_addr =
				host_interest_item_address(target_type,
				offsetof(struct host_interest_s,
					hi_clock_info));
			hif_diag_read_access(hif_hdl,
				flag2_targ_addr, &flag2_value);
			qdf_print("\n ====> FRAC Val %x Address %x", frac,
				  flag2_value);
			hif_diag_write_access(hif_hdl, flag2_value, frac);
			qdf_print("\n INT Val %x  Address %x",
				  intval, flag2_value + 4);
			hif_diag_write_access(hif_hdl,
					flag2_value + 4, intval);
		} else {
			qdf_print(KERN_INFO
				  "%s: no frac provided, skipping pre-configuring PLL",
				  __func__);
		}

		/* for 2.0 write 300 MHz into hi_desired_cpu_speed_hz */
		if ((target_type == TARGET_TYPE_AR900B)
			&& (tgt_info->target_revision == AR900B_REV_2)
			&& ar900b_20_targ_clk != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr;

			flag2_targ_addr
				= host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
					hi_desired_cpu_speed_hz));
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
							&flag2_value);
			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
				  flag2_value);
			hif_diag_write_access(hif_hdl, flag2_value,
				ar900b_20_targ_clk/*300000000u*/);
		} else if (target_type == TARGET_TYPE_QCA9888) {
			uint32_t flag2_targ_addr;

			if (200000000u != qca9888_20_targ_clk) {
				qca9888_20_targ_clk = 300000000u;
				/* Setting the target clock speed to 300 MHz */
			}

			flag2_targ_addr
				= host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
					hi_desired_cpu_speed_hz));
			hif_diag_write_access(hif_hdl, flag2_targ_addr,
				qca9888_20_targ_clk);
		} else {
			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
				  __func__);
		}
	} else {
		if (frac != -1 || intval != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr =
				host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
							hi_clock_info));
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
						&flag2_value);
			qdf_print("\n ====> FRAC Val %x Address %x", frac,
				  flag2_value);
			hif_diag_write_access(hif_hdl, flag2_value, frac);
			qdf_print("\n INT Val %x  Address %x", intval,
				  flag2_value + 4);
			hif_diag_write_access(hif_hdl, flag2_value + 4,
					      intval);
		}
	}
}

#else

static void hif_set_hia_extnd(struct hif_softc *scn)
{
}

#endif

/**
 * hif_set_hia() - fill out the host interest area
 * @scn: hif context
 *
 * This is replaced by hif_wlan_enable for integrated targets.
 * This fills out the host interest area.  The firmware will
 * process these memory addresses when it is first brought out
 * of reset.
 *
 * Return: 0 for success.
 */
static int hif_set_hia(struct hif_softc *scn)
{
	QDF_STATUS rv;
	uint32_t interconnect_targ_addr = 0;
	uint32_t pcie_state_targ_addr = 0;
	uint32_t pipe_cfg_targ_addr = 0;
	uint32_t svc_to_pipe_map = 0;
	uint32_t pcie_config_flags = 0;
	uint32_t flag2_value = 0;
	uint32_t flag2_targ_addr = 0;
#ifdef QCA_WIFI_3_0
	uint32_t host_interest_area = 0;
	uint8_t i;
#else
	uint32_t ealloc_value = 0;
	uint32_t ealloc_targ_addr = 0;
	uint8_t banks_switched = 1;
	uint32_t chip_id;
#endif
	uint32_t pipe_cfg_addr;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	uint32_t target_type = tgt_info->target_type;
	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
	static struct CE_pipe_config *target_ce_config;
	struct service_to_pipe *target_service_to_ce_map;

	HIF_TRACE("%s: E", __func__);

	hif_get_target_ce_config(scn,
				 &target_ce_config, &target_ce_config_sz,
				 &target_service_to_ce_map,
				 &target_service_to_ce_map_sz,
				 NULL, NULL);

	if (ADRASTEA_BU)
		return QDF_STATUS_SUCCESS;

#ifdef QCA_WIFI_3_0
	i = 0;
	while (i < HIF_HIA_MAX_POLL_LOOP) {
		host_interest_area = hif_read32_mb(scn, scn->mem +
						A_SOC_CORE_SCRATCH_0_ADDRESS);
		if ((host_interest_area & 0x01) == 0) {
			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
			host_interest_area = 0;
			i++;
			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
				HIF_ERROR("%s: poll timeout(%d)", __func__, i);
		} else {
			host_interest_area &= (~0x01);
			hif_write32_mb(scn, scn->mem + 0x113014, 0);
			break;
		}
	}

	if (i >= HIF_HIA_MAX_POLL_LOOP) {
		HIF_ERROR("%s: hia polling timeout", __func__);
		return -EIO;
	}

	if (host_interest_area == 0) {
		HIF_ERROR("%s: host_interest_area = 0", __func__);
		return -EIO;
	}

	interconnect_targ_addr = host_interest_area +
			offsetof(struct host_interest_area_t,
			hi_interconnect_state);

	flag2_targ_addr = host_interest_area +
			offsetof(struct host_interest_area_t, hi_option_flag2);

#else
	interconnect_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_interconnect_state));
	ealloc_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_early_alloc));
	flag2_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_option_flag2));
#endif
	/* Supply Target-side CE configuration */
	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
			  &pcie_state_targ_addr);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
			  __func__, interconnect_targ_addr, rv);
		goto done;
	}
	if (pcie_state_targ_addr == 0) {
		rv = QDF_STATUS_E_FAILURE;
		HIF_ERROR("%s: pcie state addr is 0", __func__);
		goto done;
	}
	pipe_cfg_addr = pcie_state_targ_addr +
			  offsetof(struct pcie_state_s,
			  pipe_cfg_addr);
	rv = hif_diag_read_access(hif_hdl,
			  pipe_cfg_addr,
			  &pipe_cfg_targ_addr);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
			__func__, pipe_cfg_addr, rv);
		goto done;
	}
	if (pipe_cfg_targ_addr == 0) {
		rv = QDF_STATUS_E_FAILURE;
		HIF_ERROR("%s: pipe cfg addr is 0", __func__);
		goto done;
	}

	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
			(uint8_t *) target_ce_config,
			target_ce_config_sz);

	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
		goto done;
	}

	rv = hif_diag_read_access(hif_hdl,
			  pcie_state_targ_addr +
			  offsetof(struct pcie_state_s,
			   svc_to_pipe_map),
			  &svc_to_pipe_map);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
		goto done;
	}
	if (svc_to_pipe_map == 0) {
		rv = QDF_STATUS_E_FAILURE;
		HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
		goto done;
	}

	rv = hif_diag_write_mem(hif_hdl,
			svc_to_pipe_map,
			(uint8_t *) target_service_to_ce_map,
			target_service_to_ce_map_sz);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
		goto done;
	}

	rv = hif_diag_read_access(hif_hdl,
			pcie_state_targ_addr +
			offsetof(struct pcie_state_s,
			config_flags),
			&pcie_config_flags);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
		goto done;
	}
#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
#else
	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
#endif
	rv = hif_diag_write_mem(hif_hdl,
			pcie_state_targ_addr +
			offsetof(struct pcie_state_s,
			config_flags),
			(uint8_t *) &pcie_config_flags,
			sizeof(pcie_config_flags));
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
		goto done;
	}

#ifndef QCA_WIFI_3_0
	/* configure early allocation */
	ealloc_targ_addr = hif_hia_item_address(target_type,
						offsetof(
						struct host_interest_s,
						hi_early_alloc));

	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
			&ealloc_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
		goto done;
	}

	/* 1 bank is switched to IRAM, except ROME 1.0 */
	ealloc_value |=
		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
		 HI_EARLY_ALLOC_MAGIC_MASK);

	rv = hif_diag_read_access(hif_hdl,
			  CHIP_ID_ADDRESS |
			  RTC_SOC_BASE_ADDRESS, &chip_id);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
		goto done;
	}
	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
		switch (CHIP_ID_REVISION_GET(chip_id)) {
		case 0x2:       /* ROME 1.3 */
			/* 2 banks are switched to IRAM */
			banks_switched = 2;
			break;
		case 0x4:       /* ROME 2.1 */
		case 0x5:       /* ROME 2.2 */
			banks_switched = 6;
			break;
		case 0x8:       /* ROME 3.0 */
		case 0x9:       /* ROME 3.1 */
		case 0xA:       /* ROME 3.2 */
			banks_switched = 9;
			break;
		case 0x0:       /* ROME 1.0 */
		case 0x1:       /* ROME 1.1 */
		default:
			/* 3 banks are switched to IRAM */
			banks_switched = 3;
			break;
		}
	}

	ealloc_value |=
		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	rv = hif_diag_write_access(hif_hdl,
				ealloc_targ_addr,
				ealloc_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
		goto done;
	}
#endif
	if ((target_type == TARGET_TYPE_AR900B)
			|| (target_type == TARGET_TYPE_QCA9984)
			|| (target_type == TARGET_TYPE_QCA9888)
			|| (target_type == TARGET_TYPE_AR9888)) {
		hif_set_hia_extnd(scn);
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = hif_hia_item_address(target_type,
						offsetof(
						struct host_interest_s,
						hi_option_flag2));

	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
			  &flag2_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: get option val (%d)", __func__, rv);
		goto done;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
			   flag2_value);
	if (rv != QDF_STATUS_SUCCESS) {
		HIF_ERROR("%s: set option val (%d)", __func__, rv);
		goto done;
	}

	hif_wake_target_cpu(scn);

done:

	return rv;
}

/**
 * hif_pci_bus_configure() - configure the pcie bus
 * @hif_sc: pointer to the hif context.
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_pci_bus_configure(struct hif_softc *hif_sc)
{
	int status = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);

	hif_ce_prepare_config(hif_sc);

	/* initialize sleep state adjust variables */
	hif_state->sleep_timer_init = true;
	hif_state->keep_awake_count = 0;
	hif_state->fake_sleep = false;
	hif_state->sleep_ticks = 0;

	qdf_timer_init(NULL, &hif_state->sleep_timer,
			       hif_sleep_entry, (void *)hif_state,
			       QDF_TIMER_TYPE_WAKE_APPS);

	status = hif_wlan_enable(hif_sc);
	if (status) {
		HIF_ERROR("%s: hif_wlan_enable error = %d",
			  __func__, status);
		goto timer_free;
	}

	A_TARGET_ACCESS_LIKELY(hif_sc);

	if ((CONFIG_ATH_PCIE_MAX_PERF ||
	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
	    !ce_srng_based(hif_sc)) {
		/*
		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature;
		 * prevent sleep when we want to keep firmware always awake
		 * note: when we want to keep firmware always awake,
		 *       hif_target_sleep_state_adjust will point to a dummy
		 *       function, and hif_pci_target_sleep_state_adjust must
		 *       be called instead.
		 * note: bus type check is here because AHB bus is reusing
		 *       hif_pci_bus_configure code.
		 */
		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
			if (hif_pci_target_sleep_state_adjust(hif_sc,
					false, true) < 0) {
				status = -EACCES;
				goto disable_wlan;
			}
		}
	}

	/* todo: consider replacing this with an srng field */
	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
		hif_sc->per_ce_irq = true;
	}

	status = hif_config_ce(hif_sc);
	if (status)
		goto disable_wlan;

	if (hif_needs_bmi(hif_osc)) {
		status = hif_set_hia(hif_sc);
		if (status)
			goto unconfig_ce;

		HIF_INFO_MED("%s: hif_set_hia done", __func__);
	}

	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
		HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
						__func__);
	else {
		status = hif_configure_irq(hif_sc);
		if (status < 0)
			goto unconfig_ce;
	}

	A_TARGET_ACCESS_UNLIKELY(hif_sc);

	return status;

unconfig_ce:
	hif_unconfig_ce(hif_sc);
disable_wlan:
	A_TARGET_ACCESS_UNLIKELY(hif_sc);
	hif_wlan_disable(hif_sc);

timer_free:
	qdf_timer_stop(&hif_state->sleep_timer);
	qdf_timer_free(&hif_state->sleep_timer);
	hif_state->sleep_timer_init = false;

	HIF_ERROR("%s: failed, status = %d", __func__, status);
	return status;
}

/**
 * hif_pci_close(): hif bus close
 * @hif_sc: hif context
 *
 * Return: n/a
 */
void hif_pci_close(struct hif_softc *hif_sc)
{
	hif_pm_runtime_close(hif_sc);
	hif_ce_close(hif_sc);
}

#define BAR_NUM 0

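/**
 * hif_enable_pci_nopld() - enable the PCI device without PLD support
 * @sc: hif pci context
 * @pdev: pci device
 * @id: matched device id
 *
 * Verifies the device id against the probed id, enables the device,
 * reserves and maps BAR0, configures the DMA masks, and enables bus
 * mastering.
 *
 * Return: 0 on success, negative errno on failure.
 */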
1795 static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
1796 				struct pci_dev *pdev,
1797 				const struct pci_device_id *id)
1798 {
1799 	void __iomem *mem;
1800 	int ret = 0;
1801 	uint16_t device_id = 0;
1802 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1803 
1804 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
1805 	if (device_id != id->device)  {
1806 		HIF_ERROR(
1807 		   "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
1808 		   __func__, device_id, id->device);
1809 		/* pci link is down, so returing with error code */
1810 		return -EIO;
1811 	}
1812 
1813 	/* FIXME: temp. commenting out assign_resource
1814 	 * call for dev_attach to work on 2.6.38 kernel
1815 	 */
1816 #if (!defined(__LINUX_ARM_ARCH__))
1817 	if (pci_assign_resource(pdev, BAR_NUM)) {
1818 		HIF_ERROR("%s: pci_assign_resource error", __func__);
1819 		return -EIO;
1820 	}
1821 #endif
1822 	if (pci_enable_device(pdev)) {
1823 		HIF_ERROR("%s: pci_enable_device error",
1824 			   __func__);
1825 		return -EIO;
1826 	}
1827 
1828 	/* Request MMIO resources */
1829 	ret = pci_request_region(pdev, BAR_NUM, "ath");
1830 	if (ret) {
1831 		HIF_ERROR("%s: PCI MMIO reservation error", __func__);
1832 		ret = -EIO;
1833 		goto err_region;
1834 	}
1835 
1836 #ifdef CONFIG_ARM_LPAE
	/* If CONFIG_ARM_LPAE is enabled, we must set a 64-bit DMA mask
	 * even for 32-bit devices.
1839 	 */
1840 	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1841 	if (ret) {
1842 		HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
1843 		goto err_dma;
1844 	}
1845 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1846 	if (ret) {
1847 		HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
1848 		goto err_dma;
1849 	}
1850 #else
1851 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1852 	if (ret) {
1853 		HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
1854 		goto err_dma;
1855 	}
1856 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1857 	if (ret) {
1858 		HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
1859 			   __func__);
1860 		goto err_dma;
1861 	}
1862 #endif
1863 
1864 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
1865 
1866 	/* Set bus master bit in PCI_COMMAND to enable DMA */
1867 	pci_set_master(pdev);
1868 
1869 	/* Arrange for access to Target SoC registers. */
1870 	mem = pci_iomap(pdev, BAR_NUM, 0);
1871 	if (!mem) {
1872 		HIF_ERROR("%s: PCI iomap error", __func__);
1873 		ret = -EIO;
1874 		goto err_iomap;
1875 	}
1876 
1877 	HIF_INFO("*****BAR is %pK\n", (void *)mem);
1878 
1879 	sc->mem = mem;
1880 
1881 	/* Hawkeye emulation specific change */
1882 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
1883 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
1884 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
1885 		(device_id == RUMIM2M_DEVICE_ID_NODE3) ||
1886 		(device_id == RUMIM2M_DEVICE_ID_NODE4) ||
1887 		(device_id == RUMIM2M_DEVICE_ID_NODE5)) {
1888 		mem = mem + 0x0c000000;
1889 		sc->mem = mem;
1890 		HIF_INFO("%s: Changing PCI mem base to %pK\n",
1891 			__func__, sc->mem);
1892 	}
1893 
1894 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
1895 	ol_sc->mem = mem;
1896 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
1897 	sc->pci_enabled = true;
1898 	return ret;
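
	/*
	 * Error unwind note: each label below names the step that
	 * failed; execution falls through so that everything acquired
	 * before that step is released in reverse order of setup
	 * (iomap -> bus master -> MMIO region -> device enable).
	 */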
1899 
1900 err_iomap:
1901 	pci_clear_master(pdev);
1902 err_dma:
1903 	pci_release_region(pdev, BAR_NUM);
1904 err_region:
1905 	pci_disable_device(pdev);
1906 	return ret;
1907 }
1908 
1909 static int hif_enable_pci_pld(struct hif_pci_softc *sc,
1910 			      struct pci_dev *pdev,
1911 			      const struct pci_device_id *id)
1912 {
1913 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
1914 	sc->pci_enabled = true;
1915 	return 0;
1916 }
1917 
1918 
1919 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
1920 {
1921 	pci_disable_msi(sc->pdev);
1922 	pci_iounmap(sc->pdev, sc->mem);
1923 	pci_clear_master(sc->pdev);
1924 	pci_release_region(sc->pdev, BAR_NUM);
1925 	pci_disable_device(sc->pdev);
1926 }
1927 
1928 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
1929 
1930 static void hif_disable_pci(struct hif_pci_softc *sc)
1931 {
1932 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1933 
1934 	if (!ol_sc) {
1935 		HIF_ERROR("%s: ol_sc = NULL", __func__);
1936 		return;
1937 	}
1938 	hif_pci_device_reset(sc);
1939 	sc->hif_pci_deinit(sc);
1940 
1941 	sc->mem = NULL;
1942 	ol_sc->mem = NULL;
1943 }
1944 
1945 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
1946 {
1947 	int ret = 0;
1948 	int targ_awake_limit = 500;
1949 #ifndef QCA_WIFI_3_0
1950 	uint32_t fw_indicator;
1951 #endif
1952 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1953 
1954 	/*
	 * Verify that the Target was started cleanly.
1956 	 * The case where this is most likely is with an AUX-powered
1957 	 * Target and a Host in WoW mode. If the Host crashes,
1958 	 * loses power, or is restarted (without unloading the driver)
1959 	 * then the Target is left (aux) powered and running.  On a
1960 	 * subsequent driver load, the Target is in an unexpected state.
1961 	 * We try to catch that here in order to reset the Target and
1962 	 * retry the probe.
1963 	 */
1964 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
1965 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
1966 	while (!hif_targ_is_awake(scn, sc->mem)) {
1967 		if (0 == targ_awake_limit) {
1968 			HIF_ERROR("%s: target awake timeout", __func__);
1969 			ret = -EAGAIN;
1970 			goto end;
1971 		}
1972 		qdf_mdelay(1);
1973 		targ_awake_limit--;
1974 	}
1975 
1976 #if PCIE_BAR0_READY_CHECKING
1977 	{
1978 		int wait_limit = 200;
1979 		/* Synchronization point: wait the BAR0 is configured */
1980 		while (wait_limit-- &&
			   !(hif_read32_mb(sc, sc->mem +
1982 					  PCIE_LOCAL_BASE_ADDRESS +
1983 					  PCIE_SOC_RDY_STATUS_ADDRESS)
1984 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
1985 			qdf_mdelay(10);
1986 		}
1987 		if (wait_limit < 0) {
			/* AR6320v1 doesn't support checking of BAR0
			 * configuration; wait up to two seconds
			 * (200 x 10 ms) for BAR0 to become ready
			 */
1991 			HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
1992 				    __func__);
1993 		}
1994 	}
1995 #endif
1996 
1997 #ifndef QCA_WIFI_3_0
1998 	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
1999 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2000 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2001 
2002 	if (fw_indicator & FW_IND_INITIALIZED) {
2003 		HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
2004 			   __func__);
2005 		ret = -EAGAIN;
2006 		goto end;
2007 	}
2008 #endif
2009 
2010 end:
2011 	return ret;
2012 }
2013 
2014 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2015 {
2016 	int ret = 0;
2017 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2018 	uint32_t target_type = scn->target_info.target_type;
2019 
2020 	HIF_TRACE("%s: E", __func__);
2021 
	/* the target does not support MSI, or MSI IRQ setup failed */
2023 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2024 	ret = request_irq(sc->pdev->irq,
2025 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2026 			  "wlan_pci", sc);
2027 	if (ret) {
2028 		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
2029 		goto end;
2030 	}
2031 	scn->wake_irq = sc->pdev->irq;
	/* Use sc->irq instead of sc->pdev->irq;
	 * platform_device pdev doesn't have an irq field
2034 	 */
2035 	sc->irq = sc->pdev->irq;
2036 	/* Use Legacy PCI Interrupts */
2037 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2038 		  PCIE_INTR_ENABLE_ADDRESS),
2039 		  HOST_GROUP0_MASK);
2040 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2041 			       PCIE_INTR_ENABLE_ADDRESS));
2042 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2043 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2044 
2045 	if ((target_type == TARGET_TYPE_IPQ4019) ||
2046 			(target_type == TARGET_TYPE_AR900B)  ||
2047 			(target_type == TARGET_TYPE_QCA9984) ||
2048 			(target_type == TARGET_TYPE_AR9888) ||
2049 			(target_type == TARGET_TYPE_QCA9888) ||
2050 			(target_type == TARGET_TYPE_AR6320V1) ||
2051 			(target_type == TARGET_TYPE_AR6320V2) ||
2052 			(target_type == TARGET_TYPE_AR6320V3)) {
2053 		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2054 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2055 	}
2056 end:
2057 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2058 			  "%s: X, ret = %d", __func__, ret);
2059 	return ret;
2060 }
2061 
2062 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
2063 {
2064 	int ret;
2065 	int ce_id, irq;
2066 	uint32_t msi_data_start;
2067 	uint32_t msi_data_count;
2068 	uint32_t msi_irq_start;
2069 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2070 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
2071 
2072 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2073 					    &msi_data_count, &msi_data_start,
2074 					    &msi_irq_start);
2075 	if (ret)
2076 		return ret;
2077 
2078 	/* needs to match the ce_id -> irq data mapping
2079 	 * used in the srng parameter configuration
2080 	 */
2081 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2082 		unsigned int msi_data;
2083 
2084 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
2085 			continue;
2086 
2087 		if (!ce_sc->tasklets[ce_id].inited)
2088 			continue;
2089 
2090 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
2091 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2092 
2093 		hif_debug("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
2094 			  ce_id, msi_data, irq);
2095 
2096 		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
2097 	}
2098 
2099 	return ret;
2100 }
2101 
2102 static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2103 {
2104 	int i, j, irq;
2105 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2106 	struct hif_exec_context *hif_ext_group;
2107 
2108 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2109 		hif_ext_group = hif_state->hif_ext_group[i];
2110 		if (hif_ext_group->irq_requested) {
2111 			hif_ext_group->irq_requested = false;
2112 			for (j = 0; j < hif_ext_group->numirq; j++) {
2113 				irq = hif_ext_group->os_irq[j];
2114 				if (scn->irq_unlazy_disable)
2115 					irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
2116 				pfrm_free_irq(scn->qdf_dev->dev,
2117 					      irq, hif_ext_group);
2118 			}
2119 			hif_ext_group->numirq = 0;
2120 		}
2121 	}
2122 }
2123 
2124 /**
 * hif_pci_nointrs(): disable IRQ
 * @scn: struct hif_softc
 *
 * This function stops and frees the interrupt(s) registered for the bus.
 *
2131  * Return: none
2132  */
2133 void hif_pci_nointrs(struct hif_softc *scn)
2134 {
2135 	int i, ret;
2136 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2137 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2138 
2139 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2140 
2141 	if (scn->request_irq_done == false)
2142 		return;
2143 
2144 	hif_pci_deconfigure_grp_irq(scn);
2145 
2146 	ret = hif_ce_srng_msi_free_irq(scn);
2147 	if (ret != -EINVAL) {
2148 		/* ce irqs freed in hif_ce_srng_msi_free_irq */
2149 
2150 		if (scn->wake_irq)
2151 			pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
2152 		scn->wake_irq = 0;
2153 	} else if (sc->num_msi_intrs > 0) {
2154 		/* MSI interrupt(s) */
2155 		for (i = 0; i < sc->num_msi_intrs; i++)
2156 			free_irq(sc->irq + i, sc);
2157 		sc->num_msi_intrs = 0;
2158 	} else {
2159 		/* Legacy PCI line interrupt
		 * Use sc->irq instead of sc->pdev->irq;
2161 		 * platform_device pdev doesn't have an irq field
2162 		 */
2163 		free_irq(sc->irq, sc);
2164 	}
2165 	scn->request_irq_done = false;
2166 }
2167 
2168 static inline
2169 bool hif_pci_default_link_up(struct hif_target_info *tgt_info)
2170 {
2171 	if (ADRASTEA_BU && (tgt_info->target_type != TARGET_TYPE_QCN7605))
2172 		return true;
2173 	else
2174 		return false;
2175 }
2176 /**
2177  * hif_disable_bus(): hif_disable_bus
2178  *
2179  * This function disables the bus
2180  *
2181  * @bdev: bus dev
2182  *
2183  * Return: none
2184  */
2185 void hif_pci_disable_bus(struct hif_softc *scn)
2186 {
2187 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2188 	struct pci_dev *pdev;
2189 	void __iomem *mem;
2190 	struct hif_target_info *tgt_info = &scn->target_info;
2191 
2192 	/* Attach did not succeed, all resources have been
2193 	 * freed in error handler
2194 	 */
2195 	if (!sc)
2196 		return;
2197 
2198 	pdev = sc->pdev;
2199 	if (hif_pci_default_link_up(tgt_info)) {
2200 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2201 
2202 		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2203 		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
2204 			       HOST_GROUP0_MASK);
2205 	}
2206 
2207 #if defined(CPU_WARM_RESET_WAR)
2208 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2209 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2210 	 * verified for AR9888_REV1
2211 	 */
2212 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2213 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2214 		hif_pci_device_warm_reset(sc);
2215 	else
2216 		hif_pci_device_reset(sc);
2217 #else
2218 	hif_pci_device_reset(sc);
2219 #endif
2220 	mem = (void __iomem *)sc->mem;
2221 	if (mem) {
2222 		hif_dump_pipe_debug_count(scn);
2223 		if (scn->athdiag_procfs_inited) {
2224 			athdiag_procfs_remove();
2225 			scn->athdiag_procfs_inited = false;
2226 		}
2227 		sc->hif_pci_deinit(sc);
2228 		scn->mem = NULL;
2229 	}
2230 	HIF_INFO("%s: X", __func__);
2231 }
2232 
2233 #ifdef FEATURE_RUNTIME_PM
2234 /**
2235  * hif_pci_get_rpm_ctx() - Map corresponding hif_runtime_pm_ctx
2236  * @scn: hif context
2237  *
2238  * This function will map and return the corresponding
2239  * hif_runtime_pm_ctx based on pcie interface.
2240  *
2241  * Return: struct hif_runtime_pm_ctx pointer
2242  */
2243 struct hif_runtime_pm_ctx *hif_pci_get_rpm_ctx(struct hif_softc *scn)
2244 {
2245 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2246 
2247 	return &sc->rpm_ctx;
2248 }
2249 
2250 /**
2251  * hif_pci_get_dev() - Map corresponding device structure
2252  * @scn: hif context
2253  *
2254  * This function will map and return the corresponding
2255  * device structure based on pcie interface.
2256  *
2257  * Return: struct device pointer
2258  */
2259 struct device *hif_pci_get_dev(struct hif_softc *scn)
2260 {
2261 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2262 
2263 	return sc->dev;
2264 }
2265 #endif
2266 
2267 #define OL_ATH_PCI_PM_CONTROL 0x44
2268 
2269 #if defined(CONFIG_PCI_MSM)
2270 /**
 * hif_pci_prevent_linkdown(): prevent or allow linkdown
 * @scn: hif context
 * @flag: true prevents linkdown, false allows
2273  *
2274  * Calls into the platform driver to vote against taking down the
2275  * pcie link.
2276  *
2277  * Return: n/a
2278  */
2279 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2280 {
2281 	int errno;
2282 
2283 	HIF_INFO("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2284 	hif_runtime_prevent_linkdown(scn, flag);
2285 
2286 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2287 	if (errno)
2288 		HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
2289 			  __func__, errno);
2290 }
2291 #else
2292 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2293 {
2294 	HIF_INFO("wlan: %s pcie power collapse", (flag ? "disable" : "enable"));
2295 	hif_runtime_prevent_linkdown(scn, flag);
2296 }
2297 #endif
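
/*
 * Illustrative usage (an assumption, not a call sequence mandated by
 * this file): callers vote against link down for the duration of an
 * operation and release the vote symmetrically:
 *
 *	hif_pci_prevent_linkdown(scn, true);
 *	... perform work that requires the PCIe link ...
 *	hif_pci_prevent_linkdown(scn, false);
 */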
2298 
2299 /**
 * hif_pci_bus_suspend(): prepare hif for suspend
 * @scn: hif context
 *
2302  * Return: Errno
2303  */
2304 int hif_pci_bus_suspend(struct hif_softc *scn)
2305 {
2306 	QDF_STATUS ret;
2307 
2308 	hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
2309 
2310 	ret = hif_try_complete_tasks(scn);
2311 	if (QDF_IS_STATUS_ERROR(ret)) {
2312 		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2313 		return -EBUSY;
2314 	}
2315 
2316 	/* Stop the HIF Sleep Timer */
2317 	hif_cancel_deferred_target_sleep(scn);
2318 
2319 	return 0;
2320 }
2321 
2322 /**
2323  * __hif_check_link_status() - API to check if PCIe link is active/not
2324  * @scn: HIF Context
2325  *
2326  * API reads the PCIe config space to verify if PCIe link training is
2327  * successful or not.
2328  *
2329  * Return: Success/Failure
2330  */
2331 static int __hif_check_link_status(struct hif_softc *scn)
2332 {
2333 	uint16_t dev_id = 0;
2334 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2335 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2336 
2337 	if (!sc) {
2338 		HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
2339 		return -EINVAL;
2340 	}
2341 
2342 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2343 
2344 	if (dev_id == sc->devid)
2345 		return 0;
2346 
2347 	HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2348 	       __func__, dev_id);
2349 
2350 	scn->recovery = true;
2351 
2352 	if (cbk && cbk->set_recovery_in_progress)
2353 		cbk->set_recovery_in_progress(cbk->context, true);
2354 	else
2355 		HIF_ERROR("%s: Driver Global Recovery is not set", __func__);
2356 
2357 	pld_is_pci_link_down(sc->dev);
2358 	return -EACCES;
2359 }
2360 
2361 /**
 * hif_pci_bus_resume(): prepare hif for resume
 * @scn: hif context
 *
2364  * Return: Errno
2365  */
2366 int hif_pci_bus_resume(struct hif_softc *scn)
2367 {
2368 	int errno;
2369 
2370 	errno = __hif_check_link_status(scn);
2371 	if (errno)
2372 		return errno;
2373 
2374 	hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2375 
2376 	return 0;
2377 }
2378 
2379 /**
2380  * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2381  * @scn: hif context
2382  *
 * Ensure that if we received the wakeup message before the irq
 * was disabled that the message is processed before suspending.
 *
 * Return: 0 on success.
2387  */
2388 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2389 {
2390 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2391 		qdf_atomic_set(&scn->link_suspended, 1);
2392 
2393 	hif_apps_wake_irq_enable(GET_HIF_OPAQUE_HDL(scn));
2394 
2395 	return 0;
2396 }
2397 
2398 /**
 * hif_pci_bus_resume_noirq() - re-enable normal irq processing on resume
 * @scn: hif context
 *
 * Disable the wake irq used across suspend and clear the link-suspended
 * state so that register access is permitted again.
 *
 * Return: 0 on success.
2406  */
2407 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2408 {
2409 	hif_apps_wake_irq_disable(GET_HIF_OPAQUE_HDL(scn));
2410 
2411 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2412 		qdf_atomic_set(&scn->link_suspended, 0);
2413 
2414 	return 0;
2415 }
2416 
2417 #if CONFIG_PCIE_64BIT_MSI
2418 static void hif_free_msi_ctx(struct hif_softc *scn)
2419 {
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2421 	struct hif_msi_info *info = &sc->msi_info;
2422 	struct device *dev = scn->qdf_dev->dev;
2423 
2424 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
2425 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
2426 	info->magic = NULL;
2427 	info->magic_dma = 0;
2428 }
2429 #else
2430 static void hif_free_msi_ctx(struct hif_softc *scn)
2431 {
2432 }
2433 #endif
2434 
2435 void hif_pci_disable_isr(struct hif_softc *scn)
2436 {
2437 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2438 
2439 	hif_exec_kill(&scn->osc);
2440 	hif_nointrs(scn);
2441 	hif_free_msi_ctx(scn);
2442 	/* Cancel the pending tasklet */
2443 	ce_tasklet_kill(scn);
2444 	tasklet_kill(&sc->intr_tq);
2445 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
2446 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
2447 }
2448 
2449 /* Function to reset SoC */
2450 void hif_pci_reset_soc(struct hif_softc *hif_sc)
2451 {
2452 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
2453 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
2454 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
2455 
2456 #if defined(CPU_WARM_RESET_WAR)
2457 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2458 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2459 	 * verified for AR9888_REV1
2460 	 */
2461 	if (tgt_info->target_version == AR9888_REV2_VERSION)
2462 		hif_pci_device_warm_reset(sc);
2463 	else
2464 		hif_pci_device_reset(sc);
2465 #else
2466 	hif_pci_device_reset(sc);
2467 #endif
2468 }
2469 
2470 #ifdef CONFIG_PCI_MSM
2471 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
2472 {
2473 	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
2474 	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
2475 }
2476 #else
static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}
2478 #endif
2479 
2480 /**
2481  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
2482  * @sc: HIF PCIe Context
2483  *
2484  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
2485  *
2486  * Return: Failure to caller
2487  */
2488 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
2489 {
2490 	uint16_t val = 0;
2491 	uint32_t bar = 0;
2492 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
2493 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2494 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
2495 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
2496 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2497 	A_target_id_t pci_addr = scn->mem;
2498 
2499 	HIF_ERROR("%s: keep_awake_count = %d",
2500 			__func__, hif_state->keep_awake_count);
2501 
2502 	pfrm_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
2503 
2504 	HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
2505 
2506 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
2507 
2508 	HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
2509 
2510 	pfrm_read_config_word(sc->pdev, PCI_COMMAND, &val);
2511 
2512 	HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
2513 
2514 	pfrm_read_config_word(sc->pdev, PCI_STATUS, &val);
2515 
2516 	HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
2517 
2518 	pfrm_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
2519 
2520 	HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
2521 
2522 	HIF_ERROR("%s: SOC_WAKE_ADDR 0%08x", __func__,
2523 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
2524 						PCIE_SOC_WAKE_ADDRESS));
2525 
2526 	HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
2527 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
2528 							RTC_STATE_ADDRESS));
2529 
2530 	HIF_ERROR("%s:error, wakeup target", __func__);
2531 	hif_msm_pcie_debug_info(sc);
2532 
2533 	if (!cfg->enable_self_recovery)
2534 		QDF_BUG(0);
2535 
2536 	scn->recovery = true;
2537 
2538 	if (cbk->set_recovery_in_progress)
2539 		cbk->set_recovery_in_progress(cbk->context, true);
2540 
2541 	pld_is_pci_link_down(sc->dev);
2542 	return -EACCES;
2543 }
2544 
2545 /*
2546  * For now, we use simple on-demand sleep/wake.
2547  * Some possible improvements:
2548  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
2549  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
 *   Careful, though: these functions may be used by
 *   interrupt handlers ("atomic").
2552  *  -Don't use host_reg_table for this code; instead use values directly
2553  *  -Use a separate timer to track activity and allow Target to sleep only
2554  *   if it hasn't done anything for a while; may even want to delay some
2555  *   processing for a short while in order to "batch" (e.g.) transmit
2556  *   requests with completion processing into "windows of up time".  Costs
2557  *   some performance, but improves power utilization.
2558  *  -On some platforms, it might be possible to eliminate explicit
2559  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
2560  *   recover from the failure by forcing the Target awake.
2561  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
2562  *   overhead in some cases. Perhaps this makes more sense when
2563  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
2564  *   disabled.
2565  *  -It is possible to compile this code out and simply force the Target
2566  *   to remain awake.  That would yield optimal performance at the cost of
2567  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
2568  *
2569  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
2570  */
2571 /**
2572  * hif_target_sleep_state_adjust() - on-demand sleep/wake
2573  * @scn: hif_softc pointer.
2574  * @sleep_ok: bool
2575  * @wait_for_it: bool
2576  *
2577  * Output the pipe error counts of each pipe to log file
2578  *
2579  * Return: int
2580  */
2581 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
2582 			      bool sleep_ok, bool wait_for_it)
2583 {
2584 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2585 	A_target_id_t pci_addr = scn->mem;
2586 	static int max_delay;
2587 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	static int debug;

2589 	if (scn->recovery)
2590 		return -EACCES;
2591 
2592 	if (qdf_atomic_read(&scn->link_suspended)) {
2593 		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
2594 		debug = true;
2595 		QDF_ASSERT(0);
2596 		return -EACCES;
2597 	}
2598 
2599 	if (debug) {
2600 		wait_for_it = true;
2601 		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
2602 				__func__);
2603 		QDF_ASSERT(0);
2604 	}
2605 
2606 	if (sleep_ok) {
2607 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2608 		hif_state->keep_awake_count--;
2609 		if (hif_state->keep_awake_count == 0) {
2610 			/* Allow sleep */
2611 			hif_state->verified_awake = false;
2612 			hif_state->sleep_ticks = qdf_system_ticks();
2613 		}
2614 		if (hif_state->fake_sleep == false) {
2615 			/* Set the Fake Sleep */
2616 			hif_state->fake_sleep = true;
2617 
2618 			/* Start the Sleep Timer */
2619 			qdf_timer_stop(&hif_state->sleep_timer);
2620 			qdf_timer_start(&hif_state->sleep_timer,
2621 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
2622 		}
2623 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2624 	} else {
2625 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2626 
2627 		if (hif_state->fake_sleep) {
2628 			hif_state->verified_awake = true;
2629 		} else {
2630 			if (hif_state->keep_awake_count == 0) {
2631 				/* Force AWAKE */
2632 				hif_write32_mb(sc, pci_addr +
2633 					      PCIE_LOCAL_BASE_ADDRESS +
2634 					      PCIE_SOC_WAKE_ADDRESS,
2635 					      PCIE_SOC_WAKE_V_MASK);
2636 			}
2637 		}
2638 		hif_state->keep_awake_count++;
2639 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2640 
2641 		if (wait_for_it && !hif_state->verified_awake) {
#define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
2643 			int tot_delay = 0;
2644 			int curr_delay = 5;
2645 
2646 			for (;; ) {
2647 				if (hif_targ_is_awake(scn, pci_addr)) {
2648 					hif_state->verified_awake = true;
2649 					break;
2650 				}
2651 				if (!hif_pci_targ_is_present(scn, pci_addr))
2652 					break;
2653 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
2654 					return hif_log_soc_wakeup_timeout(sc);
2655 
2656 				OS_DELAY(curr_delay);
2657 				tot_delay += curr_delay;
2658 
2659 				if (curr_delay < 50)
2660 					curr_delay += 5;
2661 			}
2662 
2663 			/*
2664 			 * NB: If Target has to come out of Deep Sleep,
			 * this may take a few msecs. Typically, though,
			 * this delay should be <30us.
2667 			 */
2668 			if (tot_delay > max_delay)
2669 				max_delay = tot_delay;
2670 		}
2671 	}
2672 
2673 	if (debug && hif_state->verified_awake) {
2674 		debug = 0;
2675 		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
2676 			__func__,
2677 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2678 				PCIE_INTR_ENABLE_ADDRESS),
2679 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2680 				PCIE_INTR_CAUSE_ADDRESS),
2681 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2682 				CPU_INTR_ADDRESS),
2683 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2684 				PCIE_INTR_CLR_ADDRESS),
2685 			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
2686 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
2687 	}
2688 
2689 	return 0;
2690 }
2691 
2692 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2693 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
2694 {
2695 	uint32_t value;
2696 	void *addr;
2697 
2698 	addr = scn->mem + offset;
2699 	value = hif_read32_mb(scn, addr);
2700 
2701 	{
2702 		unsigned long irq_flags;
2703 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2704 
2705 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2706 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2707 		pcie_access_log[idx].is_write = false;
2708 		pcie_access_log[idx].addr = addr;
2709 		pcie_access_log[idx].value = value;
2710 		pcie_access_log_seqnum++;
2711 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2712 	}
2713 
2714 	return value;
2715 }
2716 
2717 void
2718 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
2719 {
2720 	void *addr;
2721 
2722 	addr = scn->mem + (offset);
2723 	hif_write32_mb(scn, addr, value);
2724 
2725 	{
2726 		unsigned long irq_flags;
2727 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2728 
2729 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2730 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2731 		pcie_access_log[idx].is_write = true;
2732 		pcie_access_log[idx].addr = addr;
2733 		pcie_access_log[idx].value = value;
2734 		pcie_access_log_seqnum++;
2735 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2736 	}
2737 }
2738 
2739 /**
2740  * hif_target_dump_access_log() - dump access log
2741  *
2742  * dump access log
2743  *
2744  * Return: n/a
2745  */
2746 void hif_target_dump_access_log(void)
2747 {
2748 	int idx, len, start_idx, cur_idx;
2749 	unsigned long irq_flags;
2750 
2751 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2752 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
2753 		len = PCIE_ACCESS_LOG_NUM;
2754 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2755 	} else {
2756 		len = pcie_access_log_seqnum;
2757 		start_idx = 0;
2758 	}
2759 
2760 	for (idx = 0; idx < len; idx++) {
2761 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
2762 		HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
2763 		       __func__, idx,
2764 		       pcie_access_log[cur_idx].seqnum,
2765 		       pcie_access_log[cur_idx].is_write,
2766 		       pcie_access_log[cur_idx].addr,
2767 		       pcie_access_log[cur_idx].value);
2768 	}
2769 
2770 	pcie_access_log_seqnum = 0;
2771 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2772 }
2773 #endif
2774 
2775 #ifndef HIF_AHB
2776 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
2777 {
2778 	QDF_BUG(0);
2779 	return -EINVAL;
2780 }
2781 
2782 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
2783 {
2784 	QDF_BUG(0);
2785 	return -EINVAL;
2786 }
2787 #endif
2788 
2789 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
2790 {
	struct ce_tasklet_entry *tasklet_entry = context;

	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}

extern const char *ce_name[];
2795 
2796 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
2797 {
2798 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
2799 
2800 	return pci_scn->ce_msi_irq_num[ce_id];
2801 }
2802 
/**
 * hif_ce_srng_msi_irq_disable() - disable the irq for msi
 * @hif_sc: hif context
 * @ce_id: which ce to disable copy complete interrupts for
 *
 * Since MSI interrupts are not level based, the system can function
 * without disabling these interrupts.  Interrupt mitigation can be
 * added here for better system performance.
 */
2811 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
2812 {
2813 	pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
2814 				hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2815 }
2816 
2817 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
2818 {
2819 	pfrm_enable_irq(hif_sc->qdf_dev->dev,
2820 			hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2821 }
2822 
2823 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
2824 {
2825 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2826 }
2827 
2828 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
2829 {
2830 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2831 }
2832 
2833 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
2834 {
2835 	int ret;
2836 	int ce_id, irq;
2837 	uint32_t msi_data_start;
2838 	uint32_t msi_data_count;
2839 	uint32_t msi_irq_start;
2840 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2841 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
2842 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
2843 	int pci_slot;
2844 
2845 	if (!scn->disable_wake_irq) {
2846 		/* do wake irq assignment */
2847 		ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
2848 						  &msi_data_count,
2849 						  &msi_data_start,
2850 						  &msi_irq_start);
2851 		if (ret)
2852 			return ret;
2853 
2854 		scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev,
2855 						msi_irq_start);
2856 
2857 		ret = pfrm_request_irq(scn->qdf_dev->dev, scn->wake_irq,
2858 				       hif_wake_interrupt_handler,
2859 				       IRQF_NO_SUSPEND, "wlan_wake_irq", scn);
2860 
2861 		if (ret)
2862 			return ret;
2863 	}
2864 
2865 	/* do ce irq assignments */
2866 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2867 					    &msi_data_count, &msi_data_start,
2868 					    &msi_irq_start);
2869 	if (ret)
2870 		goto free_wake_irq;
2871 
2872 	if (ce_srng_based(scn)) {
2873 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
2874 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
2875 	} else {
2876 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
2877 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
2878 	}
2879 
2880 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
2881 
2882 	/* needs to match the ce_id -> irq data mapping
2883 	 * used in the srng parameter configuration
2884 	 */
2885 	pci_slot = hif_get_pci_slot(scn);
2886 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2887 		unsigned int msi_data = (ce_id % msi_data_count) +
2888 			msi_irq_start;
2889 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
2890 			continue;
2891 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2892 		HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)",
2893 			 __func__, ce_id, msi_data, irq,
2894 			 &ce_sc->tasklets[ce_id]);
2895 
2896 		/* implies the ce is also initialized */
2897 		if (!ce_sc->tasklets[ce_id].inited)
2898 			continue;
2899 
2900 		pci_sc->ce_msi_irq_num[ce_id] = irq;
2901 		ret = pfrm_request_irq(scn->qdf_dev->dev,
2902 				       irq, hif_ce_interrupt_handler,
2903 				       IRQF_SHARED,
2904 				       ce_irqname[pci_slot][ce_id],
2905 				       &ce_sc->tasklets[ce_id]);
2906 		if (ret)
2907 			goto free_irq;
2908 	}
2909 
2910 	return ret;
2911 
2912 free_irq:
2913 	/* the request_irq for the last ce_id failed so skip it. */
2914 	while (ce_id > 0 && ce_id < scn->ce_count) {
2915 		unsigned int msi_data;
2916 
2917 		ce_id--;
2918 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
2919 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2920 		pfrm_free_irq(scn->qdf_dev->dev,
2921 			      irq, &ce_sc->tasklets[ce_id]);
2922 	}
2923 
2924 free_wake_irq:
2925 	if (!scn->disable_wake_irq) {
		/* free with the same context pointer used at request time */
		pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
2928 		scn->wake_irq = 0;
2929 	}
2930 
2931 	return ret;
2932 }
2933 
2934 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
2935 {
2936 	int i;
2937 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
2938 
2939 	for (i = 0; i < hif_ext_group->numirq; i++)
2940 		pfrm_disable_irq_nosync(scn->qdf_dev->dev,
2941 					hif_ext_group->os_irq[i]);
2942 }
2943 
2944 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
2945 {
2946 	int i;
2947 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
2948 
2949 	for (i = 0; i < hif_ext_group->numirq; i++)
2950 		pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]);
2951 }
2952 
2953 /**
 * hif_pci_get_irq_name() - get irq name
 * @irq_no: irq number
 *
 * This function maps an irq number to an irq name; the PCI
 * implementation returns a fixed dummy name.
 *
2960  * Return: irq name
2961  */
2962 const char *hif_pci_get_irq_name(int irq_no)
2963 {
2964 	return "pci-dummy";
2965 }
2966 
2967 #ifdef HIF_CPU_PERF_AFFINE_MASK
2968 /**
2969  * hif_pci_irq_set_affinity_hint() - API to set IRQ affinity
2970  * @hif_ext_group: hif_ext_group to extract the irq info
2971  *
2972  * This function will set the IRQ affinity to the gold cores
2973  * only for defconfig builds
2974  *
2975  * @hif_ext_group: hif_ext_group to extract the irq info
2976  *
2977  * Return: none
2978  */
2979 void hif_pci_irq_set_affinity_hint(
2980 	struct hif_exec_context *hif_ext_group)
2981 {
2982 	int i, ret;
2983 	unsigned int cpus;
2984 	bool mask_set = false;
2985 
2986 	for (i = 0; i < hif_ext_group->numirq; i++)
2987 		qdf_cpumask_clear(&hif_ext_group->new_cpu_mask[i]);
2988 
2989 	for (i = 0; i < hif_ext_group->numirq; i++) {
2990 		qdf_for_each_online_cpu(cpus) {
2991 			if (qdf_topology_physical_package_id(cpus) ==
2992 				CPU_CLUSTER_TYPE_PERF) {
2993 				qdf_cpumask_set_cpu(cpus,
2994 						    &hif_ext_group->
2995 						    new_cpu_mask[i]);
2996 				mask_set = true;
2997 			}
2998 		}
2999 	}
3000 	for (i = 0; i < hif_ext_group->numirq; i++) {
3001 		if (mask_set) {
3002 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3003 						  IRQ_NO_BALANCING, 0);
3004 			ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i],
3005 						       (struct qdf_cpu_mask *)
3006 						       &hif_ext_group->
3007 						       new_cpu_mask[i]);
3008 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3009 						  0, IRQ_NO_BALANCING);
3010 			if (ret)
3011 				qdf_err("Set affinity %*pbl fails for IRQ %d ",
3012 					qdf_cpumask_pr_args(&hif_ext_group->
3013 							    new_cpu_mask[i]),
3014 					hif_ext_group->os_irq[i]);
3015 			else
3016 				qdf_debug("Set affinity %*pbl for IRQ: %d",
3017 					  qdf_cpumask_pr_args(&hif_ext_group->
3018 							      new_cpu_mask[i]),
3019 					  hif_ext_group->os_irq[i]);
3020 		} else {
3021 			qdf_err("Offline CPU: Set affinity fails for IRQ: %d",
3022 				hif_ext_group->os_irq[i]);
3023 		}
3024 	}
3025 }
3026 #endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */
3027 
3028 void hif_pci_config_irq_affinity(struct hif_softc *scn)
3029 {
3030 	int i;
3031 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3032 	struct hif_exec_context *hif_ext_group;
3033 
3034 	hif_core_ctl_set_boost(true);
3035 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3036 		hif_ext_group = hif_state->hif_ext_group[i];
3037 		hif_pci_irq_set_affinity_hint(hif_ext_group);
3038 	}
3039 }
3040 
3041 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3042 			      struct hif_exec_context *hif_ext_group)
3043 {
3044 	int ret = 0;
3045 	int irq = 0;
3046 	int j;
3047 	int pci_slot;
3048 
3049 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3050 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3051 	hif_ext_group->irq_name = &hif_pci_get_irq_name;
3052 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3053 
3054 	pci_slot = hif_get_pci_slot(scn);
3055 	for (j = 0; j < hif_ext_group->numirq; j++) {
3056 		irq = hif_ext_group->irq[j];
3057 		if (scn->irq_unlazy_disable)
3058 			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
3059 		hif_debug("request_irq = %d for grp %d",
3060 			  irq, hif_ext_group->grp_id);
3061 		ret = pfrm_request_irq(
3062 				scn->qdf_dev->dev, irq,
3063 				hif_ext_group_interrupt_handler,
3064 				IRQF_SHARED | IRQF_NO_SUSPEND,
3065 				dp_irqname[pci_slot][hif_ext_group->grp_id],
3066 				hif_ext_group);
3067 		if (ret) {
3068 			HIF_ERROR("%s: request_irq failed ret = %d",
3069 				  __func__, ret);
3070 			return -EFAULT;
3071 		}
3072 		hif_ext_group->os_irq[j] = irq;
3073 	}
3074 	hif_ext_group->irq_requested = true;
3075 	return 0;
3076 }
3077 
3078 /**
3079  * hif_configure_irq() - configure interrupt
3080  *
3081  * This function configures interrupt(s)
3082  *
3083  * @sc: PCIe control struct
3084  * @hif_hdl: struct HIF_CE_state
3085  *
3086  * Return: 0 - for success
3087  */
3088 int hif_configure_irq(struct hif_softc *scn)
3089 {
3090 	int ret = 0;
3091 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3092 
3093 	HIF_TRACE("%s: E", __func__);
3094 
3095 	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
3096 		scn->request_irq_done = false;
3097 		return 0;
3098 	}
3099 
3100 	hif_init_reschedule_tasklet_work(sc);
3101 
3102 	ret = hif_ce_msi_configure_irq(scn);
	if (ret == 0)
		goto end;
3106 
3107 	switch (scn->target_info.target_type) {
3108 	case TARGET_TYPE_IPQ4019:
3109 		ret = hif_ahb_configure_legacy_irq(sc);
3110 		break;
3111 	case TARGET_TYPE_QCA8074:
3112 	case TARGET_TYPE_QCA8074V2:
3113 	case TARGET_TYPE_QCA6018:
3114 	case TARGET_TYPE_QCA5018:
3115 		ret = hif_ahb_configure_irq(sc);
3116 		break;
3117 	default:
3118 		ret = hif_pci_configure_legacy_irq(sc);
3119 		break;
3120 	}
3121 	if (ret < 0) {
3122 		HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
3123 			__func__, ret);
3124 		return ret;
3125 	}
3126 end:
3127 	scn->request_irq_done = true;
3128 	return 0;
3129 }
3130 
3131 /**
 * hif_trigger_timer_irq() - trigger an interrupt on LF_TIMER 0
 * @scn: hif control structure
 *
 * Sets the IRQ bit in the LF Timer Status Address to wake a
 * Peregrine/Swift target stuck in a polling loop in pcie_address_config
 * in FW
3137  *
3138  * Return: none
3139  */
3140 static void hif_trigger_timer_irq(struct hif_softc *scn)
3141 {
3142 	int tmp;
3143 	/* Trigger IRQ on Peregrine/Swift by setting
3144 	 * IRQ Bit of LF_TIMER 0
3145 	 */
3146 	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3147 						SOC_LF_TIMER_STATUS0_ADDRESS));
3148 	/* Set Raw IRQ Bit */
3149 	tmp |= 1;
3150 	/* SOC_LF_TIMER_STATUS0 */
3151 	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3152 		       SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
3153 }
3154 
3155 /**
 * hif_target_sync() - ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interrupts so that
3160  * it can begin booting. Ensures that the fw finishes booting
3161  * before continuing. Should be called before trying to write
3162  * to the targets other registers for the first time.
3163  *
3164  * Return: none
3165  */
3166 static void hif_target_sync(struct hif_softc *scn)
3167 {
3168 	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3169 			    PCIE_INTR_ENABLE_ADDRESS),
3170 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3171 	/* read to flush pcie write */
3172 	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3173 			PCIE_INTR_ENABLE_ADDRESS));
3174 
3175 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3176 			PCIE_SOC_WAKE_ADDRESS,
3177 			PCIE_SOC_WAKE_V_MASK);
3178 	while (!hif_targ_is_awake(scn, scn->mem))
3179 		;
3180 
3181 	if (HAS_FW_INDICATOR) {
3182 		int wait_limit = 500;
3183 		int fw_ind = 0;
3184 		int retry_count = 0;
3185 		uint32_t target_type = scn->target_info.target_type;
3186 fw_retry:
3187 		HIF_TRACE("%s: Loop checking FW signal", __func__);
3188 		while (1) {
3189 			fw_ind = hif_read32_mb(scn, scn->mem +
3190 					FW_INDICATOR_ADDRESS);
3191 			if (fw_ind & FW_IND_INITIALIZED)
3192 				break;
3193 			if (wait_limit-- < 0)
3194 				break;
3195 			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3196 			    PCIE_INTR_ENABLE_ADDRESS),
3197 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3198 			    /* read to flush pcie write */
3199 			(void)hif_read32_mb(scn, scn->mem +
3200 			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
3201 
3202 			qdf_mdelay(10);
3203 		}
3204 		if (wait_limit < 0) {
3205 			if (target_type == TARGET_TYPE_AR9888 &&
3206 			    retry_count++ < 2) {
3207 				hif_trigger_timer_irq(scn);
3208 				wait_limit = 500;
3209 				goto fw_retry;
3210 			}
3211 			HIF_TRACE("%s: FW signal timed out",
3212 					__func__);
3213 			qdf_assert_always(0);
3214 		} else {
3215 			HIF_TRACE("%s: Got FW signal, retries = %x",
3216 					__func__, 500-wait_limit);
3217 		}
3218 	}
3219 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3220 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3221 }
3222 
3223 static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
3224 				     struct device *dev)
3225 {
3226 	struct pld_soc_info info;
3227 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
3228 
3229 	pld_get_soc_info(dev, &info);
3230 	sc->mem = info.v_addr;
3231 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3232 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3233 	scn->target_info.target_version = info.soc_id;
3234 	scn->target_info.target_revision = 0;
3235 }
3236 
3237 static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
3238 				       struct device *dev)
3239 {}
3240 
3241 static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
3242 				    int device_id)
3243 {
3244 	if (!pld_have_platform_driver_support(sc->dev))
3245 		return false;
3246 
3247 	switch (device_id) {
3248 	case QCA6290_DEVICE_ID:
3249 	case QCN9000_DEVICE_ID:
3250 	case QCA6290_EMULATION_DEVICE_ID:
3251 	case QCA6390_DEVICE_ID:
3252 	case QCA6490_DEVICE_ID:
3253 	case AR6320_DEVICE_ID:
3254 	case QCN7605_DEVICE_ID:
3255 		return true;
3256 	}
3257 	return false;
3258 }
3259 
3260 static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
3261 					   int device_id)
3262 {
3263 	if (hif_is_pld_based_target(sc, device_id)) {
3264 		sc->hif_enable_pci = hif_enable_pci_pld;
3265 		sc->hif_pci_deinit = hif_pci_deinit_pld;
3266 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
3267 	} else {
3268 		sc->hif_enable_pci = hif_enable_pci_nopld;
3269 		sc->hif_pci_deinit = hif_pci_deinit_nopld;
3270 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
3271 	}
3272 }
3273 
3274 #ifdef HIF_REG_WINDOW_SUPPORT
3275 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3276 					       u32 target_type)
3277 {
3278 	switch (target_type) {
3279 	case TARGET_TYPE_QCN7605:
3280 		sc->use_register_windowing = true;
3281 		qdf_spinlock_create(&sc->register_access_lock);
3282 		sc->register_window = 0;
3283 		break;
3284 	default:
3285 		sc->use_register_windowing = false;
3286 	}
3287 }
3288 #else
3289 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3290 					       u32 target_type)
3291 {
3292 	sc->use_register_windowing = false;
3293 }
3294 #endif
3295 
3296 /**
 * hif_pci_enable_bus(): enable bus
 * @ol_sc: soft_sc struct
 * @dev: device pointer
 * @bdev: bus dev pointer
 * @bid: bus id pointer
 * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
 *
 * This function enables the bus
 *
 * Return: QDF_STATUS
3307  */
3308 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3309 			  struct device *dev, void *bdev,
3310 			  const struct hif_bus_id *bid,
3311 			  enum hif_enable_type type)
3312 {
3313 	int ret = 0;
3314 	uint32_t hif_type, target_type;
3315 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3316 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3317 	uint16_t revision_id = 0;
3318 	int probe_again = 0;
3319 	struct pci_dev *pdev = bdev;
3320 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3321 	struct hif_target_info *tgt_info;
3322 
3323 	if (!ol_sc) {
3324 		HIF_ERROR("%s: hif_ctx is NULL", __func__);
3325 		return QDF_STATUS_E_NOMEM;
3326 	}
3327 	/* Following print is used by various tools to identify
3328 	 * WLAN SOC (e.g. crash dump analysis and reporting tool).
3329 	 */
3330 	HIF_TRACE("%s: con_mode = 0x%x, WLAN_SOC_device_id = 0x%x",
3331 		  __func__, hif_get_conparam(ol_sc), id->device);
3332 
3333 	sc->pdev = pdev;
3334 	sc->dev = &pdev->dev;
3335 	sc->devid = id->device;
3336 	sc->cacheline_sz = dma_get_cache_alignment();
3337 	tgt_info = hif_get_target_info_handle(hif_hdl);
3338 	hif_pci_init_deinit_ops_attach(sc, id->device);
3339 	sc->hif_pci_get_soc_info(sc, dev);
3340 again:
3341 	ret = sc->hif_enable_pci(sc, pdev, id);
3342 	if (ret < 0) {
3343 		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
3344 		       __func__, ret);
3345 		goto err_enable_pci;
3346 	}
3347 	HIF_TRACE("%s: hif_enable_pci done", __func__);
3348 
3349 	/* Temporary FIX: disable ASPM on peregrine.
3350 	 * Will be removed after the OTP is programmed
3351 	 */
3352 	hif_disable_power_gating(hif_hdl);
3353 
3354 	device_disable_async_suspend(&pdev->dev);
3355 	pfrm_read_config_word(pdev, 0x08, &revision_id);
3356 
3357 	ret = hif_get_device_type(id->device, revision_id,
3358 						&hif_type, &target_type);
3359 	if (ret < 0) {
3360 		HIF_ERROR("%s: invalid device id/revision_id", __func__);
3361 		goto err_tgtstate;
3362 	}
3363 	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
3364 		  __func__, hif_type, target_type);
3365 
3366 	hif_register_tbl_attach(ol_sc, hif_type);
3367 	hif_target_register_tbl_attach(ol_sc, target_type);
3368 
3369 	hif_pci_init_reg_windowing_support(sc, target_type);
3370 
3371 	tgt_info->target_type = target_type;
3372 
3373 	/*
	 * Disable unlazy interrupt registration for QCN9000
3375 	 */
3376 	if (target_type == TARGET_TYPE_QCN9000)
3377 		ol_sc->irq_unlazy_disable = 1;
3378 
3379 	if (ce_srng_based(ol_sc)) {
3380 		HIF_TRACE("%s:Skip tgt_wake up for srng devices\n", __func__);
3381 	} else {
3382 		ret = hif_pci_probe_tgt_wakeup(sc);
3383 		if (ret < 0) {
3384 			HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d",
3385 					__func__, ret);
3386 			if (ret == -EAGAIN)
3387 				probe_again++;
3388 			goto err_tgtstate;
3389 		}
3390 		HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
3391 	}
3392 
3393 	if (!ol_sc->mem_pa) {
3394 		HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
3395 		ret = -EIO;
3396 		goto err_tgtstate;
3397 	}
3398 
3399 	if (!ce_srng_based(ol_sc)) {
3400 		hif_target_sync(ol_sc);
3401 
3402 		if (hif_pci_default_link_up(tgt_info))
3403 			hif_vote_link_up(hif_hdl);
3404 	}
3405 
3406 	return 0;
3407 
3408 err_tgtstate:
3409 	hif_disable_pci(sc);
3410 	sc->pci_enabled = false;
3411 	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
3412 	return QDF_STATUS_E_ABORTED;
3413 
3414 err_enable_pci:
3415 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3416 		int delay_time;
3417 
3418 		HIF_INFO("%s: pci reprobe", __func__);
3419 		/* 10, 40, 90, 100, 100, ... */
3420 		delay_time = max(100, 10 * (probe_again * probe_again));
3421 		qdf_mdelay(delay_time);
3422 		goto again;
3423 	}
3424 	return ret;
3425 }
3426 
3427 /**
3428  * hif_pci_irq_enable() - ce_irq_enable
3429  * @scn: hif_softc
3430  * @ce_id: ce_id
3431  *
3432  * Return: void
3433  */
3434 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3435 {
3436 	uint32_t tmp = 1 << ce_id;
3437 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3438 
3439 	qdf_spin_lock_irqsave(&sc->irq_lock);
3440 	scn->ce_irq_summary &= ~tmp;
3441 	if (scn->ce_irq_summary == 0) {
3442 		/* Enable Legacy PCI line interrupts */
3443 		if (LEGACY_INTERRUPTS(sc) &&
3444 			(scn->target_status != TARGET_STATUS_RESET) &&
3445 			(!qdf_atomic_read(&scn->link_suspended))) {
3446 
3447 			hif_write32_mb(scn, scn->mem +
3448 				(SOC_CORE_BASE_ADDRESS |
3449 				PCIE_INTR_ENABLE_ADDRESS),
3450 				HOST_GROUP0_MASK);
3451 
3452 			hif_read32_mb(scn, scn->mem +
3453 					(SOC_CORE_BASE_ADDRESS |
3454 					PCIE_INTR_ENABLE_ADDRESS));
3455 		}
3456 	}
3457 	if (scn->hif_init_done == true)
3458 		Q_TARGET_ACCESS_END(scn);
3459 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
3460 
3461 	/* check for missed firmware crash */
3462 	hif_fw_interrupt_handler(0, scn);
3463 }
3464 
3465 /**
3466  * hif_pci_irq_disable() - ce_irq_disable
3467  * @scn: hif_softc
3468  * @ce_id: ce_id
3469  *
3470  * only applicable to legacy copy engine...
3471  *
3472  * Return: void
3473  */
3474 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3475 {
3476 	/* For Rome only need to wake up target */
3477 	/* target access is maintained until interrupts are re-enabled */
3478 	Q_TARGET_ACCESS_BEGIN(scn);
3479 }
3480 
3481 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3482 {
3483 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3484 
3485 	/* legacy case only has one irq */
3486 	return pci_scn->irq;
3487 }
3488 
3489 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
3490 {
3491 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3492 	struct hif_target_info *tgt_info;
3493 
3494 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
3495 
3496 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
3497 	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
3498 	    tgt_info->target_type == TARGET_TYPE_QCA6490 ||
3499 	    tgt_info->target_type == TARGET_TYPE_QCN7605 ||
3500 	    tgt_info->target_type == TARGET_TYPE_QCA8074) {
3501 		/*
3502 		 * Need to consider offset's memtype for QCA6290/QCA8074,
3503 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
3504 		 * well initialized/defined.
3505 		 */
3506 		return 0;
3507 	}
3508 
	if ((offset >= DRAM_BASE_ADDRESS &&
	     offset <= DRAM_BASE_ADDRESS + DRAM_SIZE) ||
	    (offset + sizeof(unsigned int) <= sc->mem_len)) {
		return 0;
	}
3513 
3514 	HIF_TRACE("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)\n",
3515 		  offset, (uint32_t)(offset + sizeof(unsigned int)),
3516 		  sc->mem_len);
3517 
3518 	return -EINVAL;
3519 }
3520 
3521 /**
3522  * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
3523  * @scn: hif context
3524  *
3525  * Return: true if soc needs driver bmi otherwise false
3526  */
3527 bool hif_pci_needs_bmi(struct hif_softc *scn)
3528 {
3529 	return !ce_srng_based(scn);
3530 }
3531 
3532 #ifdef FORCE_WAKE
3533 #ifdef DEVICE_FORCE_WAKE_ENABLE
3534 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
3535 {
3536 	uint32_t timeout = 0, value;
3537 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
3538 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3539 
3540 	if (pld_force_wake_request(scn->qdf_dev->dev)) {
3541 		hif_err("force wake request send failed");
3542 		return -EINVAL;
3543 	}
3544 
3545 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
3546 	while (!pld_is_device_awake(scn->qdf_dev->dev) &&
3547 	       timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS) {
3548 		qdf_mdelay(FORCE_WAKE_DELAY_MS);
3549 		timeout += FORCE_WAKE_DELAY_MS;
3550 	}
3551 
3552 	if (pld_is_device_awake(scn->qdf_dev->dev) <= 0) {
3553 		hif_err("Unable to wake up mhi");
3554 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
3555 		return -EINVAL;
3556 	}
3557 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
3558 	hif_write32_mb(scn,
3559 		       scn->mem +
3560 		       PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG,
3561 		       0);
3562 	hif_write32_mb(scn,
3563 		       scn->mem +
3564 		       PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG,
3565 		       1);
3566 
3567 	HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1);
3568 	/*
3569 	 * do not reset the timeout
3570 	 * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME < 50 ms
3571 	 */
3572 	do {
3573 		value =
3574 		hif_read32_mb(scn,
3575 			      scn->mem +
3576 			      PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG);
3577 		if (value)
3578 			break;
3579 		qdf_mdelay(FORCE_WAKE_DELAY_MS);
3580 		timeout += FORCE_WAKE_DELAY_MS;
3581 	} while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS);
3582 
3583 	if (!value) {
3584 		hif_err("failed handshake mechanism");
3585 		HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1);
3586 		return -ETIMEDOUT;
3587 	}
3588 
3589 	HIF_STATS_INC(pci_scn, soc_force_wake_success, 1);
3590 	return 0;
3591 }
3592 
3593 #else /* DEVICE_FORCE_WAKE_ENABLE */
/**
 * hif_force_wake_request() - wake the target via MHI only, skipping the
 * PCIe scratch register write/read handshake
 * @hif_handle: hif opaque context
 *
 * Return: 0 on success, negative errno on failure
 */
3599 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
3600 {
3601 	uint32_t timeout = 0;
3602 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
3603 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3604 
3605 	if (pld_force_wake_request(scn->qdf_dev->dev)) {
3606 		hif_err("force wake request send failed");
3607 		return -EINVAL;
3608 	}
3609 
3610 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
3611 	while (!pld_is_device_awake(scn->qdf_dev->dev) &&
3612 	       timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS) {
3613 		qdf_mdelay(FORCE_WAKE_DELAY_MS);
3614 		timeout += FORCE_WAKE_DELAY_MS;
3615 	}
3616 
3617 	if (pld_is_device_awake(scn->qdf_dev->dev) <= 0) {
3618 		hif_err("Unable to wake up mhi");
3619 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
3620 		return -EINVAL;
3621 	}
3622 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
3623 	return 0;
3624 }
3625 #endif /* DEVICE_FORCE_WAKE_ENABLE */
3626 
3627 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
3628 {
3629 	int ret;
3630 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
3631 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3632 
3633 	ret = pld_force_wake_release(scn->qdf_dev->dev);
3634 	if (ret) {
3635 		hif_err("force wake release failure");
3636 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
3637 		return ret;
3638 	}
3639 
3640 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
3641 	hif_write32_mb(scn,
3642 		       scn->mem +
3643 		       PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG,
3644 		       0);
3645 	HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1);
3646 	return 0;
3647 }
3648 
3649 void hif_print_pci_stats(struct hif_pci_softc *pci_handle)
3650 {
3651 	hif_debug("mhi_force_wake_request_vote: %d",
3652 		  pci_handle->stats.mhi_force_wake_request_vote);
3653 	hif_debug("mhi_force_wake_failure: %d",
3654 		  pci_handle->stats.mhi_force_wake_failure);
3655 	hif_debug("mhi_force_wake_success: %d",
3656 		  pci_handle->stats.mhi_force_wake_success);
3657 	hif_debug("soc_force_wake_register_write_success: %d",
3658 		  pci_handle->stats.soc_force_wake_register_write_success);
3659 	hif_debug("soc_force_wake_failure: %d",
3660 		  pci_handle->stats.soc_force_wake_failure);
3661 	hif_debug("soc_force_wake_success: %d",
3662 		  pci_handle->stats.soc_force_wake_success);
3663 	hif_debug("mhi_force_wake_release_failure: %d",
3664 		  pci_handle->stats.mhi_force_wake_release_failure);
3665 	hif_debug("mhi_force_wake_release_success: %d",
3666 		  pci_handle->stats.mhi_force_wake_release_success);
3667 	hif_debug("oc_force_wake_release_success: %d",
3668 		  pci_handle->stats.soc_force_wake_release_success);
3669 }
3670 #endif /* FORCE_WAKE */
3671 
3672 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
3673 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
3674 {
3675 	return pld_prevent_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
3676 }
3677 
3678 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
3679 {
3680 	pld_allow_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
3681 }
3682 #endif
3683