1 /*
2  * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <linux/pci.h>
20 #include <linux/slab.h>
21 #include <linux/interrupt.h>
22 #include <linux/if_arp.h>
23 #include <linux/of_pci.h>
24 #ifdef CONFIG_PCI_MSM
25 #include <linux/msm_pcie.h>
26 #endif
27 #include "hif_io32.h"
28 #include "if_pci.h"
29 #include "hif.h"
30 #include "target_type.h"
31 #include "hif_main.h"
32 #include "ce_main.h"
33 #include "ce_api.h"
34 #include "ce_internal.h"
35 #include "ce_reg.h"
36 #include "ce_bmi.h"
37 #include "regtable.h"
38 #include "hif_hw_version.h"
39 #include <linux/debugfs.h>
40 #include <linux/seq_file.h>
41 #include "qdf_status.h"
42 #include "qdf_atomic.h"
43 #include "qdf_platform.h"
44 #include "pld_common.h"
45 #include "mp_dev.h"
46 #include "hif_debug.h"
47 
48 #include "if_pci_internal.h"
49 #include "ce_tasklet.h"
50 #include "targaddrs.h"
51 #include "hif_exec.h"
52 
53 #include "pci_api.h"
54 #include "ahb_api.h"
55 #include "wlan_cfg.h"
56 
57 /* Maximum ms timeout for host to wake up target */
58 #define PCIE_WAKE_TIMEOUT 1000
59 #define RAMDUMP_EVENT_TIMEOUT 2500
60 
61 /* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
62  * PCIe data bus errors.
63  * As a workaround for this issue, the reset sequence uses a Target CPU
64  * warm reset instead of SOC_GLOBAL_RESET.
65  */
66 #define CPU_WARM_RESET_WAR
67 #define WLAN_CFG_MAX_PCIE_GROUPS 2
68 #define WLAN_CFG_MAX_CE_COUNT 12
69 
70 const char *dp_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_INT_NUM_CONTEXTS] = {
71 {
72 "pci0_wlan_grp_dp_0",
73 "pci0_wlan_grp_dp_1",
74 "pci0_wlan_grp_dp_2",
75 "pci0_wlan_grp_dp_3",
76 "pci0_wlan_grp_dp_4",
77 "pci0_wlan_grp_dp_5",
78 "pci0_wlan_grp_dp_6",
79 #if !defined(WLAN_MAX_PDEVS)
80 "pci0_wlan_grp_dp_7",
81 "pci0_wlan_grp_dp_8",
82 "pci0_wlan_grp_dp_9",
83 "pci0_wlan_grp_dp_10",
84 #endif
85 },
86 {
87 "pci1_wlan_grp_dp_0",
88 "pci1_wlan_grp_dp_1",
89 "pci1_wlan_grp_dp_2",
90 "pci1_wlan_grp_dp_3",
91 "pci1_wlan_grp_dp_4",
92 "pci1_wlan_grp_dp_5",
93 "pci1_wlan_grp_dp_6",
94 #if !defined(WLAN_MAX_PDEVS)
95 "pci1_wlan_grp_dp_7",
96 "pci1_wlan_grp_dp_8",
97 "pci1_wlan_grp_dp_9",
98 "pci1_wlan_grp_dp_10",
99 #endif
100 }
101 };
102 
103 const char *ce_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_MAX_CE_COUNT] = {
104 {
105 "pci0_wlan_ce_0",
106 "pci0_wlan_ce_1",
107 "pci0_wlan_ce_2",
108 "pci0_wlan_ce_3",
109 "pci0_wlan_ce_4",
110 "pci0_wlan_ce_5",
111 "pci0_wlan_ce_6",
112 "pci0_wlan_ce_7",
113 "pci0_wlan_ce_8",
114 "pci0_wlan_ce_9",
115 "pci0_wlan_ce_10",
116 "pci0_wlan_ce_11",
117 },
118 {
119 "pci1_wlan_ce_0",
120 "pci1_wlan_ce_1",
121 "pci1_wlan_ce_2",
122 "pci1_wlan_ce_3",
123 "pci1_wlan_ce_4",
124 "pci1_wlan_ce_5",
125 "pci1_wlan_ce_6",
126 "pci1_wlan_ce_7",
127 "pci1_wlan_ce_8",
128 "pci1_wlan_ce_9",
129 "pci1_wlan_ce_10",
130 "pci1_wlan_ce_11",
131 }
132 };
133 
134 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
135 static inline int hif_get_pci_slot(struct hif_softc *scn)
136 {
137 	/*
138 	 * If WLAN_MAX_PDEVS is defined as 1, always return pci slot 0
139 	 * since there is only one pci device attached.
140 	 */
141 	return 0;
142 }
143 #else
144 static inline int hif_get_pci_slot(struct hif_softc *scn)
145 {
146 	int pci_id;
147 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
148 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
149 	uint32_t target_type = tgt_info->target_type;
150 	struct device_node *mhi_node;
151 	struct device_node *pcierp_node;
152 	struct device_node *pcie_node;
153 
154 	switch (target_type) {
155 	case TARGET_TYPE_QCN9000:
156 		/* of_node stored in qdf_dev points to the mhi node */
157 		mhi_node = scn->qdf_dev->dev->of_node;
158 		/*
159 		 * The PCIe domain id is stored in the main pci node, which is
160 		 * the second parent of mhi_node.
161 		 */
162 		pcierp_node = mhi_node->parent;
163 		pcie_node = pcierp_node->parent;
164 		pci_id = of_get_pci_domain_nr(pcie_node);
165 		if (pci_id < 0 || pci_id >= WLAN_CFG_MAX_PCIE_GROUPS) {
166 			HIF_ERROR("pci_id:%d is invalid", pci_id);
167 			QDF_ASSERT(0);
168 			return 0;
169 		}
170 		return pci_id;
171 	default:
172 		/* Send pci_id 0 for all other targets */
173 		return 0;
174 	}
175 }
176 #endif
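/*
 * Illustrative device tree topology assumed by hif_get_pci_slot() for
 * QCN9000 (node names below are hypothetical, not taken from this file):
 *
 *	pcie1 {                            <-- pcie_node, linux,pci-domain = <1>
 *		pcie1_rp {                 <-- pcierp_node
 *			mhi_node { ... };  <-- scn->qdf_dev->dev->of_node
 *		};
 *	};
 *
 * of_get_pci_domain_nr(pcie_node) returns the "linux,pci-domain" value,
 * which is then used as the index into the pci0/pci1 IRQ name arrays above.
 */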
177 
178 /*
179  * Top-level interrupt handler for all PCI interrupts from a Target.
180  * When a block of MSI interrupts is allocated, this top-level handler
181  * is not used; instead, we directly call the correct sub-handler.
182  */
183 struct ce_irq_reg_table {
184 	uint32_t irq_enable;
185 	uint32_t irq_status;
186 };
187 
188 #ifndef QCA_WIFI_3_0_ADRASTEA
189 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
190 {
191 }
192 #else
193 static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
194 {
195 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
196 	unsigned int target_enable0, target_enable1;
197 	unsigned int target_cause0, target_cause1;
198 
199 	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
200 	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
201 	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
202 	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);
203 
204 	if ((target_enable0 & target_cause0) ||
205 	    (target_enable1 & target_cause1)) {
206 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
207 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);
208 
209 		if (scn->notice_send)
210 			pld_intr_notify_q6(sc->dev);
211 	}
212 }
213 #endif
214 
215 
216 /**
217  * pci_dispatch_interrupt() - dispatch pending CE interrupts to their tasklets
218  * @scn: hif context
219  *
220  * Return: N/A
221  */
222 static void pci_dispatch_interrupt(struct hif_softc *scn)
223 {
224 	uint32_t intr_summary;
225 	int id;
226 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
227 
228 	if (scn->hif_init_done != true)
229 		return;
230 
231 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
232 		return;
233 
234 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
235 
236 	if (intr_summary == 0) {
237 		if ((scn->target_status != TARGET_STATUS_RESET) &&
238 			(!qdf_atomic_read(&scn->link_suspended))) {
239 
240 			hif_write32_mb(scn, scn->mem +
241 				(SOC_CORE_BASE_ADDRESS |
242 				PCIE_INTR_ENABLE_ADDRESS),
243 				HOST_GROUP0_MASK);
244 
245 			hif_read32_mb(scn, scn->mem +
246 					(SOC_CORE_BASE_ADDRESS |
247 					PCIE_INTR_ENABLE_ADDRESS));
248 		}
249 		Q_TARGET_ACCESS_END(scn);
250 		return;
251 	}
252 	Q_TARGET_ACCESS_END(scn);
253 
254 	scn->ce_irq_summary = intr_summary;
255 	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
256 		if (intr_summary & (1 << id)) {
257 			intr_summary &= ~(1 << id);
258 			ce_dispatch_interrupt(id,  &hif_state->tasklets[id]);
259 		}
260 	}
261 }
262 
263 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
264 {
265 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
266 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
267 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
268 
269 	volatile int tmp;
270 	uint16_t val = 0;
271 	uint32_t bar0 = 0;
272 	uint32_t fw_indicator_address, fw_indicator;
273 	bool ssr_irq = false;
274 	unsigned int host_cause, host_enable;
275 
276 	if (LEGACY_INTERRUPTS(sc)) {
277 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
278 			return IRQ_HANDLED;
279 
280 		if (ADRASTEA_BU) {
281 			host_enable = hif_read32_mb(sc, sc->mem +
282 						    PCIE_INTR_ENABLE_ADDRESS);
283 			host_cause = hif_read32_mb(sc, sc->mem +
284 						   PCIE_INTR_CAUSE_ADDRESS);
285 			if (!(host_enable & host_cause)) {
286 				hif_pci_route_adrastea_interrupt(sc);
287 				return IRQ_HANDLED;
288 			}
289 		}
290 
291 		/* Clear Legacy PCI line interrupts
292 		 * IMPORTANT: the INTR_CLR register has to be written
293 		 * after INTR_ENABLE is set to 0;
294 		 * otherwise the interrupt cannot actually be cleared.
295 		 */
296 		hif_write32_mb(sc, sc->mem +
297 			      (SOC_CORE_BASE_ADDRESS |
298 			       PCIE_INTR_ENABLE_ADDRESS), 0);
299 
300 		hif_write32_mb(sc, sc->mem +
301 			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
302 			       ADRASTEA_BU ?
303 			       (host_enable & host_cause) :
304 			      HOST_GROUP0_MASK);
305 
306 		if (ADRASTEA_BU)
307 			hif_write32_mb(sc, sc->mem + 0x2f100c,
308 				       (host_cause >> 1));
309 
310 		/* IMPORTANT: this extra read transaction is required to
311 		 * flush the posted write buffer
312 		 */
313 		if (!ADRASTEA_BU) {
314 		tmp =
315 			hif_read32_mb(sc, sc->mem +
316 				     (SOC_CORE_BASE_ADDRESS |
317 				      PCIE_INTR_ENABLE_ADDRESS));
318 
319 		if (tmp == 0xdeadbeef) {
320 			HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
321 			       __func__);
322 
323 			pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
324 			HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
325 			       __func__, val);
326 
327 			pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
328 			HIF_ERROR("%s: PCI Device ID = 0x%04x",
329 			       __func__, val);
330 
331 			pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
332 			HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
333 			       val);
334 
335 			pci_read_config_word(sc->pdev, PCI_STATUS, &val);
336 			HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
337 			       val);
338 
339 			pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
340 					      &bar0);
341 			HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
342 			       bar0);
343 
344 			HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
345 				  __func__,
346 				  hif_read32_mb(sc, sc->mem +
347 						PCIE_LOCAL_BASE_ADDRESS
348 						+ RTC_STATE_ADDRESS));
349 			HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
350 				  __func__,
351 				  hif_read32_mb(sc, sc->mem +
352 						PCIE_LOCAL_BASE_ADDRESS
353 						+ PCIE_SOC_WAKE_ADDRESS));
354 			HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
355 				  __func__,
356 				  hif_read32_mb(sc, sc->mem + 0x80008),
357 				  hif_read32_mb(sc, sc->mem + 0x8000c));
358 			HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
359 				  __func__,
360 				  hif_read32_mb(sc, sc->mem + 0x80010),
361 				  hif_read32_mb(sc, sc->mem + 0x80014));
362 			HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
363 				  __func__,
364 				  hif_read32_mb(sc, sc->mem + 0x80018),
365 				  hif_read32_mb(sc, sc->mem + 0x8001c));
366 			QDF_BUG(0);
367 		}
368 
369 		PCI_CLR_CAUSE0_REGISTER(sc);
370 		}
371 
372 		if (HAS_FW_INDICATOR) {
373 			fw_indicator_address = hif_state->fw_indicator_address;
374 			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
375 			if ((fw_indicator != ~0) &&
376 			   (fw_indicator & FW_IND_EVENT_PENDING))
377 				ssr_irq = true;
378 		}
379 
380 		if (Q_TARGET_ACCESS_END(scn) < 0)
381 			return IRQ_HANDLED;
382 	}
383 	/* TBDXXX: Add support for WMAC */
384 
385 	if (ssr_irq) {
386 		sc->irq_event = irq;
387 		qdf_atomic_set(&scn->tasklet_from_intr, 1);
388 
389 		qdf_atomic_inc(&scn->active_tasklet_cnt);
390 		tasklet_schedule(&sc->intr_tq);
391 	} else {
392 		pci_dispatch_interrupt(scn);
393 	}
394 
395 	return IRQ_HANDLED;
396 }
397 
398 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
399 {
400 	return 1;               /* FIX THIS */
401 }
402 
403 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
404 {
405 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
406 	int i = 0;
407 
408 	if (!irq || !size) {
409 		return -EINVAL;
410 	}
411 
412 	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
413 		irq[0] = sc->irq;
414 		return 1;
415 	}
416 
417 	if (sc->num_msi_intrs > size) {
418 		qdf_print("Not enough space in irq buffer to return irqs");
419 		return -EINVAL;
420 	}
421 
422 	for (i = 0; i < sc->num_msi_intrs; i++) {
423 		irq[i] = sc->irq +  i + MSI_ASSIGN_CE_INITIAL;
424 	}
425 
426 	return sc->num_msi_intrs;
427 }
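/*
 * Usage sketch for hif_get_irq_num() (illustrative only; the caller context
 * and array name are hypothetical):
 *
 *	int irqs[WLAN_CFG_MAX_CE_COUNT];
 *	int nr_irqs = hif_get_irq_num(hif_hdl, irqs, QDF_ARRAY_SIZE(irqs));
 *
 *	if (nr_irqs < 0)
 *		return nr_irqs;
 *
 * On success, irqs[0..nr_irqs - 1] hold either the single legacy/MSI irq or
 * the per-CE MSI vectors starting at sc->irq + MSI_ASSIGN_CE_INITIAL.
 */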
428 
429 
430 /**
431  * hif_pci_cancel_deferred_target_sleep() - cancel the deferred target sleep
432  * @scn: hif_softc
433  *
434  * Return: void
435  */
436 #if CONFIG_ATH_PCIE_MAX_PERF == 0
437 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
438 {
439 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
440 	A_target_id_t pci_addr = scn->mem;
441 
442 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
443 	/*
444 	 * If the deferred sleep timer is running, cancel it
445 	 * and put the SoC to sleep.
446 	 */
447 	if (hif_state->fake_sleep == true) {
448 		qdf_timer_stop(&hif_state->sleep_timer);
449 		if (hif_state->verified_awake == false) {
450 			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
451 				      PCIE_SOC_WAKE_ADDRESS,
452 				      PCIE_SOC_WAKE_RESET);
453 		}
454 		hif_state->fake_sleep = false;
455 	}
456 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
457 }
458 #else
459 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
460 {
461 }
462 #endif
463 
464 #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
465 	hif_read32_mb(sc, (char *)(mem) + \
466 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
467 
468 #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
469 	hif_write32_mb(sc, ((char *)(mem) + \
470 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
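/*
 * Expansion example (illustrative): A_PCIE_LOCAL_REG_READ(sc, mem,
 * RTC_STATE_ADDRESS) becomes
 *	hif_read32_mb(sc, (char *)mem + PCIE_LOCAL_BASE_ADDRESS +
 *		      RTC_STATE_ADDRESS)
 * i.e. a 32-bit read from the PCIe local register space. The reset routines
 * below use these helpers to poll RTC_STATE and toggle SOC_GLOBAL_RESET.
 */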
471 
472 #ifdef QCA_WIFI_3_0
473 /**
474  * hif_targ_is_awake() - check to see if the target is awake
475  * @hif_ctx: hif context
476  *
477  * emulation never goes to sleep
478  *
479  * Return: true if target is awake
480  */
481 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
482 {
483 	return true;
484 }
485 #else
486 /**
487  * hif_targ_is_awake() - check to see if the target is awake
488  * @scn: hif context
489  *
490  * Return: true if the target's clocks are on
491  */
492 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
493 {
494 	uint32_t val;
495 
496 	if (scn->recovery)
497 		return false;
498 	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
499 		+ RTC_STATE_ADDRESS);
500 	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
501 }
502 #endif
503 
504 #define ATH_PCI_RESET_WAIT_MAX 10       /* Ms */
505 static void hif_pci_device_reset(struct hif_pci_softc *sc)
506 {
507 	void __iomem *mem = sc->mem;
508 	int i;
509 	uint32_t val;
510 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
511 
512 	if (!scn->hostdef)
513 		return;
514 
515 	/* NB: Don't check resetok here.  This form of reset
516 	 * is integral to correct operation.
517 	 */
518 
519 	if (!SOC_GLOBAL_RESET_ADDRESS)
520 		return;
521 
522 	if (!mem)
523 		return;
524 
525 	HIF_ERROR("%s: Reset Device", __func__);
526 
527 	/*
528 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
529 	 * writing WAKE_V, the Target may scribble over Host memory!
530 	 */
531 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
532 			       PCIE_SOC_WAKE_V_MASK);
533 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
534 		if (hif_targ_is_awake(scn, mem))
535 			break;
536 
537 		qdf_mdelay(1);
538 	}
539 
540 	/* Put Target, including PCIe, into RESET. */
541 	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
542 	val |= 1;
543 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
544 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
545 		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
546 		    RTC_STATE_COLD_RESET_MASK)
547 			break;
548 
549 		qdf_mdelay(1);
550 	}
551 
552 	/* Pull Target, including PCIe, out of RESET. */
553 	val &= ~1;
554 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
555 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
556 		if (!
557 		    (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
558 		     RTC_STATE_COLD_RESET_MASK))
559 			break;
560 
561 		qdf_mdelay(1);
562 	}
563 
564 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
565 			       PCIE_SOC_WAKE_RESET);
566 }
567 
568 /* CPU warm reset function
569  * Steps:
570  * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
571  * 2. Clear the FW_INDICATOR_ADDRESS - so the Target CPU initializes FW
572  *    correctly on WARM reset
573  * 3. Clear TARGET CPU LF timer interrupt
574  * 4. Reset all CEs to clear any pending CE transactions
575  * 5. Warm reset CPU
576  */
577 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
578 {
579 	void __iomem *mem = sc->mem;
580 	int i;
581 	uint32_t val;
582 	uint32_t fw_indicator;
583 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
584 
585 	/* NB: Don't check resetok here.  This form of reset is
586 	 * integral to correct operation.
587 	 */
588 
589 	if (!mem)
590 		return;
591 
592 	HIF_INFO_MED("%s: Target Warm Reset", __func__);
593 
594 	/*
595 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
596 	 * writing WAKE_V, the Target may scribble over Host memory!
597 	 */
598 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
599 			       PCIE_SOC_WAKE_V_MASK);
600 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
601 		if (hif_targ_is_awake(scn, mem))
602 			break;
603 		qdf_mdelay(1);
604 	}
605 
606 	/*
607 	 * Disable Pending interrupts
608 	 */
609 	val =
610 		hif_read32_mb(sc, mem +
611 			     (SOC_CORE_BASE_ADDRESS |
612 			      PCIE_INTR_CAUSE_ADDRESS));
613 	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
614 		    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
615 	/* Target CPU Intr Cause */
616 	val = hif_read32_mb(sc, mem +
617 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
618 	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);
619 
620 	val =
621 		hif_read32_mb(sc, mem +
622 			     (SOC_CORE_BASE_ADDRESS |
623 			      PCIE_INTR_ENABLE_ADDRESS));
624 	hif_write32_mb(sc, (mem +
625 		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
626 	hif_write32_mb(sc, (mem +
627 		       (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
628 		       HOST_GROUP0_MASK);
629 
630 	qdf_mdelay(100);
631 
632 	/* Clear FW_INDICATOR_ADDRESS */
633 	if (HAS_FW_INDICATOR) {
634 		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
635 		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
636 	}
637 
638 	/* Clear Target LF Timer interrupts */
639 	val =
640 		hif_read32_mb(sc, mem +
641 			     (RTC_SOC_BASE_ADDRESS +
642 			      SOC_LF_TIMER_CONTROL0_ADDRESS));
643 	HIF_INFO_MED("%s: addr 0x%x :  0x%x", __func__,
644 	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
645 	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
646 	hif_write32_mb(sc, mem +
647 		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
648 		      val);
649 
650 	/* Reset CE */
651 	val =
652 		hif_read32_mb(sc, mem +
653 			     (RTC_SOC_BASE_ADDRESS |
654 			      SOC_RESET_CONTROL_ADDRESS));
655 	val |= SOC_RESET_CONTROL_CE_RST_MASK;
656 	hif_write32_mb(sc, (mem +
657 		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
658 		      val);
659 	val =
660 		hif_read32_mb(sc, mem +
661 			     (RTC_SOC_BASE_ADDRESS |
662 			      SOC_RESET_CONTROL_ADDRESS));
663 	qdf_mdelay(10);
664 
665 	/* CE unreset */
666 	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
667 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
668 		       SOC_RESET_CONTROL_ADDRESS), val);
669 	val =
670 		hif_read32_mb(sc, mem +
671 			     (RTC_SOC_BASE_ADDRESS |
672 			      SOC_RESET_CONTROL_ADDRESS));
673 	qdf_mdelay(10);
674 
675 	/* Read Target CPU Intr Cause */
676 	val = hif_read32_mb(sc, mem +
677 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
678 	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
679 		    __func__, val);
680 
681 	/* CPU warm RESET */
682 	val =
683 		hif_read32_mb(sc, mem +
684 			     (RTC_SOC_BASE_ADDRESS |
685 			      SOC_RESET_CONTROL_ADDRESS));
686 	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
687 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
688 		       SOC_RESET_CONTROL_ADDRESS), val);
689 	val =
690 		hif_read32_mb(sc, mem +
691 			     (RTC_SOC_BASE_ADDRESS |
692 			      SOC_RESET_CONTROL_ADDRESS));
693 	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
694 		    __func__, val);
695 
696 	qdf_mdelay(100);
697 	HIF_INFO_MED("%s: Target Warm reset complete", __func__);
698 
699 }
700 
701 #ifndef QCA_WIFI_3_0
702 /* only applicable to legacy ce */
703 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
704 {
705 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
706 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
707 	void __iomem *mem = sc->mem;
708 	uint32_t val;
709 
710 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
711 		return ATH_ISR_NOSCHED;
712 	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
713 	if (Q_TARGET_ACCESS_END(scn) < 0)
714 		return ATH_ISR_SCHED;
715 
716 	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);
717 
718 	if (val & FW_IND_HELPER)
719 		return 0;
720 
721 	return 1;
722 }
723 #endif
724 
725 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
726 {
727 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
728 	uint16_t device_id = 0;
729 	uint32_t val;
730 	uint16_t timeout_count = 0;
731 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
732 
733 	/* Check device ID from PCIe configuration space for link status */
734 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
735 	if (device_id != sc->devid) {
736 		HIF_ERROR("%s: device ID does not match (read 0x%x, expect 0x%x)",
737 			  __func__, device_id, sc->devid);
738 		return -EACCES;
739 	}
740 
741 	/* Check PCIe local register for bar/memory access */
742 	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
743 			   RTC_STATE_ADDRESS);
744 	HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);
745 
746 	/* Try to wake up the target if it is asleep */
747 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
748 		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
749 	HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
750 		hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
751 		PCIE_SOC_WAKE_ADDRESS));
752 
753 	/* Check if the target can be woken up */
754 	while (!hif_targ_is_awake(scn, sc->mem)) {
755 		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
756 			HIF_ERROR("%s: wake up timeout, %08x, %08x",
757 				__func__,
758 				hif_read32_mb(sc, sc->mem +
759 					     PCIE_LOCAL_BASE_ADDRESS +
760 					     RTC_STATE_ADDRESS),
761 				hif_read32_mb(sc, sc->mem +
762 					     PCIE_LOCAL_BASE_ADDRESS +
763 					PCIE_SOC_WAKE_ADDRESS));
764 			return -EACCES;
765 		}
766 
767 		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
768 			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
769 
770 		qdf_mdelay(100);
771 		timeout_count += 100;
772 	}
773 
774 	/* Check Power register for SoC internal bus issues */
775 	val =
776 		hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
777 			     SOC_POWER_REG_OFFSET);
778 	HIF_INFO_MED("%s: Power register is %08x", __func__, val);
779 
780 	return 0;
781 }
782 
783 /**
784  * __hif_pci_dump_registers(): dump other PCI debug registers
785  * @scn: struct hif_softc
786  *
787  * This function dumps PCI debug registers.  The parent function
788  * dumps the copy engine registers before calling this function.
789  *
790  * Return: void
791  */
792 static void __hif_pci_dump_registers(struct hif_softc *scn)
793 {
794 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
795 	void __iomem *mem = sc->mem;
796 	uint32_t val, i, j;
797 	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
798 	uint32_t ce_base;
799 
800 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
801 		return;
802 
803 	/* DEBUG_INPUT_SEL_SRC = 0x6 */
804 	val =
805 		hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
806 			     WLAN_DEBUG_INPUT_SEL_OFFSET);
807 	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
808 	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
809 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
810 		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);
811 
812 	/* DEBUG_CONTROL_ENABLE = 0x1 */
813 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
814 			   WLAN_DEBUG_CONTROL_OFFSET);
815 	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
816 	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
817 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
818 		      WLAN_DEBUG_CONTROL_OFFSET, val);
819 
820 	HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
821 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
822 			    WLAN_DEBUG_INPUT_SEL_OFFSET),
823 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
824 			    WLAN_DEBUG_CONTROL_OFFSET));
825 
826 	HIF_INFO_MED("%s: Debug CE", __func__);
827 	/* Loop CE debug output */
828 	/* AMBA_DEBUG_BUS_SEL = 0xc */
829 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
830 			    AMBA_DEBUG_BUS_OFFSET);
831 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
832 	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
833 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
834 		       val);
835 
836 	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
837 		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
838 		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
839 				   CE_WRAPPER_DEBUG_OFFSET);
840 		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
841 		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
842 		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
843 			      CE_WRAPPER_DEBUG_OFFSET, val);
844 
845 		HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
846 			    __func__, wrapper_idx[i],
847 			    hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
848 				AMBA_DEBUG_BUS_OFFSET),
849 			    hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
850 				CE_WRAPPER_DEBUG_OFFSET));
851 
852 		if (wrapper_idx[i] <= 7) {
853 			for (j = 0; j <= 5; j++) {
854 				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
855 				/* For (j=0~5) write CE_DEBUG_SEL = j */
856 				val =
857 					hif_read32_mb(sc, mem + ce_base +
858 						     CE_DEBUG_OFFSET);
859 				val &= ~CE_DEBUG_SEL_MASK;
860 				val |= CE_DEBUG_SEL_SET(j);
861 				hif_write32_mb(sc, mem + ce_base +
862 					       CE_DEBUG_OFFSET, val);
863 
864 				/* read (@gpio_athr_wlan_reg)
865 				 * WLAN_DEBUG_OUT_DATA
866 				 */
867 				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
868 						    + WLAN_DEBUG_OUT_OFFSET);
869 				val = WLAN_DEBUG_OUT_DATA_GET(val);
870 
871 				HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
872 					    __func__, j,
873 					    hif_read32_mb(sc, mem + ce_base +
874 						    CE_DEBUG_OFFSET), val);
875 			}
876 		} else {
877 			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
878 			val =
879 				hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
880 					     WLAN_DEBUG_OUT_OFFSET);
881 			val = WLAN_DEBUG_OUT_DATA_GET(val);
882 
883 			HIF_INFO_MED("%s: out: %x", __func__, val);
884 		}
885 	}
886 
887 	HIF_INFO_MED("%s: Debug PCIe:", __func__);
888 	/* Loop PCIe debug output */
889 	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
890 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
891 			    AMBA_DEBUG_BUS_OFFSET);
892 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
893 	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
894 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
895 		       AMBA_DEBUG_BUS_OFFSET, val);
896 
897 	for (i = 0; i <= 8; i++) {
898 		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
899 		val =
900 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
901 				     AMBA_DEBUG_BUS_OFFSET);
902 		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
903 		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
904 		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
905 			       AMBA_DEBUG_BUS_OFFSET, val);
906 
907 		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
908 		val =
909 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
910 				     WLAN_DEBUG_OUT_OFFSET);
911 		val = WLAN_DEBUG_OUT_DATA_GET(val);
912 
913 		HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
914 		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
915 				    WLAN_DEBUG_OUT_OFFSET), val,
916 		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
917 				    WLAN_DEBUG_OUT_OFFSET));
918 	}
919 
920 	Q_TARGET_ACCESS_END(scn);
921 }
922 
923 /**
924  * hif_pci_dump_registers(): dump PCI bus debug registers
925  * @hif_ctx: struct hif_softc
926  *
927  * This function dumps hif bus debug registers
928  *
929  * Return: 0 for success or error code
930  */
931 int hif_pci_dump_registers(struct hif_softc *hif_ctx)
932 {
933 	int status;
934 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
935 
936 	status = hif_dump_ce_registers(scn);
937 
938 	if (status)
939 		HIF_ERROR("%s: Dump CE Registers Failed", __func__);
940 
941 	/* dump non copy engine pci registers */
942 	__hif_pci_dump_registers(scn);
943 
944 	return 0;
945 }
946 
947 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
948 
949 /* worker thread to schedule wlan_tasklet in SLUB debug build */
950 static void reschedule_tasklet_work_handler(void *arg)
951 {
952 	struct hif_pci_softc *sc = arg;
953 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
954 
955 	if (!scn) {
956 		HIF_ERROR("%s: hif_softc is NULL\n", __func__);
957 		return;
958 	}
959 
960 	if (scn->hif_init_done == false) {
961 		HIF_ERROR("%s: wlan driver is unloaded", __func__);
962 		return;
963 	}
964 
965 	tasklet_schedule(&sc->intr_tq);
966 }
967 
968 /**
969  * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
970  * work
971  * @sc: HIF PCI Context
972  *
973  * Return: void
974  */
975 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
976 {
977 	qdf_create_work(0, &sc->reschedule_tasklet_work,
978 				reschedule_tasklet_work_handler, NULL);
979 }
980 #else
981 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
982 #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
983 
984 void wlan_tasklet(unsigned long data)
985 {
986 	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
987 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
988 
989 	if (scn->hif_init_done == false)
990 		goto end;
991 
992 	if (qdf_atomic_read(&scn->link_suspended))
993 		goto end;
994 
995 	if (!ADRASTEA_BU) {
996 		hif_fw_interrupt_handler(sc->irq_event, scn);
997 		if (scn->target_status == TARGET_STATUS_RESET)
998 			goto end;
999 	}
1000 
1001 end:
1002 	qdf_atomic_set(&scn->tasklet_from_intr, 0);
1003 	qdf_atomic_dec(&scn->active_tasklet_cnt);
1004 }
1005 
1006 #ifdef FEATURE_RUNTIME_PM
1007 static bool hif_pci_pm_runtime_enabled(struct hif_pci_softc *sc)
1008 {
1009 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1010 
1011 	if (scn->hif_config.enable_runtime_pm)
1012 		return true;
1013 
1014 	return pm_runtime_enabled(sc->dev);
1015 }
1016 
1017 static const char *hif_pm_runtime_state_to_string(uint32_t state)
1018 {
1019 	switch (state) {
1020 	case HIF_PM_RUNTIME_STATE_NONE:
1021 		return "INIT_STATE";
1022 	case HIF_PM_RUNTIME_STATE_ON:
1023 		return "ON";
1024 	case HIF_PM_RUNTIME_STATE_RESUMING:
1025 		return "RESUMING";
1026 	case HIF_PM_RUNTIME_STATE_SUSPENDING:
1027 		return "SUSPENDING";
1028 	case HIF_PM_RUNTIME_STATE_SUSPENDED:
1029 		return "SUSPENDED";
1030 	default:
1031 		return "INVALID STATE";
1032 	}
1033 }
1034 
1035 #define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
1036 	seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
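/*
 * For example, HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended) expands to
 *	seq_printf(s, "%30s: %u\n", "suspended", sc->pm_stats.suspended);
 * printing one right-aligned counter name and value per line.
 */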
1037 /**
1038  * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
1039  * @sc: hif_pci_softc context
1040  * @msg: log message
1041  *
1042  * log runtime pm stats when something seems off.
1043  *
1044  * Return: void
1045  */
1046 static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
1047 {
1048 	struct hif_pm_runtime_lock *ctx;
1049 	int i;
1050 
1051 	hif_nofl_debug("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
1052 		       msg, atomic_read(&sc->dev->power.usage_count),
1053 		       hif_pm_runtime_state_to_string(
1054 				atomic_read(&sc->pm_state)),
1055 		       sc->prevent_suspend_cnt);
1056 
1057 	hif_nofl_debug("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
1058 		       sc->dev->power.runtime_status,
1059 		       sc->dev->power.runtime_error,
1060 		       sc->dev->power.disable_depth,
1061 		       sc->dev->power.autosuspend_delay);
1062 
1063 	hif_nofl_debug("runtime_get: %u, runtime_put: %u, request_resume: %u",
1064 		       qdf_atomic_read(&sc->pm_stats.runtime_get),
1065 		       qdf_atomic_read(&sc->pm_stats.runtime_put),
1066 		       sc->pm_stats.request_resume);
1067 
1068 	hif_nofl_debug("get     put     get-timestamp put-timestamp :DBGID_NAME");
1069 	for (i = 0; i < RTPM_ID_MAX; i++) {
1070 		hif_nofl_debug("%-10d %-10d  0x%-10llx  0x%-10llx :%-30s",
1071 			       qdf_atomic_read(
1072 				       &sc->pm_stats.runtime_get_dbgid[i]),
1073 			       qdf_atomic_read(
1074 				       &sc->pm_stats.runtime_put_dbgid[i]),
1075 			       sc->pm_stats.runtime_get_timestamp_dbgid[i],
1076 			       sc->pm_stats.runtime_put_timestamp_dbgid[i],
1077 			       rtpm_string_from_dbgid(i));
1078 	}
1079 
1080 	hif_nofl_debug("allow_suspend: %u, prevent_suspend: %u",
1081 		       qdf_atomic_read(&sc->pm_stats.allow_suspend),
1082 		       qdf_atomic_read(&sc->pm_stats.prevent_suspend));
1083 
1084 	hif_nofl_debug("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
1085 		       sc->pm_stats.prevent_suspend_timeout,
1086 		       sc->pm_stats.allow_suspend_timeout);
1087 
1088 	hif_nofl_debug("Suspended: %u, resumed: %u count",
1089 		       sc->pm_stats.suspended,
1090 		       sc->pm_stats.resumed);
1091 
1092 	hif_nofl_debug("suspend_err: %u, runtime_get_err: %u",
1093 		       sc->pm_stats.suspend_err,
1094 		       sc->pm_stats.runtime_get_err);
1095 
1096 	hif_nofl_debug("Active Wakeup Sources preventing Runtime Suspend: ");
1097 
1098 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
1099 		hif_nofl_debug("source %s; timeout %d ms",
1100 			       ctx->name, ctx->timeout);
1101 	}
1102 
1103 	if (qdf_is_fw_down()) {
1104 		hif_err("fw is down");
1105 		return;
1106 	}
1107 
1108 	QDF_DEBUG_PANIC("hif_pci_runtime_pm_warn");
1109 }
1110 
1111 /**
1112  * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
1113  * @s: file to print to
1114  * @data: unused
1115  *
1116  * debugging tool added to the debug fs for displaying runtimepm stats
1117  *
1118  * Return: 0
1119  */
1120 static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
1121 {
1122 	struct hif_pci_softc *sc = s->private;
1123 	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
1124 		"SUSPENDING", "SUSPENDED"};
1125 	unsigned int msecs_age;
1126 	qdf_time_t usecs_age;
1127 	int pm_state = atomic_read(&sc->pm_state);
1128 	unsigned long timer_expires;
1129 	struct hif_pm_runtime_lock *ctx;
1130 	int i;
1131 
1132 	seq_printf(s, "%30s: %s\n", "Runtime PM state",
1133 		   autopm_state[pm_state]);
1134 	seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
1135 		   sc->pm_stats.last_resume_caller);
1136 	seq_printf(s, "%30s: %pf\n", "Last Busy Marker",
1137 		   sc->pm_stats.last_busy_marker);
1138 
1139 	usecs_age = qdf_get_log_timestamp_usecs() -
1140 		sc->pm_stats.last_busy_timestamp;
1141 	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp",
1142 		   sc->pm_stats.last_busy_timestamp / 1000000,
1143 		   sc->pm_stats.last_busy_timestamp % 1000000);
1144 	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since",
1145 		   usecs_age / 1000000, usecs_age % 1000000);
1146 
1147 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
1148 		msecs_age = jiffies_to_msecs(jiffies -
1149 					     sc->pm_stats.suspend_jiffies);
1150 		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
1151 			   msecs_age / 1000, msecs_age % 1000);
1152 	}
1153 
1154 	seq_printf(s, "%30s: %d\n", "PM Usage count",
1155 		   atomic_read(&sc->dev->power.usage_count));
1156 
1157 	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
1158 		   sc->prevent_suspend_cnt);
1159 
1160 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
1161 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
1162 	HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
1163 
1164 	HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
1165 	seq_printf(s, "%30s: %u\n", "prevent_suspend",
1166 		   qdf_atomic_read(&sc->pm_stats.prevent_suspend));
1167 	seq_printf(s, "%30s: %u\n", "allow_suspend",
1168 		   qdf_atomic_read(&sc->pm_stats.allow_suspend));
1169 
1170 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
1171 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
1172 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);
1173 
1174 	seq_printf(s, "%30s: %u\n", "runtime_get",
1175 		   qdf_atomic_read(&sc->pm_stats.runtime_get));
1176 	seq_printf(s, "%30s: %u\n", "runtime_put",
1177 		   qdf_atomic_read(&sc->pm_stats.runtime_put));
1178 	seq_printf(s, "get     put     get-timestamp put-timestamp :DBGID_NAME\n");
1179 	for (i = 0; i < RTPM_ID_MAX; i++) {
1180 		seq_printf(s, "%-10d ",
1181 			   qdf_atomic_read(&sc->pm_stats.runtime_get_dbgid[i]));
1182 		seq_printf(s, "%-10d ",
1183 			   qdf_atomic_read(&sc->pm_stats.runtime_put_dbgid[i]));
1184 		seq_printf(s, "0x%-10llx ",
1185 			   sc->pm_stats.runtime_get_timestamp_dbgid[i]);
1186 		seq_printf(s, "0x%-10llx ",
1187 			   sc->pm_stats.runtime_put_timestamp_dbgid[i]);
1188 		seq_printf(s, ":%-30s\n", rtpm_string_from_dbgid(i));
1189 	}
1190 
1191 	timer_expires = sc->runtime_timer_expires;
1192 	if (timer_expires > 0) {
1193 		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
1194 		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
1195 			   msecs_age / 1000, msecs_age % 1000);
1196 	}
1197 
1198 	spin_lock_bh(&sc->runtime_lock);
1199 	if (list_empty(&sc->prevent_suspend_list)) {
1200 		spin_unlock_bh(&sc->runtime_lock);
1201 		return 0;
1202 	}
1203 
1204 	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
1205 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
1206 		seq_printf(s, "%s", ctx->name);
1207 		if (ctx->timeout)
1208 			seq_printf(s, "(%d ms)", ctx->timeout);
1209 		seq_puts(s, " ");
1210 	}
1211 	seq_puts(s, "\n");
1212 	spin_unlock_bh(&sc->runtime_lock);
1213 
1214 	return 0;
1215 }
1216 #undef HIF_PCI_RUNTIME_PM_STATS
1217 
1218 /**
1219  * hif_pci_runtime_pm_open() - open a debugfs file to access the runtime pm stats
1220  * @inode: debugfs inode; i_private carries the hif_pci_softc context
1221  * @file: file being opened
1222  *
1223  * Return: linux error code of single_open.
1224  */
1225 static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
1226 {
1227 	return single_open(file, hif_pci_pm_runtime_debugfs_show,
1228 			inode->i_private);
1229 }
1230 
1231 static const struct file_operations hif_pci_runtime_pm_fops = {
1232 	.owner          = THIS_MODULE,
1233 	.open           = hif_pci_runtime_pm_open,
1234 	.release        = single_release,
1235 	.read           = seq_read,
1236 	.llseek         = seq_lseek,
1237 };
1238 
1239 /**
1240  * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
1241  * @sc: pci context
1242  *
1243  * creates a debugfs entry to debug the runtime pm feature.
1244  */
1245 static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
1246 {
1247 	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
1248 					0400, NULL, sc,
1249 					&hif_pci_runtime_pm_fops);
1250 }
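/*
 * With a NULL parent dentry the entry is created at the debugfs root, so
 * (assuming debugfs is mounted at the usual location) the stats above can
 * typically be read with:
 *	cat /sys/kernel/debug/cnss_runtime_pm
 */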
1251 
1252 /**
1253  * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
1254  * @sc: pci context
1255  *
1256  * removes the debugfs entry to debug the runtime pm feature.
1257  */
1258 static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
1259 {
1260 	debugfs_remove(sc->pm_dentry);
1261 }
1262 
1263 static void hif_runtime_init(struct device *dev, int delay)
1264 {
1265 	pm_runtime_set_autosuspend_delay(dev, delay);
1266 	pm_runtime_use_autosuspend(dev);
1267 	pm_runtime_allow(dev);
1268 	pm_runtime_mark_last_busy(dev);
1269 	pm_runtime_put_noidle(dev);
1270 	pm_suspend_ignore_children(dev, true);
1271 }
1272 
1273 static void hif_runtime_exit(struct device *dev)
1274 {
1275 	pm_runtime_get_noresume(dev);
1276 	pm_runtime_set_active(dev);
1277 	/* Symmetric call to make sure default usage count == 2 */
1278 	pm_runtime_forbid(dev);
1279 }
1280 
1281 static void hif_pm_runtime_lock_timeout_fn(void *data);
1282 
1283 /**
1284  * hif_pm_runtime_start(): start the runtime pm
1285  * @sc: pci context
1286  *
1287  * After this call, runtime pm will be active.
1288  */
1289 static void hif_pm_runtime_start(struct hif_pci_softc *sc)
1290 {
1291 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1292 	uint32_t mode = hif_get_conparam(ol_sc);
1293 
1294 	if (!ol_sc->hif_config.enable_runtime_pm) {
1295 		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
1296 		return;
1297 	}
1298 
1299 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
1300 	    mode == QDF_GLOBAL_MONITOR_MODE) {
1301 		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING/Monitor mode\n",
1302 				__func__);
1303 		return;
1304 	}
1305 
1306 	qdf_timer_init(NULL, &sc->runtime_timer,
1307 		       hif_pm_runtime_lock_timeout_fn,
1308 		       sc, QDF_TIMER_TYPE_WAKE_APPS);
1309 
1310 	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
1311 			ol_sc->hif_config.runtime_pm_delay);
1312 
1313 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
1314 	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
1315 	hif_runtime_pm_debugfs_create(sc);
1316 }
1317 
1318 /**
1319  * hif_pm_runtime_stop(): stop runtime pm
1320  * @sc: pci context
1321  *
1322  * Turns off runtime pm and frees corresponding resources
1323  * that were acquired by hif_pm_runtime_start().
1324  */
1325 static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
1326 {
1327 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1328 	uint32_t mode = hif_get_conparam(ol_sc);
1329 
1330 	if (!ol_sc->hif_config.enable_runtime_pm)
1331 		return;
1332 
1333 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
1334 	    mode == QDF_GLOBAL_MONITOR_MODE)
1335 		return;
1336 
1337 	hif_runtime_exit(sc->dev);
1338 
1339 	hif_pm_runtime_sync_resume(GET_HIF_OPAQUE_HDL(sc));
1340 
1341 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1342 
1343 	hif_runtime_pm_debugfs_remove(sc);
1344 	qdf_timer_free(&sc->runtime_timer);
1345 }
1346 
1347 /**
1348  * hif_pm_runtime_open(): initialize runtime pm
1349  * @sc: pci data structure
1350  *
1351  * Early initialization
1352  */
1353 static void hif_pm_runtime_open(struct hif_pci_softc *sc)
1354 {
1355 	int i;
1356 	spin_lock_init(&sc->runtime_lock);
1357 
1358 	qdf_atomic_init(&sc->pm_state);
1359 	qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
1360 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1361 	qdf_atomic_init(&sc->pm_stats.runtime_get);
1362 	qdf_atomic_init(&sc->pm_stats.runtime_put);
1363 	qdf_atomic_init(&sc->pm_stats.allow_suspend);
1364 	qdf_atomic_init(&sc->pm_stats.prevent_suspend);
1365 	for (i = 0; i < RTPM_ID_MAX; i++) {
1366 		qdf_atomic_init(&sc->pm_stats.runtime_get_dbgid[i]);
1367 		qdf_atomic_init(&sc->pm_stats.runtime_put_dbgid[i]);
1368 	}
1369 	INIT_LIST_HEAD(&sc->prevent_suspend_list);
1370 }
1371 
1372 static void  hif_check_for_get_put_out_of_sync(struct hif_pci_softc *sc)
1373 {
1374 	int32_t i;
1375 	int32_t get_count, put_count;
1376 
1377 	if (qdf_is_fw_down())
1378 		return;
1379 
1380 	for (i = 0; i < RTPM_ID_MAX; i++) {
1381 		get_count = qdf_atomic_read(&sc->pm_stats.runtime_get_dbgid[i]);
1382 		put_count = qdf_atomic_read(&sc->pm_stats.runtime_put_dbgid[i]);
1383 		if (get_count != put_count) {
1384 			QDF_DEBUG_PANIC("%s get-put out of sync. get %d put %d",
1385 					rtpm_string_from_dbgid(i),
1386 					get_count, put_count);
1387 		}
1388 	}
1389 }
1390 
1391 /**
1392  * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
1393  * @sc: pci context
1394  *
1395  * Ensure we have only one vote against runtime suspend before closing
1396  * the runtime suspend feature.
1397  *
1398  * All gets by the wlan driver should have been returned;
1399  * one vote should remain as part of cnss_runtime_exit.
1400  *
1401  * This needs to be revisited if we share the root complex.
1402  */
1403 static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
1404 {
1405 	struct hif_pm_runtime_lock *ctx, *tmp;
1406 
1407 	hif_check_for_get_put_out_of_sync(sc);
1408 
1409 	if (atomic_read(&sc->dev->power.usage_count) != 2)
1410 		hif_pci_runtime_pm_warn(sc, "Driver Module Closing");
1411 	else
1412 		return;
1413 
1414 	spin_lock_bh(&sc->runtime_lock);
1415 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1416 		spin_unlock_bh(&sc->runtime_lock);
1417 		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
1418 		spin_lock_bh(&sc->runtime_lock);
1419 	}
1420 	spin_unlock_bh(&sc->runtime_lock);
1421 
1422 	/* The default usage count is 2, so ensure it is exactly 2 on exit.
1423 	 * This way, runtime PM is not left disabled when the WLAN driver
1424 	 * module is re-enabled, and it does not get broken by the count
1425 	 * dropping below 2.
1426 	 */
1427 	if (atomic_read(&sc->dev->power.usage_count) <= 1)
1428 		atomic_set(&sc->dev->power.usage_count, 2);
1429 	while (atomic_read(&sc->dev->power.usage_count) > 2)
1430 		hif_pm_runtime_put_auto(sc->dev);
1431 }
1432 
1433 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
1434 					  struct hif_pm_runtime_lock *lock);
1435 
1436 /**
1437  * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
1438  * @sc: PCIe Context
1439  *
1440  * API is used to empty the runtime pm prevent suspend list.
1441  *
1442  * Return: void
1443  */
1444 static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
1445 {
1446 	struct hif_pm_runtime_lock *ctx, *tmp;
1447 
1448 	spin_lock_bh(&sc->runtime_lock);
1449 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1450 		__hif_pm_runtime_allow_suspend(sc, ctx);
1451 	}
1452 	spin_unlock_bh(&sc->runtime_lock);
1453 }
1454 
1455 /**
1456  * hif_pm_runtime_close(): close runtime pm
1457  * @sc: pci bus handle
1458  *
1459  * ensure runtime_pm is stopped before closing the driver
1460  */
1461 static void hif_pm_runtime_close(struct hif_pci_softc *sc)
1462 {
1463 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1464 
1465 	qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
1466 
1467 	hif_is_recovery_in_progress(scn) ?
1468 		hif_pm_runtime_sanitize_on_ssr_exit(sc) :
1469 		hif_pm_runtime_sanitize_on_exit(sc);
1470 }
1471 
1472 int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
1473 {
1474 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1475 	int pm_state;
1476 
1477 	if (!sc)
1478 		return -EINVAL;
1479 
1480 	if (!hif_pci_pm_runtime_enabled(sc))
1481 		return 0;
1482 
1483 	pm_state = qdf_atomic_read(&sc->pm_state);
1484 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
1485 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
1486 		HIF_INFO("Runtime PM resume is requested by %ps",
1487 			 (void *)_RET_IP_);
1488 
1489 	sc->pm_stats.request_resume++;
1490 	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
1491 
1492 	return pm_runtime_resume(sc->dev);
1493 }
1494 #else
1495 static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
1496 static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
1497 static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
1498 static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
1499 #endif
1500 
1501 /**
1502  * hif_disable_power_gating() - disable HW power gating
1503  * @hif_ctx: hif context
1504  *
1505  * disables pcie L1 power states
1506  */
1507 static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1508 {
1509 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1510 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1511 
1512 	if (!scn) {
1513 		HIF_ERROR("%s: Could not disable ASPM, scn is NULL",
1514 		       __func__);
1515 		return;
1516 	}
1517 
1518 	/* Disable ASPM when pkt log is enabled */
1519 	pfrm_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1520 	pfrm_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1521 }
1522 
1523 /**
1524  * hif_enable_power_gating() - enable HW power gating
1525  * @sc: pci context
1526  *
1527  * enables pcie L1 power states
1528  */
1529 static void hif_enable_power_gating(struct hif_pci_softc *sc)
1530 {
1531 	if (!sc) {
1532 		HIF_ERROR("%s: Could not re-enable ASPM, sc is NULL",
1533 		       __func__);
1534 		return;
1535 	}
1536 
1537 	/* Re-enable ASPM after firmware/OTP download is complete */
1538 	pfrm_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1539 }
1540 
1541 /**
1542  * hif_pci_enable_power_management() - enable power management
1543  * @hif_sc: hif context
1544  *
1545  * Enables runtime pm, ASPM (PCI, via hif_enable_power_gating) and re-enables
1546  * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
1547  *
1548  * note: epping mode does not call this function as it does not
1549  *       care about saving power.
1550  */
1551 void hif_pci_enable_power_management(struct hif_softc *hif_sc,
1552 				 bool is_packet_log_enabled)
1553 {
1554 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
1555 	uint32_t mode;
1556 
1557 	if (!pci_ctx) {
1558 		HIF_ERROR("%s, hif_ctx null", __func__);
1559 		return;
1560 	}
1561 
1562 	mode = hif_get_conparam(hif_sc);
1563 	if (mode == QDF_GLOBAL_FTM_MODE) {
1564 		HIF_INFO("%s: Enable power gating for FTM mode", __func__);
1565 		hif_enable_power_gating(pci_ctx);
1566 		return;
1567 	}
1568 
1569 	hif_pm_runtime_start(pci_ctx);
1570 
1571 	if (!is_packet_log_enabled)
1572 		hif_enable_power_gating(pci_ctx);
1573 
1574 	if (!CONFIG_ATH_PCIE_MAX_PERF &&
1575 	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
1576 	    !ce_srng_based(hif_sc)) {
1577 		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
1578 		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
1579 			HIF_ERROR("%s, failed to set target to sleep",
1580 				  __func__);
1581 	}
1582 }
1583 
1584 /**
1585  * hif_pci_disable_power_management() - disable power management
1586  * @hif_ctx: hif context
1587  *
1588  * Currently disables runtime pm. Should be updated to behave gracefully
1589  * if runtime pm is not started, and to take care of ASPM and soc sleep
1590  * for driver load.
1591  */
1592 void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
1593 {
1594 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1595 
1596 	if (!pci_ctx) {
1597 		HIF_ERROR("%s, hif_ctx null", __func__);
1598 		return;
1599 	}
1600 
1601 	hif_pm_runtime_stop(pci_ctx);
1602 }
1603 
1604 void hif_pci_display_stats(struct hif_softc *hif_ctx)
1605 {
1606 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1607 
1608 	if (!pci_ctx) {
1609 		HIF_ERROR("%s, hif_ctx null", __func__);
1610 		return;
1611 	}
1612 	hif_display_ce_stats(hif_ctx);
1613 
1614 	hif_print_pci_stats(pci_ctx);
1615 }
1616 
1617 void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1618 {
1619 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1620 
1621 	if (!pci_ctx) {
1622 		HIF_ERROR("%s, hif_ctx null", __func__);
1623 		return;
1624 	}
1625 	hif_clear_ce_stats(&pci_ctx->ce_sc);
1626 }
1627 
1628 #define ATH_PCI_PROBE_RETRY_MAX 3
1629 /**
1630  * hif_pci_open(): open the PCI instance of the hif bus
1631  * @hif_ctx: hif context
1632  * @bus_type: bus type
1633  *
1634  * Return: QDF_STATUS
1635  */
1636 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
1637 {
1638 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1639 
1640 	hif_ctx->bus_type = bus_type;
1641 	hif_pm_runtime_open(sc);
1642 
1643 	qdf_spinlock_create(&sc->irq_lock);
1644 
1645 	return hif_ce_open(hif_ctx);
1646 }
1647 
1648 /**
1649  * hif_wake_target_cpu() - wake the target's cpu
1650  * @scn: hif context
1651  *
1652  * Send an interrupt to the device to wake up the Target CPU
1653  * so it has an opportunity to notice any changed state.
1654  */
1655 static void hif_wake_target_cpu(struct hif_softc *scn)
1656 {
1657 	QDF_STATUS rv;
1658 	uint32_t core_ctrl;
1659 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1660 
1661 	rv = hif_diag_read_access(hif_hdl,
1662 				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1663 				  &core_ctrl);
1664 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1665 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1666 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1667 
1668 	rv = hif_diag_write_access(hif_hdl,
1669 				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1670 				   core_ctrl);
1671 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1672 }
1673 
1674 /**
1675  * soc_wake_reset() - allow the target to go to sleep
1676  * @scn: hif_softc
1677  *
1678  * Clear the force wake register.  This is done by
1679  * hif_sleep_entry and when the deferred sleep timer is cancelled.
1680  */
1681 static void soc_wake_reset(struct hif_softc *scn)
1682 {
1683 	hif_write32_mb(scn, scn->mem +
1684 		PCIE_LOCAL_BASE_ADDRESS +
1685 		PCIE_SOC_WAKE_ADDRESS,
1686 		PCIE_SOC_WAKE_RESET);
1687 }
1688 
1689 /**
1690  * hif_sleep_entry() - gate target sleep
1691  * @arg: hif context
1692  *
1693  * This function is the callback for the sleep timer.
1694  * Check if last force awake critical section was at least
1695  * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago.  If it was,
1696  * allow the target to go to sleep and cancel the sleep timer.
1697  * Otherwise, reschedule the sleep timer.
1698  */
1699 static void hif_sleep_entry(void *arg)
1700 {
1701 	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1702 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1703 	uint32_t idle_ms;
1704 
1705 	if (scn->recovery)
1706 		return;
1707 
1708 	if (hif_is_driver_unloading(scn))
1709 		return;
1710 
1711 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1712 	if (hif_state->fake_sleep) {
1713 		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1714 						    - hif_state->sleep_ticks);
1715 		if (!hif_state->verified_awake &&
1716 		    idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1717 			if (!qdf_atomic_read(&scn->link_suspended)) {
1718 				soc_wake_reset(scn);
1719 				hif_state->fake_sleep = false;
1720 			}
1721 		} else {
1722 			qdf_timer_stop(&hif_state->sleep_timer);
1723 			qdf_timer_start(&hif_state->sleep_timer,
1724 					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1725 		}
1726 	}
1727 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1728 }
1729 
1730 #define HIF_HIA_MAX_POLL_LOOP    1000000
1731 #define HIF_HIA_POLLING_DELAY_MS 10
1732 
1733 #ifdef QCA_HIF_HIA_EXTND
1734 
1735 static void hif_set_hia_extnd(struct hif_softc *scn)
1736 {
1737 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1738 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1739 	uint32_t target_type = tgt_info->target_type;
1740 
1741 	HIF_TRACE("%s: E", __func__);
1742 
1743 	if ((target_type == TARGET_TYPE_AR900B) ||
1744 			target_type == TARGET_TYPE_QCA9984 ||
1745 			target_type == TARGET_TYPE_QCA9888) {
1746 		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
1747 		/* CHIP revision is bits 8-11 of the CHIP_ID register 0xec
1748 		 */
1749 		tgt_info->target_revision
1750 			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
1751 					+ CHIP_ID_ADDRESS));
1752 		qdf_print("chip_id 0x%x chip_revision 0x%x",
1753 			  target_type, tgt_info->target_revision);
1754 	}
1755 
1756 	{
1757 		uint32_t flag2_value = 0;
1758 		uint32_t flag2_targ_addr =
1759 			host_interest_item_address(target_type,
1760 			offsetof(struct host_interest_s, hi_skip_clock_init));
1761 
1762 		if ((ar900b_20_targ_clk != -1) &&
1763 			(frac != -1) && (intval != -1)) {
1764 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1765 				&flag2_value);
1766 			qdf_print("\n Setting clk_override");
1767 			flag2_value |= CLOCK_OVERRIDE;
1768 
1769 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1770 					flag2_value);
1771 			qdf_print("\n CLOCK PLL val set %d", flag2_value);
1772 		} else {
1773 			qdf_print("\n CLOCK PLL skipped");
1774 		}
1775 	}
1776 
1777 	if (target_type == TARGET_TYPE_AR900B
1778 			|| target_type == TARGET_TYPE_QCA9984
1779 			|| target_type == TARGET_TYPE_QCA9888) {
1780 
1781 		/* for AR9980_2.0, 300 mhz clock is used, right now we assume
1782 		/* For AR9980_2.0, a 300 MHz clock is used. Right now we assume
1783 		 * this would be supplied through module parameters;
1784 		 * if not supplied, assume the default (same behavior as 1.0).
1785 		 * Assume the 1.0 clock can't be tuned; reset to defaults.
1786 
1787 		qdf_print(KERN_INFO
1788 			  "%s: setting the target pll frac %x intval %x",
1789 			  __func__, frac, intval);
1790 
1791 		/* Do not touch frac and intval; let them default to -1.
1792 		 * If desired, the host can supply these through module params.
1793 		 */
1794 		if (frac != -1 || intval != -1) {
1795 			uint32_t flag2_value = 0;
1796 			uint32_t flag2_targ_addr;
1797 
1798 			flag2_targ_addr =
1799 				host_interest_item_address(target_type,
1800 				offsetof(struct host_interest_s,
1801 					hi_clock_info));
1802 			hif_diag_read_access(hif_hdl,
1803 				flag2_targ_addr, &flag2_value);
1804 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1805 				  flag2_value);
1806 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1807 			qdf_print("\n INT Val %x  Address %x",
1808 				  intval, flag2_value + 4);
1809 			hif_diag_write_access(hif_hdl,
1810 					flag2_value + 4, intval);
1811 		} else {
1812 			qdf_print(KERN_INFO
1813 				  "%s: no frac provided, skipping pre-configuring PLL",
1814 				  __func__);
1815 		}
1816 
1817 		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
1818 		if ((target_type == TARGET_TYPE_AR900B)
1819 			&& (tgt_info->target_revision == AR900B_REV_2)
1820 			&& ar900b_20_targ_clk != -1) {
1821 			uint32_t flag2_value = 0;
1822 			uint32_t flag2_targ_addr;
1823 
1824 			flag2_targ_addr
1825 				= host_interest_item_address(target_type,
1826 					offsetof(struct host_interest_s,
1827 					hi_desired_cpu_speed_hz));
1828 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1829 							&flag2_value);
1830 			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
1831 				  flag2_value);
1832 			hif_diag_write_access(hif_hdl, flag2_value,
1833 				ar900b_20_targ_clk/*300000000u*/);
1834 		} else if (target_type == TARGET_TYPE_QCA9888) {
1835 			uint32_t flag2_targ_addr;
1836 
1837 			if (200000000u != qca9888_20_targ_clk) {
1838 				qca9888_20_targ_clk = 300000000u;
1839 				/* Setting the target clock speed to 300 mhz */
1840 			}
1841 
1842 			flag2_targ_addr
1843 				= host_interest_item_address(target_type,
1844 					offsetof(struct host_interest_s,
1845 					hi_desired_cpu_speed_hz));
1846 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1847 				qca9888_20_targ_clk);
1848 		} else {
1849 			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
1850 				  __func__);
1851 		}
1852 	} else {
1853 		if (frac != -1 || intval != -1) {
1854 			uint32_t flag2_value = 0;
1855 			uint32_t flag2_targ_addr =
1856 				host_interest_item_address(target_type,
1857 					offsetof(struct host_interest_s,
1858 							hi_clock_info));
1859 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1860 						&flag2_value);
1861 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1862 				  flag2_value);
1863 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1864 			qdf_print("\n INT Val %x  Address %x", intval,
1865 				  flag2_value + 4);
1866 			hif_diag_write_access(hif_hdl, flag2_value + 4,
1867 					      intval);
1868 		}
1869 	}
1870 }
1871 
1872 #else
1873 
1874 static void hif_set_hia_extnd(struct hif_softc *scn)
1875 {
1876 }
1877 
1878 #endif
1879 
1880 /**
1881  * hif_set_hia() - fill out the host interest area
1882  * @scn: hif context
1883  *
1884  * This is replaced by hif_wlan_enable for integrated targets.
1885  * This fills out the host interest area.  The firmware will
1886  * process these memory addresses when it is first brought out
1887  * of reset.
1888  *
1889  * Return: 0 for success.
1890  */
1891 static int hif_set_hia(struct hif_softc *scn)
1892 {
1893 	QDF_STATUS rv;
1894 	uint32_t interconnect_targ_addr = 0;
1895 	uint32_t pcie_state_targ_addr = 0;
1896 	uint32_t pipe_cfg_targ_addr = 0;
1897 	uint32_t svc_to_pipe_map = 0;
1898 	uint32_t pcie_config_flags = 0;
1899 	uint32_t flag2_value = 0;
1900 	uint32_t flag2_targ_addr = 0;
1901 #ifdef QCA_WIFI_3_0
1902 	uint32_t host_interest_area = 0;
1903 	uint8_t i;
1904 #else
1905 	uint32_t ealloc_value = 0;
1906 	uint32_t ealloc_targ_addr = 0;
1907 	uint8_t banks_switched = 1;
1908 	uint32_t chip_id;
1909 #endif
1910 	uint32_t pipe_cfg_addr;
1911 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1912 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1913 	uint32_t target_type = tgt_info->target_type;
1914 	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
1915 	static struct CE_pipe_config *target_ce_config;
1916 	struct service_to_pipe *target_service_to_ce_map;
1917 
1918 	HIF_TRACE("%s: E", __func__);
1919 
1920 	hif_get_target_ce_config(scn,
1921 				 &target_ce_config, &target_ce_config_sz,
1922 				 &target_service_to_ce_map,
1923 				 &target_service_to_ce_map_sz,
1924 				 NULL, NULL);
1925 
1926 	if (ADRASTEA_BU)
1927 		return QDF_STATUS_SUCCESS;
1928 
1929 #ifdef QCA_WIFI_3_0
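	/* Poll the scratch register until the target publishes the host
	 * interest area address (bit 0 set as a ready flag), then clear
	 * the flag bit to recover the address itself.
	 */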
1930 	i = 0;
1931 	while (i < HIF_HIA_MAX_POLL_LOOP) {
1932 		host_interest_area = hif_read32_mb(scn, scn->mem +
1933 						A_SOC_CORE_SCRATCH_0_ADDRESS);
1934 		if ((host_interest_area & 0x01) == 0) {
1935 			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1936 			host_interest_area = 0;
1937 			i++;
1938 			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1939 				HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1940 		} else {
1941 			host_interest_area &= (~0x01);
1942 			hif_write32_mb(scn, scn->mem + 0x113014, 0);
1943 			break;
1944 		}
1945 	}
1946 
1947 	if (i >= HIF_HIA_MAX_POLL_LOOP) {
1948 		HIF_ERROR("%s: hia polling timeout", __func__);
1949 		return -EIO;
1950 	}
1951 
1952 	if (host_interest_area == 0) {
1953 		HIF_ERROR("%s: host_interest_area = 0", __func__);
1954 		return -EIO;
1955 	}
1956 
1957 	interconnect_targ_addr = host_interest_area +
1958 			offsetof(struct host_interest_area_t,
1959 			hi_interconnect_state);
1960 
1961 	flag2_targ_addr = host_interest_area +
1962 			offsetof(struct host_interest_area_t, hi_option_flag2);
1963 
1964 #else
1965 	interconnect_targ_addr = hif_hia_item_address(target_type,
1966 		offsetof(struct host_interest_s, hi_interconnect_state));
1967 	ealloc_targ_addr = hif_hia_item_address(target_type,
1968 		offsetof(struct host_interest_s, hi_early_alloc));
1969 	flag2_targ_addr = hif_hia_item_address(target_type,
1970 		offsetof(struct host_interest_s, hi_option_flag2));
1971 #endif
1972 	/* Supply Target-side CE configuration */
1973 	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1974 			  &pcie_state_targ_addr);
1975 	if (rv != QDF_STATUS_SUCCESS) {
1976 		HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1977 			  __func__, interconnect_targ_addr, rv);
1978 		goto done;
1979 	}
1980 	if (pcie_state_targ_addr == 0) {
1981 		rv = QDF_STATUS_E_FAILURE;
1982 		HIF_ERROR("%s: pcie state addr is 0", __func__);
1983 		goto done;
1984 	}
1985 	pipe_cfg_addr = pcie_state_targ_addr +
1986 			  offsetof(struct pcie_state_s,
1987 			  pipe_cfg_addr);
1988 	rv = hif_diag_read_access(hif_hdl,
1989 			  pipe_cfg_addr,
1990 			  &pipe_cfg_targ_addr);
1991 	if (rv != QDF_STATUS_SUCCESS) {
1992 		HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1993 			__func__, pipe_cfg_addr, rv);
1994 		goto done;
1995 	}
1996 	if (pipe_cfg_targ_addr == 0) {
1997 		rv = QDF_STATUS_E_FAILURE;
1998 		HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1999 		goto done;
2000 	}
2001 
2002 	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
2003 			(uint8_t *) target_ce_config,
2004 			target_ce_config_sz);
2005 
2006 	if (rv != QDF_STATUS_SUCCESS) {
2007 		HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
2008 		goto done;
2009 	}
2010 
2011 	rv = hif_diag_read_access(hif_hdl,
2012 			  pcie_state_targ_addr +
2013 			  offsetof(struct pcie_state_s,
2014 			   svc_to_pipe_map),
2015 			  &svc_to_pipe_map);
2016 	if (rv != QDF_STATUS_SUCCESS) {
2017 		HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
2018 		goto done;
2019 	}
2020 	if (svc_to_pipe_map == 0) {
2021 		rv = QDF_STATUS_E_FAILURE;
2022 		HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
2023 		goto done;
2024 	}
2025 
2026 	rv = hif_diag_write_mem(hif_hdl,
2027 			svc_to_pipe_map,
2028 			(uint8_t *) target_service_to_ce_map,
2029 			target_service_to_ce_map_sz);
2030 	if (rv != QDF_STATUS_SUCCESS) {
2031 		HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
2032 		goto done;
2033 	}
2034 
2035 	rv = hif_diag_read_access(hif_hdl,
2036 			pcie_state_targ_addr +
2037 			offsetof(struct pcie_state_s,
2038 			config_flags),
2039 			&pcie_config_flags);
2040 	if (rv != QDF_STATUS_SUCCESS) {
2041 		HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
2042 		goto done;
2043 	}
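	/* Build-time options choose how the target manages PCIe clocking:
	 * L1 clock gating and (optionally) AXI clock gating are advertised
	 * to the target through the config_flags word written below.
	 */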
2044 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
2045 	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
2046 #else
2047 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
2048 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
2049 	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
2050 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
2051 	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
2052 #endif
2053 	rv = hif_diag_write_mem(hif_hdl,
2054 			pcie_state_targ_addr +
2055 			offsetof(struct pcie_state_s,
2056 			config_flags),
2057 			(uint8_t *) &pcie_config_flags,
2058 			sizeof(pcie_config_flags));
2059 	if (rv != QDF_STATUS_SUCCESS) {
2060 		HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
2061 		goto done;
2062 	}
2063 
2064 #ifndef QCA_WIFI_3_0
2065 	/* configure early allocation */
2066 	ealloc_targ_addr = hif_hia_item_address(target_type,
2067 						offsetof(
2068 						struct host_interest_s,
2069 						hi_early_alloc));
2070 
2071 	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
2072 			&ealloc_value);
2073 	if (rv != QDF_STATUS_SUCCESS) {
2074 		HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
2075 		goto done;
2076 	}
2077 
2078 	/* 1 bank is switched to IRAM, except ROME 1.0 */
2079 	ealloc_value |=
2080 		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2081 		 HI_EARLY_ALLOC_MAGIC_MASK);
2082 
2083 	rv = hif_diag_read_access(hif_hdl,
2084 			  CHIP_ID_ADDRESS |
2085 			  RTC_SOC_BASE_ADDRESS, &chip_id);
2086 	if (rv != QDF_STATUS_SUCCESS) {
2087 		HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
2088 		goto done;
2089 	}
2090 	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
2091 		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
2092 		switch (CHIP_ID_REVISION_GET(chip_id)) {
2093 		case 0x2:       /* ROME 1.3 */
2094 			/* 2 banks are switched to IRAM */
2095 			banks_switched = 2;
2096 			break;
2097 		case 0x4:       /* ROME 2.1 */
2098 		case 0x5:       /* ROME 2.2 */
2099 			banks_switched = 6;
2100 			break;
2101 		case 0x8:       /* ROME 3.0 */
2102 		case 0x9:       /* ROME 3.1 */
2103 		case 0xA:       /* ROME 3.2 */
2104 			banks_switched = 9;
2105 			break;
2106 		case 0x0:       /* ROME 1.0 */
2107 		case 0x1:       /* ROME 1.1 */
2108 		default:
2109 			/* 3 banks are switched to IRAM */
2110 			banks_switched = 3;
2111 			break;
2112 		}
2113 	}
2114 
2115 	ealloc_value |=
2116 		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
2117 		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2118 
2119 	rv = hif_diag_write_access(hif_hdl,
2120 				ealloc_targ_addr,
2121 				ealloc_value);
2122 	if (rv != QDF_STATUS_SUCCESS) {
2123 		HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
2124 		goto done;
2125 	}
2126 #endif
2127 	if ((target_type == TARGET_TYPE_AR900B)
2128 			|| (target_type == TARGET_TYPE_QCA9984)
2129 			|| (target_type == TARGET_TYPE_QCA9888)
2130 			|| (target_type == TARGET_TYPE_AR9888)) {
2131 		hif_set_hia_extnd(scn);
2132 	}
2133 
2134 	/* Tell Target to proceed with initialization */
2135 	flag2_targ_addr = hif_hia_item_address(target_type,
2136 						offsetof(
2137 						struct host_interest_s,
2138 						hi_option_flag2));
2139 
2140 	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
2141 			  &flag2_value);
2142 	if (rv != QDF_STATUS_SUCCESS) {
2143 		HIF_ERROR("%s: get option val (%d)", __func__, rv);
2144 		goto done;
2145 	}
2146 
2147 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2148 	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
2149 			   flag2_value);
2150 	if (rv != QDF_STATUS_SUCCESS) {
2151 		HIF_ERROR("%s: set option val (%d)", __func__, rv);
2152 		goto done;
2153 	}
2154 
2155 	hif_wake_target_cpu(scn);
2156 
2157 done:
2158 
2159 	return rv;
2160 }
2161 
2162 /**
2163  * hif_pci_bus_configure() - configure the pcie bus
2164  * @hif_sc: pointer to the hif context.
2165  *
2166  * Return: 0 for success, nonzero for failure.
2167  */
2168 int hif_pci_bus_configure(struct hif_softc *hif_sc)
2169 {
2170 	int status = 0;
2171 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
2172 	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
2173 
2174 	hif_ce_prepare_config(hif_sc);
2175 
2176 	/* initialize sleep state adjust variables */
2177 	hif_state->sleep_timer_init = true;
2178 	hif_state->keep_awake_count = 0;
2179 	hif_state->fake_sleep = false;
2180 	hif_state->sleep_ticks = 0;
2181 
2182 	qdf_timer_init(NULL, &hif_state->sleep_timer,
2183 			       hif_sleep_entry, (void *)hif_state,
2184 			       QDF_TIMER_TYPE_WAKE_APPS);
2185 	hif_state->sleep_timer_init = true;
2186 
2187 	status = hif_wlan_enable(hif_sc);
2188 	if (status) {
2189 		HIF_ERROR("%s: hif_wlan_enable error = %d",
2190 			  __func__, status);
2191 		goto timer_free;
2192 	}
2193 
2194 	A_TARGET_ACCESS_LIKELY(hif_sc);
2195 
2196 	if ((CONFIG_ATH_PCIE_MAX_PERF ||
2197 	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
2198 	    !ce_srng_based(hif_sc)) {
2199 		/*
2200 		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
2201 		 * prevent sleep when we want to keep firmware always awake
2202 		 * note: when we want to keep firmware always awake,
2203 		 *       hif_target_sleep_state_adjust will point to a dummy
2204 		 *       function, and hif_pci_target_sleep_state_adjust must
2205 		 *       be called instead.
2206 		 * note: bus type check is here because AHB bus is reusing
2207 		 *       hif_pci_bus_configure code.
2208 		 */
2209 		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
2210 			if (hif_pci_target_sleep_state_adjust(hif_sc,
2211 					false, true) < 0) {
2212 				status = -EACCES;
2213 				goto disable_wlan;
2214 			}
2215 		}
2216 	}
2217 
2218 	/* todo: consider replacing this with an srng field */
2219 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
2220 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
2221 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
2222 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
2223 	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
2224 		hif_sc->per_ce_irq = true;
2225 	}
2226 
2227 	status = hif_config_ce(hif_sc);
2228 	if (status)
2229 		goto disable_wlan;
2230 
2231 	if (hif_needs_bmi(hif_osc)) {
2232 		status = hif_set_hia(hif_sc);
2233 		if (status)
2234 			goto unconfig_ce;
2235 
2236 		HIF_INFO_MED("%s: hif_set_hia done", __func__);
2237 
2238 	}
2239 
2240 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
2241 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
2242 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
2243 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
2244 	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
2245 		HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
2246 						__func__);
2247 	else {
2248 		status = hif_configure_irq(hif_sc);
2249 		if (status < 0)
2250 			goto unconfig_ce;
2251 	}
2252 
2253 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2254 
2255 	return status;
2256 
2257 unconfig_ce:
2258 	hif_unconfig_ce(hif_sc);
2259 disable_wlan:
2260 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2261 	hif_wlan_disable(hif_sc);
2262 
2263 timer_free:
2264 	qdf_timer_stop(&hif_state->sleep_timer);
2265 	qdf_timer_free(&hif_state->sleep_timer);
2266 	hif_state->sleep_timer_init = false;
2267 
2268 	HIF_ERROR("%s: failed, status = %d", __func__, status);
2269 	return status;
2270 }
2271 
2272 /**
2273  * hif_pci_close(): close the pci bus
2274  * @hif_sc: hif context
2275  * Return: n/a
2276  */
2277 void hif_pci_close(struct hif_softc *hif_sc)
2278 {
2279 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
2280 
2281 	hif_pm_runtime_close(hif_pci_sc);
2282 	hif_ce_close(hif_sc);
2283 }
2284 
2285 #define BAR_NUM 0
2286 
2287 static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
2288 				struct pci_dev *pdev,
2289 				const struct pci_device_id *id)
2290 {
2291 	void __iomem *mem;
2292 	int ret = 0;
2293 	uint16_t device_id = 0;
2294 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2295 
2296 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
2297 	if (device_id != id->device)  {
2298 		HIF_ERROR(
2299 		   "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
2300 		   __func__, device_id, id->device);
2301 		/* pci link is down, so returning with error code */
2302 		return -EIO;
2303 	}
2304 
2305 	/* FIXME: temp. commenting out assign_resource
2306 	 * call for dev_attach to work on 2.6.38 kernel
2307 	 */
2308 #if (!defined(__LINUX_ARM_ARCH__))
2309 	if (pci_assign_resource(pdev, BAR_NUM)) {
2310 		HIF_ERROR("%s: pci_assign_resource error", __func__);
2311 		return -EIO;
2312 	}
2313 #endif
2314 	if (pci_enable_device(pdev)) {
2315 		HIF_ERROR("%s: pci_enable_device error",
2316 			   __func__);
2317 		return -EIO;
2318 	}
2319 
2320 	/* Request MMIO resources */
2321 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2322 	if (ret) {
2323 		HIF_ERROR("%s: PCI MMIO reservation error", __func__);
2324 		ret = -EIO;
2325 		goto err_region;
2326 	}
2327 
2328 #ifdef CONFIG_ARM_LPAE
2329 	/* if CONFIG_ARM_LPAE is enabled, we have to set a 64-bit DMA mask
2330 	 * even for 32-bit devices.
2331 	 */
2332 	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2333 	if (ret) {
2334 		HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
2335 		goto err_dma;
2336 	}
2337 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2338 	if (ret) {
2339 		HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
2340 		goto err_dma;
2341 	}
2342 #else
2343 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2344 	if (ret) {
2345 		HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
2346 		goto err_dma;
2347 	}
2348 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2349 	if (ret) {
2350 		HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
2351 			   __func__);
2352 		goto err_dma;
2353 	}
2354 #endif
2355 
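	/* Disable PCIe L1 sub-states for this device; 0x188 is presumably
	 * the L1SS control register offset in config space for this part.
	 */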
2356 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2357 
2358 	/* Set bus master bit in PCI_COMMAND to enable DMA */
2359 	pci_set_master(pdev);
2360 
2361 	/* Arrange for access to Target SoC registers. */
2362 	mem = pci_iomap(pdev, BAR_NUM, 0);
2363 	if (!mem) {
2364 		HIF_ERROR("%s: PCI iomap error", __func__);
2365 		ret = -EIO;
2366 		goto err_iomap;
2367 	}
2368 
2369 	HIF_INFO("*****BAR is %pK\n", (void *)mem);
2370 
2371 	sc->mem = mem;
2372 
2373 	/* Hawkeye emulation specific change */
2374 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
2375 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
2376 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
2377 		(device_id == RUMIM2M_DEVICE_ID_NODE3) ||
2378 		(device_id == RUMIM2M_DEVICE_ID_NODE4) ||
2379 		(device_id == RUMIM2M_DEVICE_ID_NODE5)) {
2380 		mem = mem + 0x0c000000;
2381 		sc->mem = mem;
2382 		HIF_INFO("%s: Changing PCI mem base to %pK\n",
2383 			__func__, sc->mem);
2384 	}
2385 
2386 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
2387 	ol_sc->mem = mem;
2388 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
2389 	sc->pci_enabled = true;
2390 	return ret;
2391 
2392 err_iomap:
2393 	pci_clear_master(pdev);
2394 err_dma:
2395 	pci_release_region(pdev, BAR_NUM);
2396 err_region:
2397 	pci_disable_device(pdev);
2398 	return ret;
2399 }
2400 
2401 static int hif_enable_pci_pld(struct hif_pci_softc *sc,
2402 			      struct pci_dev *pdev,
2403 			      const struct pci_device_id *id)
2404 {
2405 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2406 	sc->pci_enabled = true;
2407 	return 0;
2408 }
2409 
2410 
2411 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
2412 {
2413 	pci_disable_msi(sc->pdev);
2414 	pci_iounmap(sc->pdev, sc->mem);
2415 	pci_clear_master(sc->pdev);
2416 	pci_release_region(sc->pdev, BAR_NUM);
2417 	pci_disable_device(sc->pdev);
2418 }
2419 
2420 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
2421 
2422 static void hif_disable_pci(struct hif_pci_softc *sc)
2423 {
2424 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2425 
2426 	if (!ol_sc) {
2427 		HIF_ERROR("%s: ol_sc = NULL", __func__);
2428 		return;
2429 	}
2430 	hif_pci_device_reset(sc);
2431 	sc->hif_pci_deinit(sc);
2432 
2433 	sc->mem = NULL;
2434 	ol_sc->mem = NULL;
2435 }
2436 
2437 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
2438 {
2439 	int ret = 0;
2440 	int targ_awake_limit = 500;
2441 #ifndef QCA_WIFI_3_0
2442 	uint32_t fw_indicator;
2443 #endif
2444 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2445 
2446 	/*
2447 	 * Verify that the Target was started cleanly.
2448 	 * The case where this is most likely is with an AUX-powered
2449 	 * Target and a Host in WoW mode. If the Host crashes,
2450 	 * loses power, or is restarted (without unloading the driver)
2451 	 * then the Target is left (aux) powered and running.  On a
2452 	 * subsequent driver load, the Target is in an unexpected state.
2453 	 * We try to catch that here in order to reset the Target and
2454 	 * retry the probe.
2455 	 */
2456 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2457 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2458 	while (!hif_targ_is_awake(scn, sc->mem)) {
2459 		if (0 == targ_awake_limit) {
2460 			HIF_ERROR("%s: target awake timeout", __func__);
2461 			ret = -EAGAIN;
2462 			goto end;
2463 		}
2464 		qdf_mdelay(1);
2465 		targ_awake_limit--;
2466 	}
2467 
2468 #if PCIE_BAR0_READY_CHECKING
2469 	{
2470 		int wait_limit = 200;
2471 		/* Synchronization point: wait the BAR0 is configured */
2472 		while (wait_limit-- &&
2473 			   !(hif_read32_mb(sc, sc->mem +
2474 					  PCIE_LOCAL_BASE_ADDRESS +
2475 					  PCIE_SOC_RDY_STATUS_ADDRESS)
2476 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
2477 			qdf_mdelay(10);
2478 		}
2479 		if (wait_limit < 0) {
2480 			/* AR6320v1 doesn't support checking of BAR0
2481 			 * configuration; wait up to two sec for BAR0 ready
2482 			 */
2483 			HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
2484 				    __func__);
2485 		}
2486 	}
2487 #endif
2488 
2489 #ifndef QCA_WIFI_3_0
2490 	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
2491 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2492 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2493 
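	/* If firmware already reports itself initialized, a previous
	 * instance likely left the target running; bail out so the caller
	 * can reset the target and retry the probe.
	 */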
2494 	if (fw_indicator & FW_IND_INITIALIZED) {
2495 		HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
2496 			   __func__);
2497 		ret = -EAGAIN;
2498 		goto end;
2499 	}
2500 #endif
2501 
2502 end:
2503 	return ret;
2504 }
2505 
2506 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2507 {
2508 	int ret = 0;
2509 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2510 	uint32_t target_type = scn->target_info.target_type;
2511 
2512 	HIF_TRACE("%s: E", __func__);
2513 
2514 	/* MSI is not supported or MSI IRQ setup failed */
2515 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2516 	ret = request_irq(sc->pdev->irq,
2517 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2518 			  "wlan_pci", sc);
2519 	if (ret) {
2520 		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
2521 		goto end;
2522 	}
2523 	scn->wake_irq = sc->pdev->irq;
2524 	/* Use sc->irq instead of sc->pdev->irq
2525 	 * platform_device pdev doesn't have an irq field
2526 	 */
2527 	sc->irq = sc->pdev->irq;
2528 	/* Use Legacy PCI Interrupts */
2529 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2530 		  PCIE_INTR_ENABLE_ADDRESS),
2531 		  HOST_GROUP0_MASK);
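	/* read back, presumably to flush the posted register write */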
2532 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2533 			       PCIE_INTR_ENABLE_ADDRESS));
2534 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2535 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2536 
2537 	if ((target_type == TARGET_TYPE_IPQ4019) ||
2538 			(target_type == TARGET_TYPE_AR900B)  ||
2539 			(target_type == TARGET_TYPE_QCA9984) ||
2540 			(target_type == TARGET_TYPE_AR9888) ||
2541 			(target_type == TARGET_TYPE_QCA9888) ||
2542 			(target_type == TARGET_TYPE_AR6320V1) ||
2543 			(target_type == TARGET_TYPE_AR6320V2) ||
2544 			(target_type == TARGET_TYPE_AR6320V3)) {
2545 		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2546 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2547 	}
2548 end:
2549 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2550 			  "%s: X, ret = %d", __func__, ret);
2551 	return ret;
2552 }
2553 
2554 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
2555 {
2556 	int ret;
2557 	int ce_id, irq;
2558 	uint32_t msi_data_start;
2559 	uint32_t msi_data_count;
2560 	uint32_t msi_irq_start;
2561 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2562 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
2563 
2564 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2565 					    &msi_data_count, &msi_data_start,
2566 					    &msi_irq_start);
2567 	if (ret)
2568 		return ret;
2569 
2570 	/* needs to match the ce_id -> irq data mapping
2571 	 * used in the srng parameter configuration
2572 	 */
2573 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2574 		unsigned int msi_data;
2575 
2576 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
2577 			continue;
2578 
2579 		if (!ce_sc->tasklets[ce_id].inited)
2580 			continue;
2581 
2582 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
2583 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2584 
2585 		hif_debug("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
2586 			  ce_id, msi_data, irq);
2587 
2588 		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
2589 	}
2590 
2591 	return ret;
2592 }
2593 
2594 static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2595 {
2596 	int i, j, irq;
2597 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2598 	struct hif_exec_context *hif_ext_group;
2599 
2600 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2601 		hif_ext_group = hif_state->hif_ext_group[i];
2602 		if (hif_ext_group->irq_requested) {
2603 			hif_ext_group->irq_requested = false;
2604 			for (j = 0; j < hif_ext_group->numirq; j++) {
2605 				irq = hif_ext_group->os_irq[j];
2606 				pfrm_free_irq(scn->qdf_dev->dev,
2607 					      irq, hif_ext_group);
2608 			}
2609 			hif_ext_group->numirq = 0;
2610 		}
2611 	}
2612 }
2613 
2614 /**
2615  * hif_pci_nointrs(): disable IRQ
2616  * @scn: struct hif_softc
2617  *
2618  * This function stops interrupt(s)
2619  *
2620  *
2621  * Return: none
2622  */
2623 void hif_pci_nointrs(struct hif_softc *scn)
2624 {
2625 	int i, ret;
2626 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2627 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2628 
2629 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2630 
2631 	if (scn->request_irq_done == false)
2632 		return;
2633 
2634 	hif_pci_deconfigure_grp_irq(scn);
2635 
2636 	ret = hif_ce_srng_msi_free_irq(scn);
2637 	if (ret != -EINVAL) {
2638 		/* ce irqs freed in hif_ce_srng_msi_free_irq */
2639 
2640 		if (scn->wake_irq)
2641 			pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
2642 		scn->wake_irq = 0;
2643 	} else if (sc->num_msi_intrs > 0) {
2644 		/* MSI interrupt(s) */
2645 		for (i = 0; i < sc->num_msi_intrs; i++)
2646 			free_irq(sc->irq + i, sc);
2647 		sc->num_msi_intrs = 0;
2648 	} else {
2649 		/* Legacy PCI line interrupt
2650 		 * Use sc->irq instead of sc->pdev->irq
2651 		 * platform_device pdev doesn't have an irq field
2652 		 */
2653 		free_irq(sc->irq, sc);
2654 	}
2655 	scn->request_irq_done = false;
2656 }
2657 
2658 static inline
2659 bool hif_pci_default_link_up(struct hif_target_info *tgt_info)
2660 {
2661 	if (ADRASTEA_BU && (tgt_info->target_type != TARGET_TYPE_QCN7605))
2662 		return true;
2663 	else
2664 		return false;
2665 }
2666 /**
2667  * hif_pci_disable_bus(): disable the pci bus
2668  * @scn: hif context
2669  *
2670  * This function disables the bus
2671  *
2672  *
2673  * Return: none
2674  */
2675 void hif_pci_disable_bus(struct hif_softc *scn)
2676 {
2677 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2678 	struct pci_dev *pdev;
2679 	void __iomem *mem;
2680 	struct hif_target_info *tgt_info = &scn->target_info;
2681 
2682 	/* Attach did not succeed, all resources have been
2683 	 * freed in error handler
2684 	 */
2685 	if (!sc)
2686 		return;
2687 
2688 	pdev = sc->pdev;
2689 	if (hif_pci_default_link_up(tgt_info)) {
2690 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2691 
2692 		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2693 		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
2694 			       HOST_GROUP0_MASK);
2695 	}
2696 
2697 #if defined(CPU_WARM_RESET_WAR)
2698 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2699 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2700 	 * verified for AR9888_REV1
2701 	 */
2702 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2703 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2704 		hif_pci_device_warm_reset(sc);
2705 	else
2706 		hif_pci_device_reset(sc);
2707 #else
2708 	hif_pci_device_reset(sc);
2709 #endif
2710 	mem = (void __iomem *)sc->mem;
2711 	if (mem) {
2712 		hif_dump_pipe_debug_count(scn);
2713 		if (scn->athdiag_procfs_inited) {
2714 			athdiag_procfs_remove();
2715 			scn->athdiag_procfs_inited = false;
2716 		}
2717 		sc->hif_pci_deinit(sc);
2718 		scn->mem = NULL;
2719 	}
2720 	HIF_INFO("%s: X", __func__);
2721 }
2722 
2723 #define OL_ATH_PCI_PM_CONTROL 0x44
2724 
2725 #ifdef FEATURE_RUNTIME_PM
2726 /**
2727  * hif_runtime_prevent_linkdown() - prevent or allow runtime suspend from occurring
2728  * @scn: hif context
2729  * @flag: prevent linkdown if true otherwise allow
2730  *
2731  * this api should only be called as part of bus prevent linkdown
2732  */
2733 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2734 {
2735 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2736 
2737 	if (flag)
2738 		qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
2739 	else
2740 		qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
2741 }
2742 #else
2743 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2744 {
2745 }
2746 #endif
2747 
2748 #if defined(CONFIG_PCI_MSM)
2749 /**
2750  * hif_pci_prevent_linkdown(): prevent or allow linkdown
2751  * @scn: hif context
2752  * @flag: true prevents linkdown, false allows
2753  * Calls into the platform driver to vote against taking down the
2754  * pcie link.
2755  *
2756  * Return: n/a
2757  */
2758 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2759 {
2760 	int errno;
2761 
2762 	HIF_INFO("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2763 	hif_runtime_prevent_linkdown(scn, flag);
2764 
2765 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2766 	if (errno)
2767 		HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
2768 			  __func__, errno);
2769 }
2770 #else
2771 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2772 {
2773 	HIF_INFO("wlan: %s pcie power collapse", (flag ? "disable" : "enable"));
2774 	hif_runtime_prevent_linkdown(scn, flag);
2775 }
2776 #endif
2777 
2778 /**
2779  * hif_pci_bus_suspend(): prepare hif for suspend
2780  *
2781  * Return: Errno
2782  */
2783 int hif_pci_bus_suspend(struct hif_softc *scn)
2784 {
2785 	QDF_STATUS ret;
2786 
2787 	hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
2788 
2789 	ret = hif_try_complete_tasks(scn);
2790 	if (QDF_IS_STATUS_ERROR(ret)) {
2791 		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2792 		return -EBUSY;
2793 	}
2794 
2795 	/* Stop the HIF Sleep Timer */
2796 	hif_cancel_deferred_target_sleep(scn);
2797 
2798 	return 0;
2799 }
2800 
2801 /**
2802  * __hif_check_link_status() - API to check if PCIe link is active/not
2803  * @scn: HIF Context
2804  *
2805  * API reads the PCIe config space to verify if PCIe link training is
2806  * successful or not.
2807  *
2808  * Return: Success/Failure
2809  */
2810 static int __hif_check_link_status(struct hif_softc *scn)
2811 {
2812 	uint16_t dev_id = 0;
2813 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2814 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2815 
2816 	if (!sc) {
2817 		HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
2818 		return -EINVAL;
2819 	}
2820 
2821 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2822 
2823 	if (dev_id == sc->devid)
2824 		return 0;
2825 
2826 	HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2827 	       __func__, dev_id);
2828 
2829 	scn->recovery = true;
2830 
2831 	if (cbk && cbk->set_recovery_in_progress)
2832 		cbk->set_recovery_in_progress(cbk->context, true);
2833 	else
2834 		HIF_ERROR("%s: Driver Global Recovery is not set", __func__);
2835 
2836 	pld_is_pci_link_down(sc->dev);
2837 	return -EACCES;
2838 }
2839 
2840 /**
2841  * hif_pci_bus_resume(): prepare hif for resume
2842  *
2843  * Return: Errno
2844  */
2845 int hif_pci_bus_resume(struct hif_softc *scn)
2846 {
2847 	int errno;
2848 
2849 	errno = __hif_check_link_status(scn);
2850 	if (errno)
2851 		return errno;
2852 
2853 	hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2854 
2855 	return 0;
2856 }
2857 
2858 /**
2859  * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2860  * @scn: hif context
2861  *
2862  * Ensure that if we received the wakeup message before the irq
2863  * was disabled, the message is processed before suspending.
2864  *
2865  * Return: -EBUSY if we fail to flush the tasklets.
2866  */
2867 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2868 {
2869 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2870 		qdf_atomic_set(&scn->link_suspended, 1);
2871 
2872 	hif_apps_wake_irq_enable(GET_HIF_OPAQUE_HDL(scn));
2873 
2874 	return 0;
2875 }
2876 
2877 /**
2878  * hif_pci_bus_resume_noirq() - reverse the noirq suspend steps
2879  * @scn: hif context
2880  *
2881  * Disable the wake irq and clear the link-suspended flag so that
2882  * normal interrupt processing can resume.
2883  *
2884  * Return: 0 (always succeeds)
2885  */
2886 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2887 {
2888 	hif_apps_wake_irq_disable(GET_HIF_OPAQUE_HDL(scn));
2889 
2890 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2891 		qdf_atomic_set(&scn->link_suspended, 0);
2892 
2893 	return 0;
2894 }
2895 
2896 #ifdef FEATURE_RUNTIME_PM
2897 /**
2898  * __hif_runtime_pm_set_state(): utility function
2899  * @state: state to set
2900  *
2901  * indexes into the runtime pm state and sets it.
2902  */
2903 static void __hif_runtime_pm_set_state(struct hif_softc *scn,
2904 				enum hif_pm_runtime_state state)
2905 {
2906 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2907 
2908 	if (!sc) {
2909 		HIF_ERROR("%s: HIF_CTX not initialized",
2910 		       __func__);
2911 		return;
2912 	}
2913 
2914 	qdf_atomic_set(&sc->pm_state, state);
2915 }
2916 
2917 /**
2918  * hif_runtime_pm_set_state_on():  adjust runtime pm state
2919  *
2920  * Notify hif that the runtime pm state should be on
2921  */
2922 static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
2923 {
2924 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
2925 }
2926 
2927 /**
2928  * hif_runtime_pm_set_state_resuming(): adjust runtime pm state
2929  *
2930  * Notify hif that a runtime pm resume has started
2931  */
2932 static void hif_runtime_pm_set_state_resuming(struct hif_softc *scn)
2933 {
2934 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_RESUMING);
2935 }
2936 
2937 /**
2938  * hif_runtime_pm_set_state_suspending(): adjust runtime pm state
2939  *
2940  * Notify hif that a runtime pm suspend has started
2941  */
2942 static void hif_runtime_pm_set_state_suspending(struct hif_softc *scn)
2943 {
2944 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDING);
2945 }
2946 
2947 /**
2948  * hif_runtime_pm_set_state_suspended():  adjust runtime pm state
2949  *
2950  * Notify hif that a runtime suspend attempt has been completed successfully
2951  */
2952 static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
2953 {
2954 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
2955 }
2956 
2957 /**
2958  * hif_log_runtime_suspend_success() - log a successful runtime suspend
2959  */
2960 static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
2961 {
2962 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2963 
2964 	if (!sc)
2965 		return;
2966 
2967 	sc->pm_stats.suspended++;
2968 	sc->pm_stats.suspend_jiffies = jiffies;
2969 }
2970 
2971 /**
2972  * hif_log_runtime_suspend_failure() - log a failed runtime suspend
2973  *
2974  * log a failed runtime suspend
2975  * mark last busy to prevent immediate runtime suspend
2976  */
2977 static void hif_log_runtime_suspend_failure(void *hif_ctx)
2978 {
2979 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2980 
2981 	if (!sc)
2982 		return;
2983 
2984 	sc->pm_stats.suspend_err++;
2985 }
2986 
2987 /**
2988  * hif_log_runtime_resume_success() - log a successful runtime resume
2989  *
2990  * log a successful runtime resume
2991  * mark last busy to prevent immediate runtime suspend
2992  */
2993 static void hif_log_runtime_resume_success(void *hif_ctx)
2994 {
2995 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2996 
2997 	if (!sc)
2998 		return;
2999 
3000 	sc->pm_stats.resumed++;
3001 }
3002 
3003 /**
3004  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
3005  *
3006  * Record the failure.
3007  * mark last busy to delay a retry.
3008  * adjust the runtime_pm state.
3009  */
3010 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
3011 {
3012 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3013 
3014 	hif_log_runtime_suspend_failure(hif_ctx);
3015 	hif_pm_runtime_mark_last_busy(hif_ctx);
3016 	hif_runtime_pm_set_state_on(scn);
3017 }
3018 
3019 /**
3020  * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
3021  *
3022  * Makes sure that the pci link will be taken down by the suspend operation.
3023  * If the hif layer is configured to leave the bus on, runtime suspend will
3024  * not save any power.
3025  *
3026  * Set the runtime suspend state to in progress.
3027  *
3028  * Return: -EINVAL if the bus won't go down, otherwise 0
3029  */
3030 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
3031 {
3032 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3033 
3034 	if (!hif_can_suspend_link(hif_ctx)) {
3035 		HIF_ERROR("Runtime PM not supported for link up suspend");
3036 		return -EINVAL;
3037 	}
3038 
3039 	hif_runtime_pm_set_state_suspending(scn);
3040 	return 0;
3041 }
3042 
3043 /**
3044  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
3045  *
3046  * Record the success.
3047  * adjust the runtime_pm state
3048  */
3049 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
3050 {
3051 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3052 
3053 	hif_runtime_pm_set_state_suspended(scn);
3054 	hif_log_runtime_suspend_success(scn);
3055 }
3056 
3057 /**
3058  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
3059  *
3060  * update the runtime pm state.
3061  */
3062 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
3063 {
3064 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3065 
3066 	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
3067 	hif_runtime_pm_set_state_resuming(scn);
3068 }
3069 
3070 /**
3071  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
3072  *
3073  * record the success.
3074  * adjust the runtime_pm state
3075  */
3076 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
3077 {
3078 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3079 
3080 	hif_log_runtime_resume_success(hif_ctx);
3081 	hif_pm_runtime_mark_last_busy(hif_ctx);
3082 	hif_runtime_pm_set_state_on(scn);
3083 }
3084 
3085 /**
3086  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
3087  *
3088  * Return: 0 for success and non-zero error code for failure
3089  */
3090 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
3091 {
3092 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3093 	int errno;
3094 
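	/* Suspend the bus first and only then arm the wake-interrupt
	 * monitor; if the noirq stage fails, disarm it and roll back with
	 * a bus resume.
	 */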
3095 	errno = hif_bus_suspend(hif_ctx);
3096 	if (errno) {
3097 		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
3098 		return errno;
3099 	}
3100 
3101 	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1);
3102 
3103 	errno = hif_bus_suspend_noirq(hif_ctx);
3104 	if (errno) {
3105 		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
3106 		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
3107 		goto bus_resume;
3108 	}
3109 
3110 	qdf_atomic_set(&sc->pm_dp_rx_busy, 0);
3111 
3112 	return 0;
3113 
3114 bus_resume:
3115 	QDF_BUG(!hif_bus_resume(hif_ctx));
3116 
3117 	return errno;
3118 }
3119 
3120 /**
3121  * hif_fastpath_resume() - resume fastpath for runtime pm
3122  *
3123  * ensure that the fastpath write index register is up to date
3124  * since runtime pm may cause ce_send_fast to skip the register
3125  * write.
3126  *
3127  * fastpath only applicable to legacy copy engine
3128  */
3129 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
3130 {
3131 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3132 	struct CE_state *ce_state;
3133 
3134 	if (!scn)
3135 		return;
3136 
3137 	if (scn->fastpath_mode_on) {
3138 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
3139 			return;
3140 
3141 		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
3142 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
3143 
3144 		/*war_ce_src_ring_write_idx_set */
3145 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
3146 				ce_state->src_ring->write_index);
3147 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
3148 		Q_TARGET_ACCESS_END(scn);
3149 	}
3150 }
3151 
3152 /**
3153  * hif_runtime_resume() - do the bus resume part of a runtime resume
3154  *
3155  *  Return: 0 for success and non-zero error code for failure
3156  */
3157 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
3158 {
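	/* Resume is not expected to fail at this point; assert on any error. */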
3159 	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
3160 	QDF_BUG(!hif_bus_resume(hif_ctx));
3161 	return 0;
3162 }
3163 #endif /* #ifdef FEATURE_RUNTIME_PM */
3164 
3165 #if CONFIG_PCIE_64BIT_MSI
3166 static void hif_free_msi_ctx(struct hif_softc *scn)
3167 {
3168 	struct hif_pci_softc *sc = scn->hif_sc;
3169 	struct hif_msi_info *info = &sc->msi_info;
3170 	struct device *dev = scn->qdf_dev->dev;
3171 
3172 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
3173 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
3174 	info->magic = NULL;
3175 	info->magic_dma = 0;
3176 }
3177 #else
3178 static void hif_free_msi_ctx(struct hif_softc *scn)
3179 {
3180 }
3181 #endif
3182 
3183 void hif_pci_disable_isr(struct hif_softc *scn)
3184 {
3185 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3186 
3187 	hif_exec_kill(&scn->osc);
3188 	hif_nointrs(scn);
3189 	hif_free_msi_ctx(scn);
3190 	/* Cancel the pending tasklet */
3191 	ce_tasklet_kill(scn);
3192 	tasklet_kill(&sc->intr_tq);
3193 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
3194 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
3195 }
3196 
3197 /* Function to reset SoC */
3198 void hif_pci_reset_soc(struct hif_softc *hif_sc)
3199 {
3200 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
3201 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
3202 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
3203 
3204 #if defined(CPU_WARM_RESET_WAR)
3205 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
3206 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
3207 	 * verified for AR9888_REV1
3208 	 */
3209 	if (tgt_info->target_version == AR9888_REV2_VERSION)
3210 		hif_pci_device_warm_reset(sc);
3211 	else
3212 		hif_pci_device_reset(sc);
3213 #else
3214 	hif_pci_device_reset(sc);
3215 #endif
3216 }
3217 
3218 #ifdef CONFIG_PCI_MSM
3219 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
3220 {
3221 	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
3222 	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
3223 }
3224 #else
3225 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {};
3226 #endif
3227 
3228 /**
3229  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
3230  * @sc: HIF PCIe Context
3231  *
3232  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
3233  *
3234  * Return: Failure to caller
3235  */
3236 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
3237 {
3238 	uint16_t val = 0;
3239 	uint32_t bar = 0;
3240 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
3241 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
3242 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
3243 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
3244 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
3245 	A_target_id_t pci_addr = scn->mem;
3246 
3247 	HIF_ERROR("%s: keep_awake_count = %d",
3248 			__func__, hif_state->keep_awake_count);
3249 
3250 	pfrm_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
3251 
3252 	HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
3253 
3254 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
3255 
3256 	HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
3257 
3258 	pfrm_read_config_word(sc->pdev, PCI_COMMAND, &val);
3259 
3260 	HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
3261 
3262 	pfrm_read_config_word(sc->pdev, PCI_STATUS, &val);
3263 
3264 	HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
3265 
3266 	pfrm_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
3267 
3268 	HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
3269 
3270 	HIF_ERROR("%s: SOC_WAKE_ADDR 0x%08x", __func__,
3271 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3272 						PCIE_SOC_WAKE_ADDRESS));
3273 
3274 	HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
3275 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3276 							RTC_STATE_ADDRESS));
3277 
3278 	HIF_ERROR("%s:error, wakeup target", __func__);
3279 	hif_msm_pcie_debug_info(sc);
3280 
3281 	if (!cfg->enable_self_recovery)
3282 		QDF_BUG(0);
3283 
3284 	scn->recovery = true;
3285 
3286 	if (cbk->set_recovery_in_progress)
3287 		cbk->set_recovery_in_progress(cbk->context, true);
3288 
3289 	pld_is_pci_link_down(sc->dev);
3290 	return -EACCES;
3291 }
3292 
3293 /*
3294  * For now, we use simple on-demand sleep/wake.
3295  * Some possible improvements:
3296  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
3297  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
3298  *   Careful, though, these functions may be used by
3299  *  interrupt handlers ("atomic")
3300  *  -Don't use host_reg_table for this code; instead use values directly
3301  *  -Use a separate timer to track activity and allow Target to sleep only
3302  *   if it hasn't done anything for a while; may even want to delay some
3303  *   processing for a short while in order to "batch" (e.g.) transmit
3304  *   requests with completion processing into "windows of up time".  Costs
3305  *   some performance, but improves power utilization.
3306  *  -On some platforms, it might be possible to eliminate explicit
3307  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
3308  *   recover from the failure by forcing the Target awake.
3309  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
3310  *   overhead in some cases. Perhaps this makes more sense when
3311  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
3312  *   disabled.
3313  *  -It is possible to compile this code out and simply force the Target
3314  *   to remain awake.  That would yield optimal performance at the cost of
3315  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
3316  *
3317  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
3318  */
3319 /**
3320  * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
3321  * @scn: hif_softc pointer.
3322  * @sleep_ok: allow the target to go to sleep when true
3323  * @wait_for_it: when waking, wait until the target is verified awake
3324  *
3325  * Adjust the target sleep state: allow sleep via the deferred sleep
3326  * timer, or force the target awake and optionally wait for it.
3327  *
3328  * Return: 0 on success, -EACCES on failure
3328  */
3329 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
3330 			      bool sleep_ok, bool wait_for_it)
3331 {
3332 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3333 	A_target_id_t pci_addr = scn->mem;
3334 	static int max_delay;
3335 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3336 	static int debug;
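
	/* 'debug' latches an access attempted while the link was suspended
	 * so that the next verified wake-up dumps the interrupt registers.
	 */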
3337 	if (scn->recovery)
3338 		return -EACCES;
3339 
3340 	if (qdf_atomic_read(&scn->link_suspended)) {
3341 		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
3342 		debug = true;
3343 		QDF_ASSERT(0);
3344 		return -EACCES;
3345 	}
3346 
3347 	if (debug) {
3348 		wait_for_it = true;
3349 		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
3350 				__func__);
3351 		QDF_ASSERT(0);
3352 	}
3353 
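	/* When allowing sleep, only "fake" it: drop the keep-awake count and
	 * start the sleep timer, which later resets SOC_WAKE if the target
	 * really has been idle long enough.
	 */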
3354 	if (sleep_ok) {
3355 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3356 		hif_state->keep_awake_count--;
3357 		if (hif_state->keep_awake_count == 0) {
3358 			/* Allow sleep */
3359 			hif_state->verified_awake = false;
3360 			hif_state->sleep_ticks = qdf_system_ticks();
3361 		}
3362 		if (hif_state->fake_sleep == false) {
3363 			/* Set the Fake Sleep */
3364 			hif_state->fake_sleep = true;
3365 
3366 			/* Start the Sleep Timer */
3367 			qdf_timer_stop(&hif_state->sleep_timer);
3368 			qdf_timer_start(&hif_state->sleep_timer,
3369 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
3370 		}
3371 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3372 	} else {
3373 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3374 
3375 		if (hif_state->fake_sleep) {
3376 			hif_state->verified_awake = true;
3377 		} else {
3378 			if (hif_state->keep_awake_count == 0) {
3379 				/* Force AWAKE */
3380 				hif_write32_mb(sc, pci_addr +
3381 					      PCIE_LOCAL_BASE_ADDRESS +
3382 					      PCIE_SOC_WAKE_ADDRESS,
3383 					      PCIE_SOC_WAKE_V_MASK);
3384 			}
3385 		}
3386 		hif_state->keep_awake_count++;
3387 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3388 
3389 		if (wait_for_it && !hif_state->verified_awake) {
3390 #define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
3391 			int tot_delay = 0;
3392 			int curr_delay = 5;
3393 
3394 			for (;; ) {
3395 				if (hif_targ_is_awake(scn, pci_addr)) {
3396 					hif_state->verified_awake = true;
3397 					break;
3398 				}
3399 				if (!hif_pci_targ_is_present(scn, pci_addr))
3400 					break;
3401 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
3402 					return hif_log_soc_wakeup_timeout(sc);
3403 
3404 				OS_DELAY(curr_delay);
3405 				tot_delay += curr_delay;
3406 
3407 				if (curr_delay < 50)
3408 					curr_delay += 5;
3409 			}
3410 
3411 			/*
3412 			 * NB: If Target has to come out of Deep Sleep,
3413 			 * this may take a few Msecs. Typically, though
3414 			 * this delay should be <30us.
3415 			 */
3416 			if (tot_delay > max_delay)
3417 				max_delay = tot_delay;
3418 		}
3419 	}
3420 
3421 	if (debug && hif_state->verified_awake) {
3422 		debug = 0;
3423 		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
3424 			__func__,
3425 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3426 				PCIE_INTR_ENABLE_ADDRESS),
3427 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3428 				PCIE_INTR_CAUSE_ADDRESS),
3429 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3430 				CPU_INTR_ADDRESS),
3431 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3432 				PCIE_INTR_CLR_ADDRESS),
3433 			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
3434 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
3435 	}
3436 
3437 	return 0;
3438 }
3439 
3440 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
3441 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
3442 {
3443 	uint32_t value;
3444 	void *addr;
3445 
3446 	addr = scn->mem + offset;
3447 	value = hif_read32_mb(scn, addr);
3448 
3449 	{
3450 		unsigned long irq_flags;
3451 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3452 
3453 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3454 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3455 		pcie_access_log[idx].is_write = false;
3456 		pcie_access_log[idx].addr = addr;
3457 		pcie_access_log[idx].value = value;
3458 		pcie_access_log_seqnum++;
3459 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3460 	}
3461 
3462 	return value;
3463 }
3464 
3465 void
3466 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
3467 {
3468 	void *addr;
3469 
3470 	addr = scn->mem + (offset);
3471 	hif_write32_mb(scn, addr, value);
3472 
3473 	{
3474 		unsigned long irq_flags;
3475 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3476 
3477 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3478 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3479 		pcie_access_log[idx].is_write = true;
3480 		pcie_access_log[idx].addr = addr;
3481 		pcie_access_log[idx].value = value;
3482 		pcie_access_log_seqnum++;
3483 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3484 	}
3485 }
3486 
3487 /**
3488  * hif_target_dump_access_log() - dump access log
3489  *
3490  * dump access log
3491  *
3492  * Return: n/a
3493  */
3494 void hif_target_dump_access_log(void)
3495 {
3496 	int idx, len, start_idx, cur_idx;
3497 	unsigned long irq_flags;
3498 
3499 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3500 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
3501 		len = PCIE_ACCESS_LOG_NUM;
3502 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3503 	} else {
3504 		len = pcie_access_log_seqnum;
3505 		start_idx = 0;
3506 	}
3507 
3508 	for (idx = 0; idx < len; idx++) {
3509 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
3510 		HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
3511 		       __func__, idx,
3512 		       pcie_access_log[cur_idx].seqnum,
3513 		       pcie_access_log[cur_idx].is_write,
3514 		       pcie_access_log[cur_idx].addr,
3515 		       pcie_access_log[cur_idx].value);
3516 	}
3517 
3518 	pcie_access_log_seqnum = 0;
3519 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3520 }
3521 #endif
3522 
3523 #ifndef HIF_AHB
3524 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
3525 {
3526 	QDF_BUG(0);
3527 	return -EINVAL;
3528 }
3529 
3530 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
3531 {
3532 	QDF_BUG(0);
3533 	return -EINVAL;
3534 }
3535 #endif
3536 
3537 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
3538 {
3539 	struct ce_tasklet_entry *tasklet_entry = context;
3540 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
3541 }
3542 extern const char *ce_name[];
3543 
3544 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3545 {
3546 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3547 
3548 	return pci_scn->ce_msi_irq_num[ce_id];
3549 }
3550 
3551 /* hif_ce_srng_msi_irq_disable() - disable the irq for msi
3552  * @hif_sc: hif context
3553  * @ce_id: which ce to disable copy complete interrupts for
3554  *
3555  * since MSI interrupts are not level based, the system can function
3556  * without disabling these interrupts.  Interrupt mitigation can be
3557  * added here for better system performance.
3558  */
3559 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3560 {
3561 	pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
3562 				hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3563 }
3564 
3565 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3566 {
3567 	pfrm_enable_irq(hif_sc->qdf_dev->dev,
3568 			hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3569 }
3570 
3571 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3572 {
3573 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3574 }
3575 
3576 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3577 {
3578 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3579 }
3580 
3581 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
3582 {
3583 	int ret;
3584 	int ce_id, irq;
3585 	uint32_t msi_data_start;
3586 	uint32_t msi_data_count;
3587 	uint32_t msi_irq_start;
3588 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3589 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3590 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
3591 	int pci_slot;
3592 
3593 	if (!scn->disable_wake_irq) {
3594 		/* do wake irq assignment */
3595 		ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
3596 						  &msi_data_count,
3597 						  &msi_data_start,
3598 						  &msi_irq_start);
3599 		if (ret)
3600 			return ret;
3601 
3602 		scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev,
3603 						msi_irq_start);
3604 
3605 		ret = pfrm_request_irq(scn->qdf_dev->dev, scn->wake_irq,
3606 				       hif_wake_interrupt_handler,
3607 				       IRQF_NO_SUSPEND, "wlan_wake_irq", scn);
3608 
3609 		if (ret)
3610 			return ret;
3611 	}
3612 
3613 	/* do ce irq assignments */
3614 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3615 					    &msi_data_count, &msi_data_start,
3616 					    &msi_irq_start);
3617 	if (ret)
3618 		goto free_wake_irq;
3619 
3620 	if (ce_srng_based(scn)) {
3621 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
3622 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
3623 	} else {
3624 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
3625 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
3626 	}
3627 
3628 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
3629 
3630 	/* needs to match the ce_id -> irq data mapping
3631 	 * used in the srng parameter configuration
3632 	 */
3633 	pci_slot = hif_get_pci_slot(scn);
3634 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3635 		unsigned int msi_data = (ce_id % msi_data_count) +
3636 			msi_irq_start;
3637 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3638 			continue;
3639 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3640 		HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)",
3641 			 __func__, ce_id, msi_data, irq,
3642 			 &ce_sc->tasklets[ce_id]);
3643 
3644 		/* implies the ce is also initialized */
3645 		if (!ce_sc->tasklets[ce_id].inited)
3646 			continue;
3647 
3648 		pci_sc->ce_msi_irq_num[ce_id] = irq;
3649 		ret = pfrm_request_irq(scn->qdf_dev->dev,
3650 				       irq, hif_ce_interrupt_handler,
3651 				       IRQF_SHARED,
3652 				       ce_irqname[pci_slot][ce_id],
3653 				       &ce_sc->tasklets[ce_id]);
3654 		if (ret)
3655 			goto free_irq;
3656 	}
3657 
3658 	return ret;
3659 
3660 free_irq:
3661 	/* the request_irq for the last ce_id failed so skip it. */
3662 	while (ce_id > 0 && ce_id < scn->ce_count) {
3663 		unsigned int msi_data;
3664 
3665 		ce_id--;
3666 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
3667 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3668 		pfrm_free_irq(scn->qdf_dev->dev,
3669 			      irq, &ce_sc->tasklets[ce_id]);
3670 	}
3671 
3672 free_wake_irq:
3673 	if (!scn->disable_wake_irq) {
3674 		pfrm_free_irq(scn->qdf_dev->dev,
3675 			      scn->wake_irq, scn->qdf_dev->dev);
3676 		scn->wake_irq = 0;
3677 	}
3678 
3679 	return ret;
3680 }
3681 
3682 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3683 {
3684 	int i;
3685 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
3686 
3687 	for (i = 0; i < hif_ext_group->numirq; i++)
3688 		pfrm_disable_irq_nosync(scn->qdf_dev->dev,
3689 					hif_ext_group->os_irq[i]);
3690 }
3691 
3692 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3693 {
3694 	int i;
3695 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
3696 
3697 	for (i = 0; i < hif_ext_group->numirq; i++)
3698 		pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]);
3699 }
3700 
3701 /**
3702  * hif_pci_get_irq_name() - get irqname
3703  * This function gives irqnumber to irqname
3704  * mapping.
3705  *
3706  * @irq_no: irq number
3707  *
3708  * Return: irq name
3709  */
3710 const char *hif_pci_get_irq_name(int irq_no)
3711 {
3712 	return "pci-dummy";
3713 }
3714 
3715 #ifdef HIF_CPU_PERF_AFFINE_MASK
3716 /**
3717  * hif_pci_irq_set_affinity_hint() - API to set IRQ affinity
3718  * @hif_ext_group: hif_ext_group to extract the irq info
3719  *
3720  * This function will set the IRQ affinity to the gold cores
3721  * only for defconfig builds
3722  *
3723  * @hif_ext_group: hif_ext_group to extract the irq info
3724  *
3725  * Return: none
3726  */
3727 void hif_pci_irq_set_affinity_hint(
3728 	struct hif_exec_context *hif_ext_group)
3729 {
3730 	int i, ret;
3731 	unsigned int cpus;
3732 	bool mask_set = false;
3733 
3734 	for (i = 0; i < hif_ext_group->numirq; i++)
3735 		qdf_cpumask_clear(&hif_ext_group->new_cpu_mask[i]);
3736 
3737 	for (i = 0; i < hif_ext_group->numirq; i++) {
3738 		qdf_for_each_online_cpu(cpus) {
3739 			if (qdf_topology_physical_package_id(cpus) ==
3740 				CPU_CLUSTER_TYPE_PERF) {
3741 				qdf_cpumask_set_cpu(cpus,
3742 						    &hif_ext_group->
3743 						    new_cpu_mask[i]);
3744 				mask_set = true;
3745 			}
3746 		}
3747 	}
3748 	for (i = 0; i < hif_ext_group->numirq; i++) {
3749 		if (mask_set) {
3750 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3751 						  IRQ_NO_BALANCING, 0);
3752 			ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i],
3753 						       (struct qdf_cpu_mask *)
3754 						       &hif_ext_group->
3755 						       new_cpu_mask[i]);
3756 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3757 						  0, IRQ_NO_BALANCING);
3758 			if (ret)
3759 				qdf_err("Set affinity %*pbl fails for IRQ %d ",
3760 					qdf_cpumask_pr_args(&hif_ext_group->
3761 							    new_cpu_mask[i]),
3762 					hif_ext_group->os_irq[i]);
3763 			else
3764 				qdf_debug("Set affinity %*pbl for IRQ: %d",
3765 					  qdf_cpumask_pr_args(&hif_ext_group->
3766 							      new_cpu_mask[i]),
3767 					  hif_ext_group->os_irq[i]);
3768 		} else {
3769 			qdf_err("Offline CPU: Set affinity fails for IRQ: %d",
3770 				hif_ext_group->os_irq[i]);
3771 		}
3772 	}
3773 }
3774 #endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */
3775 
3776 void hif_pci_config_irq_affinity(struct hif_softc *scn)
3777 {
3778 	int i;
3779 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3780 	struct hif_exec_context *hif_ext_group;
3781 
3782 	hif_core_ctl_set_boost(true);
3783 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3784 		hif_ext_group = hif_state->hif_ext_group[i];
3785 		hif_pci_irq_set_affinity_hint(hif_ext_group);
3786 	}
3787 }
3788 
3789 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3790 			      struct hif_exec_context *hif_ext_group)
3791 {
3792 	int ret = 0;
3793 	int irq = 0;
3794 	int j;
3795 	int pci_slot;
3796 
3797 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3798 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3799 	hif_ext_group->irq_name = &hif_pci_get_irq_name;
3800 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3801 
3802 	pci_slot = hif_get_pci_slot(scn);
3803 	for (j = 0; j < hif_ext_group->numirq; j++) {
3804 		irq = hif_ext_group->irq[j];
3805 
3806 		hif_info("request_irq = %d for grp %d",
3807 			 irq, hif_ext_group->grp_id);
3808 		ret = pfrm_request_irq(
3809 				scn->qdf_dev->dev, irq,
3810 				hif_ext_group_interrupt_handler,
3811 				IRQF_SHARED | IRQF_NO_SUSPEND,
3812 				dp_irqname[pci_slot][hif_ext_group->grp_id],
3813 				hif_ext_group);
3814 		if (ret) {
3815 			HIF_ERROR("%s: request_irq failed ret = %d",
3816 				  __func__, ret);
3817 			return -EFAULT;
3818 		}
3819 		hif_ext_group->os_irq[j] = irq;
3820 	}
3821 	hif_ext_group->irq_requested = true;
3822 	return 0;
3823 }
3824 
3825 /**
3826  * hif_configure_irq() - configure interrupt
3827  *
3828  * This function configures interrupt(s)
3829  *
3830  * @sc: PCIe control struct
3831  * @hif_hdl: struct HIF_CE_state
3832  *
3833  * Return: 0 - for success
3834  */
3835 int hif_configure_irq(struct hif_softc *scn)
3836 {
3837 	int ret = 0;
3838 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3839 
3840 	HIF_TRACE("%s: E", __func__);
3841 
3842 	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
3843 		scn->request_irq_done = false;
3844 		return 0;
3845 	}
3846 
3847 	hif_init_reschedule_tasklet_work(sc);
3848 
3849 	ret = hif_ce_msi_configure_irq(scn);
	if (ret == 0)
		goto end;
3853 
3854 	switch (scn->target_info.target_type) {
3855 	case TARGET_TYPE_IPQ4019:
3856 		ret = hif_ahb_configure_legacy_irq(sc);
3857 		break;
3858 	case TARGET_TYPE_QCA8074:
3859 	case TARGET_TYPE_QCA8074V2:
3860 	case TARGET_TYPE_QCA6018:
3861 	case TARGET_TYPE_QCA5018:
3862 		ret = hif_ahb_configure_irq(sc);
3863 		break;
3864 	default:
3865 		ret = hif_pci_configure_legacy_irq(sc);
3866 		break;
3867 	}
3868 	if (ret < 0) {
3869 		HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
3870 			__func__, ret);
3871 		return ret;
3872 	}
3873 end:
3874 	scn->request_irq_done = true;
3875 	return 0;
3876 }
3877 
3878 /**
3879  * hif_trigger_timer_irq() : Triggers interrupt on LF_Timer 0
3880  * @scn: hif control structure
3881  *
 * Sets the IRQ bit in the LF Timer Status Address to wake up a
 * Peregrine/Swift target stuck in a polling loop in pcie_address_config
 * in FW
3884  *
3885  * Return: none
3886  */
3887 static void hif_trigger_timer_irq(struct hif_softc *scn)
3888 {
3889 	int tmp;
3890 	/* Trigger IRQ on Peregrine/Swift by setting
3891 	 * IRQ Bit of LF_TIMER 0
3892 	 */
3893 	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3894 						SOC_LF_TIMER_STATUS0_ADDRESS));
3895 	/* Set Raw IRQ Bit */
3896 	tmp |= 1;
3897 	/* SOC_LF_TIMER_STATUS0 */
3898 	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3899 		       SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
3900 }
3901 
3902 /**
3903  * hif_target_sync() : ensure the target is ready
3904  * @scn: hif control structure
3905  *
 * Informs fw that we plan to use legacy interrupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the target's other registers for the first time.
3910  *
3911  * Return: none
3912  */
3913 static void hif_target_sync(struct hif_softc *scn)
3914 {
3915 	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3916 			    PCIE_INTR_ENABLE_ADDRESS),
3917 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3918 	/* read to flush pcie write */
3919 	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3920 			PCIE_INTR_ENABLE_ADDRESS));
3921 
3922 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3923 			PCIE_SOC_WAKE_ADDRESS,
3924 			PCIE_SOC_WAKE_V_MASK);
3925 	while (!hif_targ_is_awake(scn, scn->mem))
3926 		;
3927 
3928 	if (HAS_FW_INDICATOR) {
3929 		int wait_limit = 500;
3930 		int fw_ind = 0;
3931 		int retry_count = 0;
3932 		uint32_t target_type = scn->target_info.target_type;
3933 fw_retry:
3934 		HIF_TRACE("%s: Loop checking FW signal", __func__);
3935 		while (1) {
3936 			fw_ind = hif_read32_mb(scn, scn->mem +
3937 					FW_INDICATOR_ADDRESS);
3938 			if (fw_ind & FW_IND_INITIALIZED)
3939 				break;
3940 			if (wait_limit-- < 0)
3941 				break;
3942 			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3943 			    PCIE_INTR_ENABLE_ADDRESS),
3944 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3945 			    /* read to flush pcie write */
3946 			(void)hif_read32_mb(scn, scn->mem +
3947 			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
3948 
3949 			qdf_mdelay(10);
3950 		}
3951 		if (wait_limit < 0) {
3952 			if (target_type == TARGET_TYPE_AR9888 &&
3953 			    retry_count++ < 2) {
3954 				hif_trigger_timer_irq(scn);
3955 				wait_limit = 500;
3956 				goto fw_retry;
3957 			}
3958 			HIF_TRACE("%s: FW signal timed out",
3959 					__func__);
3960 			qdf_assert_always(0);
3961 		} else {
3962 			HIF_TRACE("%s: Got FW signal, retries = %x",
3963 					__func__, 500-wait_limit);
3964 		}
3965 	}
3966 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3967 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3968 }
3969 
3970 static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
3971 				     struct device *dev)
3972 {
3973 	struct pld_soc_info info;
3974 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
3975 
3976 	pld_get_soc_info(dev, &info);
3977 	sc->mem = info.v_addr;
3978 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3979 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3980 	scn->target_info.target_version = info.soc_id;
3981 	scn->target_info.target_revision = 0;
3982 }
3983 
3984 static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
3985 				       struct device *dev)
3986 {}
3987 
3988 static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
3989 				    int device_id)
3990 {
3991 	if (!pld_have_platform_driver_support(sc->dev))
3992 		return false;
3993 
3994 	switch (device_id) {
3995 	case QCA6290_DEVICE_ID:
3996 	case QCN9000_DEVICE_ID:
3997 	case QCA6290_EMULATION_DEVICE_ID:
3998 	case QCA6390_DEVICE_ID:
3999 	case QCA6490_DEVICE_ID:
4000 	case AR6320_DEVICE_ID:
4001 	case QCN7605_DEVICE_ID:
4002 		return true;
4003 	}
4004 	return false;
4005 }
4006 
4007 static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
4008 					   int device_id)
4009 {
4010 	if (hif_is_pld_based_target(sc, device_id)) {
4011 		sc->hif_enable_pci = hif_enable_pci_pld;
4012 		sc->hif_pci_deinit = hif_pci_deinit_pld;
4013 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
4014 	} else {
4015 		sc->hif_enable_pci = hif_enable_pci_nopld;
4016 		sc->hif_pci_deinit = hif_pci_deinit_nopld;
4017 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
4018 	}
4019 }
4020 
4021 #ifdef HIF_REG_WINDOW_SUPPORT
4022 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
4023 					       u32 target_type)
4024 {
4025 	switch (target_type) {
4026 	case TARGET_TYPE_QCN7605:
4027 		sc->use_register_windowing = true;
4028 		qdf_spinlock_create(&sc->register_access_lock);
4029 		sc->register_window = 0;
4030 		break;
4031 	default:
4032 		sc->use_register_windowing = false;
4033 	}
4034 }
4035 #else
4036 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
4037 					       u32 target_type)
4038 {
4039 	sc->use_register_windowing = false;
4040 }
4041 #endif
4042 
4043 /**
4044  * hif_enable_bus(): enable bus
4045  *
4046  * This function enables the bus
4047  *
4048  * @ol_sc: soft_sc struct
4049  * @dev: device pointer
4050  * @bdev: bus dev pointer
4051  * bid: bus id pointer
4052  * type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
4053  * Return: QDF_STATUS
4054  */
4055 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
4056 			  struct device *dev, void *bdev,
4057 			  const struct hif_bus_id *bid,
4058 			  enum hif_enable_type type)
4059 {
4060 	int ret = 0;
4061 	uint32_t hif_type, target_type;
4062 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
4063 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
4064 	uint16_t revision_id = 0;
4065 	int probe_again = 0;
4066 	struct pci_dev *pdev = bdev;
4067 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
4068 	struct hif_target_info *tgt_info;
4069 
4070 	if (!ol_sc) {
4071 		HIF_ERROR("%s: hif_ctx is NULL", __func__);
4072 		return QDF_STATUS_E_NOMEM;
4073 	}
4074 
4075 	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
4076 		  __func__, hif_get_conparam(ol_sc), id->device);
4077 
4078 	sc->pdev = pdev;
4079 	sc->dev = &pdev->dev;
4080 	sc->devid = id->device;
4081 	sc->cacheline_sz = dma_get_cache_alignment();
4082 	tgt_info = hif_get_target_info_handle(hif_hdl);
4083 	hif_pci_init_deinit_ops_attach(sc, id->device);
4084 	sc->hif_pci_get_soc_info(sc, dev);
4085 again:
4086 	ret = sc->hif_enable_pci(sc, pdev, id);
4087 	if (ret < 0) {
4088 		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
4089 		       __func__, ret);
4090 		goto err_enable_pci;
4091 	}
4092 	HIF_TRACE("%s: hif_enable_pci done", __func__);
4093 
4094 	/* Temporary FIX: disable ASPM on peregrine.
4095 	 * Will be removed after the OTP is programmed
4096 	 */
4097 	hif_disable_power_gating(hif_hdl);
4098 
4099 	device_disable_async_suspend(&pdev->dev);
4100 	pfrm_read_config_word(pdev, 0x08, &revision_id);
4101 
4102 	ret = hif_get_device_type(id->device, revision_id,
4103 						&hif_type, &target_type);
4104 	if (ret < 0) {
4105 		HIF_ERROR("%s: invalid device id/revision_id", __func__);
4106 		goto err_tgtstate;
4107 	}
4108 	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
4109 		  __func__, hif_type, target_type);
4110 
4111 	hif_register_tbl_attach(ol_sc, hif_type);
4112 	hif_target_register_tbl_attach(ol_sc, target_type);
4113 
4114 	hif_pci_init_reg_windowing_support(sc, target_type);
4115 
4116 	tgt_info->target_type = target_type;
4117 
4118 	if (ce_srng_based(ol_sc)) {
4119 		HIF_TRACE("%s:Skip tgt_wake up for srng devices\n", __func__);
4120 	} else {
4121 		ret = hif_pci_probe_tgt_wakeup(sc);
4122 		if (ret < 0) {
4123 			HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d",
4124 					__func__, ret);
4125 			if (ret == -EAGAIN)
4126 				probe_again++;
4127 			goto err_tgtstate;
4128 		}
4129 		HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
4130 	}
4131 
4132 	if (!ol_sc->mem_pa) {
4133 		HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
4134 		ret = -EIO;
4135 		goto err_tgtstate;
4136 	}
4137 
4138 	if (!ce_srng_based(ol_sc)) {
4139 		hif_target_sync(ol_sc);
4140 
4141 		if (hif_pci_default_link_up(tgt_info))
4142 			hif_vote_link_up(hif_hdl);
4143 	}
4144 
4145 	return 0;
4146 
4147 err_tgtstate:
4148 	hif_disable_pci(sc);
4149 	sc->pci_enabled = false;
4150 	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
4151 	return QDF_STATUS_E_ABORTED;
4152 
4153 err_enable_pci:
4154 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
4155 		int delay_time;
4156 
4157 		HIF_INFO("%s: pci reprobe", __func__);
4158 		/* 10, 40, 90, 100, 100, ... */
4159 		delay_time = max(100, 10 * (probe_again * probe_again));
4160 		qdf_mdelay(delay_time);
4161 		goto again;
4162 	}
4163 	return ret;
4164 }
4165 
4166 /**
4167  * hif_pci_irq_enable() - ce_irq_enable
4168  * @scn: hif_softc
4169  * @ce_id: ce_id
4170  *
4171  * Return: void
4172  */
4173 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
4174 {
4175 	uint32_t tmp = 1 << ce_id;
4176 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
4177 
4178 	qdf_spin_lock_irqsave(&sc->irq_lock);
4179 	scn->ce_irq_summary &= ~tmp;
4180 	if (scn->ce_irq_summary == 0) {
4181 		/* Enable Legacy PCI line interrupts */
4182 		if (LEGACY_INTERRUPTS(sc) &&
4183 			(scn->target_status != TARGET_STATUS_RESET) &&
4184 			(!qdf_atomic_read(&scn->link_suspended))) {
4185 
4186 			hif_write32_mb(scn, scn->mem +
4187 				(SOC_CORE_BASE_ADDRESS |
4188 				PCIE_INTR_ENABLE_ADDRESS),
4189 				HOST_GROUP0_MASK);
4190 
4191 			hif_read32_mb(scn, scn->mem +
4192 					(SOC_CORE_BASE_ADDRESS |
4193 					PCIE_INTR_ENABLE_ADDRESS));
4194 		}
4195 	}
	if (scn->hif_init_done)
4197 		Q_TARGET_ACCESS_END(scn);
4198 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
4199 
4200 	/* check for missed firmware crash */
4201 	hif_fw_interrupt_handler(0, scn);
4202 }
4203 
4204 /**
4205  * hif_pci_irq_disable() - ce_irq_disable
4206  * @scn: hif_softc
4207  * @ce_id: ce_id
4208  *
4209  * only applicable to legacy copy engine...
4210  *
4211  * Return: void
4212  */
4213 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
4214 {
4215 	/* For Rome only need to wake up target */
4216 	/* target access is maintained until interrupts are re-enabled */
4217 	Q_TARGET_ACCESS_BEGIN(scn);
4218 }
4219 
4220 #ifdef FEATURE_RUNTIME_PM
4221 /**
4222  * hif_pm_stats_runtime_get_record() - record runtime get statistics
4223  * @sc: hif pci context
4224  * @rtpm_dbgid: debug id to trace who use it
4225  *
4226  *
4227  * Return: void
4228  */
4229 static void hif_pm_stats_runtime_get_record(struct hif_pci_softc *sc,
4230 					    wlan_rtpm_dbgid rtpm_dbgid)
4231 {
4232 	if (rtpm_dbgid >= RTPM_ID_MAX) {
4233 		QDF_BUG(0);
4234 		return;
4235 	}
4236 	qdf_atomic_inc(&sc->pm_stats.runtime_get);
4237 	qdf_atomic_inc(&sc->pm_stats.runtime_get_dbgid[rtpm_dbgid]);
4238 	sc->pm_stats.runtime_get_timestamp_dbgid[rtpm_dbgid] =
4239 		qdf_get_log_timestamp();
4240 }
4241 
4242 /**
4243  * hif_pm_stats_runtime_put_record() - record runtime put statistics
4244  * @sc: hif pci context
4245  * @rtpm_dbgid: dbg_id to trace who use it
4246  *
4247  *
4248  * Return: void
4249  */
4250 static void hif_pm_stats_runtime_put_record(struct hif_pci_softc *sc,
4251 					    wlan_rtpm_dbgid rtpm_dbgid)
4252 {
4253 	if (rtpm_dbgid >= RTPM_ID_MAX) {
4254 		QDF_BUG(0);
4255 		return;
4256 	}
4257 
4258 	if (atomic_read(&sc->dev->power.usage_count) <= 0) {
4259 		QDF_BUG(0);
4260 		return;
4261 	}
4262 
4263 	qdf_atomic_inc(&sc->pm_stats.runtime_put);
4264 	qdf_atomic_inc(&sc->pm_stats.runtime_put_dbgid[rtpm_dbgid]);
4265 	sc->pm_stats.runtime_put_timestamp_dbgid[rtpm_dbgid] =
4266 		qdf_get_log_timestamp();
4267 }
4268 
4269 /**
4270  * hif_pm_runtime_get_sync() - do a get operation with sync resume
4271  * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who uses it
4273  *
4274  * A get operation will prevent a runtime suspend until a corresponding
4275  * put is done. Unlike hif_pm_runtime_get(), this API will do a sync
4276  * resume instead of requesting a resume if it is runtime PM suspended
4277  * so it can only be called in non-atomic context.
4278  *
4279  * Return: 0 if it is runtime PM resumed otherwise an error code.
4280  */
4281 int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx,
4282 			    wlan_rtpm_dbgid rtpm_dbgid)
4283 {
4284 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4285 	int pm_state;
4286 	int ret;
4287 
4288 	if (!sc)
4289 		return -EINVAL;
4290 
4291 	if (!hif_pci_pm_runtime_enabled(sc))
4292 		return 0;
4293 
4294 	pm_state = qdf_atomic_read(&sc->pm_state);
4295 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
4296 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
4297 		hif_info_high("Runtime PM resume is requested by %ps",
4298 			      (void *)_RET_IP_);
4299 
4300 	hif_pm_stats_runtime_get_record(sc, rtpm_dbgid);
4301 	ret = pm_runtime_get_sync(sc->dev);
4302 
4303 	/* Get can return 1 if the device is already active, just return
4304 	 * success in that case.
4305 	 */
4306 	if (ret > 0)
4307 		ret = 0;
4308 
4309 	if (ret) {
4310 		sc->pm_stats.runtime_get_err++;
4311 		hif_err("Runtime PM Get Sync error in pm_state: %d, ret: %d",
4312 			qdf_atomic_read(&sc->pm_state), ret);
4313 		hif_pm_runtime_put(hif_ctx, rtpm_dbgid);
4314 	}
4315 
4316 	return ret;
4317 }
4318 
4319 /**
4320  * hif_pm_runtime_put_sync_suspend() - do a put operation with sync suspend
4321  * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who uses it
4323  *
4324  * This API will do a runtime put operation followed by a sync suspend if usage
4325  * count is 0 so it can only be called in non-atomic context.
4326  *
4327  * Return: 0 for success otherwise an error code
4328  */
4329 int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx,
4330 				    wlan_rtpm_dbgid rtpm_dbgid)
4331 {
4332 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4333 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4334 	int usage_count;
4335 	char *err = NULL;
4336 
4337 	if (!sc)
4338 		return -EINVAL;
4339 
4340 	if (!hif_pci_pm_runtime_enabled(sc))
4341 		return 0;
4342 
4343 	usage_count = atomic_read(&sc->dev->power.usage_count);
4344 	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
4345 		err = "Uexpected PUT when runtime PM is disabled";
4346 	else if (usage_count == 0)
4347 		err = "PUT without a GET Operation";
4348 
4349 	if (err) {
4350 		hif_pci_runtime_pm_warn(sc, err);
4351 		return -EINVAL;
4352 	}
4353 
4354 	hif_pm_stats_runtime_put_record(sc, rtpm_dbgid);
4355 	return pm_runtime_put_sync_suspend(sc->dev);
4356 }
4357 
4358 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
4359 {
4360 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4361 	int pm_state;
4362 
4363 	if (!sc)
4364 		return -EINVAL;
4365 
4366 	if (!hif_pci_pm_runtime_enabled(sc))
4367 		return 0;
4368 
4369 	pm_state = qdf_atomic_read(&sc->pm_state);
4370 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
4371 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
4372 		HIF_INFO("Runtime PM resume is requested by %ps",
4373 			 (void *)_RET_IP_);
4374 
4375 	sc->pm_stats.request_resume++;
4376 	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
4377 
4378 	return hif_pm_request_resume(sc->dev);
4379 }
4380 
4381 void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx)
4382 {
4383 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4384 
4385 	if (!sc)
4386 		return;
4387 
4388 	sc->pm_stats.last_busy_marker = (void *)_RET_IP_;
4389 	sc->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs();
4390 
4391 	return pm_runtime_mark_last_busy(sc->dev);
4392 }
4393 
4394 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx,
4395 				 wlan_rtpm_dbgid rtpm_dbgid)
4396 {
4397 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4398 
4399 	if (!sc)
4400 		return;
4401 
4402 	if (!hif_pci_pm_runtime_enabled(sc))
4403 		return;
4404 
4405 	hif_pm_stats_runtime_get_record(sc, rtpm_dbgid);
4406 	pm_runtime_get_noresume(sc->dev);
4407 }
4408 
4409 /**
4410  * hif_pm_runtime_get() - do a get opperation on the device
4411  * @hif_ctx: pointer of HIF context
4412  * @rtpm_dbgid: dbgid to trace who use it
4413  *
4414  * A get opperation will prevent a runtime suspend until a
4415  * corresponding put is done.  This api should be used when sending
4416  * data.
4417  *
4418  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
4419  * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
4420  *
4421  * return: success if the bus is up and a get has been issued
4422  *   otherwise an error code.
4423  */
4424 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx,
4425 		       wlan_rtpm_dbgid rtpm_dbgid)
4426 {
4427 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4428 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4429 	int ret;
4430 	int pm_state;
4431 
4432 	if (!scn) {
4433 		hif_err("Could not do runtime get, scn is null");
4434 		return -EFAULT;
4435 	}
4436 
4437 	if (!hif_pci_pm_runtime_enabled(sc))
4438 		return 0;
4439 
4440 	pm_state = qdf_atomic_read(&sc->pm_state);
4441 
4442 	if (pm_state  == HIF_PM_RUNTIME_STATE_ON ||
4443 			pm_state == HIF_PM_RUNTIME_STATE_NONE) {
4444 		hif_pm_stats_runtime_get_record(sc, rtpm_dbgid);
4445 		ret = __hif_pm_runtime_get(sc->dev);
4446 
4447 		/* Get can return 1 if the device is already active, just return
4448 		 * success in that case
4449 		 */
4450 		if (ret > 0)
4451 			ret = 0;
4452 
4453 		if (ret)
4454 			hif_pm_runtime_put(hif_ctx, rtpm_dbgid);
4455 
4456 		if (ret && ret != -EINPROGRESS) {
4457 			sc->pm_stats.runtime_get_err++;
4458 			hif_err("Runtime Get PM Error in pm_state:%d ret: %d",
4459 				qdf_atomic_read(&sc->pm_state), ret);
4460 		}
4461 
4462 		return ret;
4463 	}
4464 
4465 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
4466 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) {
4467 		hif_info_high("Runtime PM resume is requested by %ps",
4468 			      (void *)_RET_IP_);
4469 		ret = -EAGAIN;
4470 	} else {
4471 		ret = -EBUSY;
4472 	}
4473 
4474 	sc->pm_stats.request_resume++;
4475 	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
4476 	hif_pm_request_resume(sc->dev);
4477 
4478 	return ret;
4479 }
4480 
4481 /**
4482  * hif_pm_runtime_put() - do a put operation on the device
4483  * @hif_ctx: pointer of HIF context
4484  * @rtpm_dbgid: dbgid to trace who use it
4485  *
4486  * A put operation will allow a runtime suspend after a corresponding
4487  * get was done.  This api should be used when sending data.
4488  *
4489  * This api will return a failure if runtime pm is stopped
4490  * This api will return failure if it would decrement the usage count below 0.
4491  *
4492  * return: QDF_STATUS_SUCCESS if the put is performed
4493  */
4494 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx,
4495 		       wlan_rtpm_dbgid rtpm_dbgid)
4496 {
4497 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4498 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4499 	int usage_count;
4500 	char *error = NULL;
4501 
4502 	if (!scn) {
4503 		HIF_ERROR("%s: Could not do runtime put, scn is null",
4504 				__func__);
4505 		return -EFAULT;
4506 	}
4507 
4508 	if (!hif_pci_pm_runtime_enabled(sc))
4509 		return 0;
4510 
4511 	usage_count = atomic_read(&sc->dev->power.usage_count);
4512 	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
4513 		error = "Unexpected PUT when runtime PM is disabled";
4514 	else if (usage_count == 0)
4515 		error = "PUT without a GET operation";
4516 
4517 	if (error) {
4518 		hif_pci_runtime_pm_warn(sc, error);
4519 		return -EINVAL;
4520 	}
4521 
4522 	hif_pm_stats_runtime_put_record(sc, rtpm_dbgid);
4523 
4524 	hif_pm_runtime_mark_last_busy(hif_ctx);
4525 	hif_pm_runtime_put_auto(sc->dev);
4526 
4527 	return 0;
4528 }
4529 
4530 /**
4531  * hif_pm_runtime_put_noidle() - do a put operation with no idle
4532  * @hif_ctx: pointer of HIF context
 * @rtpm_dbgid: dbgid to trace who uses it
4534  *
4535  * This API will do a runtime put no idle operation
4536  *
4537  * Return: 0 for success otherwise an error code
4538  */
4539 int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx,
4540 			      wlan_rtpm_dbgid rtpm_dbgid)
4541 {
4542 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4543 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4544 	int usage_count;
4545 	char *err = NULL;
4546 
4547 	if (!sc)
4548 		return -EINVAL;
4549 
4550 	if (!hif_pci_pm_runtime_enabled(sc))
4551 		return 0;
4552 
4553 	usage_count = atomic_read(&sc->dev->power.usage_count);
4554 	if (usage_count == 2 && !scn->hif_config.enable_runtime_pm)
4555 		err = "Unexpected PUT when runtime PM is disabled";
4556 	else if (usage_count == 0)
4557 		err = "PUT without a GET operation";
4558 
4559 	if (err) {
4560 		hif_pci_runtime_pm_warn(sc, err);
4561 		return -EINVAL;
4562 	}
4563 
4564 	hif_pm_stats_runtime_put_record(sc, rtpm_dbgid);
4565 	pm_runtime_put_noidle(sc->dev);
4566 
4567 	return 0;
4568 }
4569 
4570 /**
4571  * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
4572  *                                      reason
4573  * @hif_sc: pci context
4574  * @lock: runtime_pm lock being acquired
4575  *
 * Return: 0 if successful.
4577  */
4578 static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
4579 		*hif_sc, struct hif_pm_runtime_lock *lock)
4580 {
4581 	int ret = 0;
4582 
4583 	/*
4584 	 * We shouldn't be setting context->timeout to zero here when
4585 	 * context is active as we will have a case where Timeout API's
4586 	 * for the same context called back to back.
4587 	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
4588 	 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
4589 	 * API to ensure the timeout version is no more active and
4590 	 * list entry of this context will be deleted during allow suspend.
4591 	 */
4592 	if (lock->active)
4593 		return 0;
4594 
4595 	ret = __hif_pm_runtime_get(hif_sc->dev);
4596 
4597 	/**
4598 	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
4599 	 * RPM_SUSPENDING. Any other negative value is an error.
4600 	 * We shouldn't be do runtime_put here as in later point allow
4601 	 * suspend gets called with the the context and there the usage count
4602 	 * is decremented, so suspend will be prevented.
4603 	 */
4604 
4605 	if (ret < 0 && ret != -EINPROGRESS) {
4606 		hif_sc->pm_stats.runtime_get_err++;
4607 		hif_pci_runtime_pm_warn(hif_sc,
4608 				"Prevent Suspend Runtime PM Error");
4609 	}
4610 
4611 	hif_sc->prevent_suspend_cnt++;
4612 
4613 	lock->active = true;
4614 
4615 	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);
4616 
4617 	qdf_atomic_inc(&hif_sc->pm_stats.prevent_suspend);
4618 
4619 	hif_debug("%s: in pm_state:%s ret: %d", __func__,
4620 		  hif_pm_runtime_state_to_string(
4621 			  qdf_atomic_read(&hif_sc->pm_state)),
4622 		  ret);
4623 
4624 	return ret;
4625 }
4626 
4627 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
4628 		struct hif_pm_runtime_lock *lock)
4629 {
4630 	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(hif_sc);
4631 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4632 	int ret = 0;
4633 	int usage_count;
4634 
4635 	if (hif_sc->prevent_suspend_cnt == 0)
4636 		return ret;
4637 
4638 	if (!lock->active)
4639 		return ret;
4640 
4641 	usage_count = atomic_read(&hif_sc->dev->power.usage_count);
4642 
4643 	/*
4644 	 * For runtime PM enabled case, the usage count should never be 0
4645 	 * at this point. For runtime PM disabled case, it should never be
4646 	 * 2 at this point. Catch unexpected PUT without GET here.
4647 	 */
4648 	if ((usage_count == 2 && !scn->hif_config.enable_runtime_pm) ||
4649 	    usage_count == 0) {
4650 		hif_pci_runtime_pm_warn(hif_sc, "PUT without a GET Operation");
4651 		return -EINVAL;
4652 	}
4653 
4654 	list_del(&lock->list);
4655 
4656 	hif_sc->prevent_suspend_cnt--;
4657 
4658 	lock->active = false;
4659 	lock->timeout = 0;
4660 
4661 	hif_pm_runtime_mark_last_busy(hif_ctx);
4662 	ret = hif_pm_runtime_put_auto(hif_sc->dev);
4663 
4664 	hif_debug("%s: in pm_state:%s ret: %d", __func__,
4665 		  hif_pm_runtime_state_to_string(
4666 			  qdf_atomic_read(&hif_sc->pm_state)),
4667 		  ret);
4668 
4669 	qdf_atomic_inc(&hif_sc->pm_stats.allow_suspend);
4670 	return ret;
4671 }
4672 
4673 /**
4674  * hif_pm_runtime_lock_timeout_fn() - callback the runtime lock timeout
4675  * @data: calback data that is the pci context
4676  *
4677  * if runtime locks are acquired with a timeout, this function releases
4678  * the locks when the last runtime lock expires.
4679  *
4680  * dummy implementation until lock acquisition is implemented.
4681  */
4682 static void hif_pm_runtime_lock_timeout_fn(void *data)
4683 {
4684 	struct hif_pci_softc *hif_sc = data;
4685 	unsigned long timer_expires;
4686 	struct hif_pm_runtime_lock *context, *temp;
4687 
4688 	spin_lock_bh(&hif_sc->runtime_lock);
4689 
4690 	timer_expires = hif_sc->runtime_timer_expires;
4691 
4692 	/* Make sure we are not called too early, this should take care of
4693 	 * following case
4694 	 *
4695 	 * CPU0                         CPU1 (timeout function)
4696 	 * ----                         ----------------------
4697 	 * spin_lock_irq
4698 	 *                              timeout function called
4699 	 *
4700 	 * mod_timer()
4701 	 *
4702 	 * spin_unlock_irq
4703 	 *                              spin_lock_irq
4704 	 */
4705 	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
4706 		hif_sc->runtime_timer_expires = 0;
4707 		list_for_each_entry_safe(context, temp,
4708 				&hif_sc->prevent_suspend_list, list) {
4709 			if (context->timeout) {
4710 				__hif_pm_runtime_allow_suspend(hif_sc, context);
4711 				hif_sc->pm_stats.allow_suspend_timeout++;
4712 			}
4713 		}
4714 	}
4715 
4716 	spin_unlock_bh(&hif_sc->runtime_lock);
4717 }
4718 
4719 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
4720 		struct hif_pm_runtime_lock *data)
4721 {
4722 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4723 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4724 	struct hif_pm_runtime_lock *context = data;
4725 
4726 	if (!sc->hif_config.enable_runtime_pm)
4727 		return 0;
4728 
4729 	if (!context)
4730 		return -EINVAL;
4731 
4732 	if (in_irq())
4733 		WARN_ON(1);
4734 
4735 	spin_lock_bh(&hif_sc->runtime_lock);
4736 	context->timeout = 0;
4737 	__hif_pm_runtime_prevent_suspend(hif_sc, context);
4738 	spin_unlock_bh(&hif_sc->runtime_lock);
4739 
4740 	return 0;
4741 }
4742 
4743 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
4744 				struct hif_pm_runtime_lock *data)
4745 {
4746 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4747 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4748 	struct hif_pm_runtime_lock *context = data;
4749 
4750 	if (!sc->hif_config.enable_runtime_pm)
4751 		return 0;
4752 
4753 	if (!context)
4754 		return -EINVAL;
4755 
4756 	if (in_irq())
4757 		WARN_ON(1);
4758 
4759 	spin_lock_bh(&hif_sc->runtime_lock);
4760 
4761 	__hif_pm_runtime_allow_suspend(hif_sc, context);
4762 
	/* The list can also be empty when there was a single context in
	 * the list and allow suspend arrived before the timer expired,
	 * in which case the context was deleted from the list above.
	 * When the list is empty the prevent_suspend count is zero.
	 */
4769 	if (hif_sc->prevent_suspend_cnt == 0 &&
4770 			hif_sc->runtime_timer_expires > 0) {
4771 		qdf_timer_free(&hif_sc->runtime_timer);
4772 		hif_sc->runtime_timer_expires = 0;
4773 	}
4774 
4775 	spin_unlock_bh(&hif_sc->runtime_lock);
4776 
4777 	return 0;
4778 }
4779 
4780 /**
4781  * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
4782  * @ol_sc: HIF context
4783  * @lock: which lock is being acquired
4784  * @delay: Timeout in milliseconds
4785  *
4786  * Prevent runtime suspend with a timeout after which runtime suspend would be
 * allowed. This API uses a single timer to allow the suspend and the timer
 * is modified if the timeout is changed before the timer fires.
 * If the timeout is less than the autosuspend_delay then mark_last_busy is
 * used instead of starting the timer.
4791  *
4792  * It is wise to try not to use this API and correct the design if possible.
4793  *
4794  * Return: 0 on success and negative error code on failure
4795  */
4796 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
4797 		struct hif_pm_runtime_lock *lock, unsigned int delay)
4798 {
4799 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4800 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);
4801 
4802 	int ret = 0;
4803 	unsigned long expires;
4804 	struct hif_pm_runtime_lock *context = lock;
4805 
4806 	if (hif_is_load_or_unload_in_progress(sc)) {
4807 		HIF_ERROR("%s: Load/unload in progress, ignore!",
4808 				__func__);
4809 		return -EINVAL;
4810 	}
4811 
4812 	if (hif_is_recovery_in_progress(sc)) {
4813 		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
4814 		return -EINVAL;
4815 	}
4816 
4817 	if (!sc->hif_config.enable_runtime_pm)
4818 		return 0;
4819 
4820 	if (!context)
4821 		return -EINVAL;
4822 
4823 	if (in_irq())
4824 		WARN_ON(1);
4825 
4826 	/*
4827 	 * Don't use internal timer if the timeout is less than auto suspend
4828 	 * delay.
4829 	 */
4830 	if (delay <= hif_sc->dev->power.autosuspend_delay) {
4831 		hif_pm_request_resume(hif_sc->dev);
4832 		hif_pm_runtime_mark_last_busy(ol_sc);
4833 		return ret;
4834 	}
4835 
4836 	expires = jiffies + msecs_to_jiffies(delay);
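	/* keep expires non-zero; a runtime_timer_expires value of 0 is
	 * treated as "no timer armed"
	 */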
4837 	expires += !expires;
4838 
4839 	spin_lock_bh(&hif_sc->runtime_lock);
4840 
4841 	context->timeout = delay;
4842 	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
4843 	hif_sc->pm_stats.prevent_suspend_timeout++;
4844 
4845 	/* Modify the timer only if new timeout is after already configured
4846 	 * timeout
4847 	 */
4848 	if (time_after(expires, hif_sc->runtime_timer_expires)) {
4849 		qdf_timer_mod(&hif_sc->runtime_timer, delay);
4850 		hif_sc->runtime_timer_expires = expires;
4851 	}
4852 
4853 	spin_unlock_bh(&hif_sc->runtime_lock);
4854 
4855 	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
4856 		hif_pm_runtime_state_to_string(
4857 			qdf_atomic_read(&hif_sc->pm_state)),
4858 					delay, ret);
4859 
4860 	return ret;
4861 }
4862 
4863 /**
4864  * hif_runtime_lock_init() - API to initialize Runtime PM context
4865  * @name: Context name
4866  *
4867  * This API initializes the Runtime PM context of the caller and
4868  * return the pointer.
4869  *
4870  * Return: None
4871  */
4872 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
4873 {
4874 	struct hif_pm_runtime_lock *context;
4875 
4876 	HIF_INFO("Initializing Runtime PM wakelock %s", name);
4877 
4878 	context = qdf_mem_malloc(sizeof(*context));
4879 	if (!context)
4880 		return -ENOMEM;
4881 
4882 	context->name = name ? name : "Default";
4883 	lock->lock = context;
4884 
4885 	return 0;
4886 }
4887 
4888 /**
4889  * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
4890  * @data: Runtime PM context
4891  *
4892  * Return: void
4893  */
4894 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
4895 			     struct hif_pm_runtime_lock *data)
4896 {
4897 	struct hif_pm_runtime_lock *context = data;
4898 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4899 
4900 	if (!context) {
4901 		HIF_ERROR("Runtime PM wakelock context is NULL");
4902 		return;
4903 	}
4904 
4905 	HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);
4906 
4907 	/*
4908 	 * Ensure to delete the context list entry and reduce the usage count
4909 	 * before freeing the context if context is active.
4910 	 */
4911 	if (sc) {
4912 		spin_lock_bh(&sc->runtime_lock);
4913 		__hif_pm_runtime_allow_suspend(sc, context);
4914 		spin_unlock_bh(&sc->runtime_lock);
4915 	}
4916 
4917 	qdf_mem_free(context);
4918 }
4919 
4920 /**
4921  * hif_pm_runtime_is_suspended() - API to check if driver has runtime suspended
4922  * @hif_ctx: HIF context
4923  *
4924  * Return: true for runtime suspended, otherwise false
4925  */
4926 bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
4927 {
4928 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4929 
4930 	return qdf_atomic_read(&sc->pm_state) ==
4931 		HIF_PM_RUNTIME_STATE_SUSPENDED;
4932 }
4933 
4934 /**
4935  * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr
4936  * @hif_ctx: HIF context
4937  *
4938  * monitor_wake_intr variable can be used to indicate if driver expects wake
4939  * MSI for runtime PM
4940  *
4941  * Return: monitor_wake_intr variable
4942  */
4943 int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
4944 {
4945 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4946 
4947 	return qdf_atomic_read(&sc->monitor_wake_intr);
4948 }
4949 
4950 /**
4951  * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr
4952  * @hif_ctx: HIF context
4953  * @val: value to set
4954  *
4955  * monitor_wake_intr variable can be used to indicate if driver expects wake
4956  * MSI for runtime PM
4957  *
4958  * Return: void
4959  */
4960 void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
4961 					  int val)
4962 {
4963 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4964 
4965 	qdf_atomic_set(&sc->monitor_wake_intr, val);
4966 }
4967 
4968 /**
 * hif_pm_runtime_mark_dp_rx_busy() - Set last busy mark by data path
4970  * @hif_ctx: HIF context
4971  *
4972  * Return: void
4973  */
4974 void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
4975 {
4976 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4977 
4978 	if (!sc)
4979 		return;
4980 
4981 	qdf_atomic_set(&sc->pm_dp_rx_busy, 1);
4982 	sc->dp_last_busy_timestamp = qdf_get_log_timestamp_usecs();
4983 
4984 	hif_pm_runtime_mark_last_busy(hif_ctx);
4985 }
4986 
4987 /**
 * hif_pm_runtime_is_dp_rx_busy() - Check if last busy mark is by dp rx
4989  * @hif_ctx: HIF context
4990  *
4991  * Return: dp rx busy set value
4992  */
4993 int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
4994 {
4995 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4996 
4997 	if (!sc)
4998 		return 0;
4999 
5000 	return qdf_atomic_read(&sc->pm_dp_rx_busy);
5001 }
5002 
5003 /**
5004  * hif_pm_runtime_get_dp_rx_busy_mark() - Get last busy by dp rx timestamp
5005  * @hif_ctx: HIF context
5006  *
5007  * Return: timestamp of last mark busy by dp rx
5008  */
5009 qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
5010 {
5011 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
5012 
5013 	if (!sc)
5014 		return 0;
5015 
5016 	return sc->dp_last_busy_timestamp;
5017 }
5018 
5019 #endif /* FEATURE_RUNTIME_PM */
5020 
5021 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
5022 {
5023 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
5024 
5025 	/* legacy case only has one irq */
5026 	return pci_scn->irq;
5027 }
5028 
5029 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
5030 {
5031 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
5032 	struct hif_target_info *tgt_info;
5033 
5034 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
5035 
5036 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
5037 	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
5038 	    tgt_info->target_type == TARGET_TYPE_QCA6490 ||
5039 	    tgt_info->target_type == TARGET_TYPE_QCN7605 ||
5040 	    tgt_info->target_type == TARGET_TYPE_QCA8074) {
5041 		/*
5042 		 * Need to consider offset's memtype for QCA6290/QCA8074,
5043 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
5044 		 * well initialized/defined.
5045 		 */
5046 		return 0;
5047 	}
5048 
	if ((offset >= DRAM_BASE_ADDRESS &&
	     offset <= DRAM_BASE_ADDRESS + DRAM_SIZE) ||
	    (offset + sizeof(unsigned int) <= sc->mem_len)) {
5051 		return 0;
5052 	}
5053 
5054 	HIF_TRACE("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)\n",
5055 		  offset, (uint32_t)(offset + sizeof(unsigned int)),
5056 		  sc->mem_len);
5057 
5058 	return -EINVAL;
5059 }
5060 
5061 /**
5062  * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
5063  * @scn: hif context
5064  *
5065  * Return: true if soc needs driver bmi otherwise false
5066  */
5067 bool hif_pci_needs_bmi(struct hif_softc *scn)
5068 {
5069 	return !ce_srng_based(scn);
5070 }
5071 
5072 #ifdef FORCE_WAKE
5073 #ifdef DEVICE_FORCE_WAKE_ENABLE
5074 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
5075 {
5076 	uint32_t timeout = 0, value;
5077 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
5078 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
5079 
5080 	if (pld_force_wake_request(scn->qdf_dev->dev)) {
5081 		hif_err("force wake request send failed");
5082 		return -EINVAL;
5083 	}
5084 
5085 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
5086 	while (!pld_is_device_awake(scn->qdf_dev->dev) &&
5087 	       timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS) {
5088 		qdf_mdelay(FORCE_WAKE_DELAY_MS);
5089 		timeout += FORCE_WAKE_DELAY_MS;
5090 	}
5091 
5092 	if (pld_is_device_awake(scn->qdf_dev->dev) <= 0) {
5093 		hif_err("Unable to wake up mhi");
5094 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
5095 		return -EINVAL;
5096 	}
5097 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
5098 	hif_write32_mb(scn,
5099 		       scn->mem +
5100 		       PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG,
5101 		       0);
5102 	hif_write32_mb(scn,
5103 		       scn->mem +
5104 		       PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG,
5105 		       1);
5106 
5107 	HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1);
5108 	/*
5109 	 * do not reset the timeout
5110 	 * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME < 50 ms
5111 	 */
5112 	do {
5113 		value =
5114 		hif_read32_mb(scn,
5115 			      scn->mem +
5116 			      PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG);
5117 		if (value)
5118 			break;
5119 		qdf_mdelay(FORCE_WAKE_DELAY_MS);
5120 		timeout += FORCE_WAKE_DELAY_MS;
5121 	} while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS);
5122 
5123 	if (!value) {
5124 		hif_err("failed handshake mechanism");
5125 		HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1);
5126 		return -ETIMEDOUT;
5127 	}
5128 
5129 	HIF_STATS_INC(pci_scn, soc_force_wake_success, 1);
5130 	return 0;
5131 }
5132 
5133 #else /* DEVICE_FORCE_WAKE_ENABLE */
/**
 * hif_force_wake_request() - force wake request without the PCIe scratch
 * register write/read handshake
 * @hif_handle: HIF opaque handle
 *
 * Return: 0 on success, otherwise an error code
 */
5139 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
5140 {
5141 	uint32_t timeout = 0;
5142 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
5143 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
5144 
5145 	if (pld_force_wake_request(scn->qdf_dev->dev)) {
5146 		hif_err("force wake request send failed");
5147 		return -EINVAL;
5148 	}
5149 
5150 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
5151 	while (!pld_is_device_awake(scn->qdf_dev->dev) &&
5152 	       timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS) {
5153 		qdf_mdelay(FORCE_WAKE_DELAY_MS);
5154 		timeout += FORCE_WAKE_DELAY_MS;
5155 	}
5156 
5157 	if (pld_is_device_awake(scn->qdf_dev->dev) <= 0) {
5158 		hif_err("Unable to wake up mhi");
5159 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
5160 		return -EINVAL;
5161 	}
5162 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
5163 	return 0;
5164 }
5165 #endif /* DEVICE_FORCE_WAKE_ENABLE */
5166 
5167 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
5168 {
5169 	int ret;
5170 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
5171 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
5172 
5173 	ret = pld_force_wake_release(scn->qdf_dev->dev);
5174 	if (ret) {
5175 		hif_err("force wake release failure");
5176 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
5177 		return ret;
5178 	}
5179 
5180 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
5181 	hif_write32_mb(scn,
5182 		       scn->mem +
5183 		       PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG,
5184 		       0);
5185 	HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1);
5186 	return 0;
5187 }
5188 
5189 void hif_print_pci_stats(struct hif_pci_softc *pci_handle)
5190 {
5191 	hif_debug("mhi_force_wake_request_vote: %d",
5192 		  pci_handle->stats.mhi_force_wake_request_vote);
5193 	hif_debug("mhi_force_wake_failure: %d",
5194 		  pci_handle->stats.mhi_force_wake_failure);
5195 	hif_debug("mhi_force_wake_success: %d",
5196 		  pci_handle->stats.mhi_force_wake_success);
5197 	hif_debug("soc_force_wake_register_write_success: %d",
5198 		  pci_handle->stats.soc_force_wake_register_write_success);
5199 	hif_debug("soc_force_wake_failure: %d",
5200 		  pci_handle->stats.soc_force_wake_failure);
5201 	hif_debug("soc_force_wake_success: %d",
5202 		  pci_handle->stats.soc_force_wake_success);
5203 	hif_debug("mhi_force_wake_release_failure: %d",
5204 		  pci_handle->stats.mhi_force_wake_release_failure);
5205 	hif_debug("mhi_force_wake_release_success: %d",
5206 		  pci_handle->stats.mhi_force_wake_release_success);
5207 	hif_debug("oc_force_wake_release_success: %d",
5208 		  pci_handle->stats.soc_force_wake_release_success);
5209 }
5210 #endif /* FORCE_WAKE */
5211 
5212 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
5213 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
5214 {
5215 	return pld_prevent_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
5216 }
5217 
5218 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
5219 {
5220 	pld_allow_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
5221 }
5222 #endif
5223