xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/pcie/if_pci.c (revision 8ddef7dd9a290d4a9b1efd5d3efacf51d78a1a0d)
1 /*
2  * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <linux/pci.h>
20 #include <linux/slab.h>
21 #include <linux/interrupt.h>
22 #include <linux/if_arp.h>
23 #ifdef CONFIG_PCI_MSM
24 #include <linux/msm_pcie.h>
25 #endif
26 #include "hif_io32.h"
27 #include "if_pci.h"
28 #include "hif.h"
29 #include "target_type.h"
30 #include "hif_main.h"
31 #include "ce_main.h"
32 #include "ce_api.h"
33 #include "ce_internal.h"
34 #include "ce_reg.h"
35 #include "ce_bmi.h"
36 #include "regtable.h"
37 #include "hif_hw_version.h"
38 #include <linux/debugfs.h>
39 #include <linux/seq_file.h>
40 #include "qdf_status.h"
41 #include "qdf_atomic.h"
42 #include "pld_common.h"
43 #include "mp_dev.h"
44 #include "hif_debug.h"
45 
46 #include "if_pci_internal.h"
47 #include "ce_tasklet.h"
48 #include "targaddrs.h"
49 #include "hif_exec.h"
50 
51 #include "pci_api.h"
52 #include "ahb_api.h"
53 
54 /* Maximum ms timeout for host to wake up target */
55 #define PCIE_WAKE_TIMEOUT 1000
56 #define RAMDUMP_EVENT_TIMEOUT 2500
57 
58 /* Setting SOC_GLOBAL_RESET during driver unload causes an intermittent
59  * PCIe data bus error.
60  * As a workaround, the reset sequence is changed to use a Target CPU
61  * warm reset instead of SOC_GLOBAL_RESET.
62  */
63 #define CPU_WARM_RESET_WAR
64 
65 #ifdef CONFIG_WIN
66 extern int32_t frac, intval, ar900b_20_targ_clk, qca9888_20_targ_clk;
67 #endif
68 
69 /*
70  * Top-level interrupt handler for all PCI interrupts from a Target.
71  * When a block of MSI interrupts is allocated, this top-level handler
72  * is not used; instead, we directly call the correct sub-handler.
73  */
74 struct ce_irq_reg_table {
75 	uint32_t irq_enable;
76 	uint32_t irq_status;
77 };
78 
79 #ifndef QCA_WIFI_3_0_ADRASTEA
80 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
81 {
82 }
83 #else
84 static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
85 {
86 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
87 	unsigned int target_enable0, target_enable1;
88 	unsigned int target_cause0, target_cause1;
89 
90 	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
91 	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
92 	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
93 	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);
94 
95 	if ((target_enable0 & target_cause0) ||
96 	    (target_enable1 & target_cause1)) {
97 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
98 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);
99 
100 		if (scn->notice_send)
101 			pld_intr_notify_q6(sc->dev);
102 	}
103 }
104 #endif
105 
106 
107 /**
108  * pci_dispatch_interrupt() - dispatch pending copy engine interrupts
109  * @scn: hif context
110  *
111  * Return: void
112  */
113 static void pci_dispatch_interrupt(struct hif_softc *scn)
114 {
115 	uint32_t intr_summary;
116 	int id;
117 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
118 
119 	if (scn->hif_init_done != true)
120 		return;
121 
122 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
123 		return;
124 
125 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
126 
127 	if (intr_summary == 0) {
128 		if ((scn->target_status != TARGET_STATUS_RESET) &&
129 			(!qdf_atomic_read(&scn->link_suspended))) {
130 
131 			hif_write32_mb(scn, scn->mem +
132 				(SOC_CORE_BASE_ADDRESS |
133 				PCIE_INTR_ENABLE_ADDRESS),
134 				HOST_GROUP0_MASK);
135 
136 			hif_read32_mb(scn, scn->mem +
137 					(SOC_CORE_BASE_ADDRESS |
138 					PCIE_INTR_ENABLE_ADDRESS));
139 		}
140 		Q_TARGET_ACCESS_END(scn);
141 		return;
142 	}
143 	Q_TARGET_ACCESS_END(scn);
144 
145 	scn->ce_irq_summary = intr_summary;
146 	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
147 		if (intr_summary & (1 << id)) {
148 			intr_summary &= ~(1 << id);
149 			ce_dispatch_interrupt(id,  &hif_state->tasklets[id]);
150 		}
151 	}
152 }
153 
154 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
155 {
156 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
157 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
158 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
159 
160 	volatile int tmp;
161 	uint16_t val = 0;
162 	uint32_t bar0 = 0;
163 	uint32_t fw_indicator_address, fw_indicator;
164 	bool ssr_irq = false;
165 	unsigned int host_cause, host_enable;
166 
167 	if (LEGACY_INTERRUPTS(sc)) {
168 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
169 			return IRQ_HANDLED;
170 
171 		if (ADRASTEA_BU) {
172 			host_enable = hif_read32_mb(sc, sc->mem +
173 						    PCIE_INTR_ENABLE_ADDRESS);
174 			host_cause = hif_read32_mb(sc, sc->mem +
175 						   PCIE_INTR_CAUSE_ADDRESS);
176 			if (!(host_enable & host_cause)) {
177 				hif_pci_route_adrastea_interrupt(sc);
178 				return IRQ_HANDLED;
179 			}
180 		}
181 
182 		/* Clear legacy PCI line interrupts
183 		 * IMPORTANT: the INTR_CLR register has to be written
184 		 * after INTR_ENABLE is set to 0,
185 		 * otherwise the interrupt is not actually cleared
186 		 */
187 		hif_write32_mb(sc, sc->mem +
188 			      (SOC_CORE_BASE_ADDRESS |
189 			       PCIE_INTR_ENABLE_ADDRESS), 0);
190 
191 		hif_write32_mb(sc, sc->mem +
192 			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
193 			       ADRASTEA_BU ?
194 			       (host_enable & host_cause) :
195 			      HOST_GROUP0_MASK);
196 
197 		if (ADRASTEA_BU)
198 			hif_write32_mb(sc, sc->mem + 0x2f100c,
199 				       (host_cause >> 1));
200 
201 		/* IMPORTANT: this extra read transaction is required to
202 		 * flush the posted write buffer
203 		 */
204 		if (!ADRASTEA_BU) {
205 		tmp =
206 			hif_read32_mb(sc, sc->mem +
207 				     (SOC_CORE_BASE_ADDRESS |
208 				      PCIE_INTR_ENABLE_ADDRESS));
209 
210 		if (tmp == 0xdeadbeef) {
211 			HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
212 			       __func__);
213 
214 			pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
215 			HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
216 			       __func__, val);
217 
218 			pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
219 			HIF_ERROR("%s: PCI Device ID = 0x%04x",
220 			       __func__, val);
221 
222 			pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
223 			HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
224 			       val);
225 
226 			pci_read_config_word(sc->pdev, PCI_STATUS, &val);
227 			HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
228 			       val);
229 
230 			pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
231 					      &bar0);
232 			HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
233 			       bar0);
234 
235 			HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
236 				  __func__,
237 				  hif_read32_mb(sc, sc->mem +
238 						PCIE_LOCAL_BASE_ADDRESS
239 						+ RTC_STATE_ADDRESS));
240 			HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
241 				  __func__,
242 				  hif_read32_mb(sc, sc->mem +
243 						PCIE_LOCAL_BASE_ADDRESS
244 						+ PCIE_SOC_WAKE_ADDRESS));
245 			HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
246 				  __func__,
247 				  hif_read32_mb(sc, sc->mem + 0x80008),
248 				  hif_read32_mb(sc, sc->mem + 0x8000c));
249 			HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
250 				  __func__,
251 				  hif_read32_mb(sc, sc->mem + 0x80010),
252 				  hif_read32_mb(sc, sc->mem + 0x80014));
253 			HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
254 				  __func__,
255 				  hif_read32_mb(sc, sc->mem + 0x80018),
256 				  hif_read32_mb(sc, sc->mem + 0x8001c));
257 			QDF_BUG(0);
258 		}
259 
260 		PCI_CLR_CAUSE0_REGISTER(sc);
261 		}
262 
263 		if (HAS_FW_INDICATOR) {
264 			fw_indicator_address = hif_state->fw_indicator_address;
265 			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
266 			if ((fw_indicator != ~0) &&
267 			   (fw_indicator & FW_IND_EVENT_PENDING))
268 				ssr_irq = true;
269 		}
270 
271 		if (Q_TARGET_ACCESS_END(scn) < 0)
272 			return IRQ_HANDLED;
273 	}
274 	/* TBDXXX: Add support for WMAC */
275 
276 	if (ssr_irq) {
277 		sc->irq_event = irq;
278 		qdf_atomic_set(&scn->tasklet_from_intr, 1);
279 
280 		qdf_atomic_inc(&scn->active_tasklet_cnt);
281 		tasklet_schedule(&sc->intr_tq);
282 	} else {
283 		pci_dispatch_interrupt(scn);
284 	}
285 
286 	return IRQ_HANDLED;
287 }
288 
289 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
290 {
291 	return true;            /* FIX THIS */
292 }
293 
294 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
295 {
296 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
297 	int i = 0;
298 
299 	if (!irq || !size) {
300 		return -EINVAL;
301 	}
302 
303 	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
304 		irq[0] = sc->irq;
305 		return 1;
306 	}
307 
308 	if (sc->num_msi_intrs > size) {
309 		qdf_print("Not enough space in irq buffer to return irqs");
310 		return -EINVAL;
311 	}
312 
313 	for (i = 0; i < sc->num_msi_intrs; i++) {
314 		irq[i] = sc->irq +  i + MSI_ASSIGN_CE_INITIAL;
315 	}
316 
317 	return sc->num_msi_intrs;
318 }
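/*
 * Illustrative caller sketch (not part of this file): a component that
 * needs the CE interrupt numbers passes a pre-sized array and gets back
 * how many entries were filled, or -EINVAL on bad arguments.  The names
 * hif_hdl and CE_COUNT_MAX below are only assumed for the example.
 *
 *	int ce_irqs[CE_COUNT_MAX];
 *	int nr = hif_get_irq_num(hif_hdl, ce_irqs, ARRAY_SIZE(ce_irqs));
 *
 *	if (nr < 0)
 *		return nr;
 */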
319 
320 
321 /**
322  * hif_pci_cancel_deferred_target_sleep() - cancel the deferred target sleep
323  * @scn: hif_softc
324  *
325  * Return: void
326  */
327 #if CONFIG_ATH_PCIE_MAX_PERF == 0
328 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
329 {
330 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
331 	A_target_id_t pci_addr = scn->mem;
332 
333 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
334 	/*
335 	 * If the deferred sleep timer is running cancel it
336 	 * and put the soc into sleep.
337 	 */
338 	if (hif_state->fake_sleep == true) {
339 		qdf_timer_stop(&hif_state->sleep_timer);
340 		if (hif_state->verified_awake == false) {
341 			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
342 				      PCIE_SOC_WAKE_ADDRESS,
343 				      PCIE_SOC_WAKE_RESET);
344 		}
345 		hif_state->fake_sleep = false;
346 	}
347 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
348 }
349 #else
350 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
351 {
352 }
353 #endif
354 
355 #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
356 	hif_read32_mb(sc, (char *)(mem) + \
357 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
358 
359 #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
360 	hif_write32_mb(sc, ((char *)(mem) + \
361 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
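/*
 * Example use of the two helpers above (this mirrors the wake sequence in
 * hif_pci_device_reset() below): assert the wake bit through the PCIe
 * local register window, then read back the RTC state.
 *
 *	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
 *			       PCIE_SOC_WAKE_V_MASK);
 *	val = A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS);
 */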
362 
363 #ifdef QCA_WIFI_3_0
364 /**
365  * hif_targ_is_awake() - check to see if the target is awake
366  * @hif_ctx: hif context
367  *
368  * emulation never goes to sleep
369  *
370  * Return: true if target is awake
371  */
372 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
373 {
374 	return true;
375 }
376 #else
377 /**
378  * hif_targ_is_awake() - check to see if the target is awake
379  * @scn: hif context
380  *
381  * Return: true if the target's clocks are on
382  */
383 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
384 {
385 	uint32_t val;
386 
387 	if (scn->recovery)
388 		return false;
389 	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
390 		+ RTC_STATE_ADDRESS);
391 	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
392 }
393 #endif
394 
395 #define ATH_PCI_RESET_WAIT_MAX 10       /* ms */
396 static void hif_pci_device_reset(struct hif_pci_softc *sc)
397 {
398 	void __iomem *mem = sc->mem;
399 	int i;
400 	uint32_t val;
401 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
402 
403 	if (!scn->hostdef)
404 		return;
405 
406 	/* NB: Don't check resetok here.  This form of reset
407 	 * is integral to correct operation.
408 	 */
409 
410 	if (!SOC_GLOBAL_RESET_ADDRESS)
411 		return;
412 
413 	if (!mem)
414 		return;
415 
416 	HIF_ERROR("%s: Reset Device", __func__);
417 
418 	/*
419 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
420 	 * writing WAKE_V, the Target may scribble over Host memory!
421 	 */
422 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
423 			       PCIE_SOC_WAKE_V_MASK);
424 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
425 		if (hif_targ_is_awake(scn, mem))
426 			break;
427 
428 		qdf_mdelay(1);
429 	}
430 
431 	/* Put Target, including PCIe, into RESET. */
432 	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
433 	val |= 1;
434 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
435 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
436 		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
437 		    RTC_STATE_COLD_RESET_MASK)
438 			break;
439 
440 		qdf_mdelay(1);
441 	}
442 
443 	/* Pull Target, including PCIe, out of RESET. */
444 	val &= ~1;
445 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
446 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
447 		if (!
448 		    (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
449 		     RTC_STATE_COLD_RESET_MASK))
450 			break;
451 
452 		qdf_mdelay(1);
453 	}
454 
455 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
456 			       PCIE_SOC_WAKE_RESET);
457 }
458 
459 /* CPU warm reset function
460  * Steps:
461  * 1. Disable all pending interrupts, so none are pending across the WARM reset
462  * 2. Clear FW_INDICATOR_ADDRESS, so the Target CPU initializes FW
463  *    correctly on WARM reset
464  * 3. Clear the Target CPU LF timer interrupt
465  * 4. Reset all CEs to clear any pending CE transactions
466  * 5. Warm reset the CPU
467  */
468 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
469 {
470 	void __iomem *mem = sc->mem;
471 	int i;
472 	uint32_t val;
473 	uint32_t fw_indicator;
474 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
475 
476 	/* NB: Don't check resetok here.  This form of reset is
477 	 * integral to correct operation.
478 	 */
479 
480 	if (!mem)
481 		return;
482 
483 	HIF_INFO_MED("%s: Target Warm Reset", __func__);
484 
485 	/*
486 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
487 	 * writing WAKE_V, the Target may scribble over Host memory!
488 	 */
489 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
490 			       PCIE_SOC_WAKE_V_MASK);
491 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
492 		if (hif_targ_is_awake(scn, mem))
493 			break;
494 		qdf_mdelay(1);
495 	}
496 
497 	/*
498 	 * Disable Pending interrupts
499 	 */
500 	val =
501 		hif_read32_mb(sc, mem +
502 			     (SOC_CORE_BASE_ADDRESS |
503 			      PCIE_INTR_CAUSE_ADDRESS));
504 	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
505 		    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
506 	/* Target CPU Intr Cause */
507 	val = hif_read32_mb(sc, mem +
508 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
509 	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);
510 
511 	val =
512 		hif_read32_mb(sc, mem +
513 			     (SOC_CORE_BASE_ADDRESS |
514 			      PCIE_INTR_ENABLE_ADDRESS));
515 	hif_write32_mb(sc, (mem +
516 		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
517 	hif_write32_mb(sc, (mem +
518 		       (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
519 		       HOST_GROUP0_MASK);
520 
521 	qdf_mdelay(100);
522 
523 	/* Clear FW_INDICATOR_ADDRESS */
524 	if (HAS_FW_INDICATOR) {
525 		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
526 		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
527 	}
528 
529 	/* Clear Target LF Timer interrupts */
530 	val =
531 		hif_read32_mb(sc, mem +
532 			     (RTC_SOC_BASE_ADDRESS +
533 			      SOC_LF_TIMER_CONTROL0_ADDRESS));
534 	HIF_INFO_MED("%s: addr 0x%x :  0x%x", __func__,
535 	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
536 	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
537 	hif_write32_mb(sc, mem +
538 		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
539 		      val);
540 
541 	/* Reset CE */
542 	val =
543 		hif_read32_mb(sc, mem +
544 			     (RTC_SOC_BASE_ADDRESS |
545 			      SOC_RESET_CONTROL_ADDRESS));
546 	val |= SOC_RESET_CONTROL_CE_RST_MASK;
547 	hif_write32_mb(sc, (mem +
548 		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
549 		      val);
550 	val =
551 		hif_read32_mb(sc, mem +
552 			     (RTC_SOC_BASE_ADDRESS |
553 			      SOC_RESET_CONTROL_ADDRESS));
554 	qdf_mdelay(10);
555 
556 	/* CE unreset */
557 	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
558 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
559 		       SOC_RESET_CONTROL_ADDRESS), val);
560 	val =
561 		hif_read32_mb(sc, mem +
562 			     (RTC_SOC_BASE_ADDRESS |
563 			      SOC_RESET_CONTROL_ADDRESS));
564 	qdf_mdelay(10);
565 
566 	/* Read Target CPU Intr Cause */
567 	val = hif_read32_mb(sc, mem +
568 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
569 	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
570 		    __func__, val);
571 
572 	/* CPU warm RESET */
573 	val =
574 		hif_read32_mb(sc, mem +
575 			     (RTC_SOC_BASE_ADDRESS |
576 			      SOC_RESET_CONTROL_ADDRESS));
577 	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
578 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
579 		       SOC_RESET_CONTROL_ADDRESS), val);
580 	val =
581 		hif_read32_mb(sc, mem +
582 			     (RTC_SOC_BASE_ADDRESS |
583 			      SOC_RESET_CONTROL_ADDRESS));
584 	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
585 		    __func__, val);
586 
587 	qdf_mdelay(100);
588 	HIF_INFO_MED("%s: Target Warm reset complete", __func__);
589 
590 }
591 
592 #ifndef QCA_WIFI_3_0
593 /* only applicable to legacy ce */
594 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
595 {
596 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
597 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
598 	void __iomem *mem = sc->mem;
599 	uint32_t val;
600 
601 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
602 		return ATH_ISR_NOSCHED;
603 	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
604 	if (Q_TARGET_ACCESS_END(scn) < 0)
605 		return ATH_ISR_SCHED;
606 
607 	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);
608 
609 	if (val & FW_IND_HELPER)
610 		return 0;
611 
612 	return 1;
613 }
614 #endif
615 
616 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
617 {
618 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
619 	uint16_t device_id = 0;
620 	uint32_t val;
621 	uint16_t timeout_count = 0;
622 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
623 
624 	/* Check device ID from PCIe configuration space for link status */
625 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
626 	if (device_id != sc->devid) {
627 		HIF_ERROR("%s: device ID does not match (read 0x%x, expect 0x%x)",
628 			  __func__, device_id, sc->devid);
629 		return -EACCES;
630 	}
631 
632 	/* Check PCIe local register for bar/memory access */
633 	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
634 			   RTC_STATE_ADDRESS);
635 	HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);
636 
637 	/* Try to wake up the target if it is asleep */
638 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
639 		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
640 	HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
641 		hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
642 		PCIE_SOC_WAKE_ADDRESS));
643 
644 	/* Check if the target can be woken up */
645 	while (!hif_targ_is_awake(scn, sc->mem)) {
646 		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
647 			HIF_ERROR("%s: wake up timeout, %08x, %08x",
648 				__func__,
649 				hif_read32_mb(sc, sc->mem +
650 					     PCIE_LOCAL_BASE_ADDRESS +
651 					     RTC_STATE_ADDRESS),
652 				hif_read32_mb(sc, sc->mem +
653 					     PCIE_LOCAL_BASE_ADDRESS +
654 					PCIE_SOC_WAKE_ADDRESS));
655 			return -EACCES;
656 		}
657 
658 		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
659 			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
660 
661 		qdf_mdelay(100);
662 		timeout_count += 100;
663 	}
664 
665 	/* Check Power register for SoC internal bus issues */
666 	val =
667 		hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
668 			     SOC_POWER_REG_OFFSET);
669 	HIF_INFO_MED("%s: Power register is %08x", __func__, val);
670 
671 	return 0;
672 }
673 
674 /**
675  * __hif_pci_dump_registers(): dump other PCI debug registers
676  * @scn: struct hif_softc
677  *
678  * This function dumps pci debug registers.  The parent function
679  * dumps the copy engine registers before calling this function.
680  *
681  * Return: void
682  */
683 static void __hif_pci_dump_registers(struct hif_softc *scn)
684 {
685 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
686 	void __iomem *mem = sc->mem;
687 	uint32_t val, i, j;
688 	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
689 	uint32_t ce_base;
690 
691 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
692 		return;
693 
694 	/* DEBUG_INPUT_SEL_SRC = 0x6 */
695 	val =
696 		hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
697 			     WLAN_DEBUG_INPUT_SEL_OFFSET);
698 	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
699 	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
700 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
701 		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);
702 
703 	/* DEBUG_CONTROL_ENABLE = 0x1 */
704 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
705 			   WLAN_DEBUG_CONTROL_OFFSET);
706 	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
707 	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
708 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
709 		      WLAN_DEBUG_CONTROL_OFFSET, val);
710 
711 	HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
712 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
713 			    WLAN_DEBUG_INPUT_SEL_OFFSET),
714 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
715 			    WLAN_DEBUG_CONTROL_OFFSET));
716 
717 	HIF_INFO_MED("%s: Debug CE", __func__);
718 	/* Loop CE debug output */
719 	/* AMBA_DEBUG_BUS_SEL = 0xc */
720 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
721 			    AMBA_DEBUG_BUS_OFFSET);
722 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
723 	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
724 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
725 		       val);
726 
727 	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
728 		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
729 		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
730 				   CE_WRAPPER_DEBUG_OFFSET);
731 		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
732 		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
733 		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
734 			      CE_WRAPPER_DEBUG_OFFSET, val);
735 
736 		HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
737 			    __func__, wrapper_idx[i],
738 			    hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
739 				AMBA_DEBUG_BUS_OFFSET),
740 			    hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
741 				CE_WRAPPER_DEBUG_OFFSET));
742 
743 		if (wrapper_idx[i] <= 7) {
744 			for (j = 0; j <= 5; j++) {
745 				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
746 				/* For (j=0~5) write CE_DEBUG_SEL = j */
747 				val =
748 					hif_read32_mb(sc, mem + ce_base +
749 						     CE_DEBUG_OFFSET);
750 				val &= ~CE_DEBUG_SEL_MASK;
751 				val |= CE_DEBUG_SEL_SET(j);
752 				hif_write32_mb(sc, mem + ce_base +
753 					       CE_DEBUG_OFFSET, val);
754 
755 				/* read (@gpio_athr_wlan_reg)
756 				 * WLAN_DEBUG_OUT_DATA
757 				 */
758 				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
759 						    + WLAN_DEBUG_OUT_OFFSET);
760 				val = WLAN_DEBUG_OUT_DATA_GET(val);
761 
762 				HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
763 					    __func__, j,
764 					    hif_read32_mb(sc, mem + ce_base +
765 						    CE_DEBUG_OFFSET), val);
766 			}
767 		} else {
768 			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
769 			val =
770 				hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
771 					     WLAN_DEBUG_OUT_OFFSET);
772 			val = WLAN_DEBUG_OUT_DATA_GET(val);
773 
774 			HIF_INFO_MED("%s: out: %x", __func__, val);
775 		}
776 	}
777 
778 	HIF_INFO_MED("%s: Debug PCIe:", __func__);
779 	/* Loop PCIe debug output */
780 	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
781 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
782 			    AMBA_DEBUG_BUS_OFFSET);
783 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
784 	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
785 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
786 		       AMBA_DEBUG_BUS_OFFSET, val);
787 
788 	for (i = 0; i <= 8; i++) {
789 		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
790 		val =
791 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
792 				     AMBA_DEBUG_BUS_OFFSET);
793 		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
794 		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
795 		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
796 			       AMBA_DEBUG_BUS_OFFSET, val);
797 
798 		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
799 		val =
800 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
801 				     WLAN_DEBUG_OUT_OFFSET);
802 		val = WLAN_DEBUG_OUT_DATA_GET(val);
803 
804 		HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
805 		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
806 				    WLAN_DEBUG_OUT_OFFSET), val,
807 		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
808 				    WLAN_DEBUG_OUT_OFFSET));
809 	}
810 
811 	Q_TARGET_ACCESS_END(scn);
812 }
813 
814 /**
815  * hif_pci_dump_registers(): dump bus debug registers
816  * @hif_ctx: struct hif_softc
817  *
818  * This function dumps hif bus debug registers
819  *
820  * Return: 0 for success or error code
821  */
822 int hif_pci_dump_registers(struct hif_softc *hif_ctx)
823 {
824 	int status;
825 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
826 
827 	status = hif_dump_ce_registers(scn);
828 
829 	if (status)
830 		HIF_ERROR("%s: Dump CE Registers Failed", __func__);
831 
832 	/* dump non copy engine pci registers */
833 	__hif_pci_dump_registers(scn);
834 
835 	return 0;
836 }
837 
838 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
839 
840 /* worker thread to schedule wlan_tasklet in SLUB debug build */
841 static void reschedule_tasklet_work_handler(void *arg)
842 {
843 	struct hif_pci_softc *sc = arg;
844 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
845 
846 	if (!scn) {
847 		HIF_ERROR("%s: hif_softc is NULL\n", __func__);
848 		return;
849 	}
850 
851 	if (scn->hif_init_done == false) {
852 		HIF_ERROR("%s: wlan driver is unloaded", __func__);
853 		return;
854 	}
855 
856 	tasklet_schedule(&sc->intr_tq);
857 }
858 
859 /**
860  * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
861  * work
862  * @sc: HIF PCI Context
863  *
864  * Return: void
865  */
866 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
867 {
868 	qdf_create_work(0, &sc->reschedule_tasklet_work,
869 				reschedule_tasklet_work_handler, NULL);
870 }
871 #else
872 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
873 #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
874 
875 void wlan_tasklet(unsigned long data)
876 {
877 	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
878 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
879 
880 	if (scn->hif_init_done == false)
881 		goto end;
882 
883 	if (qdf_atomic_read(&scn->link_suspended))
884 		goto end;
885 
886 	if (!ADRASTEA_BU) {
887 		(irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
888 		if (scn->target_status == TARGET_STATUS_RESET)
889 			goto end;
890 	}
891 
892 end:
893 	qdf_atomic_set(&scn->tasklet_from_intr, 0);
894 	qdf_atomic_dec(&scn->active_tasklet_cnt);
895 }
896 
897 #ifdef FEATURE_RUNTIME_PM
898 static const char *hif_pm_runtime_state_to_string(uint32_t state)
899 {
900 	switch (state) {
901 	case HIF_PM_RUNTIME_STATE_NONE:
902 		return "INIT_STATE";
903 	case HIF_PM_RUNTIME_STATE_ON:
904 		return "ON";
905 	case HIF_PM_RUNTIME_STATE_INPROGRESS:
906 		return "INPROGRESS";
907 	case HIF_PM_RUNTIME_STATE_SUSPENDED:
908 		return "SUSPENDED";
909 	default:
910 		return "INVALID STATE";
911 	}
912 }
913 
914 #define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
915 	seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
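/*
 * For example, HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended) expands to
 * seq_printf(s, "%30s: %u\n", "suspended", sc->pm_stats.suspended).
 */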
916 /**
917  * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
918  * @sc: hif_pci_softc context
919  * @msg: log message
920  *
921  * log runtime pm stats when something seems off.
922  *
923  * Return: void
924  */
925 static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
926 {
927 	struct hif_pm_runtime_lock *ctx;
928 
929 	HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
930 			msg, atomic_read(&sc->dev->power.usage_count),
931 			hif_pm_runtime_state_to_string(
932 					atomic_read(&sc->pm_state)),
933 			sc->prevent_suspend_cnt);
934 
935 	HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
936 			sc->dev->power.runtime_status,
937 			sc->dev->power.runtime_error,
938 			sc->dev->power.disable_depth,
939 			sc->dev->power.autosuspend_delay);
940 
941 	HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
942 			sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
943 			sc->pm_stats.request_resume);
944 
945 	HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
946 			sc->pm_stats.allow_suspend,
947 			sc->pm_stats.prevent_suspend);
948 
949 	HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
950 			sc->pm_stats.prevent_suspend_timeout,
951 			sc->pm_stats.allow_suspend_timeout);
952 
953 	HIF_ERROR("Suspended count: %u, resumed count: %u",
954 			sc->pm_stats.suspended,
955 			sc->pm_stats.resumed);
956 
957 	HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
958 			sc->pm_stats.suspend_err,
959 			sc->pm_stats.runtime_get_err);
960 
961 	HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");
962 
963 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
964 		HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
965 	}
966 
967 	WARN_ON(1);
968 }
969 
970 /**
971  * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
972  * @s: file to print to
973  * @data: unused
974  *
975  * debugging tool added to the debug fs for displaying runtimepm stats
976  *
977  * Return: 0
978  */
979 static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
980 {
981 	struct hif_pci_softc *sc = s->private;
982 	static const char * const autopm_state[] = {"NONE", "ON", "INPROGRESS",
983 		"SUSPENDED"};
984 	unsigned int msecs_age;
985 	int pm_state = atomic_read(&sc->pm_state);
986 	unsigned long timer_expires;
987 	struct hif_pm_runtime_lock *ctx;
988 
989 	seq_printf(s, "%30s: %s\n", "Runtime PM state",
990 			autopm_state[pm_state]);
991 	seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
992 			sc->pm_stats.last_resume_caller);
993 
994 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
995 		msecs_age = jiffies_to_msecs(
996 				jiffies - sc->pm_stats.suspend_jiffies);
997 		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
998 				msecs_age / 1000, msecs_age % 1000);
999 	}
1000 
1001 	seq_printf(s, "%30s: %d\n", "PM Usage count",
1002 			atomic_read(&sc->dev->power.usage_count));
1003 
1004 	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
1005 			sc->prevent_suspend_cnt);
1006 
1007 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
1008 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
1009 	HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
1010 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
1011 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
1012 	HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
1013 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
1014 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
1015 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
1016 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
1017 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);
1018 
1019 	timer_expires = sc->runtime_timer_expires;
1020 	if (timer_expires > 0) {
1021 		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
1022 		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
1023 				msecs_age / 1000, msecs_age % 1000);
1024 	}
1025 
1026 	spin_lock_bh(&sc->runtime_lock);
1027 	if (list_empty(&sc->prevent_suspend_list)) {
1028 		spin_unlock_bh(&sc->runtime_lock);
1029 		return 0;
1030 	}
1031 
1032 	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
1033 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
1034 		seq_printf(s, "%s", ctx->name);
1035 		if (ctx->timeout)
1036 			seq_printf(s, "(%d ms)", ctx->timeout);
1037 		seq_puts(s, " ");
1038 	}
1039 	seq_puts(s, "\n");
1040 	spin_unlock_bh(&sc->runtime_lock);
1041 
1042 	return 0;
1043 }
1044 #undef HIF_PCI_RUNTIME_PM_STATS
1045 
1046 /**
1047  * hif_pci_runtime_pm_open() - open a debugfs file to access the runtime pm stats
1048  * @inode: debugfs inode of the file being opened
1049  * @file: file handle being opened
1050  *
1051  * Return: linux error code of single_open.
1052  */
1053 static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
1054 {
1055 	return single_open(file, hif_pci_pm_runtime_debugfs_show,
1056 			inode->i_private);
1057 }
1058 
1059 static const struct file_operations hif_pci_runtime_pm_fops = {
1060 	.owner          = THIS_MODULE,
1061 	.open           = hif_pci_runtime_pm_open,
1062 	.release        = single_release,
1063 	.read           = seq_read,
1064 	.llseek         = seq_lseek,
1065 };
1066 
1067 /**
1068  * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
1069  * @sc: pci context
1070  *
1071  * creates a debugfs entry to debug the runtime pm feature.
1072  */
1073 static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
1074 {
1075 	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
1076 					0400, NULL, sc,
1077 					&hif_pci_runtime_pm_fops);
1078 }
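/*
 * With a NULL parent dentry the node is created directly under the debugfs
 * root, so (assuming the default debugfs mount point) the stats can be read
 * with:
 *
 *	cat /sys/kernel/debug/cnss_runtime_pm
 */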
1079 
1080 /**
1081  * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
1082  * @sc: pci context
1083  *
1084  * removes the debugfs entry to debug the runtime pm feature.
1085  */
1086 static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
1087 {
1088 	debugfs_remove(sc->pm_dentry);
1089 }
1090 
1091 static void hif_runtime_init(struct device *dev, int delay)
1092 {
1093 	pm_runtime_set_autosuspend_delay(dev, delay);
1094 	pm_runtime_use_autosuspend(dev);
1095 	pm_runtime_allow(dev);
1096 	pm_runtime_mark_last_busy(dev);
1097 	pm_runtime_put_noidle(dev);
1098 	pm_suspend_ignore_children(dev, true);
1099 }
1100 
1101 static void hif_runtime_exit(struct device *dev)
1102 {
1103 	pm_runtime_get_noresume(dev);
1104 	pm_runtime_set_active(dev);
1105 }
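/*
 * Note: hif_runtime_init() enables autosuspend, allows runtime PM and drops
 * a usage-count reference (pm_runtime_put_noidle); hif_runtime_exit() takes
 * the reference back (pm_runtime_get_noresume) and marks the device active.
 * hif_pm_runtime_start()/hif_pm_runtime_stop() below use them as a matched
 * pair.
 */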
1106 
1107 static void hif_pm_runtime_lock_timeout_fn(void *data);
1108 
1109 /**
1110  * hif_pm_runtime_start(): start the runtime pm
1111  * @sc: pci context
1112  *
1113  * After this call, runtime pm will be active.
1114  */
1115 static void hif_pm_runtime_start(struct hif_pci_softc *sc)
1116 {
1117 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1118 	uint32_t mode = hif_get_conparam(ol_sc);
1119 
1120 	if (!ol_sc->hif_config.enable_runtime_pm) {
1121 		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
1122 		return;
1123 	}
1124 
1125 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode)) {
1126 		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
1127 				__func__);
1128 		return;
1129 	}
1130 
1131 	qdf_timer_init(NULL, &sc->runtime_timer,
1132 		       hif_pm_runtime_lock_timeout_fn,
1133 		       sc, QDF_TIMER_TYPE_WAKE_APPS);
1134 
1135 	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
1136 			ol_sc->hif_config.runtime_pm_delay);
1137 
1138 	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
1139 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
1140 	hif_runtime_pm_debugfs_create(sc);
1141 }
1142 
1143 /**
1144  * hif_pm_runtime_stop(): stop runtime pm
1145  * @sc: pci context
1146  *
1147  * Turns off runtime pm and frees corresponding resources
1148  * that were acquired by hif_pm_runtime_start().
1149  */
1150 static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
1151 {
1152 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1153 	uint32_t mode = hif_get_conparam(ol_sc);
1154 
1155 	if (!ol_sc->hif_config.enable_runtime_pm)
1156 		return;
1157 
1158 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode))
1159 		return;
1160 
1161 	hif_runtime_exit(sc->dev);
1162 	hif_pm_runtime_resume(sc->dev);
1163 
1164 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1165 
1166 	hif_runtime_pm_debugfs_remove(sc);
1167 	qdf_timer_free(&sc->runtime_timer);
1168 	/* doesn't wait for pending traffic, unlike cld-2.0 */
1169 }
1170 
1171 /**
1172  * hif_pm_runtime_open(): initialize runtime pm
1173  * @sc: pci data structure
1174  *
1175  * Early initialization
1176  */
1177 static void hif_pm_runtime_open(struct hif_pci_softc *sc)
1178 {
1179 	spin_lock_init(&sc->runtime_lock);
1180 
1181 	qdf_atomic_init(&sc->pm_state);
1182 	qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
1183 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1184 	INIT_LIST_HEAD(&sc->prevent_suspend_list);
1185 }
1186 
1187 /**
1188  * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
1189  * @sc: pci context
1190  *
1191  * Ensure we have only one vote against runtime suspend before closing
1192  * the runtime suspend feature.
1193  *
1194  * All gets taken by the wlan driver should have been returned by now;
1195  * one vote should remain as part of cnss_runtime_exit.
1196  *
1197  * This needs to be revisited if we ever share the root complex.
1198  */
1199 static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
1200 {
1201 	struct hif_pm_runtime_lock *ctx, *tmp;
1202 
1203 	if (atomic_read(&sc->dev->power.usage_count) != 1)
1204 		hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
1205 	else
1206 		return;
1207 
1208 	spin_lock_bh(&sc->runtime_lock);
1209 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1210 		spin_unlock_bh(&sc->runtime_lock);
1211 		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
1212 		spin_lock_bh(&sc->runtime_lock);
1213 	}
1214 	spin_unlock_bh(&sc->runtime_lock);
1215 
1216 	/* Ensure one and only one usage count remains, so that when the
1217 	 * wlan driver is insmodded again runtime pm is not left
1218 	 * disabled, and so that runtime pm does not get broken by the
1219 	 * count dropping below 1.
1220 	 */
1221 	if (atomic_read(&sc->dev->power.usage_count) <= 0)
1222 		atomic_set(&sc->dev->power.usage_count, 1);
1223 	while (atomic_read(&sc->dev->power.usage_count) > 1)
1224 		hif_pm_runtime_put_auto(sc->dev);
1225 }
1226 
1227 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
1228 					  struct hif_pm_runtime_lock *lock);
1229 
1230 /**
1231  * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
1232  * @sc: PCIe Context
1233  *
1234  * API is used to empty the runtime pm prevent suspend list.
1235  *
1236  * Return: void
1237  */
1238 static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
1239 {
1240 	struct hif_pm_runtime_lock *ctx, *tmp;
1241 
1242 	spin_lock_bh(&sc->runtime_lock);
1243 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1244 		__hif_pm_runtime_allow_suspend(sc, ctx);
1245 	}
1246 	spin_unlock_bh(&sc->runtime_lock);
1247 }
1248 
1249 /**
1250  * hif_pm_runtime_close(): close runtime pm
1251  * @sc: pci bus handle
1252  *
1253  * ensure runtime_pm is stopped before closing the driver
1254  */
1255 static void hif_pm_runtime_close(struct hif_pci_softc *sc)
1256 {
1257 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1258 
1259 	qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
1260 	if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
1261 		return;
1262 
1263 	hif_pm_runtime_stop(sc);
1264 
1265 	hif_is_recovery_in_progress(scn) ?
1266 		hif_pm_runtime_sanitize_on_ssr_exit(sc) :
1267 		hif_pm_runtime_sanitize_on_exit(sc);
1268 }
1269 #else
1270 static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
1271 static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
1272 static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
1273 static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
1274 #endif
1275 
1276 /**
1277  * hif_disable_power_gating() - disable HW power gating
1278  * @hif_ctx: hif context
1279  *
1280  * disables pcie L1 power states
1281  */
1282 static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1283 {
1284 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1285 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1286 
1287 	if (NULL == scn) {
1288 		HIF_ERROR("%s: Could not disable ASPM, scn is null",
1289 		       __func__);
1290 		return;
1291 	}
1292 
1293 	/* Disable ASPM when pkt log is enabled */
1294 	pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1295 	pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1296 }
1297 
1298 /**
1299  * hif_enable_power_gating() - enable HW power gating
1300  * @sc: pci context
1301  *
1302  * enables pcie L1 power states
1303  */
1304 static void hif_enable_power_gating(struct hif_pci_softc *sc)
1305 {
1306 	if (NULL == sc) {
1307 		HIF_ERROR("%s: Could not re-enable ASPM, sc is null",
1308 		       __func__);
1309 		return;
1310 	}
1311 
1312 	/* Re-enable ASPM after firmware/OTP download is complete */
1313 	pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1314 }
1315 
1316 /**
1317  * hif_pci_enable_power_management() - enable power management
1318  * @hif_sc: hif context
1319  *
1320  * Enables runtime pm, ASPM (via hif_enable_power_gating) and re-enables
1321  * soc-sleep after driver load (via hif_pci_target_sleep_state_adjust).
1322  *
1323  * note: epping mode does not call this function as it does not
1324  *       care about saving power.
1325  */
1326 void hif_pci_enable_power_management(struct hif_softc *hif_sc,
1327 				 bool is_packet_log_enabled)
1328 {
1329 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
1330 
1331 	if (pci_ctx == NULL) {
1332 		HIF_ERROR("%s, hif_ctx null", __func__);
1333 		return;
1334 	}
1335 
1336 	hif_pm_runtime_start(pci_ctx);
1337 
1338 	if (!is_packet_log_enabled)
1339 		hif_enable_power_gating(pci_ctx);
1340 
1341 	if (!CONFIG_ATH_PCIE_MAX_PERF &&
1342 	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
1343 	    !ce_srng_based(hif_sc)) {
1344 		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
1345 		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
1346 			HIF_ERROR("%s, failed to set target to sleep",
1347 				  __func__);
1348 	}
1349 }
1350 
1351 /**
1352  * hif_pci_disable_power_management() - disable power management
1353  * @hif_ctx: hif context
1354  *
1355  * Currently disables runtime pm. Should be updated to behave
1356  * correctly even if runtime pm was not started, and to take care
1357  * of aspm and soc sleep for driver load.
1358  */
1359 void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
1360 {
1361 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1362 
1363 	if (pci_ctx == NULL) {
1364 		HIF_ERROR("%s, hif_ctx null", __func__);
1365 		return;
1366 	}
1367 
1368 	hif_pm_runtime_stop(pci_ctx);
1369 }
1370 
1371 void hif_pci_display_stats(struct hif_softc *hif_ctx)
1372 {
1373 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1374 
1375 	if (pci_ctx == NULL) {
1376 		HIF_ERROR("%s, hif_ctx null", __func__);
1377 		return;
1378 	}
1379 	hif_display_ce_stats(&pci_ctx->ce_sc);
1380 }
1381 
1382 void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1383 {
1384 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1385 
1386 	if (pci_ctx == NULL) {
1387 		HIF_ERROR("%s, hif_ctx null", __func__);
1388 		return;
1389 	}
1390 	hif_clear_ce_stats(&pci_ctx->ce_sc);
1391 }
1392 
1393 #define ATH_PCI_PROBE_RETRY_MAX 3
1394 /**
1395  * hif_pci_open(): open the PCI bus instance
1396  * @hif_ctx: hif context
1397  * @bus_type: bus type
1398  *
1399  * Return: QDF_STATUS
1400  */
1401 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
1402 {
1403 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1404 
1405 	hif_ctx->bus_type = bus_type;
1406 	hif_pm_runtime_open(sc);
1407 
1408 	qdf_spinlock_create(&sc->irq_lock);
1409 
1410 	return hif_ce_open(hif_ctx);
1411 }
1412 
1413 /**
1414  * hif_wake_target_cpu() - wake the target's cpu
1415  * @scn: hif context
1416  *
1417  * Send an interrupt to the device to wake up the Target CPU
1418  * so it has an opportunity to notice any changed state.
1419  */
1420 static void hif_wake_target_cpu(struct hif_softc *scn)
1421 {
1422 	QDF_STATUS rv;
1423 	uint32_t core_ctrl;
1424 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1425 
1426 	rv = hif_diag_read_access(hif_hdl,
1427 				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1428 				  &core_ctrl);
1429 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1430 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1431 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1432 
1433 	rv = hif_diag_write_access(hif_hdl,
1434 				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1435 				   core_ctrl);
1436 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1437 }
1438 
1439 /**
1440  * soc_wake_reset() - allow the target to go to sleep
1441  * @scn: hif_softc
1442  *
1443  * Clear the force wake register.  This is done by
1444  * hif_sleep_entry and when cancelling the deferred sleep timer.
1445  */
1446 static void soc_wake_reset(struct hif_softc *scn)
1447 {
1448 	hif_write32_mb(scn, scn->mem +
1449 		PCIE_LOCAL_BASE_ADDRESS +
1450 		PCIE_SOC_WAKE_ADDRESS,
1451 		PCIE_SOC_WAKE_RESET);
1452 }
1453 
1454 /**
1455  * hif_sleep_entry() - gate target sleep
1456  * @arg: hif context
1457  *
1458  * This function is the callback for the sleep timer.
1459  * Check if last force awake critical section was at least
1460  * HIF_MIN_SLEEP_INACTIVITY_TIME_MS ago.  If it was,
1461  * allow the target to go to sleep and cancel the sleep timer;
1462  * otherwise reschedule the sleep timer.
1463  */
1464 static void hif_sleep_entry(void *arg)
1465 {
1466 	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1467 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1468 	uint32_t idle_ms;
1469 
1470 	if (scn->recovery)
1471 		return;
1472 
1473 	if (hif_is_driver_unloading(scn))
1474 		return;
1475 
1476 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1477 	if (hif_state->verified_awake == false) {
1478 		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1479 						    - hif_state->sleep_ticks);
1480 		if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1481 			if (!qdf_atomic_read(&scn->link_suspended)) {
1482 				soc_wake_reset(scn);
1483 				hif_state->fake_sleep = false;
1484 			}
1485 		} else {
1486 			qdf_timer_stop(&hif_state->sleep_timer);
1487 			qdf_timer_start(&hif_state->sleep_timer,
1488 				    HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1489 		}
1490 	} else {
1491 		qdf_timer_stop(&hif_state->sleep_timer);
1492 		qdf_timer_start(&hif_state->sleep_timer,
1493 					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1494 	}
1495 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1496 }
1497 
1498 #define HIF_HIA_MAX_POLL_LOOP    1000000
1499 #define HIF_HIA_POLLING_DELAY_MS 10
1500 
1501 #ifdef CONFIG_WIN
1502 static void hif_set_hia_extnd(struct hif_softc *scn)
1503 {
1504 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1505 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1506 	uint32_t target_type = tgt_info->target_type;
1507 
1508 	HIF_TRACE("%s: E", __func__);
1509 
1510 	if ((target_type == TARGET_TYPE_AR900B) ||
1511 			target_type == TARGET_TYPE_QCA9984 ||
1512 			target_type == TARGET_TYPE_QCA9888) {
1513 		/* CHIP revision is bits 8-11 of the CHIP_ID register 0xec
1514 		 * in RTC space
1515 		 */
1516 		tgt_info->target_revision
1517 			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
1518 					+ CHIP_ID_ADDRESS));
1519 		qdf_print("chip_id 0x%x chip_revision 0x%x",
1520 			  target_type, tgt_info->target_revision);
1521 	}
1522 
1523 	{
1524 		uint32_t flag2_value = 0;
1525 		uint32_t flag2_targ_addr =
1526 			host_interest_item_address(target_type,
1527 			offsetof(struct host_interest_s, hi_skip_clock_init));
1528 
1529 		if ((ar900b_20_targ_clk != -1) &&
1530 			(frac != -1) && (intval != -1)) {
1531 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1532 				&flag2_value);
1533 			qdf_print("\n Setting clk_override");
1534 			flag2_value |= CLOCK_OVERRIDE;
1535 
1536 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1537 					flag2_value);
1538 			qdf_print("\n CLOCK PLL val set %d", flag2_value);
1539 		} else {
1540 			qdf_print("\n CLOCK PLL skipped");
1541 		}
1542 	}
1543 
1544 	if (target_type == TARGET_TYPE_AR900B
1545 			|| target_type == TARGET_TYPE_QCA9984
1546 			|| target_type == TARGET_TYPE_QCA9888) {
1547 
1548 		/* for AR9980_2.0, a 300 MHz clock is used; right now we assume
1549 		 * this would be supplied through module parameters,
1550 		 * and if not supplied, the default (same behavior as 1.0) is assumed.
1551 		 * Assume the 1.0 clock can't be tuned; reset to defaults.
1552 		 */
1553 
1554 		qdf_print(KERN_INFO
1555 			  "%s: setting the target pll frac %x intval %x",
1556 			  __func__, frac, intval);
1557 
1558 		/* do not touch frac, and int val, let them be default -1,
1559 		 * if desired, host can supply these through module params
1560 		 */
1561 		if (frac != -1 || intval != -1) {
1562 			uint32_t flag2_value = 0;
1563 			uint32_t flag2_targ_addr;
1564 
1565 			flag2_targ_addr =
1566 				host_interest_item_address(target_type,
1567 				offsetof(struct host_interest_s,
1568 					hi_clock_info));
1569 			hif_diag_read_access(hif_hdl,
1570 				flag2_targ_addr, &flag2_value);
1571 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1572 				  flag2_value);
1573 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1574 			qdf_print("\n INT Val %x  Address %x",
1575 				  intval, flag2_value + 4);
1576 			hif_diag_write_access(hif_hdl,
1577 					flag2_value + 4, intval);
1578 		} else {
1579 			qdf_print(KERN_INFO
1580 				  "%s: no frac provided, skipping pre-configuring PLL",
1581 				  __func__);
1582 		}
1583 
1584 		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
1585 		if ((target_type == TARGET_TYPE_AR900B)
1586 			&& (tgt_info->target_revision == AR900B_REV_2)
1587 			&& ar900b_20_targ_clk != -1) {
1588 			uint32_t flag2_value = 0;
1589 			uint32_t flag2_targ_addr;
1590 
1591 			flag2_targ_addr
1592 				= host_interest_item_address(target_type,
1593 					offsetof(struct host_interest_s,
1594 					hi_desired_cpu_speed_hz));
1595 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1596 							&flag2_value);
1597 			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
1598 				  flag2_value);
1599 			hif_diag_write_access(hif_hdl, flag2_value,
1600 				ar900b_20_targ_clk/*300000000u*/);
1601 		} else if (target_type == TARGET_TYPE_QCA9888) {
1602 			uint32_t flag2_targ_addr;
1603 
1604 			if (200000000u != qca9888_20_targ_clk) {
1605 				qca9888_20_targ_clk = 300000000u;
1606 				/* Setting the target clock speed to 300 mhz */
1607 			}
1608 
1609 			flag2_targ_addr
1610 				= host_interest_item_address(target_type,
1611 					offsetof(struct host_interest_s,
1612 					hi_desired_cpu_speed_hz));
1613 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1614 				qca9888_20_targ_clk);
1615 		} else {
1616 			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
1617 				  __func__);
1618 		}
1619 	} else {
1620 		if (frac != -1 || intval != -1) {
1621 			uint32_t flag2_value = 0;
1622 			uint32_t flag2_targ_addr =
1623 				host_interest_item_address(target_type,
1624 					offsetof(struct host_interest_s,
1625 							hi_clock_info));
1626 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1627 						&flag2_value);
1628 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1629 				  flag2_value);
1630 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1631 			qdf_print("\n INT Val %x  Address %x", intval,
1632 				  flag2_value + 4);
1633 			hif_diag_write_access(hif_hdl, flag2_value + 4,
1634 					      intval);
1635 		}
1636 	}
1637 }
1638 
1639 #else
1640 
1641 static void hif_set_hia_extnd(struct hif_softc *scn)
1642 {
1643 }
1644 
1645 #endif
1646 
1647 /**
1648  * hif_set_hia() - fill out the host interest area
1649  * @scn: hif context
1650  *
1651  * This is replaced by hif_wlan_enable for integrated targets.
1652  * This fills out the host interest area.  The firmware will
1653  * process these memory addresses when it is first brought out
1654  * of reset.
1655  *
1656  * Return: 0 for success.
1657  */
1658 static int hif_set_hia(struct hif_softc *scn)
1659 {
1660 	QDF_STATUS rv;
1661 	uint32_t interconnect_targ_addr = 0;
1662 	uint32_t pcie_state_targ_addr = 0;
1663 	uint32_t pipe_cfg_targ_addr = 0;
1664 	uint32_t svc_to_pipe_map = 0;
1665 	uint32_t pcie_config_flags = 0;
1666 	uint32_t flag2_value = 0;
1667 	uint32_t flag2_targ_addr = 0;
1668 #ifdef QCA_WIFI_3_0
1669 	uint32_t host_interest_area = 0;
1670 	uint8_t i;
1671 #else
1672 	uint32_t ealloc_value = 0;
1673 	uint32_t ealloc_targ_addr = 0;
1674 	uint8_t banks_switched = 1;
1675 	uint32_t chip_id;
1676 #endif
1677 	uint32_t pipe_cfg_addr;
1678 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1679 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1680 	uint32_t target_type = tgt_info->target_type;
1681 	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
1682 	static struct CE_pipe_config *target_ce_config;
1683 	struct service_to_pipe *target_service_to_ce_map;
1684 
1685 	HIF_TRACE("%s: E", __func__);
1686 
1687 	hif_get_target_ce_config(scn,
1688 				 &target_ce_config, &target_ce_config_sz,
1689 				 &target_service_to_ce_map,
1690 				 &target_service_to_ce_map_sz,
1691 				 NULL, NULL);
1692 
1693 	if (ADRASTEA_BU)
1694 		return QDF_STATUS_SUCCESS;
1695 
1696 #ifdef QCA_WIFI_3_0
1697 	i = 0;
1698 	while (i < HIF_HIA_MAX_POLL_LOOP) {
1699 		host_interest_area = hif_read32_mb(scn, scn->mem +
1700 						A_SOC_CORE_SCRATCH_0_ADDRESS);
1701 		if ((host_interest_area & 0x01) == 0) {
1702 			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1703 			host_interest_area = 0;
1704 			i++;
1705 			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1706 				HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1707 		} else {
1708 			host_interest_area &= (~0x01);
1709 			hif_write32_mb(scn, scn->mem + 0x113014, 0);
1710 			break;
1711 		}
1712 	}
1713 
1714 	if (i >= HIF_HIA_MAX_POLL_LOOP) {
1715 		HIF_ERROR("%s: hia polling timeout", __func__);
1716 		return -EIO;
1717 	}
1718 
1719 	if (host_interest_area == 0) {
1720 		HIF_ERROR("%s: host_interest_area = 0", __func__);
1721 		return -EIO;
1722 	}
1723 
1724 	interconnect_targ_addr = host_interest_area +
1725 			offsetof(struct host_interest_area_t,
1726 			hi_interconnect_state);
1727 
1728 	flag2_targ_addr = host_interest_area +
1729 			offsetof(struct host_interest_area_t, hi_option_flag2);
1730 
1731 #else
1732 	interconnect_targ_addr = hif_hia_item_address(target_type,
1733 		offsetof(struct host_interest_s, hi_interconnect_state));
1734 	ealloc_targ_addr = hif_hia_item_address(target_type,
1735 		offsetof(struct host_interest_s, hi_early_alloc));
1736 	flag2_targ_addr = hif_hia_item_address(target_type,
1737 		offsetof(struct host_interest_s, hi_option_flag2));
1738 #endif
1739 	/* Supply Target-side CE configuration */
1740 	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1741 			  &pcie_state_targ_addr);
1742 	if (rv != QDF_STATUS_SUCCESS) {
1743 		HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1744 			  __func__, interconnect_targ_addr, rv);
1745 		goto done;
1746 	}
1747 	if (pcie_state_targ_addr == 0) {
1748 		rv = QDF_STATUS_E_FAILURE;
1749 		HIF_ERROR("%s: pcie state addr is 0", __func__);
1750 		goto done;
1751 	}
1752 	pipe_cfg_addr = pcie_state_targ_addr +
1753 			  offsetof(struct pcie_state_s,
1754 			  pipe_cfg_addr);
1755 	rv = hif_diag_read_access(hif_hdl,
1756 			  pipe_cfg_addr,
1757 			  &pipe_cfg_targ_addr);
1758 	if (rv != QDF_STATUS_SUCCESS) {
1759 		HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1760 			__func__, pipe_cfg_addr, rv);
1761 		goto done;
1762 	}
1763 	if (pipe_cfg_targ_addr == 0) {
1764 		rv = QDF_STATUS_E_FAILURE;
1765 		HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1766 		goto done;
1767 	}
1768 
1769 	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1770 			(uint8_t *) target_ce_config,
1771 			target_ce_config_sz);
1772 
1773 	if (rv != QDF_STATUS_SUCCESS) {
1774 		HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1775 		goto done;
1776 	}
1777 
1778 	rv = hif_diag_read_access(hif_hdl,
1779 			  pcie_state_targ_addr +
1780 			  offsetof(struct pcie_state_s,
1781 			   svc_to_pipe_map),
1782 			  &svc_to_pipe_map);
1783 	if (rv != QDF_STATUS_SUCCESS) {
1784 		HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1785 		goto done;
1786 	}
1787 	if (svc_to_pipe_map == 0) {
1788 		rv = QDF_STATUS_E_FAILURE;
1789 		HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1790 		goto done;
1791 	}
1792 
1793 	rv = hif_diag_write_mem(hif_hdl,
1794 			svc_to_pipe_map,
1795 			(uint8_t *) target_service_to_ce_map,
1796 			target_service_to_ce_map_sz);
1797 	if (rv != QDF_STATUS_SUCCESS) {
1798 		HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1799 		goto done;
1800 	}
1801 
1802 	rv = hif_diag_read_access(hif_hdl,
1803 			pcie_state_targ_addr +
1804 			offsetof(struct pcie_state_s,
1805 			config_flags),
1806 			&pcie_config_flags);
1807 	if (rv != QDF_STATUS_SUCCESS) {
1808 		HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1809 		goto done;
1810 	}
1811 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1812 	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1813 #else
1814 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1815 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1816 	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1817 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1818 	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1819 #endif
1820 	rv = hif_diag_write_mem(hif_hdl,
1821 			pcie_state_targ_addr +
1822 			offsetof(struct pcie_state_s,
1823 			config_flags),
1824 			(uint8_t *) &pcie_config_flags,
1825 			sizeof(pcie_config_flags));
1826 	if (rv != QDF_STATUS_SUCCESS) {
1827 		HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1828 		goto done;
1829 	}
1830 
1831 #ifndef QCA_WIFI_3_0
1832 	/* configure early allocation */
1833 	ealloc_targ_addr = hif_hia_item_address(target_type,
1834 						offsetof(
1835 						struct host_interest_s,
1836 						hi_early_alloc));
1837 
1838 	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1839 			&ealloc_value);
1840 	if (rv != QDF_STATUS_SUCCESS) {
1841 		HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1842 		goto done;
1843 	}
1844 
1845 	/* 1 bank is switched to IRAM, except ROME 1.0 */
1846 	ealloc_value |=
1847 		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1848 		 HI_EARLY_ALLOC_MAGIC_MASK);
1849 
1850 	rv = hif_diag_read_access(hif_hdl,
1851 			  CHIP_ID_ADDRESS |
1852 			  RTC_SOC_BASE_ADDRESS, &chip_id);
1853 	if (rv != QDF_STATUS_SUCCESS) {
1854 		HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1855 		goto done;
1856 	}
1857 	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1858 		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1859 		switch (CHIP_ID_REVISION_GET(chip_id)) {
1860 		case 0x2:       /* ROME 1.3 */
1861 			/* 2 banks are switched to IRAM */
1862 			banks_switched = 2;
1863 			break;
1864 		case 0x4:       /* ROME 2.1 */
1865 		case 0x5:       /* ROME 2.2 */
1866 			banks_switched = 6;
1867 			break;
1868 		case 0x8:       /* ROME 3.0 */
1869 		case 0x9:       /* ROME 3.1 */
1870 		case 0xA:       /* ROME 3.2 */
1871 			banks_switched = 9;
1872 			break;
1873 		case 0x0:       /* ROME 1.0 */
1874 		case 0x1:       /* ROME 1.1 */
1875 		default:
1876 			/* 3 banks are switched to IRAM */
1877 			banks_switched = 3;
1878 			break;
1879 		}
1880 	}
1881 
1882 	ealloc_value |=
1883 		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1884 		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1885 
1886 	rv = hif_diag_write_access(hif_hdl,
1887 				ealloc_targ_addr,
1888 				ealloc_value);
1889 	if (rv != QDF_STATUS_SUCCESS) {
1890 		HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1891 		goto done;
1892 	}
1893 #endif
1894 	if ((target_type == TARGET_TYPE_AR900B)
1895 			|| (target_type == TARGET_TYPE_QCA9984)
1896 			|| (target_type == TARGET_TYPE_QCA9888)
1897 			|| (target_type == TARGET_TYPE_AR9888)) {
1898 		hif_set_hia_extnd(scn);
1899 	}
1900 
1901 	/* Tell Target to proceed with initialization */
1902 	flag2_targ_addr = hif_hia_item_address(target_type,
1903 						offsetof(
1904 						struct host_interest_s,
1905 						hi_option_flag2));
1906 
1907 	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1908 			  &flag2_value);
1909 	if (rv != QDF_STATUS_SUCCESS) {
1910 		HIF_ERROR("%s: get option val (%d)", __func__, rv);
1911 		goto done;
1912 	}
1913 
1914 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1915 	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1916 			   flag2_value);
1917 	if (rv != QDF_STATUS_SUCCESS) {
1918 		HIF_ERROR("%s: set option val (%d)", __func__, rv);
1919 		goto done;
1920 	}
1921 
1922 	hif_wake_target_cpu(scn);
1923 
1924 done:
1925 
1926 	return rv;
1927 }
1928 
1929 /**
1930  * hif_pci_bus_configure() - configure the pcie bus
1931  * @hif_sc: pointer to the hif context.
1932  *
1933  * Return: 0 for success, nonzero for failure.
1934  */
1935 int hif_pci_bus_configure(struct hif_softc *hif_sc)
1936 {
1937 	int status = 0;
1938 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1939 	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
1940 
1941 	hif_ce_prepare_config(hif_sc);
1942 
1943 	/* initialize sleep state adjust variables */
1944 	hif_state->sleep_timer_init = true;
1945 	hif_state->keep_awake_count = 0;
1946 	hif_state->fake_sleep = false;
1947 	hif_state->sleep_ticks = 0;
1948 
1949 	qdf_timer_init(NULL, &hif_state->sleep_timer,
1950 			       hif_sleep_entry, (void *)hif_state,
1951 			       QDF_TIMER_TYPE_WAKE_APPS);
1952 	hif_state->sleep_timer_init = true;
1953 
1954 	status = hif_wlan_enable(hif_sc);
1955 	if (status) {
1956 		HIF_ERROR("%s: hif_wlan_enable error = %d",
1957 			  __func__, status);
1958 		goto timer_free;
1959 	}
1960 
1961 	A_TARGET_ACCESS_LIKELY(hif_sc);
1962 
1963 	if ((CONFIG_ATH_PCIE_MAX_PERF ||
1964 	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
1965 	    !ce_srng_based(hif_sc)) {
1966 		/*
1967 		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
1968 		 * prevent sleep when we want to keep firmware always awake
1969 		 * note: when we want to keep firmware always awake,
1970 		 *       hif_target_sleep_state_adjust will point to a dummy
1971 		 *       function, and hif_pci_target_sleep_state_adjust must
1972 		 *       be called instead.
1973 		 * note: bus type check is here because AHB bus is reusing
1974 		 *       hif_pci_bus_configure code.
1975 		 */
1976 		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
1977 			if (hif_pci_target_sleep_state_adjust(hif_sc,
1978 					false, true) < 0) {
1979 				status = -EACCES;
1980 				goto disable_wlan;
1981 			}
1982 		}
1983 	}
1984 
1985 	/* todo: consider replacing this with an srng field */
1986 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
1987 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
1988 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
1989 	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
1990 		hif_sc->per_ce_irq = true;
1991 	}
1992 
1993 	status = hif_config_ce(hif_sc);
1994 	if (status)
1995 		goto disable_wlan;
1996 
1997 	/* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
1998 	if (hif_needs_bmi(hif_osc)) {
1999 		status = hif_set_hia(hif_sc);
2000 		if (status)
2001 			goto unconfig_ce;
2002 
2003 		HIF_INFO_MED("%s: hif_set_hia done", __func__);
2004 
2005 	}
2006 
2007 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
2008 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
2009 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
2010 	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
2011 		HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
2012 						__func__);
2013 	else {
2014 		status = hif_configure_irq(hif_sc);
2015 		if (status < 0)
2016 			goto unconfig_ce;
2017 	}
2018 
2019 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2020 
2021 	return status;
2022 
2023 unconfig_ce:
2024 	hif_unconfig_ce(hif_sc);
2025 disable_wlan:
2026 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2027 	hif_wlan_disable(hif_sc);
2028 
2029 timer_free:
2030 	qdf_timer_stop(&hif_state->sleep_timer);
2031 	qdf_timer_free(&hif_state->sleep_timer);
2032 	hif_state->sleep_timer_init = false;
2033 
2034 	HIF_ERROR("%s: failed, status = %d", __func__, status);
2035 	return status;
2036 }
2037 
2038 /**
2039  * hif_pci_close(): close and release the pci bus resources
2040  *
2041  * Return: n/a
2042  */
2043 void hif_pci_close(struct hif_softc *hif_sc)
2044 {
2045 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
2046 
2047 	hif_pm_runtime_close(hif_pci_sc);
2048 	hif_ce_close(hif_sc);
2049 }
2050 
2051 #define BAR_NUM 0
2052 
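/**
 * hif_enable_pci_nopld() - enable the pci device without platform driver help
 * @sc: pci softc
 * @pdev: pci device
 * @id: device id from the probe table
 *
 * Enables the device, reserves BAR 0, programs the DMA masks and maps
 * the target registers into sc->mem for targets that are not managed
 * by the pld layer.
 *
 * Return: 0 on success, negative errno on failure
 */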
2053 static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
2054 				struct pci_dev *pdev,
2055 				const struct pci_device_id *id)
2056 {
2057 	void __iomem *mem;
2058 	int ret = 0;
2059 	uint16_t device_id = 0;
2060 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2061 
2062 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
2063 	if (device_id != id->device)  {
2064 		HIF_ERROR(
2065 		   "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
2066 		   __func__, device_id, id->device);
2067 		/* pci link is down, so returning with error code */
2068 		return -EIO;
2069 	}
2070 
2071 	/* FIXME: temp. commenting out assign_resource
2072 	 * call for dev_attach to work on 2.6.38 kernel
2073 	 */
2074 #if (!defined(__LINUX_ARM_ARCH__))
2075 	if (pci_assign_resource(pdev, BAR_NUM)) {
2076 		HIF_ERROR("%s: pci_assign_resource error", __func__);
2077 		return -EIO;
2078 	}
2079 #endif
2080 	if (pci_enable_device(pdev)) {
2081 		HIF_ERROR("%s: pci_enable_device error",
2082 			   __func__);
2083 		return -EIO;
2084 	}
2085 
2086 	/* Request MMIO resources */
2087 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2088 	if (ret) {
2089 		HIF_ERROR("%s: PCI MMIO reservation error", __func__);
2090 		ret = -EIO;
2091 		goto err_region;
2092 	}
2093 
2094 #ifdef CONFIG_ARM_LPAE
2095 	/* if CONFIG_ARM_LPAE is enabled, we have to set the 64-bit DMA mask
2096 	 * even for 32-bit devices.
2097 	 */
2098 	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2099 	if (ret) {
2100 		HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
2101 		goto err_dma;
2102 	}
2103 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2104 	if (ret) {
2105 		HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
2106 		goto err_dma;
2107 	}
2108 #else
2109 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2110 	if (ret) {
2111 		HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
2112 		goto err_dma;
2113 	}
2114 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2115 	if (ret) {
2116 		HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
2117 			   __func__);
2118 		goto err_dma;
2119 	}
2120 #endif
2121 
2122 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2123 
2124 	/* Set bus master bit in PCI_COMMAND to enable DMA */
2125 	pci_set_master(pdev);
2126 
2127 	/* Arrange for access to Target SoC registers. */
2128 	mem = pci_iomap(pdev, BAR_NUM, 0);
2129 	if (!mem) {
2130 		HIF_ERROR("%s: PCI iomap error", __func__);
2131 		ret = -EIO;
2132 		goto err_iomap;
2133 	}
2134 
2135 	HIF_INFO("*****BAR is %pK\n", (void *)mem);
2136 
2137 	sc->mem = mem;
2138 
2139 	/* Hawkeye emulation specific change */
2140 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
2141 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
2142 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
2143 		(device_id == RUMIM2M_DEVICE_ID_NODE3) ||
2144 		(device_id == RUMIM2M_DEVICE_ID_NODE4) ||
2145 		(device_id == RUMIM2M_DEVICE_ID_NODE5)) {
2146 		mem = mem + 0x0c000000;
2147 		sc->mem = mem;
2148 		HIF_INFO("%s: Changing PCI mem base to %pK\n",
2149 			__func__, sc->mem);
2150 	}
2151 
2152 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
2153 	ol_sc->mem = mem;
2154 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
2155 	sc->pci_enabled = true;
2156 	return ret;
2157 
2158 err_iomap:
2159 	pci_clear_master(pdev);
2160 err_dma:
2161 	pci_release_region(pdev, BAR_NUM);
2162 err_region:
2163 	pci_disable_device(pdev);
2164 	return ret;
2165 }
2166 
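/**
 * hif_enable_pci_pld() - enable pci for pld managed targets
 * @sc: pci softc
 * @pdev: pci device
 * @id: device id from the probe table
 *
 * For pld managed targets the platform layer handles device enable and
 * BAR mapping (the addresses are picked up in hif_pci_get_soc_info_pld()),
 * so only the L1ss workaround is applied and the device is marked enabled.
 *
 * Return: 0
 */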
2167 static int hif_enable_pci_pld(struct hif_pci_softc *sc,
2168 			      struct pci_dev *pdev,
2169 			      const struct pci_device_id *id)
2170 {
2171 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2172 	sc->pci_enabled = true;
2173 	return 0;
2174 }
2175 
2176 
2177 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
2178 {
2179 	pci_disable_msi(sc->pdev);
2180 	pci_iounmap(sc->pdev, sc->mem);
2181 	pci_clear_master(sc->pdev);
2182 	pci_release_region(sc->pdev, BAR_NUM);
2183 	pci_disable_device(sc->pdev);
2184 }
2185 
2186 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
2187 
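/**
 * hif_disable_pci() - reset the target and undo hif_enable_pci*()
 * @sc: pci softc
 *
 * Resets the device, runs the bus-specific deinit callback and clears
 * the cached BAR pointers.
 */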
2188 static void hif_disable_pci(struct hif_pci_softc *sc)
2189 {
2190 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2191 
2192 	if (ol_sc == NULL) {
2193 		HIF_ERROR("%s: ol_sc = NULL", __func__);
2194 		return;
2195 	}
2196 	hif_pci_device_reset(sc);
2197 	sc->hif_pci_deinit(sc);
2198 
2199 	sc->mem = NULL;
2200 	ol_sc->mem = NULL;
2201 }
2202 
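/**
 * hif_pci_probe_tgt_wakeup() - wake the target during probe
 * @sc: pci softc
 *
 * Forces the SoC awake and, for pre-QCA_WIFI_3_0 targets, checks the
 * firmware indicator to catch a target that is still running from a
 * previous driver load.
 *
 * Return: 0 on success, -EAGAIN if the target is in an unexpected state
 */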
2203 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
2204 {
2205 	int ret = 0;
2206 	int targ_awake_limit = 500;
2207 #ifndef QCA_WIFI_3_0
2208 	uint32_t fw_indicator;
2209 #endif
2210 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2211 
2212 	/*
2213 	 * Verify that the Target was started cleanly.
2214 	 * The case where this is most likely is with an AUX-powered
2215 	 * Target and a Host in WoW mode. If the Host crashes,
2216 	 * loses power, or is restarted (without unloading the driver)
2217 	 * then the Target is left (aux) powered and running.  On a
2218 	 * subsequent driver load, the Target is in an unexpected state.
2219 	 * We try to catch that here in order to reset the Target and
2220 	 * retry the probe.
2221 	 */
2222 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2223 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2224 	while (!hif_targ_is_awake(scn, sc->mem)) {
2225 		if (0 == targ_awake_limit) {
2226 			HIF_ERROR("%s: target awake timeout", __func__);
2227 			ret = -EAGAIN;
2228 			goto end;
2229 		}
2230 		qdf_mdelay(1);
2231 		targ_awake_limit--;
2232 	}
2233 
2234 #if PCIE_BAR0_READY_CHECKING
2235 	{
2236 		int wait_limit = 200;
2237 		/* Synchronization point: wait the BAR0 is configured */
2238 		while (wait_limit-- &&
2239 			   !(hif_read32_mb(sc, sc->mem +
2240 					  PCIE_LOCAL_BASE_ADDRESS +
2241 					  PCIE_SOC_RDY_STATUS_ADDRESS)
2242 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
2243 			qdf_mdelay(10);
2244 		}
2245 		if (wait_limit < 0) {
2246 			/* AR6320v1 doesn't support checking of BAR0
2247 			 * configuration; wait up to two sec for BAR0 ready
2248 			 */
2249 			HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
2250 				    __func__);
2251 		}
2252 	}
2253 #endif
2254 
2255 #ifndef QCA_WIFI_3_0
2256 	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
2257 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2258 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2259 
2260 	if (fw_indicator & FW_IND_INITIALIZED) {
2261 		HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
2262 			   __func__);
2263 		ret = -EAGAIN;
2264 		goto end;
2265 	}
2266 #endif
2267 
2268 end:
2269 	return ret;
2270 }
2271 
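/**
 * hif_pci_configure_legacy_irq() - set up legacy (line/INTx) interrupts
 * @sc: pci softc
 *
 * Registers the shared legacy interrupt handler and enables the
 * target-side interrupt group used when MSI is not available.
 *
 * Return: 0 on success, error code from request_irq() otherwise
 */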
2272 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2273 {
2274 	int ret = 0;
2275 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2276 	uint32_t target_type = scn->target_info.target_type;
2277 
2278 	HIF_TRACE("%s: E", __func__);
2279 
2280 	/* MSI is not supported, or MSI IRQ setup failed; use legacy interrupt */
2281 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2282 	ret = request_irq(sc->pdev->irq,
2283 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2284 			  "wlan_pci", sc);
2285 	if (ret) {
2286 		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
2287 		goto end;
2288 	}
2289 	scn->wake_irq = sc->pdev->irq;
2290 	/* Use sc->irq instead of sc->pdev->irq
2291 	 * platform_device pdev doesn't have an irq field
2292 	 */
2293 	sc->irq = sc->pdev->irq;
2294 	/* Use Legacy PCI Interrupts */
2295 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2296 		  PCIE_INTR_ENABLE_ADDRESS),
2297 		  HOST_GROUP0_MASK);
2298 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2299 			       PCIE_INTR_ENABLE_ADDRESS));
2300 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2301 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2302 
2303 	if ((target_type == TARGET_TYPE_IPQ4019) ||
2304 			(target_type == TARGET_TYPE_AR900B)  ||
2305 			(target_type == TARGET_TYPE_QCA9984) ||
2306 			(target_type == TARGET_TYPE_AR9888) ||
2307 			(target_type == TARGET_TYPE_QCA9888) ||
2308 			(target_type == TARGET_TYPE_AR6320V1) ||
2309 			(target_type == TARGET_TYPE_AR6320V2) ||
2310 			(target_type == TARGET_TYPE_AR6320V3)) {
2311 		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2312 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2313 	}
2314 end:
2315 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2316 			  "%s: X, ret = %d", __func__, ret);
2317 	return ret;
2318 }
2319 
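/**
 * hif_ce_srng_msi_free_irq() - free the per-CE MSI interrupts
 * @scn: hif context
 *
 * Return: 0 on success, or the pld_get_user_msi_assignment() error if
 *	   there is no user MSI assignment, in which case nothing is freed
 */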
2320 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
2321 {
2322 	int ret;
2323 	int ce_id, irq;
2324 	uint32_t msi_data_start;
2325 	uint32_t msi_data_count;
2326 	uint32_t msi_irq_start;
2327 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2328 
2329 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2330 					    &msi_data_count, &msi_data_start,
2331 					    &msi_irq_start);
2332 	if (ret)
2333 		return ret;
2334 
2335 	/* needs to match the ce_id -> irq data mapping
2336 	 * used in the srng parameter configuration
2337 	 */
2338 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2339 		unsigned int msi_data;
2340 
2341 		if (!ce_sc->tasklets[ce_id].inited)
2342 			continue;
2343 
2344 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
2345 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2346 
2347 		HIF_INFO("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
2348 			  ce_id, msi_data, irq);
2349 
2350 		free_irq(irq, &ce_sc->tasklets[ce_id]);
2351 	}
2352 
2353 	return ret;
2354 }
2355 
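/**
 * hif_pci_deconfigure_grp_irq() - free the irqs of the ext interrupt groups
 * @scn: hif context
 *
 * Return: none
 */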
2356 static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2357 {
2358 	int i, j, irq;
2359 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2360 	struct hif_exec_context *hif_ext_group;
2361 
2362 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2363 		hif_ext_group = hif_state->hif_ext_group[i];
2364 		if (hif_ext_group->irq_requested) {
2365 			hif_ext_group->irq_requested = false;
2366 			for (j = 0; j < hif_ext_group->numirq; j++) {
2367 				irq = hif_ext_group->os_irq[j];
2368 				free_irq(irq, hif_ext_group);
2369 			}
2370 			hif_ext_group->numirq = 0;
2371 		}
2372 	}
2373 }
2374 
2375 /**
2376  * hif_pci_nointrs(): disable IRQ
2377  *
2378  * This function stops interrupt(s)
2379  *
2380  * @scn: struct hif_softc
2381  *
2382  * Return: none
2383  */
2384 void hif_pci_nointrs(struct hif_softc *scn)
2385 {
2386 	int i, ret;
2387 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2388 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2389 
2390 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2391 
2392 	if (scn->request_irq_done == false)
2393 		return;
2394 
2395 	hif_pci_deconfigure_grp_irq(scn);
2396 
2397 	ret = hif_ce_srng_msi_free_irq(scn);
2398 	if (ret != -EINVAL) {
2399 		/* ce irqs freed in hif_ce_srng_msi_free_irq */
2400 
2401 		if (scn->wake_irq)
2402 			free_irq(scn->wake_irq, scn);
2403 		scn->wake_irq = 0;
2404 	} else if (sc->num_msi_intrs > 0) {
2405 		/* MSI interrupt(s) */
2406 		for (i = 0; i < sc->num_msi_intrs; i++)
2407 			free_irq(sc->irq + i, sc);
2408 		sc->num_msi_intrs = 0;
2409 	} else {
2410 		/* Legacy PCI line interrupt
2411 		 * Use sc->irq instead of sc->pdev->irq
2412 		 * platform_device pdev doesn't have an irq field
2413 		 */
2414 		free_irq(sc->irq, sc);
2415 	}
2416 	scn->request_irq_done = false;
2417 }
2418 
2419 /**
2420  * hif_pci_disable_bus(): disable the bus
2421  *
2422  * This function disables the bus
2423  *
2424  * @scn: hif context
2425  *
2426  * Return: none
2427  */
2428 void hif_pci_disable_bus(struct hif_softc *scn)
2429 {
2430 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2431 	struct pci_dev *pdev;
2432 	void __iomem *mem;
2433 	struct hif_target_info *tgt_info = &scn->target_info;
2434 
2435 	/* Attach did not succeed, all resources have been
2436 	 * freed in error handler
2437 	 */
2438 	if (!sc)
2439 		return;
2440 
2441 	pdev = sc->pdev;
2442 	if (ADRASTEA_BU) {
2443 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2444 
2445 		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2446 		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
2447 			       HOST_GROUP0_MASK);
2448 	}
2449 
2450 #if defined(CPU_WARM_RESET_WAR)
2451 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2452 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2453 	 * verified for AR9888_REV1
2454 	 */
2455 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2456 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2457 		hif_pci_device_warm_reset(sc);
2458 	else
2459 		hif_pci_device_reset(sc);
2460 #else
2461 	hif_pci_device_reset(sc);
2462 #endif
2463 	mem = (void __iomem *)sc->mem;
2464 	if (mem) {
2465 		hif_dump_pipe_debug_count(scn);
2466 		if (scn->athdiag_procfs_inited) {
2467 			athdiag_procfs_remove();
2468 			scn->athdiag_procfs_inited = false;
2469 		}
2470 		sc->hif_pci_deinit(sc);
2471 		scn->mem = NULL;
2472 	}
2473 	HIF_INFO("%s: X", __func__);
2474 }
2475 
2476 #define OL_ATH_PCI_PM_CONTROL 0x44
2477 
2478 #ifdef FEATURE_RUNTIME_PM
2479 /**
2480  * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occurring
2481  * @scn: hif context
2482  * @flag: prevent linkdown if true otherwise allow
2483  *
2484  * this api should only be called as part of bus prevent linkdown
2485  */
2486 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2487 {
2488 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2489 
2490 	if (flag)
2491 		qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
2492 	else
2493 		qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
2494 }
2495 #else
2496 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2497 {
2498 }
2499 #endif
2500 
2501 #if defined(CONFIG_PCI_MSM)
2502 /**
2503  * hif_pci_prevent_linkdown(): prevent or allow pcie linkdown
2504  * @flag: true prevents linkdown, false allows
2505  *
2506  * Calls into the platform driver to vote against taking down the
2507  * pcie link.
2508  *
2509  * Return: n/a
2510  */
2511 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2512 {
2513 	int errno;
2514 
2515 	HIF_INFO("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2516 	hif_runtime_prevent_linkdown(scn, flag);
2517 
2518 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2519 	if (errno)
2520 		HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
2521 			  __func__, errno);
2522 }
2523 #else
2524 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2525 {
2526 	HIF_INFO("wlan: %s pcie power collapse", (flag ? "disable" : "enable"));
2527 	hif_runtime_prevent_linkdown(scn, flag);
2528 }
2529 #endif
2530 
2531 /**
2532  * hif_pci_bus_suspend(): prepare hif for suspend
2533  *
2534  * Return: Errno
2535  */
2536 int hif_pci_bus_suspend(struct hif_softc *scn)
2537 {
2538 	return 0;
2539 }
2540 
2541 /**
2542  * __hif_check_link_status() - API to check whether the PCIe link is active
2543  * @scn: HIF Context
2544  *
2545  * API reads the PCIe config space to verify if PCIe link training is
2546  * successful or not.
2547  *
2548  * Return: Success/Failure
2549  */
2550 static int __hif_check_link_status(struct hif_softc *scn)
2551 {
2552 	uint16_t dev_id = 0;
2553 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2554 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2555 
2556 	if (!sc) {
2557 		HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
2558 		return -EINVAL;
2559 	}
2560 
2561 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2562 
2563 	if (dev_id == sc->devid)
2564 		return 0;
2565 
2566 	HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2567 	       __func__, dev_id);
2568 
2569 	scn->recovery = true;
2570 
2571 	if (cbk && cbk->set_recovery_in_progress)
2572 		cbk->set_recovery_in_progress(cbk->context, true);
2573 	else
2574 		HIF_ERROR("%s: Driver Global Recovery is not set", __func__);
2575 
2576 	pld_is_pci_link_down(sc->dev);
2577 	return -EACCES;
2578 }
2579 
2580 /**
2581  * hif_pci_bus_resume(): prepare hif for resume
2582  *
2583  * Return: Errno
2584  */
2585 int hif_pci_bus_resume(struct hif_softc *scn)
2586 {
2587 	return __hif_check_link_status(scn);
2588 }
2589 
2590 /**
2591  * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2592  * @scn: hif context
2593  *
2594  * Ensure that if we received the wakeup message before the irq
2595  * was disabled that the message is processed before suspending.
2596  *
2597  * Return: -EBUSY if we fail to flush the tasklets.
2598  */
2599 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2600 {
2601 	if (hif_drain_tasklets(scn) != 0)
2602 		return -EBUSY;
2603 
2604 	/* Stop the HIF Sleep Timer */
2605 	hif_cancel_deferred_target_sleep(scn);
2606 
2607 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2608 		qdf_atomic_set(&scn->link_suspended, 1);
2609 
2610 	return 0;
2611 }
2612 
2613 /**
2614  * hif_pci_bus_resume_noirq() - bus resume handler run before irqs are enabled
2615  * @scn: hif context
2616  *
2617  * Clear the link-suspended flag so that normal target accesses can
2618  * resume now that the pcie link is back up.
2619  *
2620  * Return: 0
2621  */
2622 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2623 {
2624 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2625 		qdf_atomic_set(&scn->link_suspended, 0);
2626 
2627 	return 0;
2628 }
2629 
2630 #ifdef FEATURE_RUNTIME_PM
2631 /**
2632  * __hif_runtime_pm_set_state(): utility function
2633  * @state: state to set
2634  *
2635  * indexes into the runtime pm state and sets it.
2636  */
2637 static void __hif_runtime_pm_set_state(struct hif_softc *scn,
2638 				enum hif_pm_runtime_state state)
2639 {
2640 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2641 
2642 	if (NULL == sc) {
2643 		HIF_ERROR("%s: HIF_CTX not initialized",
2644 		       __func__);
2645 		return;
2646 	}
2647 
2648 	qdf_atomic_set(&sc->pm_state, state);
2649 }
2650 
2651 /**
2652  * hif_runtime_pm_set_state_inprogress(): adjust runtime pm state
2653  *
2654  * Notify hif that a runtime pm operation has started
2655  */
2656 static void hif_runtime_pm_set_state_inprogress(struct hif_softc *scn)
2657 {
2658 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_INPROGRESS);
2659 }
2660 
2661 /**
2662  * hif_runtime_pm_set_state_on(): adjust runtime pm state
2663  *
2664  * Notify hif that the runtime pm state should be on
2665  */
2666 static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
2667 {
2668 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
2669 }
2670 
2671 /**
2672  * hif_runtime_pm_set_state_suspended():  adjust runtime pm state
2673  *
2674  * Notify hif that a runtime suspend attempt has been completed successfully
2675  */
2676 static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
2677 {
2678 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
2679 }
2680 
2681 /**
2682  * hif_log_runtime_suspend_success() - log a successful runtime suspend
2683  */
2684 static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
2685 {
2686 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2687 
2688 	if (sc == NULL)
2689 		return;
2690 
2691 	sc->pm_stats.suspended++;
2692 	sc->pm_stats.suspend_jiffies = jiffies;
2693 }
2694 
2695 /**
2696  * hif_log_runtime_suspend_failure() - log a failed runtime suspend
2697  *
2698  * log a failed runtime suspend
2699  * mark last busy to prevent immediate runtime suspend
2700  */
2701 static void hif_log_runtime_suspend_failure(void *hif_ctx)
2702 {
2703 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2704 
2705 	if (sc == NULL)
2706 		return;
2707 
2708 	sc->pm_stats.suspend_err++;
2709 }
2710 
2711 /**
2712  * hif_log_runtime_resume_success() - log a successful runtime resume
2713  *
2714  * log a successful runtime resume
2715  * mark last busy to prevent immediate runtime suspend
2716  */
2717 static void hif_log_runtime_resume_success(void *hif_ctx)
2718 {
2719 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2720 
2721 	if (sc == NULL)
2722 		return;
2723 
2724 	sc->pm_stats.resumed++;
2725 }
2726 
2727 /**
2728  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
2729  *
2730  * Record the failure.
2731  * mark last busy to delay a retry.
2732  * adjust the runtime_pm state.
2733  */
2734 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
2735 {
2736 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2737 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2738 
2739 	hif_log_runtime_suspend_failure(hif_ctx);
2740 	if (hif_pci_sc != NULL)
2741 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2742 	hif_runtime_pm_set_state_on(scn);
2743 }
2744 
2745 /**
2746  * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
2747  *
2748  * Makes sure that the pci link will be taken down by the suspend operation.
2749  * If the hif layer is configured to leave the bus on, runtime suspend will
2750  * not save any power.
2751  *
2752  * Set the runtime suspend state to in progress.
2753  *
2754  * return -EINVAL if the bus won't go down.  otherwise return 0
2755  */
2756 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
2757 {
2758 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2759 
2760 	if (!hif_can_suspend_link(hif_ctx)) {
2761 		HIF_ERROR("Runtime PM not supported for link up suspend");
2762 		return -EINVAL;
2763 	}
2764 
2765 	hif_runtime_pm_set_state_inprogress(scn);
2766 	return 0;
2767 }
2768 
2769 /**
2770  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
2771  *
2772  * Record the success.
2773  * adjust the runtime_pm state
2774  */
2775 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
2776 {
2777 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2778 
2779 	hif_runtime_pm_set_state_suspended(scn);
2780 	hif_log_runtime_suspend_success(scn);
2781 }
2782 
2783 /**
2784  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
2785  *
2786  * update the runtime pm state.
2787  */
2788 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
2789 {
2790 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2791 
2792 	hif_runtime_pm_set_state_inprogress(scn);
2793 }
2794 
2795 /**
2796  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
2797  *
2798  * record the success.
2799  * adjust the runtime_pm state
2800  */
2801 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
2802 {
2803 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2804 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2805 
2806 	hif_log_runtime_resume_success(hif_ctx);
2807 	if (hif_pci_sc != NULL)
2808 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2809 	hif_runtime_pm_set_state_on(scn);
2810 }
2811 
2812 /**
2813  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
2814  *
2815  * Return: 0 for success and non-zero error code for failure
2816  */
2817 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
2818 {
2819 	int errno;
2820 
2821 	errno = hif_bus_suspend(hif_ctx);
2822 	if (errno) {
2823 		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
2824 		return errno;
2825 	}
2826 
2827 	errno = hif_apps_irqs_disable(hif_ctx);
2828 	if (errno) {
2829 		HIF_ERROR("%s: failed disable irqs: %d", __func__, errno);
2830 		goto bus_resume;
2831 	}
2832 
2833 	errno = hif_bus_suspend_noirq(hif_ctx);
2834 	if (errno) {
2835 		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
2836 		goto irqs_enable;
2837 	}
2838 
2839 	/* link should always be down; skip enable wake irq */
2840 
2841 	return 0;
2842 
2843 irqs_enable:
2844 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
2845 
2846 bus_resume:
2847 	QDF_BUG(!hif_bus_resume(hif_ctx));
2848 
2849 	return errno;
2850 }
2851 
2852 /**
2853  * hif_fastpath_resume() - resume fastpath for runtime pm
2854  *
2855  * ensure that the fastpath write index register is up to date
2856  * since runtime pm may cause ce_send_fast to skip the register
2857  * write.
2858  *
2859  * fastpath only applicable to legacy copy engine
2860  */
2861 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
2862 {
2863 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2864 	struct CE_state *ce_state;
2865 
2866 	if (!scn)
2867 		return;
2868 
2869 	if (scn->fastpath_mode_on) {
2870 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2871 			return;
2872 
2873 		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
2874 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
2875 
2876 		/* war_ce_src_ring_write_idx_set */
2877 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
2878 				ce_state->src_ring->write_index);
2879 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
2880 		Q_TARGET_ACCESS_END(scn);
2881 	}
2882 }
2883 
2884 /**
2885  * hif_runtime_resume() - do the bus resume part of a runtime resume
2886  *
2887  *  Return: 0 for success and non-zero error code for failure
2888  */
2889 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
2890 {
2891 	/* link should always be down; skip disable wake irq */
2892 
2893 	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
2894 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
2895 	QDF_BUG(!hif_bus_resume(hif_ctx));
2896 	return 0;
2897 }
2898 #endif /* #ifdef FEATURE_RUNTIME_PM */
2899 
2900 #if CONFIG_PCIE_64BIT_MSI
2901 static void hif_free_msi_ctx(struct hif_softc *scn)
2902 {
2903 	struct hif_pci_softc *sc = scn->hif_sc;
2904 	struct hif_msi_info *info = &sc->msi_info;
2905 	struct device *dev = scn->qdf_dev->dev;
2906 
2907 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
2908 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
2909 	info->magic = NULL;
2910 	info->magic_dma = 0;
2911 }
2912 #else
2913 static void hif_free_msi_ctx(struct hif_softc *scn)
2914 {
2915 }
2916 #endif
2917 
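/**
 * hif_pci_disable_isr() - disable and quiesce all hif interrupt handling
 * @scn: hif context
 *
 * Kills the exec contexts, frees/disables the interrupts and kills any
 * pending tasklets so that no hif interrupt work runs after this returns.
 */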
2918 void hif_pci_disable_isr(struct hif_softc *scn)
2919 {
2920 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2921 
2922 	hif_exec_kill(&scn->osc);
2923 	hif_nointrs(scn);
2924 	hif_free_msi_ctx(scn);
2925 	/* Cancel the pending tasklet */
2926 	ce_tasklet_kill(scn);
2927 	tasklet_kill(&sc->intr_tq);
2928 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
2929 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
2930 }
2931 
2932 /* Function to reset SoC */
2933 void hif_pci_reset_soc(struct hif_softc *hif_sc)
2934 {
2935 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
2936 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
2937 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
2938 
2939 #if defined(CPU_WARM_RESET_WAR)
2940 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2941 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2942 	 * verified for AR9888_REV1
2943 	 */
2944 	if (tgt_info->target_version == AR9888_REV2_VERSION)
2945 		hif_pci_device_warm_reset(sc);
2946 	else
2947 		hif_pci_device_reset(sc);
2948 #else
2949 	hif_pci_device_reset(sc);
2950 #endif
2951 }
2952 
2953 #ifdef CONFIG_PCI_MSM
2954 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
2955 {
2956 	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
2957 	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
2958 }
2959 #else
2960 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}
2961 #endif
2962 
2963 /**
2964  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
2965  * @sc: HIF PCIe Context
2966  *
2967  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
2968  *
2969  * Return: Failure to caller
2970  */
2971 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
2972 {
2973 	uint16_t val = 0;
2974 	uint32_t bar = 0;
2975 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
2976 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2977 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
2978 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
2979 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2980 	A_target_id_t pci_addr = scn->mem;
2981 
2982 	HIF_ERROR("%s: keep_awake_count = %d",
2983 			__func__, hif_state->keep_awake_count);
2984 
2985 	pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
2986 
2987 	HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
2988 
2989 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
2990 
2991 	HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
2992 
2993 	pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
2994 
2995 	HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
2996 
2997 	pci_read_config_word(sc->pdev, PCI_STATUS, &val);
2998 
2999 	HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
3000 
3001 	pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
3002 
3003 	HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
3004 
3005 	HIF_ERROR("%s: SOC_WAKE_ADDR 0%08x", __func__,
3006 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3007 						PCIE_SOC_WAKE_ADDRESS));
3008 
3009 	HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
3010 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3011 							RTC_STATE_ADDRESS));
3012 
3013 	HIF_ERROR("%s:error, wakeup target", __func__);
3014 	hif_msm_pcie_debug_info(sc);
3015 
3016 	if (!cfg->enable_self_recovery)
3017 		QDF_BUG(0);
3018 
3019 	scn->recovery = true;
3020 
3021 	if (cbk->set_recovery_in_progress)
3022 		cbk->set_recovery_in_progress(cbk->context, true);
3023 
3024 	pld_is_pci_link_down(sc->dev);
3025 	return -EACCES;
3026 }
3027 
3028 /*
3029  * For now, we use simple on-demand sleep/wake.
3030  * Some possible improvements:
3031  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
3032  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
3033  *   Careful, though, these functions may be used by
3034  *  interrupt handlers ("atomic")
3035  *  -Don't use host_reg_table for this code; instead use values directly
3036  *  -Use a separate timer to track activity and allow Target to sleep only
3037  *   if it hasn't done anything for a while; may even want to delay some
3038  *   processing for a short while in order to "batch" (e.g.) transmit
3039  *   requests with completion processing into "windows of up time".  Costs
3040  *   some performance, but improves power utilization.
3041  *  -On some platforms, it might be possible to eliminate explicit
3042  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
3043  *   recover from the failure by forcing the Target awake.
3044  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
3045  *   overhead in some cases. Perhaps this makes more sense when
3046  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
3047  *   disabled.
3048  *  -It is possible to compile this code out and simply force the Target
3049  *   to remain awake.  That would yield optimal performance at the cost of
3050  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
3051  *
3052  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
3053  */
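/*
 * Typical usage, as a sketch only; callers normally reach this through
 * the Q_TARGET_ACCESS_BEGIN/END and A_TARGET_ACCESS_LIKELY/UNLIKELY
 * macros used elsewhere in this file rather than calling it directly:
 *
 *	if (hif_pci_target_sleep_state_adjust(scn, false, true) == 0) {
 *		... access Target registers ...
 *		hif_pci_target_sleep_state_adjust(scn, true, false);
 *	}
 */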
3054 /**
3055  * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
3056  * @scn: hif_softc pointer.
3057  * @sleep_ok: allow the Target to go to sleep when true
3058  * @wait_for_it: when waking, wait until the Target is verified awake
3059  *
3060  * Adjust the Target's sleep state: keep it awake while the host needs
3061  * register access and allow it to sleep again afterwards.
3062  *
3063  * Return: 0 on success, -EACCES on failure
3063  */
3064 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
3065 			      bool sleep_ok, bool wait_for_it)
3066 {
3067 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3068 	A_target_id_t pci_addr = scn->mem;
3069 	static int max_delay;
3070 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3071 	static int debug;
3072 	if (scn->recovery)
3073 		return -EACCES;
3074 
3075 	if (qdf_atomic_read(&scn->link_suspended)) {
3076 		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
3077 		debug = true;
3078 		QDF_ASSERT(0);
3079 		return -EACCES;
3080 	}
3081 
3082 	if (debug) {
3083 		wait_for_it = true;
3084 		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
3085 				__func__);
3086 		QDF_ASSERT(0);
3087 	}
3088 
3089 	if (sleep_ok) {
3090 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3091 		hif_state->keep_awake_count--;
3092 		if (hif_state->keep_awake_count == 0) {
3093 			/* Allow sleep */
3094 			hif_state->verified_awake = false;
3095 			hif_state->sleep_ticks = qdf_system_ticks();
3096 		}
3097 		if (hif_state->fake_sleep == false) {
3098 			/* Set the Fake Sleep */
3099 			hif_state->fake_sleep = true;
3100 
3101 			/* Start the Sleep Timer */
3102 			qdf_timer_stop(&hif_state->sleep_timer);
3103 			qdf_timer_start(&hif_state->sleep_timer,
3104 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
3105 		}
3106 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3107 	} else {
3108 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3109 
3110 		if (hif_state->fake_sleep) {
3111 			hif_state->verified_awake = true;
3112 		} else {
3113 			if (hif_state->keep_awake_count == 0) {
3114 				/* Force AWAKE */
3115 				hif_write32_mb(sc, pci_addr +
3116 					      PCIE_LOCAL_BASE_ADDRESS +
3117 					      PCIE_SOC_WAKE_ADDRESS,
3118 					      PCIE_SOC_WAKE_V_MASK);
3119 			}
3120 		}
3121 		hif_state->keep_awake_count++;
3122 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3123 
3124 		if (wait_for_it && !hif_state->verified_awake) {
3125 #define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
3126 			int tot_delay = 0;
3127 			int curr_delay = 5;
3128 
3129 			for (;; ) {
3130 				if (hif_targ_is_awake(scn, pci_addr)) {
3131 					hif_state->verified_awake = true;
3132 					break;
3133 				}
3134 				if (!hif_pci_targ_is_present(scn, pci_addr))
3135 					break;
3136 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
3137 					return hif_log_soc_wakeup_timeout(sc);
3138 
3139 				OS_DELAY(curr_delay);
3140 				tot_delay += curr_delay;
3141 
3142 				if (curr_delay < 50)
3143 					curr_delay += 5;
3144 			}
3145 
3146 			/*
3147 			 * NB: If Target has to come out of Deep Sleep,
3148 			 * this may take a few Msecs. Typically, though
3149 			 * this delay should be <30us.
3150 			 */
3151 			if (tot_delay > max_delay)
3152 				max_delay = tot_delay;
3153 		}
3154 	}
3155 
3156 	if (debug && hif_state->verified_awake) {
3157 		debug = 0;
3158 		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
3159 			__func__,
3160 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3161 				PCIE_INTR_ENABLE_ADDRESS),
3162 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3163 				PCIE_INTR_CAUSE_ADDRESS),
3164 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3165 				CPU_INTR_ADDRESS),
3166 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3167 				PCIE_INTR_CLR_ADDRESS),
3168 			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
3169 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
3170 	}
3171 
3172 	return 0;
3173 }
3174 
3175 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
3176 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
3177 {
3178 	uint32_t value;
3179 	void *addr;
3180 
3181 	addr = scn->mem + offset;
3182 	value = hif_read32_mb(scn, addr);
3183 
3184 	{
3185 		unsigned long irq_flags;
3186 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3187 
3188 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3189 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3190 		pcie_access_log[idx].is_write = false;
3191 		pcie_access_log[idx].addr = addr;
3192 		pcie_access_log[idx].value = value;
3193 		pcie_access_log_seqnum++;
3194 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3195 	}
3196 
3197 	return value;
3198 }
3199 
3200 void
3201 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
3202 {
3203 	void *addr;
3204 
3205 	addr = scn->mem + (offset);
3206 	hif_write32_mb(scn, addr, value);
3207 
3208 	{
3209 		unsigned long irq_flags;
3210 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3211 
3212 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3213 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3214 		pcie_access_log[idx].is_write = true;
3215 		pcie_access_log[idx].addr = addr;
3216 		pcie_access_log[idx].value = value;
3217 		pcie_access_log_seqnum++;
3218 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3219 	}
3220 }
3221 
3222 /**
3223  * hif_target_dump_access_log() - dump access log
3224  *
3225  * dump access log
3226  *
3227  * Return: n/a
3228  */
3229 void hif_target_dump_access_log(void)
3230 {
3231 	int idx, len, start_idx, cur_idx;
3232 	unsigned long irq_flags;
3233 
3234 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3235 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
3236 		len = PCIE_ACCESS_LOG_NUM;
3237 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3238 	} else {
3239 		len = pcie_access_log_seqnum;
3240 		start_idx = 0;
3241 	}
3242 
3243 	for (idx = 0; idx < len; idx++) {
3244 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
3245 		HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
3246 		       __func__, idx,
3247 		       pcie_access_log[cur_idx].seqnum,
3248 		       pcie_access_log[cur_idx].is_write,
3249 		       pcie_access_log[cur_idx].addr,
3250 		       pcie_access_log[cur_idx].value);
3251 	}
3252 
3253 	pcie_access_log_seqnum = 0;
3254 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3255 }
3256 #endif
3257 
3258 #ifndef HIF_AHB
3259 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
3260 {
3261 	QDF_BUG(0);
3262 	return -EINVAL;
3263 }
3264 
3265 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
3266 {
3267 	QDF_BUG(0);
3268 	return -EINVAL;
3269 }
3270 #endif
3271 
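/**
 * hif_ce_interrupt_handler() - per-CE MSI interrupt handler
 * @irq: irq number
 * @context: the ce_tasklet_entry registered for this irq
 *
 * Return: the irqreturn_t from ce_dispatch_interrupt()
 */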
3272 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
3273 {
3274 	struct ce_tasklet_entry *tasklet_entry = context;
3275 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
3276 }
3277 extern const char *ce_name[];
3278 
3279 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3280 {
3281 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3282 
3283 	return pci_scn->ce_msi_irq_num[ce_id];
3284 }
3285 
3286 /* hif_ce_srng_msi_irq_disable() - disable the irq for msi
3287  * @hif_sc: hif context
3288  * @ce_id: which ce to disable copy complete interrupts for
3289  *
3290  * since MSI interrupts are not level based, the system can function
3291  * without disabling these interrupts.  Interrupt mitigation can be
3292  * added here for better system performance.
3293  */
3294 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3295 {
3296 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3297 }
3298 
3299 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3300 {
3301 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3302 }
3303 
3304 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3305 {
3306 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3307 }
3308 
3309 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3310 {
3311 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3312 }
3313 
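/**
 * hif_ce_msi_configure_irq() - request the wake and per-CE MSI interrupts
 * @scn: hif context
 *
 * Requests the "WAKE" MSI first, then one MSI per initialized copy engine
 * using the "CE" user MSI assignment, wiring up the srng or legacy
 * enable/disable ops as appropriate.  On failure the already requested
 * irqs are freed again.
 *
 * Return: 0 on success, error code otherwise
 */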
3314 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
3315 {
3316 	int ret;
3317 	int ce_id, irq;
3318 	uint32_t msi_data_start;
3319 	uint32_t msi_data_count;
3320 	uint32_t msi_irq_start;
3321 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3322 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3323 
3324 	/* do wake irq assignment */
3325 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
3326 					  &msi_data_count, &msi_data_start,
3327 					  &msi_irq_start);
3328 	if (ret)
3329 		return ret;
3330 
3331 	scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_irq_start);
3332 	ret = request_irq(scn->wake_irq, hif_wake_interrupt_handler, 0,
3333 			  "wlan_wake_irq", scn);
3334 	if (ret)
3335 		return ret;
3336 
3337 	/* do ce irq assignments */
3338 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3339 					    &msi_data_count, &msi_data_start,
3340 					    &msi_irq_start);
3341 	if (ret)
3342 		goto free_wake_irq;
3343 
3344 	if (ce_srng_based(scn)) {
3345 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
3346 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
3347 	} else {
3348 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
3349 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
3350 	}
3351 
3352 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
3353 
3354 	/* needs to match the ce_id -> irq data mapping
3355 	 * used in the srng parameter configuration
3356 	 */
3357 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3358 		unsigned int msi_data = (ce_id % msi_data_count) +
3359 			msi_irq_start;
3360 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3361 		HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)",
3362 			 __func__, ce_id, msi_data, irq,
3363 			 &ce_sc->tasklets[ce_id]);
3364 
3365 		/* implies the ce is also initialized */
3366 		if (!ce_sc->tasklets[ce_id].inited)
3367 			continue;
3368 
3369 		pci_sc->ce_msi_irq_num[ce_id] = irq;
3370 		ret = request_irq(irq, hif_ce_interrupt_handler,
3371 				  IRQF_SHARED,
3372 				  ce_name[ce_id],
3373 				  &ce_sc->tasklets[ce_id]);
3374 		if (ret)
3375 			goto free_irq;
3376 	}
3377 
3378 	return ret;
3379 
3380 free_irq:
3381 	/* the request_irq for the last ce_id failed so skip it. */
3382 	while (ce_id > 0 && ce_id < scn->ce_count) {
3383 		unsigned int msi_data;
3384 
3385 		ce_id--;
3386 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
3387 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3388 		free_irq(irq, &ce_sc->tasklets[ce_id]);
3389 	}
3390 
3391 free_wake_irq:
3392 	free_irq(scn->wake_irq, scn);
3393 	scn->wake_irq = 0;
3394 
3395 	return ret;
3396 }
3397 
3398 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3399 {
3400 	int i;
3401 
3402 	for (i = 0; i < hif_ext_group->numirq; i++)
3403 		disable_irq_nosync(hif_ext_group->os_irq[i]);
3404 }
3405 
3406 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3407 {
3408 	int i;
3409 
3410 	for (i = 0; i < hif_ext_group->numirq; i++)
3411 		enable_irq(hif_ext_group->os_irq[i]);
3412 }
3413 
3414 
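/**
 * hif_pci_configure_grp_irq() - request the irqs of an ext interrupt group
 * @scn: hif context
 * @hif_ext_group: interrupt group to configure
 *
 * Wires up the group's enable/disable/done callbacks, requests each irq
 * of the group and records the OS irq numbers.
 *
 * Return: 0 on success, -EFAULT if any request_irq() fails
 */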
3415 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3416 			      struct hif_exec_context *hif_ext_group)
3417 {
3418 	int ret = 0;
3419 	int irq = 0;
3420 	int j;
3421 
3422 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3423 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3424 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3425 
3426 	for (j = 0; j < hif_ext_group->numirq; j++) {
3427 		irq = hif_ext_group->irq[j];
3428 
3429 		HIF_DBG("%s: request_irq = %d for grp %d",
3430 			  __func__, irq, hif_ext_group->grp_id);
3431 		ret = request_irq(irq,
3432 				  hif_ext_group_interrupt_handler,
3433 				  IRQF_SHARED, "wlan_EXT_GRP",
3434 				  hif_ext_group);
3435 		if (ret) {
3436 			HIF_ERROR("%s: request_irq failed ret = %d",
3437 				  __func__, ret);
3438 			return -EFAULT;
3439 		}
3440 		hif_ext_group->os_irq[j] = irq;
3441 	}
3442 	hif_ext_group->irq_requested = true;
3443 	return 0;
3444 }
3445 
3446 /**
3447  * hif_configure_irq() - configure interrupt
3448  *
3449  * This function configures interrupt(s)
3450  *
3451  * @scn: hif context
3453  *
3454  * Return: 0 - for success
3455  */
3456 int hif_configure_irq(struct hif_softc *scn)
3457 {
3458 	int ret = 0;
3459 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3460 
3461 	HIF_TRACE("%s: E", __func__);
3462 
3463 	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
3464 		scn->request_irq_done = false;
3465 		return 0;
3466 	}
3467 
3468 	hif_init_reschedule_tasklet_work(sc);
3469 
3470 	ret = hif_ce_msi_configure_irq(scn);
3471 	if (ret == 0) {
3472 		goto end;
3473 	}
3474 
3475 	switch (scn->target_info.target_type) {
3476 	case TARGET_TYPE_IPQ4019:
3477 		ret = hif_ahb_configure_legacy_irq(sc);
3478 		break;
3479 	case TARGET_TYPE_QCA8074:
3480 	case TARGET_TYPE_QCA8074V2:
3481 	case TARGET_TYPE_QCA6018:
3482 		ret = hif_ahb_configure_irq(sc);
3483 		break;
3484 	default:
3485 		ret = hif_pci_configure_legacy_irq(sc);
3486 		break;
3487 	}
3488 	if (ret < 0) {
3489 		HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
3490 			__func__, ret);
3491 		return ret;
3492 	}
3493 end:
3494 	scn->request_irq_done = true;
3495 	return 0;
3496 }
3497 
3498 /**
3499  * hif_trigger_timer_irq(): Triggers interrupt on LF_Timer 0
3500  * @scn: hif control structure
3501  *
3502  * Sets IRQ bit in LF Timer Status Address to awake peregrine/swift
3503  * stuck at a polling loop in pcie_address_config in FW
3504  *
3505  * Return: none
3506  */
3507 static void hif_trigger_timer_irq(struct hif_softc *scn)
3508 {
3509 	int tmp;
3510 	/* Trigger IRQ on Peregrine/Swift by setting
3511 	 * IRQ Bit of LF_TIMER 0
3512 	 */
3513 	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3514 						SOC_LF_TIMER_STATUS0_ADDRESS));
3515 	/* Set Raw IRQ Bit */
3516 	tmp |= 1;
3517 	/* SOC_LF_TIMER_STATUS0 */
3518 	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3519 		       SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
3520 }
3521 
3522 /**
3523  * hif_target_sync(): ensure the target is ready
3524  * @scn: hif control structure
3525  *
3526  * Informs fw that we plan to use legacy interrupts so that
3527  * it can begin booting. Ensures that the fw finishes booting
3528  * before continuing. Should be called before trying to write
3529  * to the targets other registers for the first time.
3530  *
3531  * Return: none
3532  */
3533 static void hif_target_sync(struct hif_softc *scn)
3534 {
3535 	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3536 			    PCIE_INTR_ENABLE_ADDRESS),
3537 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3538 	/* read to flush pcie write */
3539 	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3540 			PCIE_INTR_ENABLE_ADDRESS));
3541 
3542 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3543 			PCIE_SOC_WAKE_ADDRESS,
3544 			PCIE_SOC_WAKE_V_MASK);
3545 	while (!hif_targ_is_awake(scn, scn->mem))
3546 		;
3547 
3548 	if (HAS_FW_INDICATOR) {
3549 		int wait_limit = 500;
3550 		int fw_ind = 0;
3551 		int retry_count = 0;
3552 		uint32_t target_type = scn->target_info.target_type;
3553 fw_retry:
3554 		HIF_TRACE("%s: Loop checking FW signal", __func__);
3555 		while (1) {
3556 			fw_ind = hif_read32_mb(scn, scn->mem +
3557 					FW_INDICATOR_ADDRESS);
3558 			if (fw_ind & FW_IND_INITIALIZED)
3559 				break;
3560 			if (wait_limit-- < 0)
3561 				break;
3562 			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3563 			    PCIE_INTR_ENABLE_ADDRESS),
3564 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3565 			    /* read to flush pcie write */
3566 			(void)hif_read32_mb(scn, scn->mem +
3567 			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
3568 
3569 			qdf_mdelay(10);
3570 		}
3571 		if (wait_limit < 0) {
3572 			if (target_type == TARGET_TYPE_AR9888 &&
3573 			    retry_count++ < 2) {
3574 				hif_trigger_timer_irq(scn);
3575 				wait_limit = 500;
3576 				goto fw_retry;
3577 			}
3578 			HIF_TRACE("%s: FW signal timed out",
3579 					__func__);
3580 			qdf_assert_always(0);
3581 		} else {
3582 			HIF_TRACE("%s: Got FW signal, retries = %x",
3583 					__func__, 500-wait_limit);
3584 		}
3585 	}
3586 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3587 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3588 }
3589 
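/**
 * hif_pci_get_soc_info_pld() - fetch BAR info from the platform driver
 * @sc: pci softc
 * @dev: device
 *
 * For pld managed targets the BAR has already been mapped by the platform
 * driver, so copy the virtual and physical addresses reported by pld.
 */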
3590 static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
3591 				     struct device *dev)
3592 {
3593 	struct pld_soc_info info;
3594 
3595 	pld_get_soc_info(dev, &info);
3596 	sc->mem = info.v_addr;
3597 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3598 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3599 }
3600 
3601 static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
3602 				       struct device *dev)
3603 {}
3604 
3605 static bool hif_is_pld_based_target(int device_id)
3606 {
3607 	switch (device_id) {
3608 	case QCA6290_DEVICE_ID:
3609 	case QCA6290_EMULATION_DEVICE_ID:
3610 #ifdef QCA_WIFI_QCA6390
3611 	case QCA6390_DEVICE_ID:
3612 #endif
3613 	case AR6320_DEVICE_ID:
3614 	case QCN7605_DEVICE_ID:
3615 		return true;
3616 	}
3617 	return false;
3618 }
3619 
3620 static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
3621 					   int device_id)
3622 {
3623 	if (hif_is_pld_based_target(device_id)) {
3624 		sc->hif_enable_pci = hif_enable_pci_pld;
3625 		sc->hif_pci_deinit = hif_pci_deinit_pld;
3626 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
3627 	} else {
3628 		sc->hif_enable_pci = hif_enable_pci_nopld;
3629 		sc->hif_pci_deinit = hif_pci_deinit_nopld;
3630 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
3631 	}
3632 }
3633 
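/*
 * Register windowing: on targets where the mapped BAR is smaller than the
 * target register space (e.g. QCN7605 below), register accesses go through
 * a sliding window: a window-select write followed by an access at the
 * offset within the window. register_access_lock serializes those two steps
 * and register_window caches the currently selected window. (This is a
 * summary of the scheme; the actual access helpers live outside this file.)
 */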
3634 #ifdef HIF_REG_WINDOW_SUPPORT
3635 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3636 					       u32 target_type)
3637 {
3638 	switch (target_type) {
3639 	case TARGET_TYPE_QCN7605:
3640 		sc->use_register_windowing = true;
3641 		qdf_spinlock_create(&sc->register_access_lock);
3642 		sc->register_window = 0;
3643 		break;
3644 	default:
3645 		sc->use_register_windowing = false;
3646 	}
3647 }
3648 #else
3649 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3650 					       u32 target_type)
3651 {
3652 	sc->use_register_windowing = false;
3653 }
3654 #endif
3655 
3656 /**
3657  * hif_pci_enable_bus(): enable the PCI bus
3658  * @ol_sc: soft_sc struct
3659  * @dev: device pointer
3660  * @bdev: bus dev pointer
3661  * @bid: bus id pointer
3662  * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
3663  *
3664  * This function enables the PCI bus and prepares the target for access.
3665  *
3666  * Return: QDF_STATUS
3667  */
3668 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3669 			  struct device *dev, void *bdev,
3670 			  const struct hif_bus_id *bid,
3671 			  enum hif_enable_type type)
3672 {
3673 	int ret = 0;
3674 	uint32_t hif_type, target_type;
3675 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3676 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3677 	uint16_t revision_id = 0;
3678 	int probe_again = 0;
3679 	struct pci_dev *pdev = bdev;
3680 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3681 	struct hif_target_info *tgt_info;
3682 
3683 	if (!ol_sc) {
3684 		HIF_ERROR("%s: hif_ctx is NULL", __func__);
3685 		return QDF_STATUS_E_NOMEM;
3686 	}
3687 
3688 	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
3689 		  __func__, hif_get_conparam(ol_sc), id->device);
3690 
3691 	sc->pdev = pdev;
3692 	sc->dev = &pdev->dev;
3693 	sc->devid = id->device;
3694 	sc->cacheline_sz = dma_get_cache_alignment();
3695 	tgt_info = hif_get_target_info_handle(hif_hdl);
3696 	hif_pci_init_deinit_ops_attach(sc, id->device);
3697 	sc->hif_pci_get_soc_info(sc, dev);
3698 again:
3699 	ret = sc->hif_enable_pci(sc, pdev, id);
3700 	if (ret < 0) {
3701 		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
3702 		       __func__, ret);
3703 		goto err_enable_pci;
3704 	}
3705 	HIF_TRACE("%s: hif_enable_pci done", __func__);
3706 
3707 	/* Temporary FIX: disable ASPM on peregrine.
3708 	 * Will be removed after the OTP is programmed
3709 	 */
3710 	hif_disable_power_gating(hif_hdl);
3711 
3712 	device_disable_async_suspend(&pdev->dev);
3713 	pci_read_config_word(pdev, 0x08, &revision_id);
3714 
3715 	ret = hif_get_device_type(id->device, revision_id,
3716 						&hif_type, &target_type);
3717 	if (ret < 0) {
3718 		HIF_ERROR("%s: invalid device id/revision_id", __func__);
3719 		goto err_tgtstate;
3720 	}
3721 	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
3722 		  __func__, hif_type, target_type);
3723 
3724 	hif_register_tbl_attach(ol_sc, hif_type);
3725 	hif_target_register_tbl_attach(ol_sc, target_type);
3726 
3727 	hif_pci_init_reg_windowing_support(sc, target_type);
3728 
3729 	tgt_info->target_type = target_type;
3730 
3731 	if (ce_srng_based(ol_sc)) {
3732 		HIF_TRACE("%s: Skip target wakeup for srng devices", __func__);
3733 	} else {
3734 		ret = hif_pci_probe_tgt_wakeup(sc);
3735 		if (ret < 0) {
3736 			HIF_ERROR("%s: ERROR - hif_pci_probe_tgt_wakeup error = %d",
3737 					__func__, ret);
3738 			if (ret == -EAGAIN)
3739 				probe_again++;
3740 			goto err_tgtstate;
3741 		}
3742 		HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
3743 	}
3744 
3745 	if (!ol_sc->mem_pa) {
3746 		HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
3747 		ret = -EIO;
3748 		goto err_tgtstate;
3749 	}
3750 
3751 	if (!ce_srng_based(ol_sc)) {
3752 		hif_target_sync(ol_sc);
3753 
3754 		if (ADRASTEA_BU)
3755 			hif_vote_link_up(hif_hdl);
3756 	}
3757 
3758 	return QDF_STATUS_SUCCESS;
3759 
3760 err_tgtstate:
3761 	hif_disable_pci(sc);
3762 	sc->pci_enabled = false;
3763 	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
3764 	return QDF_STATUS_E_ABORTED;
3765 
3766 err_enable_pci:
3767 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3768 		int delay_time;
3769 
3770 		HIF_INFO("%s: pci reprobe", __func__);
3771 		/* wait at least 100 ms (longer on later retries) before reprobing */
3772 		delay_time = max(100, 10 * (probe_again * probe_again));
3773 		qdf_mdelay(delay_time);
3774 		goto again;
3775 	}
3776 	return ret;
3777 }
3778 
3779 /**
3780  * hif_pci_irq_enable() - ce_irq_enable
3781  * @scn: hif_softc
3782  * @ce_id: ce_id
3783  *
3784  * Return: void
3785  */
3786 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3787 {
3788 	uint32_t tmp = 1 << ce_id;
3789 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3790 
3791 	qdf_spin_lock_irqsave(&sc->irq_lock);
3792 	scn->ce_irq_summary &= ~tmp;
3793 	if (scn->ce_irq_summary == 0) {
3794 		/* Enable Legacy PCI line interrupts */
3795 		if (LEGACY_INTERRUPTS(sc) &&
3796 			(scn->target_status != TARGET_STATUS_RESET) &&
3797 			(!qdf_atomic_read(&scn->link_suspended))) {
3798 
3799 			hif_write32_mb(scn, scn->mem +
3800 				(SOC_CORE_BASE_ADDRESS |
3801 				PCIE_INTR_ENABLE_ADDRESS),
3802 				HOST_GROUP0_MASK);
3803 
3804 			hif_read32_mb(scn, scn->mem +
3805 					(SOC_CORE_BASE_ADDRESS |
3806 					PCIE_INTR_ENABLE_ADDRESS));
3807 		}
3808 	}
3809 	if (scn->hif_init_done)
3810 		Q_TARGET_ACCESS_END(scn);
3811 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
3812 
3813 	/* check for missed firmware crash */
3814 	hif_fw_interrupt_handler(0, scn);
3815 }
3816 
3817 /**
3818  * hif_pci_irq_disable() - ce_irq_disable
3819  * @scn: hif_softc
3820  * @ce_id: ce_id
3821  *
3822  * only applicable to legacy copy engine...
3823  *
3824  * Return: void
3825  */
3826 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3827 {
3828 	/* For Rome only need to wake up target */
3829 	/* target access is maintained until interrupts are re-enabled */
3830 	Q_TARGET_ACCESS_BEGIN(scn);
3831 }
3832 
3833 #ifdef FEATURE_RUNTIME_PM
3834 
3835 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
3836 {
3837 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3838 
3839 	if (NULL == sc)
3840 		return;
3841 
3842 	sc->pm_stats.runtime_get++;
3843 	pm_runtime_get_noresume(sc->dev);
3844 }
3845 
3846 /**
3847  * hif_pm_runtime_get() - do a get operation on the device
3848  * @hif_ctx: opaque hif context
3849  *
3850  * A get operation will prevent a runtime suspend until a
3851  * corresponding put is done.  This API should be used when sending data.
3852  *
3853  * CONTRARY TO REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED THIS API
3854  * WILL ONLY REQUEST A RESUME AND WILL NOT DO A GET!
3855  *
3856  * Return: 0 if the bus is up and a get has been issued,
3857  *   otherwise an error code.
3858  */
3859 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
3860 {
3861 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3862 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3863 	int ret;
3864 	int pm_state;
3865 
3866 	if (NULL == scn) {
3867 		HIF_ERROR("%s: Could not do runtime get, scn is null",
3868 				__func__);
3869 		return -EFAULT;
3870 	}
3871 
3872 	pm_state = qdf_atomic_read(&sc->pm_state);
3873 
3874 	if (pm_state  == HIF_PM_RUNTIME_STATE_ON ||
3875 			pm_state == HIF_PM_RUNTIME_STATE_NONE) {
3876 		sc->pm_stats.runtime_get++;
3877 		ret = __hif_pm_runtime_get(sc->dev);
3878 
3879 		/* Get can return 1 if the device is already active, just return
3880 		 * success in that case
3881 		 */
3882 		if (ret > 0)
3883 			ret = 0;
3884 
3885 		if (ret)
3886 			hif_pm_runtime_put(hif_ctx);
3887 
3888 		if (ret && ret != -EINPROGRESS) {
3889 			sc->pm_stats.runtime_get_err++;
3890 			HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d",
3891 				__func__, qdf_atomic_read(&sc->pm_state), ret);
3892 		}
3893 
3894 		return ret;
3895 	}
3896 
3897 	sc->pm_stats.request_resume++;
3898 	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
3899 	ret = hif_pm_request_resume(sc->dev);
3900 
3901 	return -EAGAIN;
3902 }
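
/*
 * Usage sketch (illustrative only; the surrounding caller and variable
 * names are hypothetical, not part of this driver): a data-path sender
 * takes a get before touching the bus and balances it with a put when done.
 *
 *	int ret = hif_pm_runtime_get(hif_ctx);
 *
 *	if (ret)	// bus suspended: a resume was requested, retry later
 *		return ret;
 *	// ... send the data ...
 *	hif_pm_runtime_put(hif_ctx);
 */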
3903 
3904 /**
3905  * hif_pm_runtime_put() - do a put operation on the device
3906  * @hif_ctx: opaque hif context
3907  *
3908  * A put operation will allow a runtime suspend after a corresponding
3909  * get was done.  This API should be used when sending data.
3910  *
3911  * This API will fail if runtime pm is stopped, or if the put would
3912  * decrement the usage count below 0.
3913  * Return: 0 if the put is performed
3914  */
3915 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
3916 {
3917 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3918 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3919 	int pm_state, usage_count;
3920 	char *error = NULL;
3921 
3922 	if (NULL == scn) {
3923 		HIF_ERROR("%s: Could not do runtime put, scn is null",
3924 				__func__);
3925 		return -EFAULT;
3926 	}
3927 	usage_count = atomic_read(&sc->dev->power.usage_count);
3928 
3929 	if (usage_count == 1) {
3930 		pm_state = qdf_atomic_read(&sc->pm_state);
3931 
3932 		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
3933 			error = "Ignoring unexpected put when runtime pm is disabled";
3934 
3935 	} else if (usage_count == 0) {
3936 		error = "PUT Without a Get Operation";
3937 	}
3938 
3939 	if (error) {
3940 		hif_pci_runtime_pm_warn(sc, error);
3941 		return -EINVAL;
3942 	}
3943 
3944 	sc->pm_stats.runtime_put++;
3945 
3946 	hif_pm_runtime_mark_last_busy(sc->dev);
3947 	hif_pm_runtime_put_auto(sc->dev);
3948 
3949 	return 0;
3950 }
3951 
3952 
3953 /**
3954  * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
3955  *                                      reason
3956  * @hif_sc: pci context
3957  * @lock: runtime_pm lock being acquired
3958  *
3959  * Return 0 if successful.
3960  */
3961 static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc *hif_sc,
3962 					    struct hif_pm_runtime_lock *lock)
3963 {
3964 	int ret = 0;
3965 
3966 	/*
3967 	 * We shouldn't set context->timeout to zero here when the context
3968 	 * is active, as the timeout APIs can be called back to back for
3969 	 * the same context.
3970 	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
3971 	 * Instead, context->timeout is set to zero in the
3972 	 * hif_pm_runtime_prevent_suspend API so that the timeout version is
3973 	 * no longer active and the list entry is deleted during allow suspend.
3974 	 */
3975 	if (lock->active)
3976 		return 0;
3977 
3978 	ret = __hif_pm_runtime_get(hif_sc->dev);
3979 
3980 	/*
3981 	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
3982 	 * RPM_SUSPENDING. Any other negative value is an error.
3983 	 * We shouldn't do a runtime_put here even on error; allow suspend
3984 	 * is called later with this context and the usage count is
3985 	 * decremented there, so suspend stays prevented until then.
3986 	 */
3987 
3988 	if (ret < 0 && ret != -EINPROGRESS) {
3989 		hif_sc->pm_stats.runtime_get_err++;
3990 		hif_pci_runtime_pm_warn(hif_sc,
3991 				"Prevent Suspend Runtime PM Error");
3992 	}
3993 
3994 	hif_sc->prevent_suspend_cnt++;
3995 
3996 	lock->active = true;
3997 
3998 	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);
3999 
4000 	hif_sc->pm_stats.prevent_suspend++;
4001 
4002 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4003 		hif_pm_runtime_state_to_string(
4004 			qdf_atomic_read(&hif_sc->pm_state)),
4005 					ret);
4006 
4007 	return ret;
4008 }
4009 
4010 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
4011 		struct hif_pm_runtime_lock *lock)
4012 {
4013 	int ret = 0;
4014 	int usage_count;
4015 
4016 	if (hif_sc->prevent_suspend_cnt == 0)
4017 		return ret;
4018 
4019 	if (!lock->active)
4020 		return ret;
4021 
4022 	usage_count = atomic_read(&hif_sc->dev->power.usage_count);
4023 
4024 	/*
4025 	 * During Driver unload, platform driver increments the usage
4026 	 * count to prevent any runtime suspend getting called.
4027 	 * So during driver load, in the HIF_PM_RUNTIME_STATE_NONE state,
4028 	 * the usage_count should be one. Ideally this shouldn't happen, as
4029 	 * lock->active should be true for allow suspend to be reached.
4030 	 * Handle this case here to prevent any failures.
4031 	 */
4032 	if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
4033 				&& usage_count == 1) || usage_count == 0) {
4034 		hif_pci_runtime_pm_warn(hif_sc,
4035 				"Allow without a prevent suspend");
4036 		return -EINVAL;
4037 	}
4038 
4039 	list_del(&lock->list);
4040 
4041 	hif_sc->prevent_suspend_cnt--;
4042 
4043 	lock->active = false;
4044 	lock->timeout = 0;
4045 
4046 	hif_pm_runtime_mark_last_busy(hif_sc->dev);
4047 	ret = hif_pm_runtime_put_auto(hif_sc->dev);
4048 
4049 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4050 		hif_pm_runtime_state_to_string(
4051 			qdf_atomic_read(&hif_sc->pm_state)),
4052 					ret);
4053 
4054 	hif_sc->pm_stats.allow_suspend++;
4055 	return ret;
4056 }
4057 
4058 /**
4059  * hif_pm_runtime_lock_timeout_fn() - runtime lock timeout callback
4060  * @data: callback data that is the pci context
4061  *
4062  * If runtime locks are acquired with a timeout, this function releases
4063  * the expired locks when the runtime lock timer fires.
4064  *
4065  * Return: none
4066  */
4067 static void hif_pm_runtime_lock_timeout_fn(void *data)
4068 {
4069 	struct hif_pci_softc *hif_sc = data;
4070 	unsigned long timer_expires;
4071 	struct hif_pm_runtime_lock *context, *temp;
4072 
4073 	spin_lock_bh(&hif_sc->runtime_lock);
4074 
4075 	timer_expires = hif_sc->runtime_timer_expires;
4076 
4077 	/* Make sure we are not called too early, this should take care of
4078 	 * following case
4079 	 *
4080 	 * CPU0                         CPU1 (timeout function)
4081 	 * ----                         ----------------------
4082 	 * spin_lock_irq
4083 	 *                              timeout function called
4084 	 *
4085 	 * mod_timer()
4086 	 *
4087 	 * spin_unlock_irq
4088 	 *                              spin_lock_irq
4089 	 */
4090 	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
4091 		hif_sc->runtime_timer_expires = 0;
4092 		list_for_each_entry_safe(context, temp,
4093 				&hif_sc->prevent_suspend_list, list) {
4094 			if (context->timeout) {
4095 				__hif_pm_runtime_allow_suspend(hif_sc, context);
4096 				hif_sc->pm_stats.allow_suspend_timeout++;
4097 			}
4098 		}
4099 	}
4100 
4101 	spin_unlock_bh(&hif_sc->runtime_lock);
4102 }
4103 
4104 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
4105 		struct hif_pm_runtime_lock *data)
4106 {
4107 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4108 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4109 	struct hif_pm_runtime_lock *context = data;
4110 
4111 	if (!sc->hif_config.enable_runtime_pm)
4112 		return 0;
4113 
4114 	if (!context)
4115 		return -EINVAL;
4116 
4117 	if (in_irq())
4118 		WARN_ON(1);
4119 
4120 	spin_lock_bh(&hif_sc->runtime_lock);
4121 	context->timeout = 0;
4122 	__hif_pm_runtime_prevent_suspend(hif_sc, context);
4123 	spin_unlock_bh(&hif_sc->runtime_lock);
4124 
4125 	return 0;
4126 }
4127 
4128 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
4129 				struct hif_pm_runtime_lock *data)
4130 {
4131 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4132 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4133 	struct hif_pm_runtime_lock *context = data;
4134 
4135 	if (!sc->hif_config.enable_runtime_pm)
4136 		return 0;
4137 
4138 	if (!context)
4139 		return -EINVAL;
4140 
4141 	if (in_irq())
4142 		WARN_ON(1);
4143 
4144 	spin_lock_bh(&hif_sc->runtime_lock);
4145 
4146 	__hif_pm_runtime_allow_suspend(hif_sc, context);
4147 
4148 	/* The list can also be empty in the case where we have a single
4149 	 * context in the list and the allow suspend came in before the
4150 	 * timer expired, so the context was already deleted from the
4151 	 * list above.
4152 	 * When the list is empty the prevent_suspend count will be zero.
4153 	 */
4154 	if (hif_sc->prevent_suspend_cnt == 0 &&
4155 			hif_sc->runtime_timer_expires > 0) {
4156 		qdf_timer_free(&hif_sc->runtime_timer);
4157 		hif_sc->runtime_timer_expires = 0;
4158 	}
4159 
4160 	spin_unlock_bh(&hif_sc->runtime_lock);
4161 
4162 	return 0;
4163 }
4164 
4165 /**
4166  * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
4167  * @ol_sc: HIF context
4168  * @lock: which lock is being acquired
4169  * @delay: Timeout in milliseconds
4170  *
4171  * Prevent runtime suspend with a timeout after which runtime suspend would be
4172  * allowed. This API uses a single timer to allow the suspend and timer is
4173  * modified if the timeout is changed before timer fires.
4174  * If the timeout is less than autosuspend_delay then use mark_last_busy instead
4175  * of starting the timer.
4176  *
4177  * It is wise to try not to use this API and correct the design if possible.
4178  *
4179  * Return: 0 on success and negative error code on failure
4180  */
4181 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
4182 		struct hif_pm_runtime_lock *lock, unsigned int delay)
4183 {
4184 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4185 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);
4186 
4187 	int ret = 0;
4188 	unsigned long expires;
4189 	struct hif_pm_runtime_lock *context = lock;
4190 
4191 	if (hif_is_load_or_unload_in_progress(sc)) {
4192 		HIF_ERROR("%s: Load/unload in progress, ignore!",
4193 				__func__);
4194 		return -EINVAL;
4195 	}
4196 
4197 	if (hif_is_recovery_in_progress(sc)) {
4198 		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
4199 		return -EINVAL;
4200 	}
4201 
4202 	if (!sc->hif_config.enable_runtime_pm)
4203 		return 0;
4204 
4205 	if (!context)
4206 		return -EINVAL;
4207 
4208 	if (in_irq())
4209 		WARN_ON(1);
4210 
4211 	/*
4212 	 * Don't use internal timer if the timeout is less than auto suspend
4213 	 * delay.
4214 	 */
4215 	if (delay <= hif_sc->dev->power.autosuspend_delay) {
4216 		hif_pm_request_resume(hif_sc->dev);
4217 		hif_pm_runtime_mark_last_busy(hif_sc->dev);
4218 		return ret;
4219 	}
4220 
4221 	expires = jiffies + msecs_to_jiffies(delay);
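	/* a stored value of 0 means "no timer armed", so never store 0 */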
4222 	expires += !expires;
4223 
4224 	spin_lock_bh(&hif_sc->runtime_lock);
4225 
4226 	context->timeout = delay;
4227 	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
4228 	hif_sc->pm_stats.prevent_suspend_timeout++;
4229 
4230 	/* Modify the timer only if new timeout is after already configured
4231 	 * timeout
4232 	 */
4233 	if (time_after(expires, hif_sc->runtime_timer_expires)) {
4234 		qdf_timer_mod(&hif_sc->runtime_timer, delay);
4235 		hif_sc->runtime_timer_expires = expires;
4236 	}
4237 
4238 	spin_unlock_bh(&hif_sc->runtime_lock);
4239 
4240 	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
4241 		hif_pm_runtime_state_to_string(
4242 			qdf_atomic_read(&hif_sc->pm_state)),
4243 					delay, ret);
4244 
4245 	return ret;
4246 }
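
/*
 * Usage sketch (illustrative only; rtpm_lock is a hypothetical
 * qdf_runtime_lock_t set up earlier with hif_runtime_lock_init()): hold off
 * runtime suspend for up to 500 ms while waiting for a response.
 *
 *	hif_pm_runtime_prevent_suspend_timeout(hif_ctx, rtpm_lock.lock, 500);
 *	// ... wait for the response; if it never arrives the runtime timer
 *	// releases the lock after 500 ms ...
 *	hif_pm_runtime_allow_suspend(hif_ctx, rtpm_lock.lock);
 */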
4247 
4248 /**
4249  * hif_runtime_lock_init() - API to initialize Runtime PM context
4250  * @lock: QDF runtime lock to initialize
4251  * @name: Context name
4252  *
4253  * This API allocates the Runtime PM context for the caller and
4254  * stores it in @lock.
4255  * Return: 0 on success, -ENOMEM on allocation failure
4256  */
4257 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
4258 {
4259 	struct hif_pm_runtime_lock *context;
4260 
4261 	HIF_INFO("Initializing Runtime PM wakelock %s", name);
4262 
4263 	context = qdf_mem_malloc(sizeof(*context));
4264 	if (!context)
4265 		return -ENOMEM;
4266 
4267 	context->name = name ? name : "Default";
4268 	lock->lock = context;
4269 
4270 	return 0;
4271 }
4272 
4273 /**
4274  * hif_runtime_lock_deinit() - This API frees the runtime pm context
4275  * @hif_ctx: HIF opaque context
4276  * @data: Runtime PM context
4277  * Return: void
4278  */
4279 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
4280 			     struct hif_pm_runtime_lock *data)
4281 {
4282 	struct hif_pm_runtime_lock *context = data;
4283 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4284 
4285 	if (!context) {
4286 		HIF_ERROR("Runtime PM wakelock context is NULL");
4287 		return;
4288 	}
4289 
4290 	HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);
4291 
4292 	/*
4293 	 * Ensure to delete the context list entry and reduce the usage count
4294 	 * before freeing the context if context is active.
4295 	 */
4296 	if (sc) {
4297 		spin_lock_bh(&sc->runtime_lock);
4298 		__hif_pm_runtime_allow_suspend(sc, context);
4299 		spin_unlock_bh(&sc->runtime_lock);
4300 	}
4301 
4302 	qdf_mem_free(context);
4303 }
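
/*
 * Lifecycle sketch for the runtime PM lock APIs above (illustrative only;
 * the variable name rtpm_lock is hypothetical):
 *
 *	qdf_runtime_lock_t rtpm_lock;
 *
 *	hif_runtime_lock_init(&rtpm_lock, "my_feature");
 *	hif_pm_runtime_prevent_suspend(hif_ctx, rtpm_lock.lock);
 *	// ... section that must not runtime-suspend ...
 *	hif_pm_runtime_allow_suspend(hif_ctx, rtpm_lock.lock);
 *	hif_runtime_lock_deinit(hif_ctx, rtpm_lock.lock);
 */
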
4304 #endif /* FEATURE_RUNTIME_PM */
4305 
4306 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
4307 {
4308 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4309 
4310 	/* legacy case only has one irq */
4311 	return pci_scn->irq;
4312 }
4313 
4314 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
4315 {
4316 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
4317 	struct hif_target_info *tgt_info;
4318 
4319 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
4320 
4321 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
4322 	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
4323 	    tgt_info->target_type == TARGET_TYPE_QCA8074) {
4324 		/*
4325 		 * Need to consider offset's memtype for QCA6290/QCA8074,
4326 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
4327 		 * well initialized/defined.
4328 		 */
4329 		return 0;
4330 	}
4331 
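	/*
	 * Accept the access if it falls inside the target DRAM range or
	 * fits entirely within the mapped BAR (sc->mem_len); refuse
	 * everything else.
	 */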
4332 	if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE)
4333 		 || (offset + sizeof(unsigned int) <= sc->mem_len)) {
4334 		return 0;
4335 	}
4336 
4337 	HIF_TRACE("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)\n",
4338 		  offset, (uint32_t)(offset + sizeof(unsigned int)),
4339 		  sc->mem_len);
4340 
4341 	return -EINVAL;
4342 }
4343 
4344 /**
4345  * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
4346  * @scn: hif context
4347  *
4348  * Return: true if soc needs driver bmi otherwise false
4349  */
4350 bool hif_pci_needs_bmi(struct hif_softc *scn)
4351 {
4352 	return !ce_srng_based(scn);
4353 }
4354