xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/pcie/if_pci.c (revision 0626a4da6c07f30da06dd6747e8cc290a60371d8)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <linux/pci.h>
20 #include <linux/slab.h>
21 #include <linux/interrupt.h>
22 #include <linux/if_arp.h>
23 #ifdef CONFIG_PCI_MSM
24 #include <linux/msm_pcie.h>
25 #endif
26 #include "hif_io32.h"
27 #include "if_pci.h"
28 #include "hif.h"
29 #include "target_type.h"
30 #include "hif_main.h"
31 #include "ce_main.h"
32 #include "ce_api.h"
33 #include "ce_internal.h"
34 #include "ce_reg.h"
35 #include "ce_bmi.h"
36 #include "regtable.h"
37 #include "hif_hw_version.h"
38 #include <linux/debugfs.h>
39 #include <linux/seq_file.h>
40 #include "qdf_status.h"
41 #include "qdf_atomic.h"
42 #include "pld_common.h"
43 #include "mp_dev.h"
44 #include "hif_debug.h"
45 
46 #include "if_pci_internal.h"
47 #include "ce_tasklet.h"
48 #include "targaddrs.h"
49 #include "hif_exec.h"
50 
51 #include "pci_api.h"
52 #include "ahb_api.h"
53 
54 /* Maximum ms timeout for host to wake up target */
55 #define PCIE_WAKE_TIMEOUT 1000
56 #define RAMDUMP_EVENT_TIMEOUT 2500
57 
58 /* Setting SOC_GLOBAL_RESET during driver unload causes an intermittent
59  * PCIe data bus error.
60  * As a workaround for this issue, change the reset sequence to
61  * use a Target CPU warm reset instead of SOC_GLOBAL_RESET.
62  */
63 #define CPU_WARM_RESET_WAR
64 
65 #ifdef CONFIG_WIN
66 extern int32_t frac, intval, ar900b_20_targ_clk, qca9888_20_targ_clk;
67 #endif
68 
69 /*
70  * Top-level interrupt handler for all PCI interrupts from a Target.
71  * When a block of MSI interrupts is allocated, this top-level handler
72  * is not used; instead, we directly call the correct sub-handler.
73  */
74 struct ce_irq_reg_table {
75 	uint32_t irq_enable;
76 	uint32_t irq_status;
77 };
78 
79 #ifndef QCA_WIFI_3_0_ADRASTEA
80 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
81 {
82 }
83 #else
84 static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
85 {
86 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
87 	unsigned int target_enable0, target_enable1;
88 	unsigned int target_cause0, target_cause1;
89 
90 	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
91 	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
92 	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
93 	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);
94 
95 	if ((target_enable0 & target_cause0) ||
96 	    (target_enable1 & target_cause1)) {
97 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
98 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);
99 
100 		if (scn->notice_send)
101 			pld_intr_notify_q6(sc->dev);
102 	}
103 }
104 #endif
105 
106 
107 /**
108  * pci_dispatch_interrupt() - dispatch pending CE interrupts
109  * @scn: hif context
110  *
111  * Return: N/A
112  */
113 static void pci_dispatch_interrupt(struct hif_softc *scn)
114 {
115 	uint32_t intr_summary;
116 	int id;
117 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
118 
119 	if (scn->hif_init_done != true)
120 		return;
121 
122 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
123 		return;
124 
125 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
126 
127 	if (intr_summary == 0) {
128 		if ((scn->target_status != TARGET_STATUS_RESET) &&
129 			(!qdf_atomic_read(&scn->link_suspended))) {
130 
131 			hif_write32_mb(scn, scn->mem +
132 				(SOC_CORE_BASE_ADDRESS |
133 				PCIE_INTR_ENABLE_ADDRESS),
134 				HOST_GROUP0_MASK);
135 
136 			hif_read32_mb(scn, scn->mem +
137 					(SOC_CORE_BASE_ADDRESS |
138 					PCIE_INTR_ENABLE_ADDRESS));
139 		}
140 		Q_TARGET_ACCESS_END(scn);
141 		return;
142 	}
143 	Q_TARGET_ACCESS_END(scn);
144 
145 	scn->ce_irq_summary = intr_summary;
146 	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
147 		if (intr_summary & (1 << id)) {
148 			intr_summary &= ~(1 << id);
149 			ce_dispatch_interrupt(id,  &hif_state->tasklets[id]);
150 		}
151 	}
152 }
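/*
 * Illustrative sketch, not part of the driver: the dispatch loop above
 * peels one copy-engine id per set bit of the interrupt summary.  For
 * example, an intr_summary of 0x5 (bits 0 and 2 set) dispatches CE 0
 * and CE 2 and then stops, since the remaining mask reaches zero:
 *
 *	uint32_t summary = 0x5;
 *	int id;
 *
 *	for (id = 0; summary; id++) {
 *		if (summary & (1 << id)) {
 *			summary &= ~(1 << id);
 *			handle_ce(id);	-- hypothetical per-CE handler
 *		}
 *	}
 */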
153 
154 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
155 {
156 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
157 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
158 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
159 
160 	volatile int tmp;
161 	uint16_t val = 0;
162 	uint32_t bar0 = 0;
163 	uint32_t fw_indicator_address, fw_indicator;
164 	bool ssr_irq = false;
165 	unsigned int host_cause, host_enable;
166 
167 	if (LEGACY_INTERRUPTS(sc)) {
168 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
169 			return IRQ_HANDLED;
170 
171 		if (ADRASTEA_BU) {
172 			host_enable = hif_read32_mb(sc, sc->mem +
173 						    PCIE_INTR_ENABLE_ADDRESS);
174 			host_cause = hif_read32_mb(sc, sc->mem +
175 						   PCIE_INTR_CAUSE_ADDRESS);
176 			if (!(host_enable & host_cause)) {
177 				hif_pci_route_adrastea_interrupt(sc);
178 				return IRQ_HANDLED;
179 			}
180 		}
181 
182 		/* Clear Legacy PCI line interrupts
183 		 * IMPORTANT: the INTR_CLR register has to be set
184 		 * after INTR_ENABLE is set to 0; otherwise the
185 		 * interrupt cannot actually be cleared
186 		 */
187 		hif_write32_mb(sc, sc->mem +
188 			      (SOC_CORE_BASE_ADDRESS |
189 			       PCIE_INTR_ENABLE_ADDRESS), 0);
190 
191 		hif_write32_mb(sc, sc->mem +
192 			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
193 			       ADRASTEA_BU ?
194 			       (host_enable & host_cause) :
195 			      HOST_GROUP0_MASK);
196 
197 		if (ADRASTEA_BU)
198 			hif_write32_mb(sc, sc->mem + 0x2f100c,
199 				       (host_cause >> 1));
200 
201 		/* IMPORTANT: this extra read transaction is required to
202 		 * flush the posted write buffer
203 		 */
204 		if (!ADRASTEA_BU) {
205 			tmp =
206 				hif_read32_mb(sc, sc->mem +
207 					     (SOC_CORE_BASE_ADDRESS |
208 					      PCIE_INTR_ENABLE_ADDRESS));
209 
210 			if (tmp == 0xdeadbeef) {
211 				HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
212 				       __func__);
213 
214 				pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
215 				HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
216 				       __func__, val);
217 
218 				pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
219 				HIF_ERROR("%s: PCI Device ID = 0x%04x",
220 				       __func__, val);
221 
222 				pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
223 				HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
224 				       val);
225 
226 				pci_read_config_word(sc->pdev, PCI_STATUS, &val);
227 				HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
228 				       val);
229 
230 				pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
231 						      &bar0);
232 				HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
233 				       bar0);
234 
235 				HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
236 					  __func__,
237 					  hif_read32_mb(sc, sc->mem +
238 							PCIE_LOCAL_BASE_ADDRESS
239 							+ RTC_STATE_ADDRESS));
240 				HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
241 					  __func__,
242 					  hif_read32_mb(sc, sc->mem +
243 							PCIE_LOCAL_BASE_ADDRESS
244 							+ PCIE_SOC_WAKE_ADDRESS));
245 				HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
246 					  __func__,
247 					  hif_read32_mb(sc, sc->mem + 0x80008),
248 					  hif_read32_mb(sc, sc->mem + 0x8000c));
249 				HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
250 					  __func__,
251 					  hif_read32_mb(sc, sc->mem + 0x80010),
252 					  hif_read32_mb(sc, sc->mem + 0x80014));
253 				HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
254 					  __func__,
255 					  hif_read32_mb(sc, sc->mem + 0x80018),
256 					  hif_read32_mb(sc, sc->mem + 0x8001c));
257 				QDF_BUG(0);
258 			}
259 
260 			PCI_CLR_CAUSE0_REGISTER(sc);
261 		}
262 
263 		if (HAS_FW_INDICATOR) {
264 			fw_indicator_address = hif_state->fw_indicator_address;
265 			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
266 			if ((fw_indicator != ~0) &&
267 			   (fw_indicator & FW_IND_EVENT_PENDING))
268 				ssr_irq = true;
269 		}
270 
271 		if (Q_TARGET_ACCESS_END(scn) < 0)
272 			return IRQ_HANDLED;
273 	}
274 	/* TBDXXX: Add support for WMAC */
275 
276 	if (ssr_irq) {
277 		sc->irq_event = irq;
278 		qdf_atomic_set(&scn->tasklet_from_intr, 1);
279 
280 		qdf_atomic_inc(&scn->active_tasklet_cnt);
281 		tasklet_schedule(&sc->intr_tq);
282 	} else {
283 		pci_dispatch_interrupt(scn);
284 	}
285 
286 	return IRQ_HANDLED;
287 }
288 
289 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
290 {
291 	return true;            /* FIX THIS */
292 }
293 
294 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
295 {
296 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
297 	int i = 0;
298 
299 	if (!irq || !size) {
300 		return -EINVAL;
301 	}
302 
303 	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
304 		irq[0] = sc->irq;
305 		return 1;
306 	}
307 
308 	if (sc->num_msi_intrs > size) {
309 		qdf_print("Not enough space in irq buffer to return irqs");
310 		return -EINVAL;
311 	}
312 
313 	for (i = 0; i < sc->num_msi_intrs; i++) {
314 		irq[i] = sc->irq +  i + MSI_ASSIGN_CE_INITIAL;
315 	}
316 
317 	return sc->num_msi_intrs;
318 }
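/*
 * Illustrative sketch, not part of the driver (CE_COUNT_MAX as the
 * worst-case bound and the variable names are assumptions): a caller
 * sizes the buffer up front and checks the returned count:
 *
 *	int irqs[CE_COUNT_MAX];
 *	int n = hif_get_irq_num(hif_hdl, irqs, ARRAY_SIZE(irqs));
 *
 *	if (n < 0)
 *		return n;	-- -EINVAL: NULL buffer or too small
 *
 * On success irqs[0..n-1] hold either the single legacy/shared line or
 * sc->irq + MSI_ASSIGN_CE_INITIAL + i for each of the n MSI vectors.
 */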
319 
320 
321 /**
322  * hif_pci_cancel_deferred_target_sleep() - cancel the deferred target sleep
323  * @scn: hif_softc
324  *
325  * Return: void
326  */
327 #if CONFIG_ATH_PCIE_MAX_PERF == 0
328 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
329 {
330 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
331 	A_target_id_t pci_addr = scn->mem;
332 
333 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
334 	/*
335 	 * If the deferred sleep timer is running cancel it
336 	 * and put the soc into sleep.
337 	 */
338 	if (hif_state->fake_sleep == true) {
339 		qdf_timer_stop(&hif_state->sleep_timer);
340 		if (hif_state->verified_awake == false) {
341 			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
342 				      PCIE_SOC_WAKE_ADDRESS,
343 				      PCIE_SOC_WAKE_RESET);
344 		}
345 		hif_state->fake_sleep = false;
346 	}
347 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
348 }
349 #else
350 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
351 {
352 }
353 #endif
354 
355 #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
356 	hif_read32_mb(sc, (char *)(mem) + \
357 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
358 
359 #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
360 	hif_write32_mb(sc, ((char *)(mem) + \
361 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
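/*
 * Illustrative note: these helpers simply fold the PCIe-local register
 * window into the BAR offset.  For instance,
 *
 *	val = A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS);
 *
 * expands to
 *
 *	val = hif_read32_mb(sc, (char *)(mem) +
 *			    PCIE_LOCAL_BASE_ADDRESS +
 *			    (uint32_t)(RTC_STATE_ADDRESS));
 */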
362 
363 #ifdef QCA_WIFI_3_0
364 /**
365  * hif_targ_is_awake() - check to see if the target is awake
366  * @hif_ctx: hif context
367  * @mem: mapped target register window (unused)
368  * emulation never goes to sleep
369  *
370  * Return: true if target is awake
371  */
372 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
373 {
374 	return true;
375 }
376 #else
377 /**
378  * hif_targ_is_awake() - check to see if the target is awake
379  * @scn: hif context
380  * @mem: mapped target register window
381  * Return: true if the target's clocks are on
382  */
383 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
384 {
385 	uint32_t val;
386 
387 	if (scn->recovery)
388 		return false;
389 	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
390 		+ RTC_STATE_ADDRESS);
391 	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
392 }
393 #endif
394 
395 #define ATH_PCI_RESET_WAIT_MAX 10       /* ms */
396 static void hif_pci_device_reset(struct hif_pci_softc *sc)
397 {
398 	void __iomem *mem = sc->mem;
399 	int i;
400 	uint32_t val;
401 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
402 
403 	if (!scn->hostdef)
404 		return;
405 
406 	/* NB: Don't check resetok here.  This form of reset
407 	 * is integral to correct operation.
408 	 */
409 
410 	if (!SOC_GLOBAL_RESET_ADDRESS)
411 		return;
412 
413 	if (!mem)
414 		return;
415 
416 	HIF_ERROR("%s: Reset Device", __func__);
417 
418 	/*
419 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
420 	 * writing WAKE_V, the Target may scribble over Host memory!
421 	 */
422 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
423 			       PCIE_SOC_WAKE_V_MASK);
424 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
425 		if (hif_targ_is_awake(scn, mem))
426 			break;
427 
428 		qdf_mdelay(1);
429 	}
430 
431 	/* Put Target, including PCIe, into RESET. */
432 	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
433 	val |= 1;
434 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
435 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
436 		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
437 		    RTC_STATE_COLD_RESET_MASK)
438 			break;
439 
440 		qdf_mdelay(1);
441 	}
442 
443 	/* Pull Target, including PCIe, out of RESET. */
444 	val &= ~1;
445 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
446 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
447 		if (!
448 		    (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
449 		     RTC_STATE_COLD_RESET_MASK))
450 			break;
451 
452 		qdf_mdelay(1);
453 	}
454 
455 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
456 			       PCIE_SOC_WAKE_RESET);
457 }
458 
459 /* CPU warm reset function
460  * Steps:
461  * 1. Disable all pending interrupts so none are pending across the WARM reset
462  * 2. Clear the FW_INDICATOR_ADDRESS so the Target CPU initializes FW
463  *    correctly on WARM reset
464  * 3. Clear the TARGET CPU LF timer interrupt
465  * 4. Reset all CEs to clear any pending CE transactions
466  * 5. Warm reset CPU
467  */
468 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
469 {
470 	void __iomem *mem = sc->mem;
471 	int i;
472 	uint32_t val;
473 	uint32_t fw_indicator;
474 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
475 
476 	/* NB: Don't check resetok here.  This form of reset is
477 	 * integral to correct operation.
478 	 */
479 
480 	if (!mem)
481 		return;
482 
483 	HIF_INFO_MED("%s: Target Warm Reset", __func__);
484 
485 	/*
486 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
487 	 * writing WAKE_V, the Target may scribble over Host memory!
488 	 */
489 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
490 			       PCIE_SOC_WAKE_V_MASK);
491 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
492 		if (hif_targ_is_awake(scn, mem))
493 			break;
494 		qdf_mdelay(1);
495 	}
496 
497 	/*
498 	 * Disable Pending interrupts
499 	 */
500 	val =
501 		hif_read32_mb(sc, mem +
502 			     (SOC_CORE_BASE_ADDRESS |
503 			      PCIE_INTR_CAUSE_ADDRESS));
504 	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
505 		    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
506 	/* Target CPU Intr Cause */
507 	val = hif_read32_mb(sc, mem +
508 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
509 	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);
510 
511 	val =
512 		hif_read32_mb(sc, mem +
513 			     (SOC_CORE_BASE_ADDRESS |
514 			      PCIE_INTR_ENABLE_ADDRESS));
515 	hif_write32_mb(sc, (mem +
516 		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
517 	hif_write32_mb(sc, (mem +
518 		       (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
519 		       HOST_GROUP0_MASK);
520 
521 	qdf_mdelay(100);
522 
523 	/* Clear FW_INDICATOR_ADDRESS */
524 	if (HAS_FW_INDICATOR) {
525 		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
526 		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
527 	}
528 
529 	/* Clear Target LF Timer interrupts */
530 	val =
531 		hif_read32_mb(sc, mem +
532 			     (RTC_SOC_BASE_ADDRESS +
533 			      SOC_LF_TIMER_CONTROL0_ADDRESS));
534 	HIF_INFO_MED("%s: addr 0x%x :  0x%x", __func__,
535 	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
536 	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
537 	hif_write32_mb(sc, mem +
538 		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
539 		      val);
540 
541 	/* Reset CE */
542 	val =
543 		hif_read32_mb(sc, mem +
544 			     (RTC_SOC_BASE_ADDRESS |
545 			      SOC_RESET_CONTROL_ADDRESS));
546 	val |= SOC_RESET_CONTROL_CE_RST_MASK;
547 	hif_write32_mb(sc, (mem +
548 		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
549 		      val);
550 	val =
551 		hif_read32_mb(sc, mem +
552 			     (RTC_SOC_BASE_ADDRESS |
553 			      SOC_RESET_CONTROL_ADDRESS));
554 	qdf_mdelay(10);
555 
556 	/* CE unreset */
557 	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
558 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
559 		       SOC_RESET_CONTROL_ADDRESS), val);
560 	val =
561 		hif_read32_mb(sc, mem +
562 			     (RTC_SOC_BASE_ADDRESS |
563 			      SOC_RESET_CONTROL_ADDRESS));
564 	qdf_mdelay(10);
565 
566 	/* Read Target CPU Intr Cause */
567 	val = hif_read32_mb(sc, mem +
568 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
569 	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
570 		    __func__, val);
571 
572 	/* CPU warm RESET */
573 	val =
574 		hif_read32_mb(sc, mem +
575 			     (RTC_SOC_BASE_ADDRESS |
576 			      SOC_RESET_CONTROL_ADDRESS));
577 	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
578 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
579 		       SOC_RESET_CONTROL_ADDRESS), val);
580 	val =
581 		hif_read32_mb(sc, mem +
582 			     (RTC_SOC_BASE_ADDRESS |
583 			      SOC_RESET_CONTROL_ADDRESS));
584 	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
585 		    __func__, val);
586 
587 	qdf_mdelay(100);
588 	HIF_INFO_MED("%s: Target Warm reset complete", __func__);
589 
590 }
591 
592 #ifndef QCA_WIFI_3_0
593 /* only applicable to legacy ce */
594 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
595 {
596 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
597 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
598 	void __iomem *mem = sc->mem;
599 	uint32_t val;
600 
601 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
602 		return ATH_ISR_NOSCHED;
603 	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
604 	if (Q_TARGET_ACCESS_END(scn) < 0)
605 		return ATH_ISR_SCHED;
606 
607 	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);
608 
609 	if (val & FW_IND_HELPER)
610 		return 0;
611 
612 	return 1;
613 }
614 #endif
615 
616 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
617 {
618 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
619 	uint16_t device_id = 0;
620 	uint32_t val;
621 	uint16_t timeout_count = 0;
622 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
623 
624 	/* Check device ID from PCIe configuration space for link status */
625 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
626 	if (device_id != sc->devid) {
627 		HIF_ERROR("%s: device ID does not match (read 0x%x, expect 0x%x)",
628 			  __func__, device_id, sc->devid);
629 		return -EACCES;
630 	}
631 
632 	/* Check PCIe local register for bar/memory access */
633 	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
634 			   RTC_STATE_ADDRESS);
635 	HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);
636 
637 	/* Try to wake up the target if it sleeps */
638 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
639 		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
640 	HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
641 		hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
642 		PCIE_SOC_WAKE_ADDRESS));
643 
644 	/* Check if the target can be woken up */
645 	while (!hif_targ_is_awake(scn, sc->mem)) {
646 		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
647 			HIF_ERROR("%s: wake up timeout, %08x, %08x",
648 				__func__,
649 				hif_read32_mb(sc, sc->mem +
650 					     PCIE_LOCAL_BASE_ADDRESS +
651 					     RTC_STATE_ADDRESS),
652 				hif_read32_mb(sc, sc->mem +
653 					     PCIE_LOCAL_BASE_ADDRESS +
654 					PCIE_SOC_WAKE_ADDRESS));
655 			return -EACCES;
656 		}
657 
658 		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
659 			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
660 
661 		qdf_mdelay(100);
662 		timeout_count += 100;
663 	}
664 
665 	/* Check Power register for SoC internal bus issues */
666 	val =
667 		hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
668 			     SOC_POWER_REG_OFFSET);
669 	HIF_INFO_MED("%s: Power register is %08x", __func__, val);
670 
671 	return 0;
672 }
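/*
 * Illustrative note: with PCIE_WAKE_TIMEOUT at 1000 ms and the wake loop
 * above polling every 100 ms, the target gets at most ten wake attempts
 * (10 x 100 ms = 1000 ms) before the function gives up with -EACCES.
 */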
673 
674 /**
675  * __hif_pci_dump_registers(): dump other PCI debug registers
676  * @scn: struct hif_softc
677  *
678  * This function dumps PCI debug registers.  The parent function
679  * dumps the copy engine registers before calling this function.
680  *
681  * Return: void
682  */
683 static void __hif_pci_dump_registers(struct hif_softc *scn)
684 {
685 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
686 	void __iomem *mem = sc->mem;
687 	uint32_t val, i, j;
688 	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
689 	uint32_t ce_base;
690 
691 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
692 		return;
693 
694 	/* DEBUG_INPUT_SEL_SRC = 0x6 */
695 	val =
696 		hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
697 			     WLAN_DEBUG_INPUT_SEL_OFFSET);
698 	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
699 	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
700 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
701 		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);
702 
703 	/* DEBUG_CONTROL_ENABLE = 0x1 */
704 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
705 			   WLAN_DEBUG_CONTROL_OFFSET);
706 	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
707 	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
708 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
709 		      WLAN_DEBUG_CONTROL_OFFSET, val);
710 
711 	HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
712 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
713 			    WLAN_DEBUG_INPUT_SEL_OFFSET),
714 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
715 			    WLAN_DEBUG_CONTROL_OFFSET));
716 
717 	HIF_INFO_MED("%s: Debug CE", __func__);
718 	/* Loop CE debug output */
719 	/* AMBA_DEBUG_BUS_SEL = 0xc */
720 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
721 			    AMBA_DEBUG_BUS_OFFSET);
722 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
723 	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
724 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
725 		       val);
726 
727 	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
728 		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
729 		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
730 				   CE_WRAPPER_DEBUG_OFFSET);
731 		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
732 		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
733 		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
734 			      CE_WRAPPER_DEBUG_OFFSET, val);
735 
736 		HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
737 			    __func__, wrapper_idx[i],
738 			    hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
739 				AMBA_DEBUG_BUS_OFFSET),
740 			    hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
741 				CE_WRAPPER_DEBUG_OFFSET));
742 
743 		if (wrapper_idx[i] <= 7) {
744 			for (j = 0; j <= 5; j++) {
745 				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
746 				/* For (j=0~5) write CE_DEBUG_SEL = j */
747 				val =
748 					hif_read32_mb(sc, mem + ce_base +
749 						     CE_DEBUG_OFFSET);
750 				val &= ~CE_DEBUG_SEL_MASK;
751 				val |= CE_DEBUG_SEL_SET(j);
752 				hif_write32_mb(sc, mem + ce_base +
753 					       CE_DEBUG_OFFSET, val);
754 
755 				/* read (@gpio_athr_wlan_reg)
756 				 * WLAN_DEBUG_OUT_DATA
757 				 */
758 				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
759 						    + WLAN_DEBUG_OUT_OFFSET);
760 				val = WLAN_DEBUG_OUT_DATA_GET(val);
761 
762 				HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
763 					    __func__, j,
764 					    hif_read32_mb(sc, mem + ce_base +
765 						    CE_DEBUG_OFFSET), val);
766 			}
767 		} else {
768 			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
769 			val =
770 				hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
771 					     WLAN_DEBUG_OUT_OFFSET);
772 			val = WLAN_DEBUG_OUT_DATA_GET(val);
773 
774 			HIF_INFO_MED("%s: out: %x", __func__, val);
775 		}
776 	}
777 
778 	HIF_INFO_MED("%s: Debug PCIe:", __func__);
779 	/* Loop PCIe debug output */
780 	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
781 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
782 			    AMBA_DEBUG_BUS_OFFSET);
783 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
784 	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
785 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
786 		       AMBA_DEBUG_BUS_OFFSET, val);
787 
788 	for (i = 0; i <= 8; i++) {
789 		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
790 		val =
791 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
792 				     AMBA_DEBUG_BUS_OFFSET);
793 		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
794 		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
795 		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
796 			       AMBA_DEBUG_BUS_OFFSET, val);
797 
798 		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
799 		val =
800 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
801 				     WLAN_DEBUG_OUT_OFFSET);
802 		val = WLAN_DEBUG_OUT_DATA_GET(val);
803 
804 		HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
805 		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
806 				    WLAN_DEBUG_OUT_OFFSET), val,
807 		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
808 				    WLAN_DEBUG_OUT_OFFSET));
809 	}
810 
811 	Q_TARGET_ACCESS_END(scn);
812 }
813 
814 /**
815  * hif_pci_dump_registers(): dump bus debug registers
816  * @hif_ctx: struct hif_softc
817  *
818  * This function dumps hif bus debug registers
819  *
820  * Return: 0 for success or error code
821  */
822 int hif_pci_dump_registers(struct hif_softc *hif_ctx)
823 {
824 	int status;
825 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
826 
827 	status = hif_dump_ce_registers(scn);
828 
829 	if (status)
830 		HIF_ERROR("%s: Dump CE Registers Failed", __func__);
831 
832 	/* dump non copy engine pci registers */
833 	__hif_pci_dump_registers(scn);
834 
835 	return 0;
836 }
837 
838 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
839 
840 /* work handler to reschedule wlan_tasklet in SLUB debug builds */
841 static void reschedule_tasklet_work_handler(void *arg)
842 {
843 	struct hif_pci_softc *sc = arg;
844 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
845 
846 	if (!scn) {
847 		HIF_ERROR("%s: hif_softc is NULL\n", __func__);
848 		return;
849 	}
850 
851 	if (scn->hif_init_done == false) {
852 		HIF_ERROR("%s: wlan driver is unloaded", __func__);
853 		return;
854 	}
855 
856 	tasklet_schedule(&sc->intr_tq);
857 }
858 
859 /**
860  * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
861  * work
862  * @sc: HIF PCI Context
863  *
864  * Return: void
865  */
866 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
867 {
868 	qdf_create_work(0, &sc->reschedule_tasklet_work,
869 				reschedule_tasklet_work_handler, sc);
870 }
871 #else
872 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
873 #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
874 
875 void wlan_tasklet(unsigned long data)
876 {
877 	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
878 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
879 
880 	if (scn->hif_init_done == false)
881 		goto end;
882 
883 	if (qdf_atomic_read(&scn->link_suspended))
884 		goto end;
885 
886 	if (!ADRASTEA_BU) {
887 		(irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
888 		if (scn->target_status == TARGET_STATUS_RESET)
889 			goto end;
890 	}
891 
892 end:
893 	qdf_atomic_set(&scn->tasklet_from_intr, 0);
894 	qdf_atomic_dec(&scn->active_tasklet_cnt);
895 }
896 
897 #ifdef FEATURE_RUNTIME_PM
898 static const char *hif_pm_runtime_state_to_string(uint32_t state)
899 {
900 	switch (state) {
901 	case HIF_PM_RUNTIME_STATE_NONE:
902 		return "INIT_STATE";
903 	case HIF_PM_RUNTIME_STATE_ON:
904 		return "ON";
905 	case HIF_PM_RUNTIME_STATE_INPROGRESS:
906 		return "INPROGRESS";
907 	case HIF_PM_RUNTIME_STATE_SUSPENDED:
908 		return "SUSPENDED";
909 	default:
910 		return "INVALID STATE";
911 	}
912 }
913 
914 #define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
915 	seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
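/*
 * Illustrative note: the macro stringizes the field name, so
 *
 *	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
 *
 * expands to
 *
 *	seq_printf(s, "%30s: %u\n", "suspended", sc->pm_stats.suspended);
 */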
916 /**
917  * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
918  * @sc: hif_pci_softc context
919  * @msg: log message
920  *
921  * log runtime pm stats when something seems off.
922  *
923  * Return: void
924  */
925 static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
926 {
927 	struct hif_pm_runtime_lock *ctx;
928 
929 	HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
930 			msg, atomic_read(&sc->dev->power.usage_count),
931 			hif_pm_runtime_state_to_string(
932 					atomic_read(&sc->pm_state)),
933 			sc->prevent_suspend_cnt);
934 
935 	HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
936 			sc->dev->power.runtime_status,
937 			sc->dev->power.runtime_error,
938 			sc->dev->power.disable_depth,
939 			sc->dev->power.autosuspend_delay);
940 
941 	HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
942 			sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
943 			sc->pm_stats.request_resume);
944 
945 	HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
946 			sc->pm_stats.allow_suspend,
947 			sc->pm_stats.prevent_suspend);
948 
949 	HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
950 			sc->pm_stats.prevent_suspend_timeout,
951 			sc->pm_stats.allow_suspend_timeout);
952 
953 	HIF_ERROR("Suspended count: %u, resumed count: %u",
954 			sc->pm_stats.suspended,
955 			sc->pm_stats.resumed);
956 
957 	HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
958 			sc->pm_stats.suspend_err,
959 			sc->pm_stats.runtime_get_err);
960 
961 	HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");
962 
963 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
964 		HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
965 	}
966 
967 	WARN_ON(1);
968 }
969 
970 /**
971  * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
972  * @s: file to print to
973  * @data: unused
974  *
975  * debugging tool added to the debug fs for displaying runtimepm stats
976  *
977  * Return: 0
978  */
979 static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
980 {
981 	struct hif_pci_softc *sc = s->private;
982 	static const char * const autopm_state[] = {"NONE", "ON", "INPROGRESS",
983 		"SUSPENDED"};
984 	unsigned int msecs_age;
985 	int pm_state = atomic_read(&sc->pm_state);
986 	unsigned long timer_expires;
987 	struct hif_pm_runtime_lock *ctx;
988 
989 	seq_printf(s, "%30s: %s\n", "Runtime PM state",
990 			autopm_state[pm_state]);
991 	seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
992 			sc->pm_stats.last_resume_caller);
993 
994 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
995 		msecs_age = jiffies_to_msecs(
996 				jiffies - sc->pm_stats.suspend_jiffies);
997 		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
998 				msecs_age / 1000, msecs_age % 1000);
999 	}
1000 
1001 	seq_printf(s, "%30s: %d\n", "PM Usage count",
1002 			atomic_read(&sc->dev->power.usage_count));
1003 
1004 	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
1005 			sc->prevent_suspend_cnt);
1006 
1007 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
1008 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
1009 	HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
1010 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
1011 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
1012 	HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
1013 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
1014 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
1015 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
1016 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
1017 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);
1018 
1019 	timer_expires = sc->runtime_timer_expires;
1020 	if (timer_expires > 0) {
1021 		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
1022 		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
1023 				msecs_age / 1000, msecs_age % 1000);
1024 	}
1025 
1026 	spin_lock_bh(&sc->runtime_lock);
1027 	if (list_empty(&sc->prevent_suspend_list)) {
1028 		spin_unlock_bh(&sc->runtime_lock);
1029 		return 0;
1030 	}
1031 
1032 	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
1033 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
1034 		seq_printf(s, "%s", ctx->name);
1035 		if (ctx->timeout)
1036 			seq_printf(s, "(%d ms)", ctx->timeout);
1037 		seq_puts(s, " ");
1038 	}
1039 	seq_puts(s, "\n");
1040 	spin_unlock_bh(&sc->runtime_lock);
1041 
1042 	return 0;
1043 }
1044 #undef HIF_PCI_RUNTIME_PM_STATS
1045 
1046 /**
1047  * hif_pci_runtime_pm_open() - open a debugfs file to access the runtime pm stats
1048  * @inode: debugfs inode, carrying the pci context in i_private
1049  * @file: file being opened
1050  *
1051  * Return: linux error code of single_open.
1052  */
1053 static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
1054 {
1055 	return single_open(file, hif_pci_pm_runtime_debugfs_show,
1056 			inode->i_private);
1057 }
1058 
1059 static const struct file_operations hif_pci_runtime_pm_fops = {
1060 	.owner          = THIS_MODULE,
1061 	.open           = hif_pci_runtime_pm_open,
1062 	.release        = single_release,
1063 	.read           = seq_read,
1064 	.llseek         = seq_lseek,
1065 };
1066 
1067 /**
1068  * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
1069  * @sc: pci context
1070  *
1071  * creates a debugfs entry to debug the runtime pm feature.
1072  */
1073 static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
1074 {
1075 	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
1076 					0400, NULL, sc,
1077 					&hif_pci_runtime_pm_fops);
1078 }
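/*
 * Illustrative note: with a NULL parent the entry is created in the
 * debugfs root, so assuming the default debugfs mount point the stats
 * can be read with:
 *
 *	cat /sys/kernel/debug/cnss_runtime_pm
 */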
1079 
1080 /**
1081  * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
1082  * @sc: pci context
1083  *
1084  * removes the debugfs entry to debug the runtime pm feature.
1085  */
1086 static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
1087 {
1088 	debugfs_remove(sc->pm_dentry);
1089 }
1090 
1091 static void hif_runtime_init(struct device *dev, int delay)
1092 {
1093 	pm_runtime_set_autosuspend_delay(dev, delay);
1094 	pm_runtime_use_autosuspend(dev);
1095 	pm_runtime_allow(dev);
1096 	pm_runtime_mark_last_busy(dev);
1097 	pm_runtime_put_noidle(dev);
1098 	pm_suspend_ignore_children(dev, true);
1099 }
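/*
 * Illustrative sketch, not part of the driver (the 500 ms delay is a
 * made-up value): hif_runtime_init()/hif_runtime_exit() are deliberately
 * asymmetric around the pm usage count, so the device is only eligible
 * for runtime suspend between the two calls:
 *
 *	hif_runtime_init(dev, 500);	-- put_noidle drops the count
 *	-- device may runtime-suspend after 500 ms of inactivity --
 *	hif_runtime_exit(dev);		-- get_noresume restores the count
 */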
1100 
1101 static void hif_runtime_exit(struct device *dev)
1102 {
1103 	pm_runtime_get_noresume(dev);
1104 	pm_runtime_set_active(dev);
1105 }
1106 
1107 static void hif_pm_runtime_lock_timeout_fn(unsigned long data);
1108 
1109 /**
1110  * hif_pm_runtime_start(): start the runtime pm
1111  * @sc: pci context
1112  *
1113  * After this call, runtime pm will be active.
1114  */
1115 static void hif_pm_runtime_start(struct hif_pci_softc *sc)
1116 {
1117 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1118 	uint32_t mode = hif_get_conparam(ol_sc);
1119 
1120 	if (!ol_sc->hif_config.enable_runtime_pm) {
1121 		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
1122 		return;
1123 	}
1124 
1125 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode)) {
1126 		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
1127 				__func__);
1128 		return;
1129 	}
1130 
1131 	setup_timer(&sc->runtime_timer, hif_pm_runtime_lock_timeout_fn,
1132 			(unsigned long)sc);
1133 
1134 	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
1135 			ol_sc->hif_config.runtime_pm_delay);
1136 
1137 	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
1138 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
1139 	hif_runtime_pm_debugfs_create(sc);
1140 }
1141 
1142 /**
1143  * hif_pm_runtime_stop(): stop runtime pm
1144  * @sc: pci context
1145  *
1146  * Turns off runtime pm and frees corresponding resources
1147  * that were acquired by hif_runtime_pm_start().
1148  */
1149 static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
1150 {
1151 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1152 	uint32_t mode = hif_get_conparam(ol_sc);
1153 
1154 	if (!ol_sc->hif_config.enable_runtime_pm)
1155 		return;
1156 
1157 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode))
1158 		return;
1159 
1160 	hif_runtime_exit(sc->dev);
1161 	hif_pm_runtime_resume(sc->dev);
1162 
1163 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1164 
1165 	hif_runtime_pm_debugfs_remove(sc);
1166 	del_timer_sync(&sc->runtime_timer);
1167 	/* doesn't wait for pending traffic, unlike cld-2.0 */
1168 }
1169 
1170 /**
1171  * hif_pm_runtime_open(): initialize runtime pm
1172  * @sc: pci data structure
1173  *
1174  * Early initialization
1175  */
1176 static void hif_pm_runtime_open(struct hif_pci_softc *sc)
1177 {
1178 	spin_lock_init(&sc->runtime_lock);
1179 
1180 	qdf_atomic_init(&sc->pm_state);
1181 	qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
1182 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1183 	INIT_LIST_HEAD(&sc->prevent_suspend_list);
1184 }
1185 
1186 /**
1187  * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
1188  * @sc: pci context
1189  *
1190  * Ensure we have only one vote against runtime suspend before closing
1191  * the runtime suspend feature.
1192  *
1193  * All gets taken by the wlan driver should have been returned;
1194  * one vote should remain, held as part of cnss_runtime_exit.
1195  *
1196  * This needs to be revisited if we ever share the root complex.
1197  */
1198 static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
1199 {
1200 	struct hif_pm_runtime_lock *ctx, *tmp;
1201 
1202 	if (atomic_read(&sc->dev->power.usage_count) != 1)
1203 		hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
1204 	else
1205 		return;
1206 
1207 	spin_lock_bh(&sc->runtime_lock);
1208 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1209 		spin_unlock_bh(&sc->runtime_lock);
1210 		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
1211 		spin_lock_bh(&sc->runtime_lock);
1212 	}
1213 	spin_unlock_bh(&sc->runtime_lock);
1214 
1215 	/* Ensure exactly one usage count remains, so that runtime pm is
1216 	 * not disabled when the wlan driver is insmodded again.  This
1217 	 * also ensures runtime pm doesn't get broken by the count
1218 	 * dropping below 1.
1219 	 */
1220 	if (atomic_read(&sc->dev->power.usage_count) <= 0)
1221 		atomic_set(&sc->dev->power.usage_count, 1);
1222 	while (atomic_read(&sc->dev->power.usage_count) > 1)
1223 		hif_pm_runtime_put_auto(sc->dev);
1224 }
1225 
1226 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
1227 					  struct hif_pm_runtime_lock *lock);
1228 
1229 /**
1230  * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
1231  * @sc: PCIe Context
1232  *
1233  * API is used to empty the runtime pm prevent suspend list.
1234  *
1235  * Return: void
1236  */
1237 static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
1238 {
1239 	struct hif_pm_runtime_lock *ctx, *tmp;
1240 
1241 	spin_lock_bh(&sc->runtime_lock);
1242 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1243 		__hif_pm_runtime_allow_suspend(sc, ctx);
1244 	}
1245 	spin_unlock_bh(&sc->runtime_lock);
1246 }
1247 
1248 /**
1249  * hif_pm_runtime_close(): close runtime pm
1250  * @sc: pci bus handle
1251  *
1252  * ensure runtime_pm is stopped before closing the driver
1253  */
1254 static void hif_pm_runtime_close(struct hif_pci_softc *sc)
1255 {
1256 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1257 
1258 	qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
1259 	if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
1260 		return;
1261 
1262 	hif_pm_runtime_stop(sc);
1263 
1264 	hif_is_recovery_in_progress(scn) ?
1265 		hif_pm_runtime_sanitize_on_ssr_exit(sc) :
1266 		hif_pm_runtime_sanitize_on_exit(sc);
1267 }
1268 #else
1269 static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
1270 static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
1271 static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
1272 static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
1273 #endif
1274 
1275 /**
1276  * hif_disable_power_gating() - disable HW power gating
1277  * @hif_ctx: hif context
1278  *
1279  * disables pcie L1 power states
1280  */
1281 static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1282 {
1283 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1284 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1285 
1286 	if (NULL == scn) {
1287 		HIF_ERROR("%s: Could not disable ASPM, scn is null",
1288 		       __func__);
1289 		return;
1290 	}
1291 
1292 	/* Disable ASPM when pkt log is enabled */
1293 	pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1294 	pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1295 }
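/*
 * Illustrative note (treating offset 0x80 as this device's PCIe Link
 * Control register is an assumption taken from the code above): bits
 * [1:0] of Link Control form the ASPM control field (01b = L0s,
 * 10b = L1, 11b = both), so masking with 0xffffff00 clears ASPM along
 * with the rest of the low byte.  Clearing only the ASPM field would be:
 *
 *	pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val & ~0x3);
 */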
1296 
1297 /**
1298  * hif_enable_power_gating() - enable HW power gating
1299  * @sc: pci context
1300  *
1301  * enables pcie L1 power states
1302  */
1303 static void hif_enable_power_gating(struct hif_pci_softc *sc)
1304 {
1305 	if (NULL == sc) {
1306 		HIF_ERROR("%s: Could not re-enable ASPM, sc is null",
1307 		       __func__);
1308 		return;
1309 	}
1310 
1311 	/* Re-enable ASPM after firmware/OTP download is complete */
1312 	pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1313 }
1314 
1315 /**
1316  * hif_pci_enable_power_management() - enable power management
1317  * @hif_sc: hif context
1318  * @is_packet_log_enabled: true if packet log is enabled
1319  * Enables runtime pm, ASPM (via hif_enable_power_gating) and re-enables
1320  * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
1321  *
1322  * note: epping mode does not call this function as it does not
1323  *       care about saving power.
1324  */
1325 void hif_pci_enable_power_management(struct hif_softc *hif_sc,
1326 				 bool is_packet_log_enabled)
1327 {
1328 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
1329 
1330 	if (pci_ctx == NULL) {
1331 		HIF_ERROR("%s, hif_ctx null", __func__);
1332 		return;
1333 	}
1334 
1335 	hif_pm_runtime_start(pci_ctx);
1336 
1337 	if (!is_packet_log_enabled)
1338 		hif_enable_power_gating(pci_ctx);
1339 
1340 	if (!CONFIG_ATH_PCIE_MAX_PERF &&
1341 	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
1342 	    !ce_srng_based(hif_sc)) {
1343 		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
1344 		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
1345 			HIF_ERROR("%s, failed to set target to sleep",
1346 				  __func__);
1347 	}
1348 }
1349 
1350 /**
1351  * hif_pci_disable_power_management() - disable power management
1352  * @hif_ctx: hif context
1353  *
1354  * Currently disables runtime pm. Should be updated to behave
1355  * gracefully if runtime pm is not started, and to take care
1356  * of ASPM and soc sleep for driver load.
1357  */
1358 void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
1359 {
1360 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1361 
1362 	if (pci_ctx == NULL) {
1363 		HIF_ERROR("%s, hif_ctx null", __func__);
1364 		return;
1365 	}
1366 
1367 	hif_pm_runtime_stop(pci_ctx);
1368 }
1369 
1370 void hif_pci_display_stats(struct hif_softc *hif_ctx)
1371 {
1372 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1373 
1374 	if (pci_ctx == NULL) {
1375 		HIF_ERROR("%s, hif_ctx null", __func__);
1376 		return;
1377 	}
1378 	hif_display_ce_stats(&pci_ctx->ce_sc);
1379 }
1380 
1381 void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1382 {
1383 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1384 
1385 	if (pci_ctx == NULL) {
1386 		HIF_ERROR("%s, hif_ctx null", __func__);
1387 		return;
1388 	}
1389 	hif_clear_ce_stats(&pci_ctx->ce_sc);
1390 }
1391 
1392 #define ATH_PCI_PROBE_RETRY_MAX 3
1393 /**
1394  * hif_pci_open(): open the PCI bus instance of hif
1395  * @hif_ctx: hif context
1396  * @bus_type: bus type
1397  *
1398  * Return: QDF_STATUS
1399  */
1400 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
1401 {
1402 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1403 
1404 	hif_ctx->bus_type = bus_type;
1405 	hif_pm_runtime_open(sc);
1406 
1407 	qdf_spinlock_create(&sc->irq_lock);
1408 
1409 	return hif_ce_open(hif_ctx);
1410 }
1411 
1412 /**
1413  * hif_wake_target_cpu() - wake the target's cpu
1414  * @scn: hif context
1415  *
1416  * Send an interrupt to the device to wake up the Target CPU
1417  * so it has an opportunity to notice any changed state.
1418  */
1419 static void hif_wake_target_cpu(struct hif_softc *scn)
1420 {
1421 	QDF_STATUS rv;
1422 	uint32_t core_ctrl;
1423 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1424 
1425 	rv = hif_diag_read_access(hif_hdl,
1426 				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1427 				  &core_ctrl);
1428 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1429 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1430 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1431 
1432 	rv = hif_diag_write_access(hif_hdl,
1433 				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1434 				   core_ctrl);
1435 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1436 }
1437 
1438 /**
1439  * soc_wake_reset() - allow the target to go to sleep
1440  * @scn: hif_softc
1441  *
1442  * Clear the force wake register.  This is done from
1443  * hif_sleep_entry and when cancelling the deferred sleep timer.
1444  */
1445 static void soc_wake_reset(struct hif_softc *scn)
1446 {
1447 	hif_write32_mb(scn, scn->mem +
1448 		PCIE_LOCAL_BASE_ADDRESS +
1449 		PCIE_SOC_WAKE_ADDRESS,
1450 		PCIE_SOC_WAKE_RESET);
1451 }
1452 
1453 /**
1454  * hif_sleep_entry() - gate target sleep
1455  * @arg: hif context
1456  *
1457  * This function is the callback for the sleep timer.
1458  * Check if the last force awake critical section ended at least
1459  * HIF_MIN_SLEEP_INACTIVITY_TIME_MS ago.  If it did,
1460  * allow the target to go to sleep and cancel the sleep timer;
1461  * otherwise reschedule the sleep timer.
1462  */
1463 static void hif_sleep_entry(void *arg)
1464 {
1465 	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1466 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1467 	uint32_t idle_ms;
1468 
1469 	if (scn->recovery)
1470 		return;
1471 
1472 	if (hif_is_driver_unloading(scn))
1473 		return;
1474 
1475 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1476 	if (hif_state->verified_awake == false) {
1477 		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1478 						    - hif_state->sleep_ticks);
1479 		if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1480 			if (!qdf_atomic_read(&scn->link_suspended)) {
1481 				soc_wake_reset(scn);
1482 				hif_state->fake_sleep = false;
1483 			}
1484 		} else {
1485 			qdf_timer_stop(&hif_state->sleep_timer);
1486 			qdf_timer_start(&hif_state->sleep_timer,
1487 				    HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1488 		}
1489 	} else {
1490 		qdf_timer_stop(&hif_state->sleep_timer);
1491 		qdf_timer_start(&hif_state->sleep_timer,
1492 					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1493 	}
1494 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1495 }
1496 
1497 #define HIF_HIA_MAX_POLL_LOOP    1000000
1498 #define HIF_HIA_POLLING_DELAY_MS 10
1499 
1500 #ifdef CONFIG_WIN
1501 static void hif_set_hia_extnd(struct hif_softc *scn)
1502 {
1503 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1504 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1505 	uint32_t target_type = tgt_info->target_type;
1506 
1507 	HIF_TRACE("%s: E", __func__);
1508 
1509 	if ((target_type == TARGET_TYPE_AR900B) ||
1510 			target_type == TARGET_TYPE_QCA9984 ||
1511 			target_type == TARGET_TYPE_QCA9888) {
1512 		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
1513 		 * in RTC space
1514 		 */
1515 		tgt_info->target_revision
1516 			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
1517 					+ CHIP_ID_ADDRESS));
1518 		qdf_print("chip_id 0x%x chip_revision 0x%x",
1519 			  target_type, tgt_info->target_revision);
1520 	}
1521 
1522 	{
1523 		uint32_t flag2_value = 0;
1524 		uint32_t flag2_targ_addr =
1525 			host_interest_item_address(target_type,
1526 			offsetof(struct host_interest_s, hi_skip_clock_init));
1527 
1528 		if ((ar900b_20_targ_clk != -1) &&
1529 			(frac != -1) && (intval != -1)) {
1530 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1531 				&flag2_value);
1532 			qdf_print("\n Setting clk_override");
1533 			flag2_value |= CLOCK_OVERRIDE;
1534 
1535 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1536 					flag2_value);
1537 			qdf_print("\n CLOCK PLL val set %d", flag2_value);
1538 		} else {
1539 			qdf_print("\n CLOCK PLL skipped");
1540 		}
1541 	}
1542 
1543 	if (target_type == TARGET_TYPE_AR900B
1544 			|| target_type == TARGET_TYPE_QCA9984
1545 			|| target_type == TARGET_TYPE_QCA9888) {
1546 
1547 		/* For AR9980_2.0 a 300 MHz clock is used; right now we
1548 		 * assume this is supplied through module parameters;
1549 		 * if not supplied, assume the default, i.e. the same
1550 		 * behavior as 1.0.  The 1.0 clock can't be tuned; reset to defaults.
1551 		 */
1552 
1553 		qdf_print(KERN_INFO
1554 			  "%s: setting the target pll frac %x intval %x",
1555 			  __func__, frac, intval);
1556 
1557 		/* do not touch frac, and int val, let them be default -1,
1558 		 * if desired, host can supply these through module params
1559 		 */
1560 		if (frac != -1 || intval != -1) {
1561 			uint32_t flag2_value = 0;
1562 			uint32_t flag2_targ_addr;
1563 
1564 			flag2_targ_addr =
1565 				host_interest_item_address(target_type,
1566 				offsetof(struct host_interest_s,
1567 					hi_clock_info));
1568 			hif_diag_read_access(hif_hdl,
1569 				flag2_targ_addr, &flag2_value);
1570 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1571 				  flag2_value);
1572 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1573 			qdf_print("\n INT Val %x  Address %x",
1574 				  intval, flag2_value + 4);
1575 			hif_diag_write_access(hif_hdl,
1576 					flag2_value + 4, intval);
1577 		} else {
1578 			qdf_print(KERN_INFO
1579 				  "%s: no frac provided, skipping pre-configuring PLL",
1580 				  __func__);
1581 		}
1582 
1583 		/* for 2.0 write 300 MHz into hi_desired_cpu_speed_hz */
1584 		if ((target_type == TARGET_TYPE_AR900B)
1585 			&& (tgt_info->target_revision == AR900B_REV_2)
1586 			&& ar900b_20_targ_clk != -1) {
1587 			uint32_t flag2_value = 0;
1588 			uint32_t flag2_targ_addr;
1589 
1590 			flag2_targ_addr
1591 				= host_interest_item_address(target_type,
1592 					offsetof(struct host_interest_s,
1593 					hi_desired_cpu_speed_hz));
1594 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1595 							&flag2_value);
1596 			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
1597 				  flag2_value);
1598 			hif_diag_write_access(hif_hdl, flag2_value,
1599 				ar900b_20_targ_clk/*300000000u*/);
1600 		} else if (target_type == TARGET_TYPE_QCA9888) {
1601 			uint32_t flag2_targ_addr;
1602 
1603 			if (200000000u != qca9888_20_targ_clk) {
1604 				qca9888_20_targ_clk = 300000000u;
1605 				/* Setting the target clock speed to 300 MHz */
1606 			}
1607 
1608 			flag2_targ_addr
1609 				= host_interest_item_address(target_type,
1610 					offsetof(struct host_interest_s,
1611 					hi_desired_cpu_speed_hz));
1612 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1613 				qca9888_20_targ_clk);
1614 		} else {
1615 			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
1616 				  __func__);
1617 		}
1618 	} else {
1619 		if (frac != -1 || intval != -1) {
1620 			uint32_t flag2_value = 0;
1621 			uint32_t flag2_targ_addr =
1622 				host_interest_item_address(target_type,
1623 					offsetof(struct host_interest_s,
1624 							hi_clock_info));
1625 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1626 						&flag2_value);
1627 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1628 				  flag2_value);
1629 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1630 			qdf_print("\n INT Val %x  Address %x", intval,
1631 				  flag2_value + 4);
1632 			hif_diag_write_access(hif_hdl, flag2_value + 4,
1633 					      intval);
1634 		}
1635 	}
1636 }
1637 
1638 #else
1639 
1640 static void hif_set_hia_extnd(struct hif_softc *scn)
1641 {
1642 }
1643 
1644 #endif
1645 
1646 /**
1647  * hif_set_hia() - fill out the host interest area
1648  * @scn: hif context
1649  *
1650  * This is replaced by hif_wlan_enable for integrated targets.
1651  * This fills out the host interest area.  The firmware will
1652  * process these memory addresses when it is first brought out
1653  * of reset.
1654  *
1655  * Return: 0 for success.
1656  */
1657 static int hif_set_hia(struct hif_softc *scn)
1658 {
1659 	QDF_STATUS rv;
1660 	uint32_t interconnect_targ_addr = 0;
1661 	uint32_t pcie_state_targ_addr = 0;
1662 	uint32_t pipe_cfg_targ_addr = 0;
1663 	uint32_t svc_to_pipe_map = 0;
1664 	uint32_t pcie_config_flags = 0;
1665 	uint32_t flag2_value = 0;
1666 	uint32_t flag2_targ_addr = 0;
1667 #ifdef QCA_WIFI_3_0
1668 	uint32_t host_interest_area = 0;
1669 	uint8_t i;
1670 #else
1671 	uint32_t ealloc_value = 0;
1672 	uint32_t ealloc_targ_addr = 0;
1673 	uint8_t banks_switched = 1;
1674 	uint32_t chip_id;
1675 #endif
1676 	uint32_t pipe_cfg_addr;
1677 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1678 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1679 	uint32_t target_type = tgt_info->target_type;
1680 	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
1681 	static struct CE_pipe_config *target_ce_config;
1682 	struct service_to_pipe *target_service_to_ce_map;
1683 
1684 	HIF_TRACE("%s: E", __func__);
1685 
1686 	hif_get_target_ce_config(scn,
1687 				 &target_ce_config, &target_ce_config_sz,
1688 				 &target_service_to_ce_map,
1689 				 &target_service_to_ce_map_sz,
1690 				 NULL, NULL);
1691 
1692 	if (ADRASTEA_BU)
1693 		return QDF_STATUS_SUCCESS;
1694 
1695 #ifdef QCA_WIFI_3_0
1696 	i = 0;
1697 	while (i < HIF_HIA_MAX_POLL_LOOP) {
1698 		host_interest_area = hif_read32_mb(scn, scn->mem +
1699 						A_SOC_CORE_SCRATCH_0_ADDRESS);
1700 		if ((host_interest_area & 0x01) == 0) {
1701 			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1702 			host_interest_area = 0;
1703 			i++;
1704 			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1705 				HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1706 		} else {
1707 			host_interest_area &= (~0x01);
1708 			hif_write32_mb(scn, scn->mem + 0x113014, 0);
1709 			break;
1710 		}
1711 	}
1712 
1713 	if (i >= HIF_HIA_MAX_POLL_LOOP) {
1714 		HIF_ERROR("%s: hia polling timeout", __func__);
1715 		return -EIO;
1716 	}
1717 
1718 	if (host_interest_area == 0) {
1719 		HIF_ERROR("%s: host_interest_area = 0", __func__);
1720 		return -EIO;
1721 	}
1722 
1723 	interconnect_targ_addr = host_interest_area +
1724 			offsetof(struct host_interest_area_t,
1725 			hi_interconnect_state);
1726 
1727 	flag2_targ_addr = host_interest_area +
1728 			offsetof(struct host_interest_area_t, hi_option_flag2);
1729 
1730 #else
1731 	interconnect_targ_addr = hif_hia_item_address(target_type,
1732 		offsetof(struct host_interest_s, hi_interconnect_state));
1733 	ealloc_targ_addr = hif_hia_item_address(target_type,
1734 		offsetof(struct host_interest_s, hi_early_alloc));
1735 	flag2_targ_addr = hif_hia_item_address(target_type,
1736 		offsetof(struct host_interest_s, hi_option_flag2));
1737 #endif
1738 	/* Supply Target-side CE configuration */
1739 	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1740 			  &pcie_state_targ_addr);
1741 	if (rv != QDF_STATUS_SUCCESS) {
1742 		HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1743 			  __func__, interconnect_targ_addr, rv);
1744 		goto done;
1745 	}
1746 	if (pcie_state_targ_addr == 0) {
1747 		rv = QDF_STATUS_E_FAILURE;
1748 		HIF_ERROR("%s: pcie state addr is 0", __func__);
1749 		goto done;
1750 	}
1751 	pipe_cfg_addr = pcie_state_targ_addr +
1752 			  offsetof(struct pcie_state_s,
1753 			  pipe_cfg_addr);
1754 	rv = hif_diag_read_access(hif_hdl,
1755 			  pipe_cfg_addr,
1756 			  &pipe_cfg_targ_addr);
1757 	if (rv != QDF_STATUS_SUCCESS) {
1758 		HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1759 			__func__, pipe_cfg_addr, rv);
1760 		goto done;
1761 	}
1762 	if (pipe_cfg_targ_addr == 0) {
1763 		rv = QDF_STATUS_E_FAILURE;
1764 		HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1765 		goto done;
1766 	}
1767 
1768 	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1769 			(uint8_t *) target_ce_config,
1770 			target_ce_config_sz);
1771 
1772 	if (rv != QDF_STATUS_SUCCESS) {
1773 		HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1774 		goto done;
1775 	}
1776 
1777 	rv = hif_diag_read_access(hif_hdl,
1778 			  pcie_state_targ_addr +
1779 			  offsetof(struct pcie_state_s,
1780 			   svc_to_pipe_map),
1781 			  &svc_to_pipe_map);
1782 	if (rv != QDF_STATUS_SUCCESS) {
1783 		HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1784 		goto done;
1785 	}
1786 	if (svc_to_pipe_map == 0) {
1787 		rv = QDF_STATUS_E_FAILURE;
1788 		HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1789 		goto done;
1790 	}
1791 
1792 	rv = hif_diag_write_mem(hif_hdl,
1793 			svc_to_pipe_map,
1794 			(uint8_t *) target_service_to_ce_map,
1795 			target_service_to_ce_map_sz);
1796 	if (rv != QDF_STATUS_SUCCESS) {
1797 		HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1798 		goto done;
1799 	}
1800 
1801 	rv = hif_diag_read_access(hif_hdl,
1802 			pcie_state_targ_addr +
1803 			offsetof(struct pcie_state_s,
1804 			config_flags),
1805 			&pcie_config_flags);
1806 	if (rv != QDF_STATUS_SUCCESS) {
1807 		HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1808 		goto done;
1809 	}
1810 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1811 	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1812 #else
1813 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1814 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1815 	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1816 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1817 	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1818 #endif
1819 	rv = hif_diag_write_mem(hif_hdl,
1820 			pcie_state_targ_addr +
1821 			offsetof(struct pcie_state_s,
1822 			config_flags),
1823 			(uint8_t *) &pcie_config_flags,
1824 			sizeof(pcie_config_flags));
1825 	if (rv != QDF_STATUS_SUCCESS) {
1826 		HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1827 		goto done;
1828 	}
1829 
1830 #ifndef QCA_WIFI_3_0
1831 	/* configure early allocation */
1832 	ealloc_targ_addr = hif_hia_item_address(target_type,
1833 						offsetof(
1834 						struct host_interest_s,
1835 						hi_early_alloc));
1836 
1837 	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1838 			&ealloc_value);
1839 	if (rv != QDF_STATUS_SUCCESS) {
1840 		HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1841 		goto done;
1842 	}
1843 
1844 	/* 1 bank is switched to IRAM, except ROME 1.0 */
1845 	ealloc_value |=
1846 		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1847 		 HI_EARLY_ALLOC_MAGIC_MASK);
1848 
1849 	rv = hif_diag_read_access(hif_hdl,
1850 			  CHIP_ID_ADDRESS |
1851 			  RTC_SOC_BASE_ADDRESS, &chip_id);
1852 	if (rv != QDF_STATUS_SUCCESS) {
1853 		HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1854 		goto done;
1855 	}
1856 	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1857 		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1858 		switch (CHIP_ID_REVISION_GET(chip_id)) {
1859 		case 0x2:       /* ROME 1.3 */
1860 			/* 2 banks are switched to IRAM */
1861 			banks_switched = 2;
1862 			break;
1863 		case 0x4:       /* ROME 2.1 */
1864 		case 0x5:       /* ROME 2.2 */
1865 			banks_switched = 6;
1866 			break;
1867 		case 0x8:       /* ROME 3.0 */
1868 		case 0x9:       /* ROME 3.1 */
1869 		case 0xA:       /* ROME 3.2 */
1870 			banks_switched = 9;
1871 			break;
1872 		case 0x0:       /* ROME 1.0 */
1873 		case 0x1:       /* ROME 1.1 */
1874 		default:
1875 			/* 3 banks are switched to IRAM */
1876 			banks_switched = 3;
1877 			break;
1878 		}
1879 	}
1880 
1881 	ealloc_value |=
1882 		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1883 		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1884 
1885 	rv = hif_diag_write_access(hif_hdl,
1886 				ealloc_targ_addr,
1887 				ealloc_value);
1888 	if (rv != QDF_STATUS_SUCCESS) {
1889 		HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1890 		goto done;
1891 	}
1892 #endif
1893 	if ((target_type == TARGET_TYPE_AR900B)
1894 			|| (target_type == TARGET_TYPE_QCA9984)
1895 			|| (target_type == TARGET_TYPE_QCA9888)
1896 			|| (target_type == TARGET_TYPE_AR9888)) {
1897 		hif_set_hia_extnd(scn);
1898 	}
1899 
1900 	/* Tell Target to proceed with initialization */
1901 	flag2_targ_addr = hif_hia_item_address(target_type,
1902 						offsetof(
1903 						struct host_interest_s,
1904 						hi_option_flag2));
1905 
1906 	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1907 			  &flag2_value);
1908 	if (rv != QDF_STATUS_SUCCESS) {
1909 		HIF_ERROR("%s: get option val (%d)", __func__, rv);
1910 		goto done;
1911 	}
1912 
1913 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1914 	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1915 			   flag2_value);
1916 	if (rv != QDF_STATUS_SUCCESS) {
1917 		HIF_ERROR("%s: set option val (%d)", __func__, rv);
1918 		goto done;
1919 	}
1920 
1921 	hif_wake_target_cpu(scn);
1922 
1923 done:
1924 
1925 	return rv;
1926 }
1927 
1928 /**
1929  * hif_pci_bus_configure() - configure the pcie bus
1930  * @hif_sc: pointer to the hif context.
1931  *
1932  * Return: 0 for success. nonzero for failure.
1933  */
1934 int hif_pci_bus_configure(struct hif_softc *hif_sc)
1935 {
1936 	int status = 0;
1937 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1938 	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
1939 
1940 	hif_ce_prepare_config(hif_sc);
1941 
1942 	/* initialize sleep state adjust variables */
1944 	hif_state->keep_awake_count = 0;
1945 	hif_state->fake_sleep = false;
1946 	hif_state->sleep_ticks = 0;
1947 
1948 	qdf_timer_init(NULL, &hif_state->sleep_timer,
1949 			       hif_sleep_entry, (void *)hif_state,
1950 			       QDF_TIMER_TYPE_WAKE_APPS);
1951 	hif_state->sleep_timer_init = true;
1952 
1953 	status = hif_wlan_enable(hif_sc);
1954 	if (status) {
1955 		HIF_ERROR("%s: hif_wlan_enable error = %d",
1956 			  __func__, status);
1957 		goto timer_free;
1958 	}
1959 
1960 	A_TARGET_ACCESS_LIKELY(hif_sc);
1961 
1962 	if ((CONFIG_ATH_PCIE_MAX_PERF ||
1963 	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
1964 	    !ce_srng_based(hif_sc)) {
1965 		/*
1966 		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
1967 		 * prevent sleep when we want to keep firmware always awake
1968 		 * note: when we want to keep firmware always awake,
1969 		 *       hif_target_sleep_state_adjust will point to a dummy
1970 		 *       function, and hif_pci_target_sleep_state_adjust must
1971 		 *       be called instead.
1972 		 * note: bus type check is here because AHB bus is reusing
1973 		 *       hif_pci_bus_configure code.
1974 		 */
1975 		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
1976 			if (hif_pci_target_sleep_state_adjust(hif_sc,
1977 					false, true) < 0) {
1978 				status = -EACCES;
1979 				goto disable_wlan;
1980 			}
1981 		}
1982 	}
1983 
1984 	/* todo: consider replacing this with an srng field */
1985 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
1986 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2)) &&
1987 	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
1988 		hif_sc->per_ce_irq = true;
1989 	}
1990 
1991 	status = hif_config_ce(hif_sc);
1992 	if (status)
1993 		goto disable_wlan;
1994 
1995 	/* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
1996 	if (hif_needs_bmi(hif_osc)) {
1997 		status = hif_set_hia(hif_sc);
1998 		if (status)
1999 			goto unconfig_ce;
2000 
2001 		HIF_INFO_MED("%s: hif_set_hia done", __func__);
2002 
2003 		hif_register_bmi_callbacks(hif_sc);
2004 	}
2005 
2006 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
2007 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2)) &&
2008 	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
2009 		HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
2010 						__func__);
2011 	else {
2012 		status = hif_configure_irq(hif_sc);
2013 		if (status < 0)
2014 			goto unconfig_ce;
2015 	}
2016 
2017 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2018 
2019 	return status;
2020 
2021 unconfig_ce:
2022 	hif_unconfig_ce(hif_sc);
2023 disable_wlan:
2024 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2025 	hif_wlan_disable(hif_sc);
2026 
2027 timer_free:
2028 	qdf_timer_stop(&hif_state->sleep_timer);
2029 	qdf_timer_free(&hif_state->sleep_timer);
2030 	hif_state->sleep_timer_init = false;
2031 
2032 	HIF_ERROR("%s: failed, status = %d", __func__, status);
2033 	return status;
2034 }
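
/*
 * Note on the error path above: the labels unwind in reverse order of
 * setup (CE configuration, then wlan enable, then the sleep timer), so
 * a failure at any step releases exactly the resources acquired before
 * it and nothing more.
 */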
2035 
2036 /**
2037  * hif_pci_close(): hif PCI bus close
2038  * @hif_sc: hif context
2039  * Return: n/a
2040  */
2041 void hif_pci_close(struct hif_softc *hif_sc)
2042 {
2043 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
2044 
2045 	hif_pm_runtime_close(hif_pci_sc);
2046 	hif_ce_close(hif_sc);
2047 }
2048 
2049 #define BAR_NUM 0
2050 
2051 static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
2052 				struct pci_dev *pdev,
2053 				const struct pci_device_id *id)
2054 {
2055 	void __iomem *mem;
2056 	int ret = 0;
2057 	uint16_t device_id = 0;
2058 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2059 
2060 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
2061 	if (device_id != id->device)  {
2062 		HIF_ERROR(
2063 		   "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
2064 		   __func__, device_id, id->device);
2065 		/* pci link is down, so returning with error code */
2066 		return -EIO;
2067 	}
2068 
2069 	/* FIXME: temp. commenting out assign_resource
2070 	 * call for dev_attach to work on 2.6.38 kernel
2071 	 */
2072 #if (!defined(__LINUX_ARM_ARCH__))
2073 	if (pci_assign_resource(pdev, BAR_NUM)) {
2074 		HIF_ERROR("%s: pci_assign_resource error", __func__);
2075 		return -EIO;
2076 	}
2077 #endif
2078 	if (pci_enable_device(pdev)) {
2079 		HIF_ERROR("%s: pci_enable_device error",
2080 			   __func__);
2081 		return -EIO;
2082 	}
2083 
2084 	/* Request MMIO resources */
2085 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2086 	if (ret) {
2087 		HIF_ERROR("%s: PCI MMIO reservation error", __func__);
2088 		ret = -EIO;
2089 		goto err_region;
2090 	}
2091 
2092 #ifdef CONFIG_ARM_LPAE
2093 	/* if CONFIG_ARM_LPAE is enabled, the 64-bit DMA mask must be set
2094 	 * even for 32-bit devices.
2095 	 */
2096 	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2097 	if (ret) {
2098 		HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
2099 		goto err_dma;
2100 	}
2101 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2102 	if (ret) {
2103 		HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
2104 		goto err_dma;
2105 	}
2106 #else
2107 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2108 	if (ret) {
2109 		HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
2110 		goto err_dma;
2111 	}
2112 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2113 	if (ret) {
2114 		HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
2115 			   __func__);
2116 		goto err_dma;
2117 	}
2118 #endif
2119 
2120 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2121 
2122 	/* Set bus master bit in PCI_COMMAND to enable DMA */
2123 	pci_set_master(pdev);
2124 
2125 	/* Arrange for access to Target SoC registers. */
2126 	mem = pci_iomap(pdev, BAR_NUM, 0);
2127 	if (!mem) {
2128 		HIF_ERROR("%s: PCI iomap error", __func__);
2129 		ret = -EIO;
2130 		goto err_iomap;
2131 	}
2132 
2133 	HIF_INFO("%s: BAR is %pK\n", __func__, (void *)mem);
2134 
2135 	sc->mem = mem;
2136 
2137 	/* Hawkeye emulation specific change */
2138 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
2139 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
2140 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
2141 		(device_id == RUMIM2M_DEVICE_ID_NODE3)) {
2142 		mem = mem + 0x0c000000;
2143 		sc->mem = mem;
2144 		HIF_INFO("%s: Changing PCI mem base to %pK\n",
2145 			__func__, sc->mem);
2146 	}
2147 
2148 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
2149 	ol_sc->mem = mem;
2150 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
2151 	sc->pci_enabled = true;
2152 	return ret;
2153 
2154 err_iomap:
2155 	pci_clear_master(pdev);
2156 err_dma:
2157 	pci_release_region(pdev, BAR_NUM);
2158 err_region:
2159 	pci_disable_device(pdev);
2160 	return ret;
2161 }
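
/*
 * Illustrative sketch, not part of the driver flow: on kernels that
 * provide dma_set_mask_and_coherent(), the paired streaming/coherent
 * mask calls above can be collapsed into a single call, e.g. for the
 * 32-bit case:
 *
 *	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret) {
 *		HIF_ERROR("%s: Cannot enable 32-bit DMA", __func__);
 *		goto err_dma;
 *	}
 */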
2162 
2163 static int hif_enable_pci_pld(struct hif_pci_softc *sc,
2164 			      struct pci_dev *pdev,
2165 			      const struct pci_device_id *id)
2166 {
2167 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2168 	sc->pci_enabled = true;
2169 	return 0;
2170 }
2171 
2173 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
2174 {
2175 	pci_disable_msi(sc->pdev);
2176 	pci_iounmap(sc->pdev, sc->mem);
2177 	pci_clear_master(sc->pdev);
2178 	pci_release_region(sc->pdev, BAR_NUM);
2179 	pci_disable_device(sc->pdev);
2180 }
2181 
2182 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
2183 
2184 static void hif_disable_pci(struct hif_pci_softc *sc)
2185 {
2186 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2187 
2188 	if (ol_sc == NULL) {
2189 		HIF_ERROR("%s: ol_sc = NULL", __func__);
2190 		return;
2191 	}
2192 	hif_pci_device_reset(sc);
2193 	sc->hif_pci_deinit(sc);
2194 
2195 	sc->mem = NULL;
2196 	ol_sc->mem = NULL;
2197 }
2198 
2199 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
2200 {
2201 	int ret = 0;
2202 	int targ_awake_limit = 500;
2203 #ifndef QCA_WIFI_3_0
2204 	uint32_t fw_indicator;
2205 #endif
2206 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2207 
2208 	/*
2209 	 * Verify that the Target was started cleanly.
2210 	 * The case where this is most likely is with an AUX-powered
2211 	 * Target and a Host in WoW mode. If the Host crashes,
2212 	 * loses power, or is restarted (without unloading the driver)
2213 	 * then the Target is left (aux) powered and running.  On a
2214 	 * subsequent driver load, the Target is in an unexpected state.
2215 	 * We try to catch that here in order to reset the Target and
2216 	 * retry the probe.
2217 	 */
2218 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2219 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2220 	while (!hif_targ_is_awake(scn, sc->mem)) {
2221 		if (0 == targ_awake_limit) {
2222 			HIF_ERROR("%s: target awake timeout", __func__);
2223 			ret = -EAGAIN;
2224 			goto end;
2225 		}
2226 		qdf_mdelay(1);
2227 		targ_awake_limit--;
2228 	}
2229 
2230 #if PCIE_BAR0_READY_CHECKING
2231 	{
2232 		int wait_limit = 200;
2233 		/* Synchronization point: wait the BAR0 is configured */
2234 		while (wait_limit-- &&
2235 			   !(hif_read32_mb(sc, sc->mem +
2236 					  PCIE_LOCAL_BASE_ADDRESS +
2237 					  PCIE_SOC_RDY_STATUS_ADDRESS)
2238 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
2239 			qdf_mdelay(10);
2240 		}
2241 		if (wait_limit < 0) {
2242 			/* AR6320v1 doesn't support checking of BAR0
2243 			 * configuration; it takes two sec to wait for BAR0 ready
2244 			 */
2245 			HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
2246 				    __func__);
2247 		}
2248 	}
2249 #endif
2250 
2251 #ifndef QCA_WIFI_3_0
2252 	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
2253 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2254 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2255 
2256 	if (fw_indicator & FW_IND_INITIALIZED) {
2257 		HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
2258 			   __func__);
2259 		ret = -EAGAIN;
2260 		goto end;
2261 	}
2262 #endif
2263 
2264 end:
2265 	return ret;
2266 }
2267 
2268 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2269 {
2270 	int ret = 0;
2271 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2272 	uint32_t target_type = scn->target_info.target_type;
2273 
2274 	HIF_TRACE("%s: E", __func__);
2275 
2276 	/* MSI not supported, or MSI IRQ allocation failed; use legacy IRQ */
2277 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2278 	ret = request_irq(sc->pdev->irq,
2279 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2280 			  "wlan_pci", sc);
2281 	if (ret) {
2282 		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
2283 		goto end;
2284 	}
2285 	scn->wake_irq = sc->pdev->irq;
2286 	/* Use sc->irq instead of sc->pdev->irq;
2287 	 * a platform_device pdev doesn't have an irq field
2288 	 */
2289 	sc->irq = sc->pdev->irq;
2290 	/* Use Legacy PCI Interrupts */
2291 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2292 		  PCIE_INTR_ENABLE_ADDRESS),
2293 		  HOST_GROUP0_MASK);
2294 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2295 			       PCIE_INTR_ENABLE_ADDRESS));
2296 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2297 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2298 
2299 	if ((target_type == TARGET_TYPE_IPQ4019) ||
2300 			(target_type == TARGET_TYPE_AR900B)  ||
2301 			(target_type == TARGET_TYPE_QCA9984) ||
2302 			(target_type == TARGET_TYPE_AR9888) ||
2303 			(target_type == TARGET_TYPE_QCA9888) ||
2304 			(target_type == TARGET_TYPE_AR6320V1) ||
2305 			(target_type == TARGET_TYPE_AR6320V2) ||
2306 			(target_type == TARGET_TYPE_AR6320V3)) {
2307 		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2308 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2309 	}
2310 end:
2311 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2312 			  "%s: X, ret = %d", __func__, ret);
2313 	return ret;
2314 }
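
/*
 * Note on the enable sequence above: the hif_read32_mb() of
 * PCIE_INTR_ENABLE_ADDRESS immediately after the write is a posted-write
 * flush. PCIe memory writes may be buffered, so reading the register back
 * guarantees the enable bits have reached the device before execution
 * continues. hif_target_sync() later in this file uses the same idiom.
 */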
2315 
2316 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
2317 {
2318 	int ret;
2319 	int ce_id, irq;
2320 	uint32_t msi_data_start;
2321 	uint32_t msi_data_count;
2322 	uint32_t msi_irq_start;
2323 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2324 
2325 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2326 					    &msi_data_count, &msi_data_start,
2327 					    &msi_irq_start);
2328 	if (ret)
2329 		return ret;
2330 
2331 	/* needs to match the ce_id -> irq data mapping
2332 	 * used in the srng parameter configuration
2333 	 */
2334 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2335 		unsigned int msi_data;
2336 
2337 		if (!ce_sc->tasklets[ce_id].inited)
2338 			continue;
2339 
2340 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
2341 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2342 
2343 		HIF_INFO("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
2344 			  ce_id, msi_data, irq);
2345 
2346 		free_irq(irq, &ce_sc->tasklets[ce_id]);
2347 	}
2348 
2349 	return ret;
2350 }
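
/*
 * Worked example of the ce_id -> MSI mapping above (illustrative values):
 * with msi_data_count == 3 and msi_irq_start == 1, CE ids 0..5 map to
 * msi_data 1, 2, 3, 1, 2, 3 via (ce_id % msi_data_count) + msi_irq_start.
 * Several copy engines may therefore share one MSI vector; the tasklet
 * context passed as the dev_id to request_irq()/free_irq() tells them
 * apart.
 */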
2351 
2352 static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2353 {
2354 	int i, j, irq;
2355 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2356 	struct hif_exec_context *hif_ext_group;
2357 
2358 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2359 		hif_ext_group = hif_state->hif_ext_group[i];
2360 		if (hif_ext_group->irq_requested) {
2361 			hif_ext_group->irq_requested = false;
2362 			for (j = 0; j < hif_ext_group->numirq; j++) {
2363 				irq = hif_ext_group->os_irq[j];
2364 				free_irq(irq, hif_ext_group);
2365 			}
2366 			hif_ext_group->numirq = 0;
2367 		}
2368 	}
2369 }
2370 
2371 /**
2372  * hif_nointrs(): disable IRQ
2373  *
2374  * This function stops interrupt(s)
2375  *
2376  * @scn: struct hif_softc
2377  *
2378  * Return: none
2379  */
2380 void hif_pci_nointrs(struct hif_softc *scn)
2381 {
2382 	int i, ret;
2383 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2384 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2385 
2386 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2387 
2388 	if (scn->request_irq_done == false)
2389 		return;
2390 
2391 	hif_pci_deconfigure_grp_irq(scn);
2392 
2393 	ret = hif_ce_srng_msi_free_irq(scn);
2394 	if (ret != -EINVAL) {
2395 		/* ce irqs freed in hif_ce_srng_msi_free_irq */
2396 
2397 		if (scn->wake_irq)
2398 			free_irq(scn->wake_irq, scn);
2399 		scn->wake_irq = 0;
2400 	} else if (sc->num_msi_intrs > 0) {
2401 		/* MSI interrupt(s) */
2402 		for (i = 0; i < sc->num_msi_intrs; i++)
2403 			free_irq(sc->irq + i, sc);
2404 		sc->num_msi_intrs = 0;
2405 	} else {
2406 		/* Legacy PCI line interrupt
2407 		 * Use sc->irq instead of sc->pdev->irq;
2408 		 * a platform_device pdev doesn't have an irq field
2409 		 */
2410 		free_irq(sc->irq, sc);
2411 	}
2412 	scn->request_irq_done = false;
2413 }
2414 
2415 /**
2416  * hif_pci_disable_bus(): disable the pcie bus
2417  *
2418  * This function disables the bus
2419  *
2420  * @scn: hif context
2421  *
2422  * Return: none
2423  */
2424 void hif_pci_disable_bus(struct hif_softc *scn)
2425 {
2426 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2427 	struct pci_dev *pdev;
2428 	void __iomem *mem;
2429 	struct hif_target_info *tgt_info = &scn->target_info;
2430 
2431 	/* Attach did not succeed, all resources have been
2432 	 * freed in error handler
2433 	 */
2434 	if (!sc)
2435 		return;
2436 
2437 	pdev = sc->pdev;
2438 	if (ADRASTEA_BU) {
2439 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2440 
2441 		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2442 		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
2443 			       HOST_GROUP0_MASK);
2444 	}
2445 
2446 #if defined(CPU_WARM_RESET_WAR)
2447 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2448 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2449 	 * verified for AR9888_REV1
2450 	 */
2451 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2452 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2453 		hif_pci_device_warm_reset(sc);
2454 	else
2455 		hif_pci_device_reset(sc);
2456 #else
2457 	hif_pci_device_reset(sc);
2458 #endif
2459 	mem = (void __iomem *)sc->mem;
2460 	if (mem) {
2461 		hif_dump_pipe_debug_count(scn);
2462 		if (scn->athdiag_procfs_inited) {
2463 			athdiag_procfs_remove();
2464 			scn->athdiag_procfs_inited = false;
2465 		}
2466 		sc->hif_pci_deinit(sc);
2467 		scn->mem = NULL;
2468 	}
2469 	HIF_INFO("%s: X", __func__);
2470 }
2471 
2472 #define OL_ATH_PCI_PM_CONTROL 0x44
2473 
2474 #ifdef FEATURE_RUNTIME_PM
2475 /**
2476  * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occurring
2477  * @scn: hif context
2478  * @flag: prevent linkdown if true otherwise allow
2479  *
2480  * this api should only be called as part of bus prevent linkdown
2481  */
2482 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2483 {
2484 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2485 
2486 	if (flag)
2487 		qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
2488 	else
2489 		qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
2490 }
2491 #else
2492 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2493 {
2494 }
2495 #endif
2496 
2497 #if defined(CONFIG_PCI_MSM)
2498 /**
2499  * hif_pci_prevent_linkdown(): prevent or allow linkdown
2500  * @flag: true prevents linkdown, false allows
2501  *
2502  * Calls into the platform driver to vote against taking down the
2503  * pcie link.
2504  *
2505  * Return: n/a
2506  */
2507 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2508 {
2509 	int errno;
2510 
2511 	HIF_DBG("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2512 	hif_runtime_prevent_linkdown(scn, flag);
2513 
2514 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2515 	if (errno)
2516 		HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
2517 			  __func__, errno);
2518 }
2519 #else
2520 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2521 {
2522 	HIF_DBG("wlan: %s pcie power collapse",
2523 			(flag ? "disable" : "enable"));
2524 	hif_runtime_prevent_linkdown(scn, flag);
2525 }
2526 #endif
2527 
2528 static int hif_mark_wake_irq_wakeable(struct hif_softc *scn)
2529 {
2530 	int errno;
2531 
2532 	errno = enable_irq_wake(scn->wake_irq);
2533 	if (errno) {
2534 		HIF_ERROR("%s: Failed to mark wake IRQ: %d", __func__, errno);
2535 		return errno;
2536 	}
2537 
2538 	return 0;
2539 }
2540 
2541 /**
2542  * hif_pci_bus_suspend(): prepare hif for suspend
2543  *
2544  * Enables pci bus wake irq based on link suspend voting.
2545  *
2546  * Return: 0 for success and non-zero error code for failure
2547  */
2548 int hif_pci_bus_suspend(struct hif_softc *scn)
2549 {
2550 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2551 		return 0;
2552 
2553 	/* pci link is staying up; enable wake irq */
2554 	return hif_mark_wake_irq_wakeable(scn);
2555 }
2556 
2557 /**
2558  * __hif_check_link_status() - API to check if PCIe link is active/not
2559  * @scn: HIF Context
2560  *
2561  * API reads the PCIe config space to verify if PCIe link training is
2562  * successful or not.
2563  *
2564  * Return: Success/Failure
2565  */
2566 static int __hif_check_link_status(struct hif_softc *scn)
2567 {
2568 	uint16_t dev_id = 0;
2569 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2570 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2571 
2572 	if (!sc) {
2573 		HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
2574 		return -EINVAL;
2575 	}
2576 
2577 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2578 
2579 	if (dev_id == sc->devid)
2580 		return 0;
2581 
2582 	HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2583 	       __func__, dev_id);
2584 
2585 	scn->recovery = true;
2586 
2587 	if (cbk && cbk->set_recovery_in_progress)
2588 		cbk->set_recovery_in_progress(cbk->context, true);
2589 	else
2590 		HIF_ERROR("%s: Driver Global Recovery is not set", __func__);
2591 
2592 	pld_is_pci_link_down(sc->dev);
2593 	return -EACCES;
2594 }
2595 
2596 static int hif_unmark_wake_irq_wakeable(struct hif_softc *scn)
2597 {
2598 	int errno;
2599 
2600 	errno = disable_irq_wake(scn->wake_irq);
2601 	if (errno) {
2602 		HIF_ERROR("%s: Failed to unmark wake IRQ: %d", __func__, errno);
2603 		return errno;
2604 	}
2605 
2606 	return 0;
2607 }
2608 
2609 /**
2610  * hif_pci_bus_resume(): prepare hif for resume
2611  * @scn: hif context
2612  * Disables pci bus wake irq based on link suspend voting.
2613  *
2614  * Return: 0 for success and non-zero error code for failure
2615  */
2616 int hif_pci_bus_resume(struct hif_softc *scn)
2617 {
2618 	int ret;
2619 
2620 	ret = __hif_check_link_status(scn);
2621 	if (ret)
2622 		return ret;
2623 
2624 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2625 		return 0;
2626 
2627 	/* pci link is up; disable wake irq */
2628 	return hif_unmark_wake_irq_wakeable(scn);
2629 }
2630 
2631 /**
2632  * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2633  * @scn: hif context
2634  *
2635  * Ensure that if we received the wakeup message before the irq
2636  * was disabled that the message is processed before suspending.
2637  *
2638  * Return: -EBUSY if we fail to flush the tasklets.
2639  */
2640 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2641 {
2642 	if (hif_drain_tasklets(scn) != 0)
2643 		return -EBUSY;
2644 
2645 	/* Stop the HIF Sleep Timer */
2646 	hif_cancel_deferred_target_sleep(scn);
2647 
2648 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2649 		qdf_atomic_set(&scn->link_suspended, 1);
2650 
2651 	return 0;
2652 }
2653 
2654 /**
2655  * hif_pci_bus_resume_noirq() - clear the link-suspended state on resume
2656  * @scn: hif context
2657  *
2658  * Clears the link_suspended flag once the pcie link is back up so that
2659  * normal target register access can resume.
2660  *
2661  * Return: 0 (success)
2662  */
2663 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2664 {
2665 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2666 		qdf_atomic_set(&scn->link_suspended, 0);
2667 
2668 	return 0;
2669 }
2670 
2671 #ifdef FEATURE_RUNTIME_PM
2672 /**
2673  * __hif_runtime_pm_set_state(): utility function
2674  * @scn: hif context
2675  * @state: state to set
2676  * indexes into the runtime pm state and sets it.
2677  */
2678 static void __hif_runtime_pm_set_state(struct hif_softc *scn,
2679 				enum hif_pm_runtime_state state)
2680 {
2681 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2682 
2683 	if (NULL == sc) {
2684 		HIF_ERROR("%s: HIF_CTX not initialized",
2685 		       __func__);
2686 		return;
2687 	}
2688 
2689 	qdf_atomic_set(&sc->pm_state, state);
2690 }
2691 
2692 /**
2693  * hif_runtime_pm_set_state_inprogress(): adjust runtime pm state
2694  *
2695  * Notify hif that a runtime pm operation has started
2696  */
2697 static void hif_runtime_pm_set_state_inprogress(struct hif_softc *scn)
2698 {
2699 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_INPROGRESS);
2700 }
2701 
2702 /**
2703  * hif_runtime_pm_set_state_on():  adjust runtime pm state
2704  *
2705  * Notify hif that the runtime pm state should be on
2706  */
2707 static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
2708 {
2709 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
2710 }
2711 
2712 /**
2713  * hif_runtime_pm_set_state_suspended():  adjust runtime pm state
2714  *
2715  * Notify hif that a runtime suspend attempt has been completed successfully
2716  */
2717 static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
2718 {
2719 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
2720 }
2721 
2722 /**
2723  * hif_log_runtime_suspend_success() - log a successful runtime suspend
2724  */
2725 static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
2726 {
2727 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2728 
2729 	if (sc == NULL)
2730 		return;
2731 
2732 	sc->pm_stats.suspended++;
2733 	sc->pm_stats.suspend_jiffies = jiffies;
2734 }
2735 
2736 /**
2737  * hif_log_runtime_suspend_failure() - log a failed runtime suspend
2738  *
2739  * log a failed runtime suspend by bumping the suspend_err counter;
2740  * the caller marks last busy to prevent an immediate retry
2741  */
2742 static void hif_log_runtime_suspend_failure(void *hif_ctx)
2743 {
2744 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2745 
2746 	if (sc == NULL)
2747 		return;
2748 
2749 	sc->pm_stats.suspend_err++;
2750 }
2751 
2752 /**
2753  * hif_log_runtime_resume_success() - log a successful runtime resume
2754  *
2755  * log a successful runtime resume by bumping the resumed counter;
2756  * the caller marks last busy to prevent an immediate suspend
2757  */
2758 static void hif_log_runtime_resume_success(void *hif_ctx)
2759 {
2760 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2761 
2762 	if (sc == NULL)
2763 		return;
2764 
2765 	sc->pm_stats.resumed++;
2766 }
2767 
2768 /**
2769  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
2770  *
2771  * Record the failure.
2772  * mark last busy to delay a retry.
2773  * adjust the runtime_pm state.
2774  */
2775 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
2776 {
2777 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2778 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2779 
2780 	hif_log_runtime_suspend_failure(hif_ctx);
2781 	if (hif_pci_sc != NULL)
2782 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2783 	hif_runtime_pm_set_state_on(scn);
2784 }
2785 
2786 /**
2787  * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
2788  *
2789  * Makes sure that the pci link will be taken down by the suspend operation.
2790  * If the hif layer is configured to leave the bus on, runtime suspend will
2791  * not save any power.
2792  *
2793  * Set the runtime suspend state to in progress.
2794  *
2795  * Return: -EINVAL if the bus won't go down, otherwise 0
2796  */
2797 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
2798 {
2799 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2800 
2801 	if (!hif_can_suspend_link(hif_ctx)) {
2802 		HIF_ERROR("Runtime PM not supported for link up suspend");
2803 		return -EINVAL;
2804 	}
2805 
2806 	hif_runtime_pm_set_state_inprogress(scn);
2807 	return 0;
2808 }
2809 
2810 /**
2811  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
2812  *
2813  * Record the success.
2814  * adjust the runtime_pm state
2815  */
2816 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
2817 {
2818 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2819 
2820 	hif_runtime_pm_set_state_suspended(scn);
2821 	hif_log_runtime_suspend_success(scn);
2822 }
2823 
2824 /**
2825  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
2826  *
2827  * update the runtime pm state.
2828  */
2829 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
2830 {
2831 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2832 
2833 	hif_runtime_pm_set_state_inprogress(scn);
2834 }
2835 
2836 /**
2837  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
2838  *
2839  * record the success.
2840  * adjust the runtime_pm state
2841  */
2842 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
2843 {
2844 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2845 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2846 
2847 	hif_log_runtime_resume_success(hif_ctx);
2848 	if (hif_pci_sc != NULL)
2849 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2850 	hif_runtime_pm_set_state_on(scn);
2851 }
2852 
2853 /**
2854  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
2855  *
2856  * Return: 0 for success and non-zero error code for failure
2857  */
2858 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
2859 {
2860 	int errno;
2861 
2862 	errno = hif_bus_suspend(hif_ctx);
2863 	if (errno) {
2864 		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
2865 		return errno;
2866 	}
2867 
2868 	errno = hif_apps_irqs_disable(hif_ctx);
2869 	if (errno) {
2870 		HIF_ERROR("%s: failed disable irqs: %d", __func__, errno);
2871 		goto bus_resume;
2872 	}
2873 
2874 	errno = hif_bus_suspend_noirq(hif_ctx);
2875 	if (errno) {
2876 		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
2877 		goto irqs_enable;
2878 	}
2879 
2880 	/* link should always be down; skip enable wake irq */
2881 
2882 	return 0;
2883 
2884 irqs_enable:
2885 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
2886 
2887 bus_resume:
2888 	QDF_BUG(!hif_bus_resume(hif_ctx));
2889 
2890 	return errno;
2891 }
2892 
2893 /**
2894  * hif_fastpath_resume() - resume fastpath for runtimepm
2895  *
2896  * ensure that the fastpath write index register is up to date
2897  * since runtime pm may cause ce_send_fast to skip the register
2898  * write.
2899  *
2900  * fastpath only applicable to legacy copy engine
2901  */
2902 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
2903 {
2904 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2905 	struct CE_state *ce_state;
2906 
2907 	if (!scn)
2908 		return;
2909 
2910 	if (scn->fastpath_mode_on) {
2911 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2912 			return;
2913 
2914 		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
2915 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
2916 
2917 		/* war_ce_src_ring_write_idx_set */
2918 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
2919 				ce_state->src_ring->write_index);
2920 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
2921 		Q_TARGET_ACCESS_END(scn);
2922 	}
2923 }
2924 
2925 /**
2926  * hif_runtime_resume() - do the bus resume part of a runtime resume
2927  *
2928  *  Return: 0 for success and non-zero error code for failure
2929  */
2930 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
2931 {
2932 	/* link should always be down; skip disable wake irq */
2933 
2934 	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
2935 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
2936 	QDF_BUG(!hif_bus_resume(hif_ctx));
2937 	return 0;
2938 }
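
/*
 * Note: hif_runtime_resume() deliberately mirrors hif_runtime_suspend()
 * in reverse order:
 *
 *	suspend: hif_bus_suspend -> hif_apps_irqs_disable ->
 *		 hif_bus_suspend_noirq
 *	resume:  hif_bus_resume_noirq -> hif_apps_irqs_enable ->
 *		 hif_bus_resume
 *
 * The error-unwind labels in hif_runtime_suspend() restore the same
 * invariant when a step fails part way through.
 */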
2939 #endif /* #ifdef FEATURE_RUNTIME_PM */
2940 
2941 #if CONFIG_PCIE_64BIT_MSI
2942 static void hif_free_msi_ctx(struct hif_softc *scn)
2943 {
2944 	struct hif_pci_softc *sc = scn->hif_sc;
2945 	struct hif_msi_info *info = &sc->msi_info;
2946 	struct device *dev = scn->qdf_dev->dev;
2947 
2948 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
2949 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
2950 	info->magic = NULL;
2951 	info->magic_dma = 0;
2952 }
2953 #else
2954 static void hif_free_msi_ctx(struct hif_softc *scn)
2955 {
2956 }
2957 #endif
2958 
2959 void hif_pci_disable_isr(struct hif_softc *scn)
2960 {
2961 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2962 
2963 	hif_exec_kill(&scn->osc);
2964 	hif_nointrs(scn);
2965 	hif_free_msi_ctx(scn);
2966 	/* Cancel the pending tasklet */
2967 	ce_tasklet_kill(scn);
2968 	tasklet_kill(&sc->intr_tq);
2969 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
2970 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
2971 }
2972 
2973 /* Function to reset SoC */
2974 void hif_pci_reset_soc(struct hif_softc *hif_sc)
2975 {
2976 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
2977 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
2978 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
2979 
2980 #if defined(CPU_WARM_RESET_WAR)
2981 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2982 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2983 	 * verified for AR9888_REV1
2984 	 */
2985 	if (tgt_info->target_version == AR9888_REV2_VERSION)
2986 		hif_pci_device_warm_reset(sc);
2987 	else
2988 		hif_pci_device_reset(sc);
2989 #else
2990 	hif_pci_device_reset(sc);
2991 #endif
2992 }
2993 
2994 #ifdef CONFIG_PCI_MSM
2995 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
2996 {
2997 	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
2998 	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
2999 }
3000 #else
3001 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}
3002 #endif
3003 
3004 /**
3005  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
3006  * @sc: HIF PCIe Context
3007  *
3008  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
3009  *
3010  * Return: -EACCES to report failure to the caller
3011  */
3012 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
3013 {
3014 	uint16_t val = 0;
3015 	uint32_t bar = 0;
3016 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
3017 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
3018 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
3019 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
3020 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
3021 	A_target_id_t pci_addr = scn->mem;
3022 
3023 	HIF_ERROR("%s: keep_awake_count = %d",
3024 			__func__, hif_state->keep_awake_count);
3025 
3026 	pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
3027 
3028 	HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
3029 
3030 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
3031 
3032 	HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
3033 
3034 	pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
3035 
3036 	HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
3037 
3038 	pci_read_config_word(sc->pdev, PCI_STATUS, &val);
3039 
3040 	HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
3041 
3042 	pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
3043 
3044 	HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
3045 
3046 	HIF_ERROR("%s: SOC_WAKE_ADDR 0x%08x", __func__,
3047 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3048 						PCIE_SOC_WAKE_ADDRESS));
3049 
3050 	HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
3051 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3052 							RTC_STATE_ADDRESS));
3053 
3054 	HIF_ERROR("%s:error, wakeup target", __func__);
3055 	hif_msm_pcie_debug_info(sc);
3056 
3057 	if (!cfg->enable_self_recovery)
3058 		QDF_BUG(0);
3059 
3060 	scn->recovery = true;
3061 
3062 	if (cbk->set_recovery_in_progress)
3063 		cbk->set_recovery_in_progress(cbk->context, true);
3064 
3065 	pld_is_pci_link_down(sc->dev);
3066 	return -EACCES;
3067 }
3068 
3069 /*
3070  * For now, we use simple on-demand sleep/wake.
3071  * Some possible improvements:
3072  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
3073  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
3074  *   Careful, though, these functions may be used by
3075  *  interrupt handlers ("atomic")
3076  *  -Don't use host_reg_table for this code; instead use values directly
3077  *  -Use a separate timer to track activity and allow Target to sleep only
3078  *   if it hasn't done anything for a while; may even want to delay some
3079  *   processing for a short while in order to "batch" (e.g.) transmit
3080  *   requests with completion processing into "windows of up time".  Costs
3081  *   some performance, but improves power utilization.
3082  *  -On some platforms, it might be possible to eliminate explicit
3083  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
3084  *   recover from the failure by forcing the Target awake.
3085  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
3086  *   overhead in some cases. Perhaps this makes more sense when
3087  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
3088  *   disabled.
3089  *  -It is possible to compile this code out and simply force the Target
3090  *   to remain awake.  That would yield optimal performance at the cost of
3091  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
3092  *
3093  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
3094  */
3095 /**
3096  * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
3097  * @scn: hif_softc pointer.
3098  * @sleep_ok: allow the target to go to sleep when true
3099  * @wait_for_it: when waking, spin until the target is verified awake
3100  *
3101  * Adjust the target sleep state, tracking nested wakes in keep_awake_count.
3102  *
3103  * Return: 0 on success, -EACCES on failure
3104  */
3105 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
3106 			      bool sleep_ok, bool wait_for_it)
3107 {
3108 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3109 	A_target_id_t pci_addr = scn->mem;
3110 	static int max_delay;
3111 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3112 	static int debug;

3113 	if (scn->recovery)
3114 		return -EACCES;
3115 
3116 	if (qdf_atomic_read(&scn->link_suspended)) {
3117 		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
3118 		debug = true;
3119 		QDF_ASSERT(0);
3120 		return -EACCES;
3121 	}
3122 
3123 	if (debug) {
3124 		wait_for_it = true;
3125 		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
3126 				__func__);
3127 		QDF_ASSERT(0);
3128 	}
3129 
3130 	if (sleep_ok) {
3131 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3132 		hif_state->keep_awake_count--;
3133 		if (hif_state->keep_awake_count == 0) {
3134 			/* Allow sleep */
3135 			hif_state->verified_awake = false;
3136 			hif_state->sleep_ticks = qdf_system_ticks();
3137 		}
3138 		if (hif_state->fake_sleep == false) {
3139 			/* Set the Fake Sleep */
3140 			hif_state->fake_sleep = true;
3141 
3142 			/* Start the Sleep Timer */
3143 			qdf_timer_stop(&hif_state->sleep_timer);
3144 			qdf_timer_start(&hif_state->sleep_timer,
3145 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
3146 		}
3147 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3148 	} else {
3149 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3150 
3151 		if (hif_state->fake_sleep) {
3152 			hif_state->verified_awake = true;
3153 		} else {
3154 			if (hif_state->keep_awake_count == 0) {
3155 				/* Force AWAKE */
3156 				hif_write32_mb(sc, pci_addr +
3157 					      PCIE_LOCAL_BASE_ADDRESS +
3158 					      PCIE_SOC_WAKE_ADDRESS,
3159 					      PCIE_SOC_WAKE_V_MASK);
3160 			}
3161 		}
3162 		hif_state->keep_awake_count++;
3163 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3164 
3165 		if (wait_for_it && !hif_state->verified_awake) {
3166 #define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
3167 			int tot_delay = 0;
3168 			int curr_delay = 5;
3169 
3170 			for (;;) {
3171 				if (hif_targ_is_awake(scn, pci_addr)) {
3172 					hif_state->verified_awake = true;
3173 					break;
3174 				}
3175 				if (!hif_pci_targ_is_present(scn, pci_addr))
3176 					break;
3177 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
3178 					return hif_log_soc_wakeup_timeout(sc);
3179 
3180 				OS_DELAY(curr_delay);
3181 				tot_delay += curr_delay;
3182 
3183 				if (curr_delay < 50)
3184 					curr_delay += 5;
3185 			}
3186 
3187 			/*
3188 			 * NB: If Target has to come out of Deep Sleep,
3189 			 * this may take a few msecs. Typically, though
3190 			 * this delay should be <30us.
3191 			 */
3192 			if (tot_delay > max_delay)
3193 				max_delay = tot_delay;
3194 		}
3195 	}
3196 
3197 	if (debug && hif_state->verified_awake) {
3198 		debug = 0;
3199 		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
3200 			__func__,
3201 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3202 				PCIE_INTR_ENABLE_ADDRESS),
3203 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3204 				PCIE_INTR_CAUSE_ADDRESS),
3205 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3206 				CPU_INTR_ADDRESS),
3207 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3208 				PCIE_INTR_CLR_ADDRESS),
3209 			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
3210 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
3211 	}
3212 
3213 	return 0;
3214 }
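
/*
 * Illustrative caller pattern (compare hif_fastpath_resume() above):
 * every window of target register access is bracketed so that
 * keep_awake_count stays balanced:
 *
 *	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)	(wake: sleep_ok == false)
 *		return;
 *	... target register reads and writes ...
 *	Q_TARGET_ACCESS_END(scn);		(allow sleep: sleep_ok == true)
 */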
3215 
3216 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
3217 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
3218 {
3219 	uint32_t value;
3220 	void *addr;
3221 
3222 	addr = scn->mem + offset;
3223 	value = hif_read32_mb(scn, addr);
3224 
3225 	{
3226 		unsigned long irq_flags;
3227 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3228 
3229 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3230 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3231 		pcie_access_log[idx].is_write = false;
3232 		pcie_access_log[idx].addr = addr;
3233 		pcie_access_log[idx].value = value;
3234 		pcie_access_log_seqnum++;
3235 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3236 	}
3237 
3238 	return value;
3239 }
3240 
3241 void
3242 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
3243 {
3244 	void *addr;
3245 
3246 	addr = scn->mem + (offset);
3247 	hif_write32_mb(scn, addr, value);
3248 
3249 	{
3250 		unsigned long irq_flags;
3251 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3252 
3253 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3254 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3255 		pcie_access_log[idx].is_write = true;
3256 		pcie_access_log[idx].addr = addr;
3257 		pcie_access_log[idx].value = value;
3258 		pcie_access_log_seqnum++;
3259 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3260 	}
3261 }
3262 
3263 /**
3264  * hif_target_dump_access_log() - dump access log
3265  *
3266  * dump access log
3267  *
3268  * Return: n/a
3269  */
3270 void hif_target_dump_access_log(void)
3271 {
3272 	int idx, len, start_idx, cur_idx;
3273 	unsigned long irq_flags;
3274 
3275 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3276 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
3277 		len = PCIE_ACCESS_LOG_NUM;
3278 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3279 	} else {
3280 		len = pcie_access_log_seqnum;
3281 		start_idx = 0;
3282 	}
3283 
3284 	for (idx = 0; idx < len; idx++) {
3285 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
3286 		HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
3287 		       __func__, idx,
3288 		       pcie_access_log[cur_idx].seqnum,
3289 		       pcie_access_log[cur_idx].is_write,
3290 		       pcie_access_log[cur_idx].addr,
3291 		       pcie_access_log[cur_idx].value);
3292 	}
3293 
3294 	pcie_access_log_seqnum = 0;
3295 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3296 }
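
/*
 * Worked example of the ring-buffer dump above (illustrative numbers):
 * with PCIE_ACCESS_LOG_NUM == 8 and pcie_access_log_seqnum == 11, only
 * the most recent 8 accesses survive, so len = 8 and
 * start_idx = 11 % 8 = 3; entries are printed from slot 3, wrapping
 * around to slot 2, i.e. oldest to newest.
 */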
3297 #endif
3298 
3299 #ifndef HIF_AHB
3300 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
3301 {
3302 	QDF_BUG(0);
3303 	return -EINVAL;
3304 }
3305 
3306 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
3307 {
3308 	QDF_BUG(0);
3309 	return -EINVAL;
3310 }
3311 #endif
3312 
3313 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
3314 {
3315 	struct ce_tasklet_entry *tasklet_entry = context;

3316 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
3317 }

3318 extern const char *ce_name[];
3319 
3320 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3321 {
3322 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3323 
3324 	return pci_scn->ce_msi_irq_num[ce_id];
3325 }
3326 
3327 /* hif_ce_srng_msi_irq_disable() - disable the irq for msi
3328  * @hif_sc: hif context
3329  * @ce_id: which ce to disable copy complete interrupts for
3330  *
3331  * since MSI interrupts are not level based, the system can function
3332  * without disabling these interrupts.  Interrupt mitigation can be
3333  * added here for better system performance.
3334  */
3335 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3336 {
3337 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3338 }
3339 
3340 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3341 {
3342 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3343 }
3344 
3345 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3346 {
3347 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3348 }
3349 
3350 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3351 {
3352 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3353 }
3354 
3355 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
3356 {
3357 	int ret;
3358 	int ce_id, irq;
3359 	uint32_t msi_data_start;
3360 	uint32_t msi_data_count;
3361 	uint32_t msi_irq_start;
3362 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3363 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3364 
3365 	/* do wake irq assignment */
3366 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
3367 					  &msi_data_count, &msi_data_start,
3368 					  &msi_irq_start);
3369 	if (ret)
3370 		return ret;
3371 
3372 	scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_irq_start);
3373 	ret = request_irq(scn->wake_irq, hif_wake_interrupt_handler, 0,
3374 			  "wlan_wake_irq", scn);
3375 	if (ret)
3376 		return ret;
3377 
3378 	/* do ce irq assignments */
3379 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3380 					    &msi_data_count, &msi_data_start,
3381 					    &msi_irq_start);
3382 	if (ret)
3383 		goto free_wake_irq;
3384 
3385 	if (ce_srng_based(scn)) {
3386 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
3387 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
3388 	} else {
3389 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
3390 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
3391 	}
3392 
3393 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
3394 
3395 	/* needs to match the ce_id -> irq data mapping
3396 	 * used in the srng parameter configuration
3397 	 */
3398 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3399 		unsigned int msi_data = (ce_id % msi_data_count) +
3400 			msi_irq_start;
3401 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3402 		HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)",
3403 			 __func__, ce_id, msi_data, irq,
3404 			 &ce_sc->tasklets[ce_id]);
3405 
3406 		/* implies the ce is also initialized */
3407 		if (!ce_sc->tasklets[ce_id].inited)
3408 			continue;
3409 
3410 		pci_sc->ce_msi_irq_num[ce_id] = irq;
3411 		ret = request_irq(irq, hif_ce_interrupt_handler,
3412 				  IRQF_SHARED,
3413 				  ce_name[ce_id],
3414 				  &ce_sc->tasklets[ce_id]);
3415 		if (ret)
3416 			goto free_irq;
3417 	}
3418 
3419 	return ret;
3420 
3421 free_irq:
3422 	/* the request_irq for the last ce_id failed so skip it. */
3423 	while (ce_id > 0 && ce_id < scn->ce_count) {
3424 		unsigned int msi_data;
3425 
3426 		ce_id--;
3427 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
3428 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3429 		free_irq(irq, &ce_sc->tasklets[ce_id]);
3430 	}
3431 
3432 free_wake_irq:
3433 	free_irq(scn->wake_irq, scn);
3434 	scn->wake_irq = 0;
3435 
3436 	return ret;
3437 }
3438 
3439 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3440 {
3441 	int i;
3442 
3443 	for (i = 0; i < hif_ext_group->numirq; i++)
3444 		disable_irq_nosync(hif_ext_group->os_irq[i]);
3445 }
3446 
3447 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3448 {
3449 	int i;
3450 
3451 	for (i = 0; i < hif_ext_group->numirq; i++)
3452 		enable_irq(hif_ext_group->os_irq[i]);
3453 }
3454 
3456 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3457 			      struct hif_exec_context *hif_ext_group)
3458 {
3459 	int ret = 0;
3460 	int irq = 0;
3461 	int j;
3462 
3463 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3464 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3465 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3466 
3467 	for (j = 0; j < hif_ext_group->numirq; j++) {
3468 		irq = hif_ext_group->irq[j];
3469 
3470 		HIF_DBG("%s: request_irq = %d for grp %d",
3471 			  __func__, irq, hif_ext_group->grp_id);
3472 		ret = request_irq(irq,
3473 				  hif_ext_group_interrupt_handler,
3474 				  IRQF_SHARED, "wlan_EXT_GRP",
3475 				  hif_ext_group);
3476 		if (ret) {
3477 			HIF_ERROR("%s: request_irq failed ret = %d",
3478 				  __func__, ret);
3479 			return -EFAULT;
3480 		}
3481 		hif_ext_group->os_irq[j] = irq;
3482 	}
3483 	hif_ext_group->irq_requested = true;
3484 	return 0;
3485 }
3486 
3487 /**
3488  * hif_configure_irq() - configure interrupt
3489  *
3490  * This function configures interrupt(s)
3491  *
3492  * @sc: PCIe control struct
3493  * @hif_hdl: struct HIF_CE_state
3494  *
3495  * Return: 0 - for success
3496  */
3497 int hif_configure_irq(struct hif_softc *scn)
3498 {
3499 	int ret = 0;
3500 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3501 
3502 	HIF_TRACE("%s: E", __func__);
3503 
3504 	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
3505 		scn->request_irq_done = false;
3506 		return 0;
3507 	}
3508 
3509 	hif_init_reschedule_tasklet_work(sc);
3510 
3511 	ret = hif_ce_msi_configure_irq(scn);
3512 	if (ret == 0) {
3513 	if (ret == 0)
3514 		goto end;
3516 	switch (scn->target_info.target_type) {
3517 	case TARGET_TYPE_IPQ4019:
3518 		ret = hif_ahb_configure_legacy_irq(sc);
3519 		break;
3520 	case TARGET_TYPE_QCA8074:
3521 	case TARGET_TYPE_QCA8074V2:
3522 		ret = hif_ahb_configure_irq(sc);
3523 		break;
3524 	default:
3525 		ret = hif_pci_configure_legacy_irq(sc);
3526 		break;
3527 	}
3528 	if (ret < 0) {
3529 		HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
3530 			__func__, ret);
3531 		return ret;
3532 	}
3533 end:
3534 	scn->request_irq_done = true;
3535 	return 0;
3536 }
3537 
3538 /**
3539  * hif_trigger_timer_irq() : Triggers interrupt on LF_Timer 0
3540  * @scn: hif control structure
3541  *
3542  * Sets the IRQ bit in the LF Timer Status Address to wake Peregrine/
3543  * Swift when FW is stuck in the pcie_address_config polling loop
3544  *
3545  * Return: none
3546  */
3547 static void hif_trigger_timer_irq(struct hif_softc *scn)
3548 {
3549 	int tmp;
3550 	/* Trigger IRQ on Peregrine/Swift by setting
3551 	 * IRQ Bit of LF_TIMER 0
3552 	 */
3553 	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3554 						SOC_LF_TIMER_STATUS0_ADDRESS));
3555 	/* Set Raw IRQ Bit */
3556 	tmp |= 1;
3557 	/* SOC_LF_TIMER_STATUS0 */
3558 	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3559 		       SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
3560 }
3561 
3562 /**
3563  * hif_target_sync() : ensure the target is ready
3564  * @scn: hif control structure
3565  *
3566  * Informs fw that we plan to use legacy interrupts so that
3567  * it can begin booting. Ensures that the fw finishes booting
3568  * before continuing. Should be called before trying to write
3569  * to the target's other registers for the first time.
3570  *
3571  * Return: none
3572  */
3573 static void hif_target_sync(struct hif_softc *scn)
3574 {
3575 	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3576 			    PCIE_INTR_ENABLE_ADDRESS),
3577 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3578 	/* read to flush pcie write */
3579 	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3580 			PCIE_INTR_ENABLE_ADDRESS));
3581 
3582 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3583 			PCIE_SOC_WAKE_ADDRESS,
3584 			PCIE_SOC_WAKE_V_MASK);
3585 	while (!hif_targ_is_awake(scn, scn->mem))
3586 		;
3587 
3588 	if (HAS_FW_INDICATOR) {
3589 		int wait_limit = 500;
3590 		int fw_ind = 0;
3591 		int retry_count = 0;
3592 		uint32_t target_type = scn->target_info.target_type;
3593 fw_retry:
3594 		HIF_TRACE("%s: Loop checking FW signal", __func__);
3595 		while (1) {
3596 			fw_ind = hif_read32_mb(scn, scn->mem +
3597 					FW_INDICATOR_ADDRESS);
3598 			if (fw_ind & FW_IND_INITIALIZED)
3599 				break;
3600 			if (wait_limit-- < 0)
3601 				break;
3602 			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3603 			    PCIE_INTR_ENABLE_ADDRESS),
3604 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3605 			    /* read to flush pcie write */
3606 			(void)hif_read32_mb(scn, scn->mem +
3607 			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
3608 
3609 			qdf_mdelay(10);
3610 		}
3611 		if (wait_limit < 0) {
3612 			if (target_type == TARGET_TYPE_AR9888 &&
3613 			    retry_count++ < 2) {
3614 				hif_trigger_timer_irq(scn);
3615 				wait_limit = 500;
3616 				goto fw_retry;
3617 			}
3618 			HIF_TRACE("%s: FW signal timed out",
3619 					__func__);
3620 			qdf_assert_always(0);
3621 		} else {
3622 			HIF_TRACE("%s: Got FW signal, retries = %x",
3623 					__func__, 500-wait_limit);
3624 		}
3625 	}
3626 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3627 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3628 }
3629 
3630 static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
3631 				     struct device *dev)
3632 {
3633 	struct pld_soc_info info;
3634 
3635 	pld_get_soc_info(dev, &info);
3636 	sc->mem = info.v_addr;
3637 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3638 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3639 }
3640 
3641 static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
3642 				       struct device *dev)
3643 {}
3644 
3645 static bool hif_is_pld_based_target(int device_id)
3646 {
3647 	switch (device_id) {
3648 	case QCA6290_DEVICE_ID:
3649 	case QCA6290_EMULATION_DEVICE_ID:
3650 #ifdef QCA_WIFI_QCA6390
3651 	case QCA6390_DEVICE_ID:
3652 #endif
3653 	case AR6320_DEVICE_ID:
3654 	case QCN7605_DEVICE_ID:
3655 		return true;
3656 	}
3657 	return false;
3658 }
3659 
3660 static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
3661 					   int device_id)
3662 {
3663 	if (hif_is_pld_based_target(device_id)) {
3664 		sc->hif_enable_pci = hif_enable_pci_pld;
3665 		sc->hif_pci_deinit = hif_pci_deinit_pld;
3666 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
3667 	} else {
3668 		sc->hif_enable_pci = hif_enable_pci_nopld;
3669 		sc->hif_pci_deinit = hif_pci_deinit_nopld;
3670 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
3671 	}
3672 }
3673 
3674 #ifdef HIF_REG_WINDOW_SUPPORT
3675 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3676 					       u32 target_type)
3677 {
3678 	switch (target_type) {
3679 	case TARGET_TYPE_QCN7605:
3680 		sc->use_register_windowing = true;
3681 		qdf_spinlock_create(&sc->register_access_lock);
3682 		sc->register_window = 0;
3683 		break;
3684 	default:
3685 		sc->use_register_windowing = false;
3686 	}
3687 }
3688 #else
3689 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3690 					       u32 target_type)
3691 {
3692 	sc->use_register_windowing = false;
3693 }
3694 #endif
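
/*
 * Sketch (hypothetical; the real windowed accessors live in the
 * register-access layer, not in this file): with use_register_windowing
 * set, an access outside the current window must first retarget
 * sc->register_window under register_access_lock, roughly:
 *
 *	qdf_spin_lock_irqsave(&sc->register_access_lock);
 *	if (new_window != sc->register_window) {
 *		hif_write32_mb(sc, sc->mem + WINDOW_REG_ADDR, new_window);
 *		sc->register_window = new_window;
 *	}
 *	val = hif_read32_mb(sc, sc->mem + windowed_offset);
 *	qdf_spin_unlock_irqrestore(&sc->register_access_lock);
 *
 * WINDOW_REG_ADDR, new_window and windowed_offset are illustrative
 * names only.
 */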
3695 
3696 /**
3697  * hif_enable_bus(): enable bus
3698  *
3699  * This function enables the bus
3700  *
3701  * @ol_sc: soft_sc struct
3702  * @dev: device pointer
3703  * @bdev: bus dev pointer
3704  * bid: bus id pointer
3705  * type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
3706  * Return: QDF_STATUS
3707  */
3708 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3709 			  struct device *dev, void *bdev,
3710 			  const struct hif_bus_id *bid,
3711 			  enum hif_enable_type type)
3712 {
3713 	int ret = 0;
3714 	uint32_t hif_type, target_type;
3715 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3716 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3717 	uint16_t revision_id;
3718 	int probe_again = 0;
3719 	struct pci_dev *pdev = bdev;
3720 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3721 	struct hif_target_info *tgt_info;
3722 
3723 	if (!ol_sc) {
3724 		HIF_ERROR("%s: hif_ctx is NULL", __func__);
		return QDF_STATUS_E_INVAL;
3726 	}
3727 
3728 	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
3729 		  __func__, hif_get_conparam(ol_sc), id->device);
3730 
3731 	sc->pdev = pdev;
3732 	sc->dev = &pdev->dev;
3733 	sc->devid = id->device;
3734 	sc->cacheline_sz = dma_get_cache_alignment();
3735 	tgt_info = hif_get_target_info_handle(hif_hdl);
3736 	hif_pci_init_deinit_ops_attach(sc, id->device);
3737 	sc->hif_pci_get_soc_info(sc, dev);
3738 again:
3739 	ret = sc->hif_enable_pci(sc, pdev, id);
3740 	if (ret < 0) {
3741 		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
3742 		       __func__, ret);
3743 		goto err_enable_pci;
3744 	}
3745 	HIF_TRACE("%s: hif_enable_pci done", __func__);
3746 
3747 	/* Temporary FIX: disable ASPM on peregrine.
3748 	 * Will be removed after the OTP is programmed
3749 	 */
3750 	hif_disable_power_gating(hif_hdl);
3751 
3752 	device_disable_async_suspend(&pdev->dev);
	/* 16-bit config read at PCI_REVISION_ID (0x08): revision id in
	 * the low byte, programming interface in the high byte
	 */
	pci_read_config_word(pdev, 0x08, &revision_id);
3754 
3755 	ret = hif_get_device_type(id->device, revision_id,
3756 						&hif_type, &target_type);
3757 	if (ret < 0) {
3758 		HIF_ERROR("%s: invalid device id/revision_id", __func__);
3759 		goto err_tgtstate;
3760 	}
3761 	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
3762 		  __func__, hif_type, target_type);
3763 
3764 	hif_register_tbl_attach(ol_sc, hif_type);
3765 	hif_target_register_tbl_attach(ol_sc, target_type);
3766 
3767 	hif_pci_init_reg_windowing_support(sc, target_type);
3768 
3769 	tgt_info->target_type = target_type;
3770 
3771 	if (ce_srng_based(ol_sc)) {
3772 		HIF_TRACE("%s:Skip tgt_wake up for srng devices\n", __func__);
3773 	} else {
3774 		ret = hif_pci_probe_tgt_wakeup(sc);
3775 		if (ret < 0) {
3776 			HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d",
3777 					__func__, ret);
3778 			if (ret == -EAGAIN)
3779 				probe_again++;
3780 			goto err_tgtstate;
3781 		}
3782 		HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
3783 	}
3784 
3785 	if (!ol_sc->mem_pa) {
3786 		HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
3787 		ret = -EIO;
3788 		goto err_tgtstate;
3789 	}
3790 
3791 	if (!ce_srng_based(ol_sc)) {
3792 		hif_target_sync(ol_sc);
3793 
3794 		if (ADRASTEA_BU)
3795 			hif_vote_link_up(hif_hdl);
3796 	}
3797 
	return QDF_STATUS_SUCCESS;
3799 
3800 err_tgtstate:
3801 	hif_disable_pci(sc);
3802 	sc->pci_enabled = false;
3803 	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
3804 	return QDF_STATUS_E_ABORTED;
3805 
3806 err_enable_pci:
3807 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3808 		int delay_time;
3809 
3810 		HIF_INFO("%s: pci reprobe", __func__);
3811 		/* 10, 40, 90, 100, 100, ... */
3812 		delay_time = max(100, 10 * (probe_again * probe_again));
3813 		qdf_mdelay(delay_time);
3814 		goto again;
3815 	}
3816 	return ret;
3817 }
3818 
3819 /**
3820  * hif_pci_irq_enable() - ce_irq_enable
3821  * @scn: hif_softc
3822  * @ce_id: ce_id
3823  *
3824  * Return: void
3825  */
3826 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3827 {
3828 	uint32_t tmp = 1 << ce_id;
3829 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3830 
3831 	qdf_spin_lock_irqsave(&sc->irq_lock);
3832 	scn->ce_irq_summary &= ~tmp;
3833 	if (scn->ce_irq_summary == 0) {
3834 		/* Enable Legacy PCI line interrupts */
3835 		if (LEGACY_INTERRUPTS(sc) &&
3836 			(scn->target_status != TARGET_STATUS_RESET) &&
3837 			(!qdf_atomic_read(&scn->link_suspended))) {
3838 
3839 			hif_write32_mb(scn, scn->mem +
3840 				(SOC_CORE_BASE_ADDRESS |
3841 				PCIE_INTR_ENABLE_ADDRESS),
3842 				HOST_GROUP0_MASK);
3843 
3844 			hif_read32_mb(scn, scn->mem +
3845 					(SOC_CORE_BASE_ADDRESS |
3846 					PCIE_INTR_ENABLE_ADDRESS));
3847 		}
3848 	}
3849 	if (scn->hif_init_done == true)
3850 		Q_TARGET_ACCESS_END(scn);
3851 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
3852 
3853 	/* check for missed firmware crash */
3854 	hif_fw_interrupt_handler(0, scn);
3855 }
3856 
3857 /**
3858  * hif_pci_irq_disable() - ce_irq_disable
3859  * @scn: hif_softc
3860  * @ce_id: ce_id
3861  *
3862  * only applicable to legacy copy engine...
3863  *
3864  * Return: void
3865  */
3866 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3867 {
3868 	/* For Rome only need to wake up target */
3869 	/* target access is maintained until interrupts are re-enabled */
3870 	Q_TARGET_ACCESS_BEGIN(scn);
3871 }
3872 
3873 #ifdef FEATURE_RUNTIME_PM
3874 
3875 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
3876 {
3877 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3878 
	if (!sc)
3880 		return;
3881 
3882 	sc->pm_stats.runtime_get++;
3883 	pm_runtime_get_noresume(sc->dev);
3884 }
3885 
3886 /**
3887  * hif_pm_runtime_get() - do a get opperation on the device
3888  *
3889  * A get opperation will prevent a runtime suspend until a
3890  * corresponding put is done.  This api should be used when sending
3891  * data.
3892  *
3893  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
3894  * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
3895  *
3896  * return: success if the bus is up and a get has been issued
3897  *   otherwise an error code.
3898  */
3899 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
3900 {
3901 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3902 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3903 	int ret;
3904 	int pm_state;
3905 
	if (!scn) {
3907 		HIF_ERROR("%s: Could not do runtime get, scn is null",
3908 				__func__);
3909 		return -EFAULT;
3910 	}
3911 
3912 	pm_state = qdf_atomic_read(&sc->pm_state);
3913 
3914 	if (pm_state  == HIF_PM_RUNTIME_STATE_ON ||
3915 			pm_state == HIF_PM_RUNTIME_STATE_NONE) {
3916 		sc->pm_stats.runtime_get++;
3917 		ret = __hif_pm_runtime_get(sc->dev);
3918 
3919 		/* Get can return 1 if the device is already active, just return
3920 		 * success in that case
3921 		 */
3922 		if (ret > 0)
3923 			ret = 0;
3924 
3925 		if (ret)
3926 			hif_pm_runtime_put(hif_ctx);
3927 
3928 		if (ret && ret != -EINPROGRESS) {
3929 			sc->pm_stats.runtime_get_err++;
3930 			HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d",
3931 				__func__, qdf_atomic_read(&sc->pm_state), ret);
3932 		}
3933 
3934 		return ret;
3935 	}
3936 
3937 	sc->pm_stats.request_resume++;
3938 	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
	hif_pm_request_resume(sc->dev);
3940 
3941 	return -EAGAIN;
3942 }
3943 
3944 /**
3945  * hif_pm_runtime_put() - do a put opperation on the device
3946  *
3947  * A put opperation will allow a runtime suspend after a corresponding
3948  * get was done.  This api should be used when sending data.
3949  *
3950  * This api will return a failure if runtime pm is stopped
3951  * This api will return failure if it would decrement the usage count below 0.
3952  *
3953  * return: QDF_STATUS_SUCCESS if the put is performed
3954  */
3955 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
3956 {
3957 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3958 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3959 	int pm_state, usage_count;
3960 	char *error = NULL;
3961 
	if (!scn) {
3963 		HIF_ERROR("%s: Could not do runtime put, scn is null",
3964 				__func__);
3965 		return -EFAULT;
3966 	}
3967 	usage_count = atomic_read(&sc->dev->power.usage_count);
3968 
3969 	if (usage_count == 1) {
3970 		pm_state = qdf_atomic_read(&sc->pm_state);
3971 
3972 		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
3973 			error = "Ignoring unexpected put when runtime pm is disabled";
3974 
3975 	} else if (usage_count == 0) {
3976 		error = "PUT Without a Get Operation";
3977 	}
3978 
3979 	if (error) {
3980 		hif_pci_runtime_pm_warn(sc, error);
3981 		return -EINVAL;
3982 	}
3983 
3984 	sc->pm_stats.runtime_put++;
3985 
3986 	hif_pm_runtime_mark_last_busy(sc->dev);
3987 	hif_pm_runtime_put_auto(sc->dev);
3988 
3989 	return 0;
3990 }
3991 
3992 
3993 /**
3994  * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
3995  *                                      reason
3996  * @hif_sc: pci context
3997  * @lock: runtime_pm lock being acquired
3998  *
 * Return: 0 if successful.
4000  */
static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc *hif_sc,
					    struct hif_pm_runtime_lock *lock)
4003 {
4004 	int ret = 0;
4005 
4006 	/*
4007 	 * We shouldn't be setting context->timeout to zero here when
4008 	 * context is active as we will have a case where Timeout API's
4009 	 * for the same context called back to back.
4010 	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
4011 	 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
4012 	 * API to ensure the timeout version is no more active and
4013 	 * list entry of this context will be deleted during allow suspend.
4014 	 */
4015 	if (lock->active)
4016 		return 0;
4017 
4018 	ret = __hif_pm_runtime_get(hif_sc->dev);
4019 
4020 	/**
4021 	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
4022 	 * RPM_SUSPENDING. Any other negative value is an error.
4023 	 * We shouldn't be do runtime_put here as in later point allow
4024 	 * suspend gets called with the the context and there the usage count
4025 	 * is decremented, so suspend will be prevented.
4026 	 */
4027 
4028 	if (ret < 0 && ret != -EINPROGRESS) {
4029 		hif_sc->pm_stats.runtime_get_err++;
4030 		hif_pci_runtime_pm_warn(hif_sc,
4031 				"Prevent Suspend Runtime PM Error");
4032 	}
4033 
4034 	hif_sc->prevent_suspend_cnt++;
4035 
4036 	lock->active = true;
4037 
4038 	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);
4039 
4040 	hif_sc->pm_stats.prevent_suspend++;
4041 
4042 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4043 		hif_pm_runtime_state_to_string(
4044 			qdf_atomic_read(&hif_sc->pm_state)),
4045 					ret);
4046 
4047 	return ret;
4048 }
4049 
4050 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
4051 		struct hif_pm_runtime_lock *lock)
4052 {
4053 	int ret = 0;
4054 	int usage_count;
4055 
4056 	if (hif_sc->prevent_suspend_cnt == 0)
4057 		return ret;
4058 
4059 	if (!lock->active)
4060 		return ret;
4061 
4062 	usage_count = atomic_read(&hif_sc->dev->power.usage_count);
4063 
4064 	/*
4065 	 * During Driver unload, platform driver increments the usage
4066 	 * count to prevent any runtime suspend getting called.
4067 	 * So during driver load in HIF_PM_RUNTIME_STATE_NONE state the
4068 	 * usage_count should be one. Ideally this shouldn't happen as
4069 	 * context->active should be active for allow suspend to happen
4070 	 * Handling this case here to prevent any failures.
4071 	 */
4072 	if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
4073 				&& usage_count == 1) || usage_count == 0) {
4074 		hif_pci_runtime_pm_warn(hif_sc,
4075 				"Allow without a prevent suspend");
4076 		return -EINVAL;
4077 	}
4078 
4079 	list_del(&lock->list);
4080 
4081 	hif_sc->prevent_suspend_cnt--;
4082 
4083 	lock->active = false;
4084 	lock->timeout = 0;
4085 
4086 	hif_pm_runtime_mark_last_busy(hif_sc->dev);
4087 	ret = hif_pm_runtime_put_auto(hif_sc->dev);
4088 
4089 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4090 		hif_pm_runtime_state_to_string(
4091 			qdf_atomic_read(&hif_sc->pm_state)),
4092 					ret);
4093 
4094 	hif_sc->pm_stats.allow_suspend++;
4095 	return ret;
4096 }
4097 
4098 /**
4099  * hif_pm_runtime_lock_timeout_fn() - callback the runtime lock timeout
4100  * @data: calback data that is the pci context
4101  *
4102  * if runtime locks are acquired with a timeout, this function releases
4103  * the locks when the last runtime lock expires.
4104  *
4105  * dummy implementation until lock acquisition is implemented.
4106  */
4107 static void hif_pm_runtime_lock_timeout_fn(unsigned long data)
4108 {
4109 	struct hif_pci_softc *hif_sc = (struct hif_pci_softc *)data;
4110 	unsigned long timer_expires;
4111 	struct hif_pm_runtime_lock *context, *temp;
4112 
4113 	spin_lock_bh(&hif_sc->runtime_lock);
4114 
4115 	timer_expires = hif_sc->runtime_timer_expires;
4116 
4117 	/* Make sure we are not called too early, this should take care of
4118 	 * following case
4119 	 *
4120 	 * CPU0                         CPU1 (timeout function)
4121 	 * ----                         ----------------------
4122 	 * spin_lock_irq
4123 	 *                              timeout function called
4124 	 *
4125 	 * mod_timer()
4126 	 *
4127 	 * spin_unlock_irq
4128 	 *                              spin_lock_irq
4129 	 */
4130 	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
4131 		hif_sc->runtime_timer_expires = 0;
4132 		list_for_each_entry_safe(context, temp,
4133 				&hif_sc->prevent_suspend_list, list) {
4134 			if (context->timeout) {
4135 				__hif_pm_runtime_allow_suspend(hif_sc, context);
4136 				hif_sc->pm_stats.allow_suspend_timeout++;
4137 			}
4138 		}
4139 	}
4140 
4141 	spin_unlock_bh(&hif_sc->runtime_lock);
4142 }
4143 
4144 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
4145 		struct hif_pm_runtime_lock *data)
4146 {
4147 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4148 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4149 	struct hif_pm_runtime_lock *context = data;
4150 
4151 	if (!sc->hif_config.enable_runtime_pm)
4152 		return 0;
4153 
4154 	if (!context)
4155 		return -EINVAL;
4156 
	WARN_ON(in_irq());
4159 
4160 	spin_lock_bh(&hif_sc->runtime_lock);
4161 	context->timeout = 0;
4162 	__hif_pm_runtime_prevent_suspend(hif_sc, context);
4163 	spin_unlock_bh(&hif_sc->runtime_lock);
4164 
4165 	return 0;
4166 }
4167 
4168 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
4169 				struct hif_pm_runtime_lock *data)
4170 {
4171 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4172 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4173 	struct hif_pm_runtime_lock *context = data;
4174 
4175 	if (!sc->hif_config.enable_runtime_pm)
4176 		return 0;
4177 
4178 	if (!context)
4179 		return -EINVAL;
4180 
	WARN_ON(in_irq());
4183 
4184 	spin_lock_bh(&hif_sc->runtime_lock);
4185 
4186 	__hif_pm_runtime_allow_suspend(hif_sc, context);
4187 
4188 	/* The list can be empty as well in cases where
4189 	 * we have one context in the list and the allow
4190 	 * suspend came before the timer expires and we delete
4191 	 * context above from the list.
4192 	 * When list is empty prevent_suspend count will be zero.
4193 	 */
4194 	if (hif_sc->prevent_suspend_cnt == 0 &&
4195 			hif_sc->runtime_timer_expires > 0) {
4196 		del_timer(&hif_sc->runtime_timer);
4197 		hif_sc->runtime_timer_expires = 0;
4198 	}
4199 
4200 	spin_unlock_bh(&hif_sc->runtime_lock);
4201 
4202 	return 0;
4203 }
4204 
4205 /**
4206  * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
4207  * @ol_sc: HIF context
4208  * @lock: which lock is being acquired
4209  * @delay: Timeout in milliseconds
4210  *
4211  * Prevent runtime suspend with a timeout after which runtime suspend would be
4212  * allowed. This API uses a single timer to allow the suspend and timer is
4213  * modified if the timeout is changed before timer fires.
4214  * If the timeout is less than autosuspend_delay then use mark_last_busy instead
4215  * of starting the timer.
4216  *
4217  * It is wise to try not to use this API and correct the design if possible.
4218  *
4219  * Return: 0 on success and negative error code on failure
4220  */
4221 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
4222 		struct hif_pm_runtime_lock *lock, unsigned int delay)
4223 {
4224 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4225 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);
4226 
4227 	int ret = 0;
4228 	unsigned long expires;
4229 	struct hif_pm_runtime_lock *context = lock;
4230 
4231 	if (hif_is_load_or_unload_in_progress(sc)) {
4232 		HIF_ERROR("%s: Load/unload in progress, ignore!",
4233 				__func__);
4234 		return -EINVAL;
4235 	}
4236 
4237 	if (hif_is_recovery_in_progress(sc)) {
4238 		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
4239 		return -EINVAL;
4240 	}
4241 
4242 	if (!sc->hif_config.enable_runtime_pm)
4243 		return 0;
4244 
4245 	if (!context)
4246 		return -EINVAL;
4247 
	WARN_ON(in_irq());
4250 
4251 	/*
4252 	 * Don't use internal timer if the timeout is less than auto suspend
4253 	 * delay.
4254 	 */
4255 	if (delay <= hif_sc->dev->power.autosuspend_delay) {
4256 		hif_pm_request_resume(hif_sc->dev);
4257 		hif_pm_runtime_mark_last_busy(hif_sc->dev);
4258 		return ret;
4259 	}
4260 
4261 	expires = jiffies + msecs_to_jiffies(delay);
	/* a value of zero means "no timer armed", so skip over it if
	 * the jiffies arithmetic happens to wrap to zero
	 */
	expires += !expires;
4263 
4264 	spin_lock_bh(&hif_sc->runtime_lock);
4265 
4266 	context->timeout = delay;
4267 	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
4268 	hif_sc->pm_stats.prevent_suspend_timeout++;
4269 
4270 	/* Modify the timer only if new timeout is after already configured
4271 	 * timeout
4272 	 */
4273 	if (time_after(expires, hif_sc->runtime_timer_expires)) {
4274 		mod_timer(&hif_sc->runtime_timer, expires);
4275 		hif_sc->runtime_timer_expires = expires;
4276 	}
4277 
4278 	spin_unlock_bh(&hif_sc->runtime_lock);
4279 
4280 	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
4281 		hif_pm_runtime_state_to_string(
4282 			qdf_atomic_read(&hif_sc->pm_state)),
4283 					delay, ret);
4284 
4285 	return ret;
4286 }
4287 
4288 /**
4289  * hif_runtime_lock_init() - API to initialize Runtime PM context
4290  * @name: Context name
4291  *
4292  * This API initializes the Runtime PM context of the caller and
4293  * return the pointer.
4294  *
4295  * Return: None
4296  */
4297 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
4298 {
4299 	struct hif_pm_runtime_lock *context;
4300 
4301 	HIF_INFO("Initializing Runtime PM wakelock %s", name);
4302 
4303 	context = qdf_mem_malloc(sizeof(*context));
4304 	if (!context) {
4305 		HIF_ERROR("%s: No memory for Runtime PM wakelock context",
4306 			  __func__);
4307 		return -ENOMEM;
4308 	}
4309 
4310 	context->name = name ? name : "Default";
4311 	lock->lock = context;
4312 
4313 	return 0;
4314 }
4315 
4316 /**
4317  * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
4318  * @data: Runtime PM context
4319  *
4320  * Return: void
4321  */
4322 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
4323 			     struct hif_pm_runtime_lock *data)
4324 {
4325 	struct hif_pm_runtime_lock *context = data;
4326 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4327 
4328 	if (!context) {
4329 		HIF_ERROR("Runtime PM wakelock context is NULL");
4330 		return;
4331 	}
4332 
4333 	HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);
4334 
4335 	/*
4336 	 * Ensure to delete the context list entry and reduce the usage count
4337 	 * before freeing the context if context is active.
4338 	 */
4339 	if (sc) {
4340 		spin_lock_bh(&sc->runtime_lock);
4341 		__hif_pm_runtime_allow_suspend(sc, context);
4342 		spin_unlock_bh(&sc->runtime_lock);
4343 	}
4344 
4345 	qdf_mem_free(context);
4346 }
4347 #endif /* FEATURE_RUNTIME_PM */
4348 
4349 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
4350 {
4351 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4352 
4353 	/* legacy case only has one irq */
4354 	return pci_scn->irq;
4355 }
4356 
4357 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
4358 {
4359 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
4360 	struct hif_target_info *tgt_info;
4361 
4362 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
4363 
4364 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
4365 	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
4366 	    tgt_info->target_type == TARGET_TYPE_QCA8074) {
4367 		/*
4368 		 * Need to consider offset's memtype for QCA6290/QCA8074,
4369 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
4370 		 * well initialized/defined.
4371 		 */
4372 		return 0;
4373 	}
4374 
	if ((offset >= DRAM_BASE_ADDRESS &&
	     offset <= DRAM_BASE_ADDRESS + DRAM_SIZE) ||
	    (offset + sizeof(unsigned int) <= sc->mem_len))
		return 0;
4379 
4380 	HIF_TRACE("Refusing to read memory at 0x%x - 0x%lx (max 0x%zx)\n",
4381 		 offset, offset + sizeof(unsigned int), sc->mem_len);
4382 
4383 	return -EINVAL;
4384 }
4385 
4386 /**
4387  * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
4388  * @scn: hif context
4389  *
4390  * Return: true if soc needs driver bmi otherwise false
4391  */
4392 bool hif_pci_needs_bmi(struct hif_softc *scn)
4393 {
4394 	return !ce_srng_based(scn);
4395 }
4396