xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/pcie/if_pci.c (revision dd4dc88b837a295134aa9869114a2efee0f4894b)
/*
 * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#ifdef CONFIG_PCI_MSM
#include <linux/msm_pcie.h>
#endif
#include "hif_io32.h"
#include "if_pci.h"
#include "hif.h"
#include "target_type.h"
#include "hif_main.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_bmi.h"
#include "regtable.h"
#include "hif_hw_version.h"
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "qdf_status.h"
#include "qdf_atomic.h"
#include "pld_common.h"
#include "mp_dev.h"
#include "hif_debug.h"

#include "if_pci_internal.h"
#include "ce_tasklet.h"
#include "targaddrs.h"
#include "hif_exec.h"

#include "pci_api.h"
#include "ahb_api.h"

/* Maximum ms timeout for host to wake up target */
#define PCIE_WAKE_TIMEOUT 1000
#define RAMDUMP_EVENT_TIMEOUT 2500

/* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
 * PCIe data bus errors.
 * As a workaround for this issue, the reset sequence uses a Target CPU
 * warm reset instead of SOC_GLOBAL_RESET.
 */
#define CPU_WARM_RESET_WAR

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
struct ce_irq_reg_table {
	uint32_t irq_enable;
	uint32_t irq_status;
};

#ifndef QCA_WIFI_3_0_ADRASTEA
static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
}
#else
static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	unsigned int target_enable0, target_enable1;
	unsigned int target_cause0, target_cause1;

	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);

	if ((target_enable0 & target_cause0) ||
	    (target_enable1 & target_cause1)) {
		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);

		if (scn->notice_send)
			pld_intr_notify_q6(sc->dev);
	}
}
#endif

/**
 * pci_dispatch_interrupt() - dispatch CE interrupts to per-CE tasklets
 * @scn: hif context
 *
 * Return: N/A
 */
static void pci_dispatch_interrupt(struct hif_softc *scn)
{
	uint32_t intr_summary;
	int id;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done != true)
		return;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(scn);

	if (intr_summary == 0) {
		if ((scn->target_status != TARGET_STATUS_RESET) &&
		    (!qdf_atomic_read(&scn->link_suspended))) {

			hif_write32_mb(scn, scn->mem +
				(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				HOST_GROUP0_MASK);

			hif_read32_mb(scn, scn->mem +
					(SOC_CORE_BASE_ADDRESS |
					PCIE_INTR_ENABLE_ADDRESS));
		}
		Q_TARGET_ACCESS_END(scn);
		return;
	}
	Q_TARGET_ACCESS_END(scn);

	scn->ce_irq_summary = intr_summary;
	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
		if (intr_summary & (1 << id)) {
			intr_summary &= ~(1 << id);
			ce_dispatch_interrupt(id, &hif_state->tasklets[id]);
		}
	}
}

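/**
 * hif_pci_legacy_ce_interrupt_handler() - top-level handler for legacy
 *	(INTx) interrupts
 * @irq: irq number that fired
 * @arg: hif pci context registered with the irq
 *
 * Masks and clears the legacy line interrupt, flushes the posted write
 * buffer with a readback, then either schedules the SSR tasklet (when
 * the firmware indicator signals a pending event) or dispatches the
 * pending copy engine interrupts.
 *
 * Return: IRQ_HANDLED
 */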
irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
{
	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);

	volatile int tmp;
	uint16_t val = 0;
	uint32_t bar0 = 0;
	uint32_t fw_indicator_address, fw_indicator;
	bool ssr_irq = false;
	unsigned int host_cause, host_enable;

	if (LEGACY_INTERRUPTS(sc)) {
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return IRQ_HANDLED;

		if (ADRASTEA_BU) {
			host_enable = hif_read32_mb(sc, sc->mem +
						    PCIE_INTR_ENABLE_ADDRESS);
			host_cause = hif_read32_mb(sc, sc->mem +
						   PCIE_INTR_CAUSE_ADDRESS);
			if (!(host_enable & host_cause)) {
				hif_pci_route_adrastea_interrupt(sc);
				return IRQ_HANDLED;
			}
		}

		/* Clear legacy PCI line interrupts.
		 * IMPORTANT: the INTR_CLR register has to be set
		 * after INTR_ENABLE is set to 0; otherwise the
		 * interrupt cannot actually be cleared.
		 */
		hif_write32_mb(sc, sc->mem +
			      (SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_ENABLE_ADDRESS), 0);

		hif_write32_mb(sc, sc->mem +
			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
			       ADRASTEA_BU ?
			       (host_enable & host_cause) :
			      HOST_GROUP0_MASK);

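		/*
		 * Adrastea-only acknowledgment; 0x2f100c is presumably a
		 * target-side interrupt ack register, but the magic offset
		 * is not named by any macro in this file.
		 */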
		if (ADRASTEA_BU)
			hif_write32_mb(sc, sc->mem + 0x2f100c,
				       (host_cause >> 1));

		/* IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer
		 */
		if (!ADRASTEA_BU) {
			tmp =
				hif_read32_mb(sc, sc->mem +
					      (SOC_CORE_BASE_ADDRESS |
					       PCIE_INTR_ENABLE_ADDRESS));

			if (tmp == 0xdeadbeef) {
				HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
					  __func__);

				pci_read_config_word(sc->pdev, PCI_VENDOR_ID,
						     &val);
				HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
					  __func__, val);

				pci_read_config_word(sc->pdev, PCI_DEVICE_ID,
						     &val);
				HIF_ERROR("%s: PCI Device ID = 0x%04x",
					  __func__, val);

				pci_read_config_word(sc->pdev, PCI_COMMAND,
						     &val);
				HIF_ERROR("%s: PCI Command = 0x%04x",
					  __func__, val);

				pci_read_config_word(sc->pdev, PCI_STATUS,
						     &val);
				HIF_ERROR("%s: PCI Status = 0x%04x",
					  __func__, val);

				pci_read_config_dword(sc->pdev,
						      PCI_BASE_ADDRESS_0,
						      &bar0);
				HIF_ERROR("%s: PCI BAR0 = 0x%08x",
					  __func__, bar0);

				HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem +
							PCIE_LOCAL_BASE_ADDRESS
							+ RTC_STATE_ADDRESS));
				HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem +
							PCIE_LOCAL_BASE_ADDRESS
							+ PCIE_SOC_WAKE_ADDRESS));
				HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem + 0x80008),
					  hif_read32_mb(sc, sc->mem + 0x8000c));
				HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem + 0x80010),
					  hif_read32_mb(sc, sc->mem + 0x80014));
				HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem + 0x80018),
					  hif_read32_mb(sc, sc->mem + 0x8001c));
				QDF_BUG(0);
			}

			PCI_CLR_CAUSE0_REGISTER(sc);
		}

		if (HAS_FW_INDICATOR) {
			fw_indicator_address = hif_state->fw_indicator_address;
			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
			if ((fw_indicator != ~0) &&
			    (fw_indicator & FW_IND_EVENT_PENDING))
				ssr_irq = true;
		}

		if (Q_TARGET_ACCESS_END(scn) < 0)
			return IRQ_HANDLED;
	}
	/* TBDXXX: Add support for WMAC */

	if (ssr_irq) {
		sc->irq_event = irq;
		qdf_atomic_set(&scn->tasklet_from_intr, 1);

		qdf_atomic_inc(&scn->active_tasklet_cnt);
		tasklet_schedule(&sc->intr_tq);
	} else {
		pci_dispatch_interrupt(scn);
	}

	return IRQ_HANDLED;
}

bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
{
	return 1;               /* FIX THIS */
}

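/**
 * hif_get_irq_num() - copy the allocated irq numbers into a caller buffer
 * @scn: hif context
 * @irq: output array for the irq numbers
 * @size: capacity of @irq
 *
 * Return: number of irqs copied, or -EINVAL on bad input or an
 * undersized buffer.
 */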
int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	int i = 0;

	if (!irq || !size)
		return -EINVAL;

	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
		irq[0] = sc->irq;
		return 1;
	}

	if (sc->num_msi_intrs > size) {
		qdf_print("Not enough space in irq buffer to return irqs");
		return -EINVAL;
	}

	for (i = 0; i < sc->num_msi_intrs; i++)
		irq[i] = sc->irq + i + MSI_ASSIGN_CE_INITIAL;

	return sc->num_msi_intrs;
}

/**
 * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
 * @scn: hif_softc
 *
 * Return: void
 */
#if CONFIG_ATH_PCIE_MAX_PERF == 0
void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	A_target_id_t pci_addr = scn->mem;

	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	/*
	 * If the deferred sleep timer is running cancel it
	 * and put the soc into sleep.
	 */
	if (hif_state->fake_sleep == true) {
		qdf_timer_stop(&hif_state->sleep_timer);
		if (hif_state->verified_awake == false) {
			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
				      PCIE_SOC_WAKE_ADDRESS,
				      PCIE_SOC_WAKE_RESET);
		}
		hif_state->fake_sleep = false;
	}
	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
#else
inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
}
#endif

#define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
	hif_read32_mb(sc, (char *)(mem) + \
	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))

#define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
	hif_write32_mb(sc, ((char *)(mem) + \
	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))

#ifdef QCA_WIFI_3_0
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @hif_ctx: hif context
 * @mem: mapped device memory
 *
 * emulation never goes to sleep
 *
 * Return: true if target is awake
 */
static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
{
	return true;
}
#else
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @scn: hif context
 * @mem: mapped device memory
 *
 * Return: true if the target's clocks are on
 */
static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
{
	uint32_t val;

	if (scn->recovery)
		return false;
	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
		+ RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
}
#endif

#define ATH_PCI_RESET_WAIT_MAX 10       /* ms */
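/**
 * hif_pci_device_reset() - cold reset the target via SOC_GLOBAL_RESET
 * @sc: hif pci context
 *
 * Forces the target awake first (writing SOC_GLOBAL_RESET while the
 * target sleeps could scribble over host memory), asserts the global
 * reset bit until RTC_STATE reports cold reset, deasserts it again,
 * and finally lets the target go back to sleep.
 */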
static void hif_pci_device_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (!scn->hostdef)
		return;

	/* NB: Don't check resetok here.  This form of reset
	 * is integral to correct operation.
	 */

	if (!SOC_GLOBAL_RESET_ADDRESS)
		return;

	if (!mem)
		return;

	HIF_ERROR("%s: Reset Device", __func__);

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;

		qdf_mdelay(1);
	}

	/* Put Target, including PCIe, into RESET. */
	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;

		qdf_mdelay(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;

		qdf_mdelay(1);
	}

	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_RESET);
}

/* CPU warm reset function
 * Steps:
 * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
 * 2. Clear the FW_INDICATOR_ADDRESS - so the Target CPU initializes FW
 *    correctly on WARM reset
 * 3. Clear Target CPU LF timer interrupt
 * 4. Reset all CEs to clear any pending CE transactions
 * 5. Warm reset CPU
 */
static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	uint32_t fw_indicator;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	/* NB: Don't check resetok here.  This form of reset is
	 * integral to correct operation.
	 */

	if (!mem)
		return;

	HIF_INFO_MED("%s: Target Warm Reset", __func__);

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;
		qdf_mdelay(1);
	}

	/*
	 * Disable Pending interrupts
	 */
	val =
		hif_read32_mb(sc, mem +
			     (SOC_CORE_BASE_ADDRESS |
			      PCIE_INTR_CAUSE_ADDRESS));
	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
		    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
	/* Target CPU Intr Cause */
	val = hif_read32_mb(sc, mem +
			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);

	val =
		hif_read32_mb(sc, mem +
			     (SOC_CORE_BASE_ADDRESS |
			      PCIE_INTR_ENABLE_ADDRESS));
	hif_write32_mb(sc, (mem +
		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
	hif_write32_mb(sc, (mem +
		       (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
		       HOST_GROUP0_MASK);

	qdf_mdelay(100);

	/* Clear FW_INDICATOR_ADDRESS */
	if (HAS_FW_INDICATOR) {
		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
	}

	/* Clear Target LF Timer interrupts */
	val =
		hif_read32_mb(sc, mem +
			     (RTC_SOC_BASE_ADDRESS +
			      SOC_LF_TIMER_CONTROL0_ADDRESS));
	HIF_INFO_MED("%s: addr 0x%x :  0x%x", __func__,
	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
	hif_write32_mb(sc, mem +
		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
		      val);

	/* Reset CE */
	val =
		hif_read32_mb(sc, mem +
			     (RTC_SOC_BASE_ADDRESS |
			      SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb(sc, (mem +
		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
		      val);
	val =
		hif_read32_mb(sc, mem +
			     (RTC_SOC_BASE_ADDRESS |
			      SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* CE unreset */
	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
		       SOC_RESET_CONTROL_ADDRESS), val);
	val =
		hif_read32_mb(sc, mem +
			     (RTC_SOC_BASE_ADDRESS |
			      SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* Read Target CPU Intr Cause */
	val = hif_read32_mb(sc, mem +
			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
		    __func__, val);

	/* CPU warm RESET */
	val =
		hif_read32_mb(sc, mem +
			     (RTC_SOC_BASE_ADDRESS |
			      SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
		       SOC_RESET_CONTROL_ADDRESS), val);
	val =
		hif_read32_mb(sc, mem +
			     (RTC_SOC_BASE_ADDRESS |
			      SOC_RESET_CONTROL_ADDRESS));
	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
		    __func__, val);

	qdf_mdelay(100);
	HIF_INFO_MED("%s: Target Warm reset complete", __func__);
}

#ifndef QCA_WIFI_3_0
/* only applicable to legacy ce */
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem = sc->mem;
	uint32_t val;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;
	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
	if (Q_TARGET_ACCESS_END(scn) < 0)
		return ATH_ISR_SCHED;

	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);

	if (val & FW_IND_HELPER)
		return 0;

	return 1;
}
#endif

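/**
 * hif_check_soc_status() - sanity check the pcie link and soc power state
 * @hif_ctx: hif context
 *
 * Verifies the device ID is still readable over config space, forces
 * the target awake (polling up to PCIE_WAKE_TIMEOUT ms), and logs the
 * soc power register for diagnosing internal bus issues.
 *
 * Return: 0 on success, -EACCES if the link is down or the target
 * never wakes up.
 */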
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint16_t device_id = 0;
	uint32_t val;
	uint16_t timeout_count = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	/* Check device ID from PCIe configuration space for link status */
	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != sc->devid) {
		HIF_ERROR("%s: device ID does not match (read 0x%x, expect 0x%x)",
			  __func__, device_id, sc->devid);
		return -EACCES;
	}

	/* Check PCIe local register for bar/memory access */
	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
			   RTC_STATE_ADDRESS);
	HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);

	/* Try to wake up the target if it sleeps */
	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
	HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
		hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		PCIE_SOC_WAKE_ADDRESS));

	/* Check if the target can be woken up */
	while (!hif_targ_is_awake(scn, sc->mem)) {
		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
			HIF_ERROR("%s: wake up timeout, %08x, %08x",
				__func__,
				hif_read32_mb(sc, sc->mem +
					     PCIE_LOCAL_BASE_ADDRESS +
					     RTC_STATE_ADDRESS),
				hif_read32_mb(sc, sc->mem +
					     PCIE_LOCAL_BASE_ADDRESS +
					     PCIE_SOC_WAKE_ADDRESS));
			return -EACCES;
		}

		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);

		qdf_mdelay(100);
		timeout_count += 100;
	}

	/* Check Power register for SoC internal bus issues */
	val =
		hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
			     SOC_POWER_REG_OFFSET);
	HIF_INFO_MED("%s: Power register is %08x", __func__, val);

	return 0;
}

/**
 * __hif_pci_dump_registers(): dump other PCI debug registers
 * @scn: struct hif_softc
 *
 * This function dumps pci debug registers.  The parent function
 * dumps the copy engine registers before calling this function.
 *
 * Return: void
 */
static void __hif_pci_dump_registers(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem = sc->mem;
	uint32_t val, i, j;
	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
	uint32_t ce_base;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	/* DEBUG_INPUT_SEL_SRC = 0x6 */
	val =
		hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			     WLAN_DEBUG_INPUT_SEL_OFFSET);
	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);

	/* DEBUG_CONTROL_ENABLE = 0x1 */
	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			   WLAN_DEBUG_CONTROL_OFFSET);
	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
		      WLAN_DEBUG_CONTROL_OFFSET, val);

	HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    WLAN_DEBUG_INPUT_SEL_OFFSET),
	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    WLAN_DEBUG_CONTROL_OFFSET));

	HIF_INFO_MED("%s: Debug CE", __func__);
	/* Loop CE debug output */
	/* AMBA_DEBUG_BUS_SEL = 0xc */
	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    AMBA_DEBUG_BUS_OFFSET);
	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
		       val);

	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
				   CE_WRAPPER_DEBUG_OFFSET);
		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
			      CE_WRAPPER_DEBUG_OFFSET, val);

		HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
			    __func__, wrapper_idx[i],
			    hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				AMBA_DEBUG_BUS_OFFSET),
			    hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
				CE_WRAPPER_DEBUG_OFFSET));

		if (wrapper_idx[i] <= 7) {
			for (j = 0; j <= 5; j++) {
				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
				/* For (j=0~5) write CE_DEBUG_SEL = j */
				val =
					hif_read32_mb(sc, mem + ce_base +
						     CE_DEBUG_OFFSET);
				val &= ~CE_DEBUG_SEL_MASK;
				val |= CE_DEBUG_SEL_SET(j);
				hif_write32_mb(sc, mem + ce_base +
					       CE_DEBUG_OFFSET, val);

				/* read (@gpio_athr_wlan_reg)
				 * WLAN_DEBUG_OUT_DATA
				 */
				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
						    + WLAN_DEBUG_OUT_OFFSET);
				val = WLAN_DEBUG_OUT_DATA_GET(val);

				HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
					    __func__, j,
					    hif_read32_mb(sc, mem + ce_base +
						    CE_DEBUG_OFFSET), val);
			}
		} else {
			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
			val =
				hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
					     WLAN_DEBUG_OUT_OFFSET);
			val = WLAN_DEBUG_OUT_DATA_GET(val);

			HIF_INFO_MED("%s: out: %x", __func__, val);
		}
	}

	HIF_INFO_MED("%s: Debug PCIe:", __func__);
	/* Loop PCIe debug output */
	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    AMBA_DEBUG_BUS_OFFSET);
	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
		       AMBA_DEBUG_BUS_OFFSET, val);

	for (i = 0; i <= 8; i++) {
		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
		val =
			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				     AMBA_DEBUG_BUS_OFFSET);
		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
			       AMBA_DEBUG_BUS_OFFSET, val);

		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
		val =
			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				     WLAN_DEBUG_OUT_OFFSET);
		val = WLAN_DEBUG_OUT_DATA_GET(val);

		HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				    WLAN_DEBUG_OUT_OFFSET), val,
		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				    WLAN_DEBUG_OUT_OFFSET));
	}

	Q_TARGET_ACCESS_END(scn);
}

/**
 * hif_pci_dump_registers(): dump bus debug registers
 * @hif_ctx: struct hif_softc
 *
 * This function dumps hif bus debug registers
 *
 * Return: 0 for success or error code
 */
int hif_pci_dump_registers(struct hif_softc *hif_ctx)
{
	int status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	status = hif_dump_ce_registers(scn);

	if (status)
		HIF_ERROR("%s: Dump CE Registers Failed", __func__);

	/* dump non copy engine pci registers */
	__hif_pci_dump_registers(scn);

	return 0;
}

#ifdef HIF_CONFIG_SLUB_DEBUG_ON

/* worker thread to schedule wlan_tasklet in SLUB debug build */
static void reschedule_tasklet_work_handler(void *arg)
{
	struct hif_pci_softc *sc = arg;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (!scn) {
		HIF_ERROR("%s: hif_softc is NULL", __func__);
		return;
	}

	if (scn->hif_init_done == false) {
		HIF_ERROR("%s: wlan driver is unloaded", __func__);
		return;
	}

	tasklet_schedule(&sc->intr_tq);
}

/**
 * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
 * work
 * @sc: HIF PCI Context
 *
 * Return: void
 */
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
{
	qdf_create_work(0, &sc->reschedule_tasklet_work,
				reschedule_tasklet_work_handler, NULL);
}
#else
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
#endif /* HIF_CONFIG_SLUB_DEBUG_ON */

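/**
 * wlan_tasklet() - hif interrupt tasklet
 * @data: hif pci context, cast from the tasklet cookie
 *
 * Runs the firmware event handler for non-Adrastea targets, then
 * clears the tasklet bookkeeping counters so the interrupt path can
 * schedule it again.
 */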
void wlan_tasklet(unsigned long data)
{
	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (scn->hif_init_done == false)
		goto end;

	if (qdf_atomic_read(&scn->link_suspended))
		goto end;

	if (!ADRASTEA_BU) {
		(irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
		if (scn->target_status == TARGET_STATUS_RESET)
			goto end;
	}

end:
	qdf_atomic_set(&scn->tasklet_from_intr, 0);
	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

#ifdef FEATURE_RUNTIME_PM
static const char *hif_pm_runtime_state_to_string(uint32_t state)
{
	switch (state) {
	case HIF_PM_RUNTIME_STATE_NONE:
		return "INIT_STATE";
	case HIF_PM_RUNTIME_STATE_ON:
		return "ON";
	case HIF_PM_RUNTIME_STATE_INPROGRESS:
		return "INPROGRESS";
	case HIF_PM_RUNTIME_STATE_SUSPENDED:
		return "SUSPENDED";
	default:
		return "INVALID STATE";
	}
}

#define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
	seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
/**
 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
 * @sc: hif_pci_softc context
 * @msg: log message
 *
 * log runtime pm stats when something seems off.
 *
 * Return: void
 */
static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
{
	struct hif_pm_runtime_lock *ctx;

	HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
			msg, atomic_read(&sc->dev->power.usage_count),
			hif_pm_runtime_state_to_string(
					atomic_read(&sc->pm_state)),
			sc->prevent_suspend_cnt);

	HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
			sc->dev->power.runtime_status,
			sc->dev->power.runtime_error,
			sc->dev->power.disable_depth,
			sc->dev->power.autosuspend_delay);

	HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
			sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
			sc->pm_stats.request_resume);

	HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
			sc->pm_stats.allow_suspend,
			sc->pm_stats.prevent_suspend);

	HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
			sc->pm_stats.prevent_suspend_timeout,
			sc->pm_stats.allow_suspend_timeout);

	HIF_ERROR("suspended: %u, resumed: %u",
			sc->pm_stats.suspended,
			sc->pm_stats.resumed);

	HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
			sc->pm_stats.suspend_err,
			sc->pm_stats.runtime_get_err);

	HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");

	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
		HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
	}

	WARN_ON(1);
}

/**
 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
 * @s: file to print to
 * @data: unused
 *
 * debugging tool added to the debug fs for displaying runtimepm stats
 *
 * Return: 0
 */
static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
{
	struct hif_pci_softc *sc = s->private;
	static const char * const autopm_state[] = {"NONE", "ON", "INPROGRESS",
		"SUSPENDED"};
	unsigned int msecs_age;
	int pm_state = atomic_read(&sc->pm_state);
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *ctx;

	seq_printf(s, "%30s: %s\n", "Runtime PM state",
			autopm_state[pm_state]);
	seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
			sc->pm_stats.last_resume_caller);

	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
		msecs_age = jiffies_to_msecs(
				jiffies - sc->pm_stats.suspend_jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
				msecs_age / 1000, msecs_age % 1000);
	}

	seq_printf(s, "%30s: %d\n", "PM Usage count",
			atomic_read(&sc->dev->power.usage_count));

	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
			sc->prevent_suspend_cnt);

	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);

	timer_expires = sc->runtime_timer_expires;
	if (timer_expires > 0) {
		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
				msecs_age / 1000, msecs_age % 1000);
	}

	spin_lock_bh(&sc->runtime_lock);
	if (list_empty(&sc->prevent_suspend_list)) {
		spin_unlock_bh(&sc->runtime_lock);
		return 0;
	}

	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
		seq_printf(s, "%s", ctx->name);
		if (ctx->timeout)
			seq_printf(s, "(%d ms)", ctx->timeout);
		seq_puts(s, " ");
	}
	seq_puts(s, "\n");
	spin_unlock_bh(&sc->runtime_lock);

	return 0;
}
#undef HIF_PCI_RUNTIME_PM_STATS

/**
 * hif_pci_runtime_pm_open() - open a debugfs file to access the runtime
 *	pm stats
 * @inode: inode of the debugfs file
 * @file: file handle
 *
 * Return: linux error code of single_open.
 */
static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
{
	return single_open(file, hif_pci_pm_runtime_debugfs_show,
			inode->i_private);
}

static const struct file_operations hif_pci_runtime_pm_fops = {
	.owner          = THIS_MODULE,
	.open           = hif_pci_runtime_pm_open,
	.release        = single_release,
	.read           = seq_read,
	.llseek         = seq_lseek,
};

/**
 * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
 * @sc: pci context
 *
 * creates a debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
{
	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
					0400, NULL, sc,
					&hif_pci_runtime_pm_fops);
}

/**
 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
 * @sc: pci context
 *
 * removes the debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
{
	debugfs_remove(sc->pm_dentry);
}

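/**
 * hif_runtime_init() - enable runtime pm on the pci device
 * @dev: pci device
 * @delay: autosuspend inactivity delay in ms
 *
 * Configures autosuspend, allows runtime pm (the PCI core forbids it
 * by default) and drops a usage count so the device can actually
 * suspend once it goes idle.
 */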
static void hif_runtime_init(struct device *dev, int delay)
{
	pm_runtime_set_autosuspend_delay(dev, delay);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_allow(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_noidle(dev);
	pm_suspend_ignore_children(dev, true);
}

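/**
 * hif_runtime_exit() - undo the votes hif_runtime_init() dropped
 * @dev: pci device
 *
 * Takes a usage count back and marks the device active so runtime
 * suspend cannot trigger while the driver unwinds.
 */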
static void hif_runtime_exit(struct device *dev)
{
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
}

static void hif_pm_runtime_lock_timeout_fn(void *data);

/**
 * hif_pm_runtime_start(): start the runtime pm
 * @sc: pci context
 *
 * After this call, runtime pm will be active.
 */
static void hif_pm_runtime_start(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
	uint32_t mode = hif_get_conparam(ol_sc);

	if (!ol_sc->hif_config.enable_runtime_pm) {
		HIF_INFO("%s: RUNTIME PM is disabled in ini", __func__);
		return;
	}

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode)) {
		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode",
				__func__);
		return;
	}

	qdf_timer_init(NULL, &sc->runtime_timer,
		       hif_pm_runtime_lock_timeout_fn,
		       sc, QDF_TIMER_TYPE_WAKE_APPS);

	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
			ol_sc->hif_config.runtime_pm_delay);

	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
	hif_runtime_pm_debugfs_create(sc);
}

/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @sc: pci context
 *
 * Turns off runtime pm and frees the corresponding resources
 * that were acquired by hif_pm_runtime_start().
 */
static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
	uint32_t mode = hif_get_conparam(ol_sc);

	if (!ol_sc->hif_config.enable_runtime_pm)
		return;

	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode))
		return;

	hif_runtime_exit(sc->dev);
	hif_pm_runtime_resume(sc->dev);

	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);

	hif_runtime_pm_debugfs_remove(sc);
	qdf_timer_free(&sc->runtime_timer);
	/* doesn't wait for pending traffic, unlike cld-2.0 */
}

/**
 * hif_pm_runtime_open(): initialize runtime pm
 * @sc: pci data structure
 *
 * Early initialization
 */
static void hif_pm_runtime_open(struct hif_pci_softc *sc)
{
	spin_lock_init(&sc->runtime_lock);

	qdf_atomic_init(&sc->pm_state);
	qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
	INIT_LIST_HEAD(&sc->prevent_suspend_list);
}

/**
 * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
 * @sc: pci context
 *
 * Ensure we have only one vote against runtime suspend before closing
 * the runtime suspend feature.
 *
 * All gets by the wlan driver should have been returned;
 * one vote should remain as part of cnss_runtime_exit.
 *
 * Needs to be revisited if we share the root complex.
 */
static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
{
	struct hif_pm_runtime_lock *ctx, *tmp;

	if (atomic_read(&sc->dev->power.usage_count) != 1)
		hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
	else
		return;

	spin_lock_bh(&sc->runtime_lock);
	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
		spin_unlock_bh(&sc->runtime_lock);
		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
		spin_lock_bh(&sc->runtime_lock);
	}
	spin_unlock_bh(&sc->runtime_lock);

	/* Ensure one and only one usage count remains, so that runtime
	 * pm isn't left disabled when the wlan driver is re-insmodded;
	 * this also keeps the count from dropping below 1, which would
	 * break runtime pm.
	 */
	if (atomic_read(&sc->dev->power.usage_count) <= 0)
		atomic_set(&sc->dev->power.usage_count, 1);
	while (atomic_read(&sc->dev->power.usage_count) > 1)
		hif_pm_runtime_put_auto(sc->dev);
}

static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
					  struct hif_pm_runtime_lock *lock);

/**
 * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
 * @sc: PCIe Context
 *
 * API is used to empty the runtime pm prevent suspend list.
 *
 * Return: void
 */
static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
{
	struct hif_pm_runtime_lock *ctx, *tmp;

	spin_lock_bh(&sc->runtime_lock);
	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
		__hif_pm_runtime_allow_suspend(sc, ctx);
	}
	spin_unlock_bh(&sc->runtime_lock);
}

/**
 * hif_pm_runtime_close(): close runtime pm
 * @sc: pci bus handle
 *
 * ensure runtime_pm is stopped before closing the driver
 */
static void hif_pm_runtime_close(struct hif_pci_softc *sc)
{
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
	if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
		return;

	hif_pm_runtime_stop(sc);

	hif_is_recovery_in_progress(scn) ?
		hif_pm_runtime_sanitize_on_ssr_exit(sc) :
		hif_pm_runtime_sanitize_on_exit(sc);
}
#else
static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
#endif

/**
 * hif_disable_power_gating() - disable HW power gating
 * @hif_ctx: hif context
 *
 * disables pcie L1 power states
 */
static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!scn) {
		HIF_ERROR("%s: Could not disable ASPM, scn is null",
		       __func__);
		return;
	}

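	/*
	 * Config offset 0x80 is evidently this chip's PCIe Link Control
	 * register (hence sc->lcr_val); clearing the low byte clears the
	 * ASPM L0s/L1 enable bits.
	 */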
	/* Disable ASPM when pkt log is enabled */
	pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
	pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
}

/**
 * hif_enable_power_gating() - enable HW power gating
 * @sc: hif pci context
 *
 * enables pcie L1 power states
 */
static void hif_enable_power_gating(struct hif_pci_softc *sc)
{
	if (!sc) {
		HIF_ERROR("%s: Could not re-enable ASPM, sc is null",
		       __func__);
		return;
	}

	/* Re-enable ASPM after firmware/OTP download is complete */
	pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
}

/**
 * hif_pci_enable_power_management() - enable power management
 * @hif_sc: hif context
 * @is_packet_log_enabled: pktlog enabled flag
 *
 * Enables runtime pm, ASPM (via hif_enable_power_gating) and re-enables
 * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
 *
 * note: epping mode does not call this function as it does not
 *       care about saving power.
 */
void hif_pci_enable_power_management(struct hif_softc *hif_sc,
				 bool is_packet_log_enabled)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);

	if (!pci_ctx) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}

	hif_pm_runtime_start(pci_ctx);

	if (!is_packet_log_enabled)
		hif_enable_power_gating(pci_ctx);

	if (!CONFIG_ATH_PCIE_MAX_PERF &&
	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
	    !ce_srng_based(hif_sc)) {
		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
			HIF_ERROR("%s, failed to set target to sleep",
				  __func__);
	}
}

/**
 * hif_pci_disable_power_management() - disable power management
 * @hif_ctx: hif context
 *
 * Currently disables runtime pm. Should be updated to behave
 * gracefully if runtime pm is not started. Should be updated to
 * take care of aspm and soc sleep for driver load.
 */
void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!pci_ctx) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}

	hif_pm_runtime_stop(pci_ctx);
}

void hif_pci_display_stats(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!pci_ctx) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}
	hif_display_ce_stats(&pci_ctx->ce_sc);
}

void hif_pci_clear_stats(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!pci_ctx) {
		HIF_ERROR("%s, hif_ctx null", __func__);
		return;
	}
	hif_clear_ce_stats(&pci_ctx->ce_sc);
}

#define ATH_PCI_PROBE_RETRY_MAX 3
/**
 * hif_pci_open(): hif bus open
 * @hif_ctx: hif context
 * @bus_type: bus type
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	hif_ctx->bus_type = bus_type;
	hif_pm_runtime_open(sc);

	qdf_spinlock_create(&sc->irq_lock);

	return hif_ce_open(hif_ctx);
}

/**
 * hif_wake_target_cpu() - wake the target's cpu
 * @scn: hif context
 *
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static void hif_wake_target_cpu(struct hif_softc *scn)
{
	QDF_STATUS rv;
	uint32_t core_ctrl;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	rv = hif_diag_read_access(hif_hdl,
				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				  &core_ctrl);
	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	rv = hif_diag_write_access(hif_hdl,
				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				   core_ctrl);
	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
}

/**
 * soc_wake_reset() - allow the target to go to sleep
 * @scn: hif_softc
 *
 * Clear the force wake register.  This is done by hif_sleep_entry
 * and when cancelling the deferred timer sleep.
 */
static void soc_wake_reset(struct hif_softc *scn)
{
	hif_write32_mb(scn, scn->mem +
		PCIE_LOCAL_BASE_ADDRESS +
		PCIE_SOC_WAKE_ADDRESS,
		PCIE_SOC_WAKE_RESET);
}

/**
 * hif_sleep_entry() - gate target sleep
 * @arg: hif context
 *
 * This function is the callback for the sleep timer.
 * Check if the last force awake critical section was at least
 * HIF_MIN_SLEEP_INACTIVITY_TIME_MS ago.  If it was,
 * allow the target to go to sleep and cancel the sleep timer;
 * otherwise reschedule the sleep timer.
 */
static void hif_sleep_entry(void *arg)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	uint32_t idle_ms;

	if (scn->recovery)
		return;

	if (hif_is_driver_unloading(scn))
		return;

	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	if (hif_state->verified_awake == false) {
		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
						    - hif_state->sleep_ticks);
		if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
			if (!qdf_atomic_read(&scn->link_suspended)) {
				soc_wake_reset(scn);
				hif_state->fake_sleep = false;
			}
		} else {
			qdf_timer_stop(&hif_state->sleep_timer);
			qdf_timer_start(&hif_state->sleep_timer,
				    HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
	} else {
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_start(&hif_state->sleep_timer,
					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
	}
	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}

#define HIF_HIA_MAX_POLL_LOOP    1000000
#define HIF_HIA_POLLING_DELAY_MS 10

#ifdef QCA_HIF_HIA_EXTND

static void hif_set_hia_extnd(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	uint32_t target_type = tgt_info->target_type;

	HIF_TRACE("%s: E", __func__);

	if ((target_type == TARGET_TYPE_AR900B) ||
			target_type == TARGET_TYPE_QCA9984 ||
			target_type == TARGET_TYPE_QCA9888) {
		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
		 * in RTC space
		 */
		tgt_info->target_revision
			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
					+ CHIP_ID_ADDRESS));
		qdf_print("chip_id 0x%x chip_revision 0x%x",
			  target_type, tgt_info->target_revision);
	}

	{
		uint32_t flag2_value = 0;
		uint32_t flag2_targ_addr =
			host_interest_item_address(target_type,
			offsetof(struct host_interest_s, hi_skip_clock_init));

		if ((ar900b_20_targ_clk != -1) &&
			(frac != -1) && (intval != -1)) {
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
				&flag2_value);
			qdf_print("\n Setting clk_override");
			flag2_value |= CLOCK_OVERRIDE;

			hif_diag_write_access(hif_hdl, flag2_targ_addr,
					flag2_value);
			qdf_print("\n CLOCK PLL val set %d", flag2_value);
		} else {
			qdf_print("\n CLOCK PLL skipped");
		}
	}

	if (target_type == TARGET_TYPE_AR900B
			|| target_type == TARGET_TYPE_QCA9984
			|| target_type == TARGET_TYPE_QCA9888) {

		/* for AR9980_2.0, a 300 MHz clock is used; right now we
		 * assume this is supplied through module parameters.
		 * If not supplied, assume the default or the same
		 * behavior as 1.0.  Assume the 1.0 clock can't be tuned;
		 * reset to defaults.
		 */

		qdf_print(KERN_INFO
			  "%s: setting the target pll frac %x intval %x",
			  __func__, frac, intval);

		/* do not touch frac and intval, let them be default -1;
		 * if desired, host can supply these through module params
		 */
		if (frac != -1 || intval != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr;

			flag2_targ_addr =
				host_interest_item_address(target_type,
				offsetof(struct host_interest_s,
					hi_clock_info));
			hif_diag_read_access(hif_hdl,
				flag2_targ_addr, &flag2_value);
			qdf_print("\n ====> FRAC Val %x Address %x", frac,
				  flag2_value);
			hif_diag_write_access(hif_hdl, flag2_value, frac);
			qdf_print("\n INT Val %x  Address %x",
				  intval, flag2_value + 4);
			hif_diag_write_access(hif_hdl,
					flag2_value + 4, intval);
		} else {
			qdf_print(KERN_INFO
				  "%s: no frac provided, skipping pre-configuring PLL",
				  __func__);
		}

		/* for 2.0 write 300 MHz into hi_desired_cpu_speed_hz */
		if ((target_type == TARGET_TYPE_AR900B)
			&& (tgt_info->target_revision == AR900B_REV_2)
			&& ar900b_20_targ_clk != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr;

			flag2_targ_addr
				= host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
					hi_desired_cpu_speed_hz));
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
							&flag2_value);
			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
				  flag2_value);
			hif_diag_write_access(hif_hdl, flag2_value,
				ar900b_20_targ_clk/*300000000u*/);
		} else if (target_type == TARGET_TYPE_QCA9888) {
			uint32_t flag2_targ_addr;

			if (200000000u != qca9888_20_targ_clk) {
				qca9888_20_targ_clk = 300000000u;
				/* Setting the target clock speed to 300 MHz */
			}

			flag2_targ_addr
				= host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
					hi_desired_cpu_speed_hz));
			hif_diag_write_access(hif_hdl, flag2_targ_addr,
				qca9888_20_targ_clk);
		} else {
			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
				  __func__);
		}
	} else {
		if (frac != -1 || intval != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr =
				host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
							hi_clock_info));
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
						&flag2_value);
			qdf_print("\n ====> FRAC Val %x Address %x", frac,
				  flag2_value);
			hif_diag_write_access(hif_hdl, flag2_value, frac);
			qdf_print("\n INT Val %x  Address %x", intval,
				  flag2_value + 4);
			hif_diag_write_access(hif_hdl, flag2_value + 4,
					      intval);
		}
	}
}

#else

static void hif_set_hia_extnd(struct hif_softc *scn)
{
}

#endif

/**
 * hif_set_hia() - fill out the host interest area
 * @scn: hif context
 *
 * This is replaced by hif_wlan_enable for integrated targets.
 * This fills out the host interest area.  The firmware will
 * process these memory addresses when it is first brought out
 * of reset.
 *
 * Return: 0 for success.
 */
static int hif_set_hia(struct hif_softc *scn)
{
	QDF_STATUS rv;
	uint32_t interconnect_targ_addr = 0;
	uint32_t pcie_state_targ_addr = 0;
	uint32_t pipe_cfg_targ_addr = 0;
	uint32_t svc_to_pipe_map = 0;
	uint32_t pcie_config_flags = 0;
	uint32_t flag2_value = 0;
	uint32_t flag2_targ_addr = 0;
#ifdef QCA_WIFI_3_0
	uint32_t host_interest_area = 0;
	uint8_t i;
#else
	uint32_t ealloc_value = 0;
	uint32_t ealloc_targ_addr = 0;
	uint8_t banks_switched = 1;
	uint32_t chip_id;
#endif
	uint32_t pipe_cfg_addr;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	uint32_t target_type = tgt_info->target_type;
	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
	static struct CE_pipe_config *target_ce_config;
	struct service_to_pipe *target_service_to_ce_map;

	HIF_TRACE("%s: E", __func__);

	hif_get_target_ce_config(scn,
				 &target_ce_config, &target_ce_config_sz,
				 &target_service_to_ce_map,
				 &target_service_to_ce_map_sz,
				 NULL, NULL);

	if (ADRASTEA_BU)
		return QDF_STATUS_SUCCESS;

#ifdef QCA_WIFI_3_0
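	/*
	 * Poll the scratch register until the firmware publishes the host
	 * interest area address; bit 0 doubles as the valid flag and is
	 * masked off before use.  The follow-up write to the magic offset
	 * 0x113014 presumably acks the handshake.
	 */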
1694 	i = 0;
1695 	while (i < HIF_HIA_MAX_POLL_LOOP) {
1696 		host_interest_area = hif_read32_mb(scn, scn->mem +
1697 						A_SOC_CORE_SCRATCH_0_ADDRESS);
1698 		if ((host_interest_area & 0x01) == 0) {
1699 			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1700 			host_interest_area = 0;
1701 			i++;
1702 			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1703 				HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1704 		} else {
1705 			host_interest_area &= (~0x01);
1706 			hif_write32_mb(scn, scn->mem + 0x113014, 0);
1707 			break;
1708 		}
1709 	}
1710 
1711 	if (i >= HIF_HIA_MAX_POLL_LOOP) {
1712 		HIF_ERROR("%s: hia polling timeout", __func__);
1713 		return -EIO;
1714 	}
1715 
1716 	if (host_interest_area == 0) {
1717 		HIF_ERROR("%s: host_interest_area = 0", __func__);
1718 		return -EIO;
1719 	}
1720 
1721 	interconnect_targ_addr = host_interest_area +
1722 			offsetof(struct host_interest_area_t,
1723 			hi_interconnect_state);
1724 
1725 	flag2_targ_addr = host_interest_area +
1726 			offsetof(struct host_interest_area_t, hi_option_flag2);
1727 
1728 #else
1729 	interconnect_targ_addr = hif_hia_item_address(target_type,
1730 		offsetof(struct host_interest_s, hi_interconnect_state));
1731 	ealloc_targ_addr = hif_hia_item_address(target_type,
1732 		offsetof(struct host_interest_s, hi_early_alloc));
1733 	flag2_targ_addr = hif_hia_item_address(target_type,
1734 		offsetof(struct host_interest_s, hi_option_flag2));
1735 #endif
1736 	/* Supply Target-side CE configuration */
1737 	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1738 			  &pcie_state_targ_addr);
1739 	if (rv != QDF_STATUS_SUCCESS) {
1740 		HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1741 			  __func__, interconnect_targ_addr, rv);
1742 		goto done;
1743 	}
1744 	if (pcie_state_targ_addr == 0) {
1745 		rv = QDF_STATUS_E_FAILURE;
1746 		HIF_ERROR("%s: pcie state addr is 0", __func__);
1747 		goto done;
1748 	}
1749 	pipe_cfg_addr = pcie_state_targ_addr +
1750 			  offsetof(struct pcie_state_s,
1751 			  pipe_cfg_addr);
1752 	rv = hif_diag_read_access(hif_hdl,
1753 			  pipe_cfg_addr,
1754 			  &pipe_cfg_targ_addr);
1755 	if (rv != QDF_STATUS_SUCCESS) {
1756 		HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1757 			__func__, pipe_cfg_addr, rv);
1758 		goto done;
1759 	}
1760 	if (pipe_cfg_targ_addr == 0) {
1761 		rv = QDF_STATUS_E_FAILURE;
1762 		HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1763 		goto done;
1764 	}
1765 
1766 	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1767 			(uint8_t *) target_ce_config,
1768 			target_ce_config_sz);
1769 
1770 	if (rv != QDF_STATUS_SUCCESS) {
1771 		HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1772 		goto done;
1773 	}
1774 
1775 	rv = hif_diag_read_access(hif_hdl,
1776 			  pcie_state_targ_addr +
1777 			  offsetof(struct pcie_state_s,
1778 			   svc_to_pipe_map),
1779 			  &svc_to_pipe_map);
1780 	if (rv != QDF_STATUS_SUCCESS) {
1781 		HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1782 		goto done;
1783 	}
1784 	if (svc_to_pipe_map == 0) {
1785 		rv = QDF_STATUS_E_FAILURE;
1786 		HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1787 		goto done;
1788 	}
1789 
1790 	rv = hif_diag_write_mem(hif_hdl,
1791 			svc_to_pipe_map,
1792 			(uint8_t *) target_service_to_ce_map,
1793 			target_service_to_ce_map_sz);
1794 	if (rv != QDF_STATUS_SUCCESS) {
1795 		HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1796 		goto done;
1797 	}
1798 
1799 	rv = hif_diag_read_access(hif_hdl,
1800 			pcie_state_targ_addr +
1801 			offsetof(struct pcie_state_s,
1802 			config_flags),
1803 			&pcie_config_flags);
1804 	if (rv != QDF_STATUS_SUCCESS) {
1805 		HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1806 		goto done;
1807 	}
1808 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1809 	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1810 #else
1811 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1812 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1813 	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1814 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1815 	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1816 #endif
1817 	rv = hif_diag_write_mem(hif_hdl,
1818 			pcie_state_targ_addr +
1819 			offsetof(struct pcie_state_s,
1820 			config_flags),
1821 			(uint8_t *) &pcie_config_flags,
1822 			sizeof(pcie_config_flags));
1823 	if (rv != QDF_STATUS_SUCCESS) {
1824 		HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1825 		goto done;
1826 	}
1827 
1828 #ifndef QCA_WIFI_3_0
1829 	/* configure early allocation */
1830 	ealloc_targ_addr = hif_hia_item_address(target_type,
1831 						offsetof(
1832 						struct host_interest_s,
1833 						hi_early_alloc));
1834 
1835 	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1836 			&ealloc_value);
1837 	if (rv != QDF_STATUS_SUCCESS) {
1838 		HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1839 		goto done;
1840 	}
1841 
1842 	/* 1 bank is switched to IRAM, except ROME 1.0 */
1843 	ealloc_value |=
1844 		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1845 		 HI_EARLY_ALLOC_MAGIC_MASK);
1846 
1847 	rv = hif_diag_read_access(hif_hdl,
1848 			  CHIP_ID_ADDRESS |
1849 			  RTC_SOC_BASE_ADDRESS, &chip_id);
1850 	if (rv != QDF_STATUS_SUCCESS) {
1851 		HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1852 		goto done;
1853 	}
1854 	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1855 		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1856 		switch (CHIP_ID_REVISION_GET(chip_id)) {
1857 		case 0x2:       /* ROME 1.3 */
1858 			/* 2 banks are switched to IRAM */
1859 			banks_switched = 2;
1860 			break;
1861 		case 0x4:       /* ROME 2.1 */
1862 		case 0x5:       /* ROME 2.2 */
1863 			banks_switched = 6;
1864 			break;
1865 		case 0x8:       /* ROME 3.0 */
1866 		case 0x9:       /* ROME 3.1 */
1867 		case 0xA:       /* ROME 3.2 */
1868 			banks_switched = 9;
1869 			break;
1870 		case 0x0:       /* ROME 1.0 */
1871 		case 0x1:       /* ROME 1.1 */
1872 		default:
1873 			/* 3 banks are switched to IRAM */
1874 			banks_switched = 3;
1875 			break;
1876 		}
1877 	}
1878 
1879 	ealloc_value |=
1880 		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1881 		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
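	/*
	 * Illustrative composition (values from the switch above): for a
	 * ROME 3.x part banks_switched == 9, so ealloc_value now carries
	 * the magic cookie in HI_EARLY_ALLOC_MAGIC_MASK plus 9 in
	 * HI_EARLY_ALLOC_IRAM_BANKS_MASK before being written back below.
	 */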
1882 
1883 	rv = hif_diag_write_access(hif_hdl,
1884 				ealloc_targ_addr,
1885 				ealloc_value);
1886 	if (rv != QDF_STATUS_SUCCESS) {
1887 		HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1888 		goto done;
1889 	}
1890 #endif
1891 	if ((target_type == TARGET_TYPE_AR900B)
1892 			|| (target_type == TARGET_TYPE_QCA9984)
1893 			|| (target_type == TARGET_TYPE_QCA9888)
1894 			|| (target_type == TARGET_TYPE_AR9888)) {
1895 		hif_set_hia_extnd(scn);
1896 	}
1897 
1898 	/* Tell Target to proceed with initialization */
1899 	flag2_targ_addr = hif_hia_item_address(target_type,
1900 						offsetof(
1901 						struct host_interest_s,
1902 						hi_option_flag2));
1903 
1904 	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1905 			  &flag2_value);
1906 	if (rv != QDF_STATUS_SUCCESS) {
1907 		HIF_ERROR("%s: get option val (%d)", __func__, rv);
1908 		goto done;
1909 	}
1910 
1911 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1912 	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1913 			   flag2_value);
1914 	if (rv != QDF_STATUS_SUCCESS) {
1915 		HIF_ERROR("%s: set option val (%d)", __func__, rv);
1916 		goto done;
1917 	}
1918 
1919 	hif_wake_target_cpu(scn);
1920 
1921 done:
1922 
1923 	return rv;
1924 }
1925 
1926 /**
1927  * hif_pci_bus_configure() - configure the pcie bus
1928  * @hif_sc: pointer to the hif context.
1929  *
1930  * Return: 0 for success, nonzero for failure.
1931  */
1932 int hif_pci_bus_configure(struct hif_softc *hif_sc)
1933 {
1934 	int status = 0;
1935 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1936 	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
1937 
1938 	hif_ce_prepare_config(hif_sc);
1939 
1940 	/* initialize sleep state adjust variables */
1941 	hif_state->sleep_timer_init = true;
1942 	hif_state->keep_awake_count = 0;
1943 	hif_state->fake_sleep = false;
1944 	hif_state->sleep_ticks = 0;
1945 
1946 	qdf_timer_init(NULL, &hif_state->sleep_timer,
1947 			       hif_sleep_entry, (void *)hif_state,
1948 			       QDF_TIMER_TYPE_WAKE_APPS);
1949 	hif_state->sleep_timer_init = true;
1950 
1951 	status = hif_wlan_enable(hif_sc);
1952 	if (status) {
1953 		HIF_ERROR("%s: hif_wlan_enable error = %d",
1954 			  __func__, status);
1955 		goto timer_free;
1956 	}
1957 
1958 	A_TARGET_ACCESS_LIKELY(hif_sc);
1959 
1960 	if ((CONFIG_ATH_PCIE_MAX_PERF ||
1961 	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
1962 	    !ce_srng_based(hif_sc)) {
1963 		/*
1964 		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
1965 		 * prevent sleep when we want to keep firmware always awake
1966 		 * note: when we want to keep firmware always awake,
1967 		 *       hif_target_sleep_state_adjust will point to a dummy
1968 		 *       function, and hif_pci_target_sleep_state_adjust must
1969 		 *       be called instead.
1970 		 * note: bus type check is here because AHB bus is reusing
1971 		 *       hif_pci_bus_configure code.
1972 		 */
1973 		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
1974 			if (hif_pci_target_sleep_state_adjust(hif_sc,
1975 					false, true) < 0) {
1976 				status = -EACCES;
1977 				goto disable_wlan;
1978 			}
1979 		}
1980 	}
1981 
1982 	/* todo: consider replacing this with an srng field */
1983 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
1984 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
1985 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
1986 	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
1987 		hif_sc->per_ce_irq = true;
1988 	}
1989 
1990 	status = hif_config_ce(hif_sc);
1991 	if (status)
1992 		goto disable_wlan;
1993 
1994 	/* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
1995 	if (hif_needs_bmi(hif_osc)) {
1996 		status = hif_set_hia(hif_sc);
1997 		if (status)
1998 			goto unconfig_ce;
1999 
2000 		HIF_INFO_MED("%s: hif_set_hia done", __func__);
2001 
2002 	}
2003 
2004 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
2005 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
2006 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
2007 	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
2008 		HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
2009 						__func__);
2010 	else {
2011 		status = hif_configure_irq(hif_sc);
2012 		if (status < 0)
2013 			goto unconfig_ce;
2014 	}
2015 
2016 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2017 
2018 	return status;
2019 
2020 unconfig_ce:
2021 	hif_unconfig_ce(hif_sc);
2022 disable_wlan:
2023 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2024 	hif_wlan_disable(hif_sc);
2025 
2026 timer_free:
2027 	qdf_timer_stop(&hif_state->sleep_timer);
2028 	qdf_timer_free(&hif_state->sleep_timer);
2029 	hif_state->sleep_timer_init = false;
2030 
2031 	HIF_ERROR("%s: failed, status = %d", __func__, status);
2032 	return status;
2033 }
2034 
2035 /**
2036  * hif_pci_close(): hif bus close
2037  *
2038  * Return: n/a
2039  */
2040 void hif_pci_close(struct hif_softc *hif_sc)
2041 {
2042 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
2043 
2044 	hif_pm_runtime_close(hif_pci_sc);
2045 	hif_ce_close(hif_sc);
2046 }
2047 
2048 #define BAR_NUM 0
2049 
2050 static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
2051 				struct pci_dev *pdev,
2052 				const struct pci_device_id *id)
2053 {
2054 	void __iomem *mem;
2055 	int ret = 0;
2056 	uint16_t device_id = 0;
2057 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2058 
2059 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
2060 	if (device_id != id->device)  {
2061 		HIF_ERROR(
2062 		   "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
2063 		   __func__, device_id, id->device);
2064 		/* pci link is down, so returning with error code */
2065 		return -EIO;
2066 	}
2067 
2068 	/* FIXME: pci_assign_resource is compiled out on ARM
2069 	 * so that dev_attach works on the 2.6.38 kernel
2070 	 */
2071 #if (!defined(__LINUX_ARM_ARCH__))
2072 	if (pci_assign_resource(pdev, BAR_NUM)) {
2073 		HIF_ERROR("%s: pci_assign_resource error", __func__);
2074 		return -EIO;
2075 	}
2076 #endif
2077 	if (pci_enable_device(pdev)) {
2078 		HIF_ERROR("%s: pci_enable_device error",
2079 			   __func__);
2080 		return -EIO;
2081 	}
2082 
2083 	/* Request MMIO resources */
2084 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2085 	if (ret) {
2086 		HIF_ERROR("%s: PCI MMIO reservation error", __func__);
2087 		ret = -EIO;
2088 		goto err_region;
2089 	}
2090 
2091 #ifdef CONFIG_ARM_LPAE
2092 	/* if CONFIG_ARM_LPAE is enabled, we have to set the 64-bit DMA mask
2093 	 * even for 32-bit devices.
2094 	 */
2095 	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2096 	if (ret) {
2097 		HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
2098 		goto err_dma;
2099 	}
2100 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2101 	if (ret) {
2102 		HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
2103 		goto err_dma;
2104 	}
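	/*
	 * Note: a sketch only, not what this driver version calls. On
	 * kernels that provide it, the two mask calls above collapse into
	 * a single dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)).
	 */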
2105 #else
2106 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2107 	if (ret) {
2108 		HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
2109 		goto err_dma;
2110 	}
2111 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2112 	if (ret) {
2113 		HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
2114 			   __func__);
2115 		goto err_dma;
2116 	}
2117 #endif
2118 
2119 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2120 
2121 	/* Set bus master bit in PCI_COMMAND to enable DMA */
2122 	pci_set_master(pdev);
2123 
2124 	/* Arrange for access to Target SoC registers. */
2125 	mem = pci_iomap(pdev, BAR_NUM, 0);
2126 	if (!mem) {
2127 		HIF_ERROR("%s: PCI iomap error", __func__);
2128 		ret = -EIO;
2129 		goto err_iomap;
2130 	}
2131 
2132 	HIF_INFO("*****BAR is %pK\n", (void *)mem);
2133 
2134 	sc->mem = mem;
2135 
2136 	/* Hawkeye emulation specific change */
2137 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
2138 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
2139 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
2140 		(device_id == RUMIM2M_DEVICE_ID_NODE3) ||
2141 		(device_id == RUMIM2M_DEVICE_ID_NODE4) ||
2142 		(device_id == RUMIM2M_DEVICE_ID_NODE5)) {
2143 		mem = mem + 0x0c000000;
2144 		sc->mem = mem;
2145 		HIF_INFO("%s: Changing PCI mem base to %pK\n",
2146 			__func__, sc->mem);
2147 	}
2148 
2149 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
2150 	ol_sc->mem = mem;
2151 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
2152 	sc->pci_enabled = true;
2153 	return ret;
2154 
2155 err_iomap:
2156 	pci_clear_master(pdev);
2157 err_dma:
2158 	pci_release_region(pdev, BAR_NUM);
2159 err_region:
2160 	pci_disable_device(pdev);
2161 	return ret;
2162 }
2163 
2164 static int hif_enable_pci_pld(struct hif_pci_softc *sc,
2165 			      struct pci_dev *pdev,
2166 			      const struct pci_device_id *id)
2167 {
2168 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2169 	sc->pci_enabled = true;
2170 	return 0;
2171 }
2172 
2173 
2174 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
2175 {
2176 	pci_disable_msi(sc->pdev);
2177 	pci_iounmap(sc->pdev, sc->mem);
2178 	pci_clear_master(sc->pdev);
2179 	pci_release_region(sc->pdev, BAR_NUM);
2180 	pci_disable_device(sc->pdev);
2181 }
2182 
2183 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
2184 
2185 static void hif_disable_pci(struct hif_pci_softc *sc)
2186 {
2187 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2188 
2189 	if (!ol_sc) {
2190 		HIF_ERROR("%s: ol_sc = NULL", __func__);
2191 		return;
2192 	}
2193 	hif_pci_device_reset(sc);
2194 	sc->hif_pci_deinit(sc);
2195 
2196 	sc->mem = NULL;
2197 	ol_sc->mem = NULL;
2198 }
2199 
2200 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
2201 {
2202 	int ret = 0;
2203 	int targ_awake_limit = 500;
2204 #ifndef QCA_WIFI_3_0
2205 	uint32_t fw_indicator;
2206 #endif
2207 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2208 
2209 	/*
2210 	 * Verify that the Target was started cleanly.
2211 	 * The case where this is most likely is with an AUX-powered
2212 	 * Target and a Host in WoW mode. If the Host crashes,
2213 	 * loses power, or is restarted (without unloading the driver)
2214 	 * then the Target is left (aux) powered and running.  On a
2215 	 * subsequent driver load, the Target is in an unexpected state.
2216 	 * We try to catch that here in order to reset the Target and
2217 	 * retry the probe.
2218 	 */
2219 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2220 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2221 	while (!hif_targ_is_awake(scn, sc->mem)) {
2222 		if (0 == targ_awake_limit) {
2223 			HIF_ERROR("%s: target awake timeout", __func__);
2224 			ret = -EAGAIN;
2225 			goto end;
2226 		}
2227 		qdf_mdelay(1);
2228 		targ_awake_limit--;
2229 	}
2230 
2231 #if PCIE_BAR0_READY_CHECKING
2232 	{
2233 		int wait_limit = 200;
2234 		/* Synchronization point: wait until BAR0 is configured */
2235 		while (wait_limit-- &&
2236 			   !(hif_read32_mb(sc, sc->mem +
2237 					  PCIE_LOCAL_BASE_ADDRESS +
2238 					  PCIE_SOC_RDY_STATUS_ADDRESS)
2239 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
2240 			qdf_mdelay(10);
2241 		}
2242 		if (wait_limit < 0) {
2243 			/* AR6320v1 doesn't support checking of BAR0
2244 			 * configuration; takes two sec to wait for BAR0 ready
2245 			 */
2246 			HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
2247 				    __func__);
2248 		}
2249 	}
2250 #endif
2251 
2252 #ifndef QCA_WIFI_3_0
2253 	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
2254 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2255 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2256 
2257 	if (fw_indicator & FW_IND_INITIALIZED) {
2258 		HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
2259 			   __func__);
2260 		ret = -EAGAIN;
2261 		goto end;
2262 	}
2263 #endif
2264 
2265 end:
2266 	return ret;
2267 }
2268 
2269 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2270 {
2271 	int ret = 0;
2272 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2273 	uint32_t target_type = scn->target_info.target_type;
2274 
2275 	HIF_TRACE("%s: E", __func__);
2276 
2277 	/* do not support MSI, or MSI IRQ setup failed */
2278 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2279 	ret = request_irq(sc->pdev->irq,
2280 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2281 			  "wlan_pci", sc);
2282 	if (ret) {
2283 		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
2284 		goto end;
2285 	}
2286 	scn->wake_irq = sc->pdev->irq;
2287 	/* Use sc->irq instead of sc->pdev->irq, since a
2288 	 * platform_device pdev doesn't have an irq field
2289 	 */
2290 	sc->irq = sc->pdev->irq;
2291 	/* Use Legacy PCI Interrupts */
2292 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2293 		  PCIE_INTR_ENABLE_ADDRESS),
2294 		  HOST_GROUP0_MASK);
2295 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2296 			       PCIE_INTR_ENABLE_ADDRESS));
2297 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2298 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2299 
2300 	if ((target_type == TARGET_TYPE_IPQ4019) ||
2301 			(target_type == TARGET_TYPE_AR900B)  ||
2302 			(target_type == TARGET_TYPE_QCA9984) ||
2303 			(target_type == TARGET_TYPE_AR9888) ||
2304 			(target_type == TARGET_TYPE_QCA9888) ||
2305 			(target_type == TARGET_TYPE_AR6320V1) ||
2306 			(target_type == TARGET_TYPE_AR6320V2) ||
2307 			(target_type == TARGET_TYPE_AR6320V3)) {
2308 		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2309 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2310 	}
2311 end:
2312 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2313 			  "%s: X, ret = %d", __func__, ret);
2314 	return ret;
2315 }
2316 
2317 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
2318 {
2319 	int ret;
2320 	int ce_id, irq;
2321 	uint32_t msi_data_start;
2322 	uint32_t msi_data_count;
2323 	uint32_t msi_irq_start;
2324 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2325 
2326 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2327 					    &msi_data_count, &msi_data_start,
2328 					    &msi_irq_start);
2329 	if (ret)
2330 		return ret;
2331 
2332 	/* needs to match the ce_id -> irq data mapping
2333 	 * used in the srng parameter configuration
2334 	 */
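	/*
	 * Mapping sketch (illustrative numbers, not a fixed configuration):
	 * with msi_data_count == 8 and msi_irq_start == 1, ce_id 0..7 map
	 * to msi_data 1..8 and ce_id 8 wraps back to msi_data 1;
	 * pld_get_msi_irq() then resolves each msi_data to an OS irq number.
	 */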
2335 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2336 		unsigned int msi_data;
2337 
2338 		if (!ce_sc->tasklets[ce_id].inited)
2339 			continue;
2340 
2341 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
2342 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2343 
2344 		HIF_INFO("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
2345 			  ce_id, msi_data, irq);
2346 
2347 		free_irq(irq, &ce_sc->tasklets[ce_id]);
2348 	}
2349 
2350 	return ret;
2351 }
2352 
2353 static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2354 {
2355 	int i, j, irq;
2356 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2357 	struct hif_exec_context *hif_ext_group;
2358 
2359 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2360 		hif_ext_group = hif_state->hif_ext_group[i];
2361 		if (hif_ext_group->irq_requested) {
2362 			hif_ext_group->irq_requested = false;
2363 			for (j = 0; j < hif_ext_group->numirq; j++) {
2364 				irq = hif_ext_group->os_irq[j];
2365 				free_irq(irq, hif_ext_group);
2366 			}
2367 			hif_ext_group->numirq = 0;
2368 		}
2369 	}
2370 }
2371 
2372 /**
2373  * hif_pci_nointrs(): disable IRQ
2374  *
2375  * This function stops interrupt(s)
2376  *
2377  * @scn: struct hif_softc
2378  *
2379  * Return: none
2380  */
2381 void hif_pci_nointrs(struct hif_softc *scn)
2382 {
2383 	int i, ret;
2384 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2385 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2386 
2387 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2388 
2389 	if (scn->request_irq_done == false)
2390 		return;
2391 
2392 	hif_pci_deconfigure_grp_irq(scn);
2393 
2394 	ret = hif_ce_srng_msi_free_irq(scn);
2395 	if (ret != -EINVAL) {
2396 		/* ce irqs freed in hif_ce_srng_msi_free_irq */
2397 
2398 		if (scn->wake_irq)
2399 			free_irq(scn->wake_irq, scn);
2400 		scn->wake_irq = 0;
2401 	} else if (sc->num_msi_intrs > 0) {
2402 		/* MSI interrupt(s) */
2403 		for (i = 0; i < sc->num_msi_intrs; i++)
2404 			free_irq(sc->irq + i, sc);
2405 		sc->num_msi_intrs = 0;
2406 	} else {
2407 		/* Legacy PCI line interrupt
2408 		 * Use sc->irq instead of sc->pdev->irq, since a
2409 		 * platform_device pdev doesn't have an irq field
2410 		 */
2411 		free_irq(sc->irq, sc);
2412 	}
2413 	scn->request_irq_done = false;
2414 }
2415 
2416 /**
2417  * hif_pci_disable_bus(): disable the bus
2418  *
2419  * This function disables the bus
2420  *
2421  * @scn: hif context
2422  *
2423  * Return: none
2424  */
2425 void hif_pci_disable_bus(struct hif_softc *scn)
2426 {
2427 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2428 	struct pci_dev *pdev;
2429 	void __iomem *mem;
2430 	struct hif_target_info *tgt_info = &scn->target_info;
2431 
2432 	/* Attach did not succeed, all resources have been
2433 	 * freed in error handler
2434 	 */
2435 	if (!sc)
2436 		return;
2437 
2438 	pdev = sc->pdev;
2439 	if (ADRASTEA_BU) {
2440 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2441 
2442 		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2443 		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
2444 			       HOST_GROUP0_MASK);
2445 	}
2446 
2447 #if defined(CPU_WARM_RESET_WAR)
2448 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2.
2449 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2450 	 * verified for AR9888_REV1
2451 	 */
2452 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2453 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2454 		hif_pci_device_warm_reset(sc);
2455 	else
2456 		hif_pci_device_reset(sc);
2457 #else
2458 	hif_pci_device_reset(sc);
2459 #endif
2460 	mem = (void __iomem *)sc->mem;
2461 	if (mem) {
2462 		hif_dump_pipe_debug_count(scn);
2463 		if (scn->athdiag_procfs_inited) {
2464 			athdiag_procfs_remove();
2465 			scn->athdiag_procfs_inited = false;
2466 		}
2467 		sc->hif_pci_deinit(sc);
2468 		scn->mem = NULL;
2469 	}
2470 	HIF_INFO("%s: X", __func__);
2471 }
2472 
2473 #define OL_ATH_PCI_PM_CONTROL 0x44
2474 
2475 #ifdef FEATURE_RUNTIME_PM
2476 /**
2477  * hif_runtime_prevent_linkdown() - prevent or allow runtime suspend
2478  * @scn: hif context
2479  * @flag: prevent linkdown if true, otherwise allow
2480  *
2481  * this api should only be called as part of bus prevent linkdown
2482  */
2483 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2484 {
2485 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2486 
2487 	if (flag)
2488 		qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
2489 	else
2490 		qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
2491 }
2492 #else
2493 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2494 {
2495 }
2496 #endif
2497 
2498 #if defined(CONFIG_PCI_MSM)
2499 /**
2500  * hif_pci_prevent_linkdown(): prevent or allow linkdown
2501  * @flag: true prevents linkdown, false allows
2502  *
2503  * Calls into the platform driver to vote against taking down the
2504  * pcie link.
2505  *
2506  * Return: n/a
2507  */
2508 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2509 {
2510 	int errno;
2511 
2512 	HIF_INFO("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2513 	hif_runtime_prevent_linkdown(scn, flag);
2514 
2515 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2516 	if (errno)
2517 		HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
2518 			  __func__, errno);
2519 }
2520 #else
2521 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2522 {
2523 	HIF_INFO("wlan: %s pcie power collapse", (flag ? "disable" : "enable"));
2524 	hif_runtime_prevent_linkdown(scn, flag);
2525 }
2526 #endif
2527 
2528 /**
2529  * hif_pci_bus_suspend(): prepare hif for suspend
2530  *
2531  * Return: Errno
2532  */
2533 int hif_pci_bus_suspend(struct hif_softc *scn)
2534 {
2535 	hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
2536 
2537 	if (hif_drain_tasklets(scn)) {
2538 		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2539 		return -EBUSY;
2540 	}
2541 
2542 	/* Stop the HIF Sleep Timer */
2543 	hif_cancel_deferred_target_sleep(scn);
2544 
2545 	return 0;
2546 }
2547 
2548 /**
2549  * __hif_check_link_status() - API to check whether the PCIe link is active
2550  * @scn: HIF Context
2551  *
2552  * API reads the PCIe config space to verify if PCIe link training is
2553  * successful or not.
2554  *
2555  * Return: Success/Failure
2556  */
2557 static int __hif_check_link_status(struct hif_softc *scn)
2558 {
2559 	uint16_t dev_id = 0;
2560 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2561 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2562 
2563 	if (!sc) {
2564 		HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
2565 		return -EINVAL;
2566 	}
2567 
2568 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2569 
2570 	if (dev_id == sc->devid)
2571 		return 0;
2572 
2573 	HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2574 	       __func__, dev_id);
2575 
2576 	scn->recovery = true;
2577 
2578 	if (cbk && cbk->set_recovery_in_progress)
2579 		cbk->set_recovery_in_progress(cbk->context, true);
2580 	else
2581 		HIF_ERROR("%s: Driver Global Recovery is not set", __func__);
2582 
2583 	pld_is_pci_link_down(sc->dev);
2584 	return -EACCES;
2585 }
2586 
2587 /**
2588  * hif_pci_bus_resume(): prepare hif for resume
2589  *
2590  * Return: Errno
2591  */
2592 int hif_pci_bus_resume(struct hif_softc *scn)
2593 {
2594 	int errno;
2595 
2596 	errno = __hif_check_link_status(scn);
2597 	if (errno)
2598 		return errno;
2599 
2600 	hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2601 
2602 	return 0;
2603 }
2604 
2605 /**
2606  * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2607  * @scn: hif context
2608  *
2609  * Ensure that if we received the wakeup message before the irq
2610  * was disabled, the message is processed before suspending.
2611  *
2612  * Return: 0 on success.
2613  */
2614 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2615 {
2616 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2617 		qdf_atomic_set(&scn->link_suspended, 1);
2618 
2619 	hif_apps_wake_irq_enable(GET_HIF_OPAQUE_HDL(scn));
2620 
2621 	return 0;
2622 }
2623 
2624 /**
2625  * hif_pci_bus_resume_noirq() - re-enable normal interrupt processing
2626  * @scn: hif context
2627  *
2628  * Disable the wake irq and mark the link as no longer suspended so
2629  * that normal interrupt handling can resume.
2630  *
2631  * Return: 0 on success.
2632  */
2633 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2634 {
2635 	hif_apps_wake_irq_disable(GET_HIF_OPAQUE_HDL(scn));
2636 
2637 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2638 		qdf_atomic_set(&scn->link_suspended, 0);
2639 
2640 	return 0;
2641 }
2642 
2643 #ifdef FEATURE_RUNTIME_PM
2644 /**
2645  * __hif_runtime_pm_set_state(): utility function
2646  * @state: state to set
2647  *
2648  * indexes into the runtime pm state and sets it.
2649  * sets the runtime pm state atomic to the given state.
2650 static void __hif_runtime_pm_set_state(struct hif_softc *scn,
2651 				enum hif_pm_runtime_state state)
2652 {
2653 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2654 
2655 	if (!sc) {
2656 		HIF_ERROR("%s: HIF_CTX not initialized",
2657 		       __func__);
2658 		return;
2659 	}
2660 
2661 	qdf_atomic_set(&sc->pm_state, state);
2662 }
2663 
2664 /**
2665  * hif_runtime_pm_set_state_inprogress(): adjust runtime pm state
2666  *
2667  * Notify hif that a runtime pm operation has started
2668  */
2669 static void hif_runtime_pm_set_state_inprogress(struct hif_softc *scn)
2670 {
2671 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_INPROGRESS);
2672 }
2673 
2674 /**
2675  * hif_runtime_pm_set_state_on():  adjust runtime pm state
2676  *
2677  * Notify hif that the runtime pm state should be on
2678  */
2679 static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
2680 {
2681 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
2682 }
2683 
2684 /**
2685  * hif_runtime_pm_set_state_suspended():  adjust runtime pm state
2686  *
2687  * Notify hif that a runtime suspend attempt has been completed successfully
2688  */
2689 static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
2690 {
2691 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
2692 }
2693 
2694 /**
2695  * hif_log_runtime_suspend_success() - log a successful runtime suspend
2696  */
2697 static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
2698 {
2699 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2700 
2701 	if (!sc)
2702 		return;
2703 
2704 	sc->pm_stats.suspended++;
2705 	sc->pm_stats.suspend_jiffies = jiffies;
2706 }
2707 
2708 /**
2709  * hif_log_runtime_suspend_failure() - log a failed runtime suspend
2710  *
2711  * log a failed runtime suspend
2712  * mark last busy to prevent immediate runtime suspend
2713  */
2714 static void hif_log_runtime_suspend_failure(void *hif_ctx)
2715 {
2716 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2717 
2718 	if (!sc)
2719 		return;
2720 
2721 	sc->pm_stats.suspend_err++;
2722 }
2723 
2724 /**
2725  * hif_log_runtime_resume_success() - log a successful runtime resume
2726  *
2727  * log a successful runtime resume
2728  * mark last busy to prevent immediate runtime suspend
2729  */
2730 static void hif_log_runtime_resume_success(void *hif_ctx)
2731 {
2732 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2733 
2734 	if (!sc)
2735 		return;
2736 
2737 	sc->pm_stats.resumed++;
2738 }
2739 
2740 /**
2741  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
2742  *
2743  * Record the failure.
2744  * mark last busy to delay a retry.
2745  * adjust the runtime_pm state.
2746  */
2747 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
2748 {
2749 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2750 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2751 
2752 	hif_log_runtime_suspend_failure(hif_ctx);
2753 	if (hif_pci_sc)
2754 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2755 	hif_runtime_pm_set_state_on(scn);
2756 }
2757 
2758 /**
2759  * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
2760  *
2761  * Makes sure that the pci link will be taken down by the suspend operation.
2762  * If the hif layer is configured to leave the bus on, runtime suspend will
2763  * not save any power.
2764  *
2765  * Set the runtime suspend state to in progress.
2766  *
2767  * return -EINVAL if the bus won't go down.  otherwise return 0
2768  */
2769 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
2770 {
2771 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2772 
2773 	if (!hif_can_suspend_link(hif_ctx)) {
2774 		HIF_ERROR("Runtime PM not supported for link up suspend");
2775 		return -EINVAL;
2776 	}
2777 
2778 	hif_runtime_pm_set_state_inprogress(scn);
2779 	return 0;
2780 }
2781 
2782 /**
2783  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
2784  *
2785  * Record the success.
2786  * adjust the runtime_pm state
2787  */
2788 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
2789 {
2790 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2791 
2792 	hif_runtime_pm_set_state_suspended(scn);
2793 	hif_log_runtime_suspend_success(scn);
2794 }
2795 
2796 /**
2797  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
2798  *
2799  * update the runtime pm state.
2800  */
2801 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
2802 {
2803 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2804 
2805 	hif_runtime_pm_set_state_inprogress(scn);
2806 }
2807 
2808 /**
2809  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
2810  *
2811  * record the success.
2812  * adjust the runtime_pm state
2813  */
2814 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
2815 {
2816 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2817 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2818 
2819 	hif_log_runtime_resume_success(hif_ctx);
2820 	if (hif_pci_sc)
2821 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2822 	hif_runtime_pm_set_state_on(scn);
2823 }
2824 
2825 /**
2826  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
2827  *
2828  * Return: 0 for success and non-zero error code for failure
2829  */
2830 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
2831 {
2832 	int errno;
2833 
2834 	errno = hif_bus_suspend(hif_ctx);
2835 	if (errno) {
2836 		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
2837 		return errno;
2838 	}
2839 
2840 	errno = hif_apps_irqs_disable(hif_ctx);
2841 	if (errno) {
2842 		HIF_ERROR("%s: failed disable irqs: %d", __func__, errno);
2843 		goto bus_resume;
2844 	}
2845 
2846 	errno = hif_bus_suspend_noirq(hif_ctx);
2847 	if (errno) {
2848 		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
2849 		goto irqs_enable;
2850 	}
2851 
2852 	/* link should always be down; skip enable wake irq */
2853 
2854 	return 0;
2855 
2856 irqs_enable:
2857 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
2858 
2859 bus_resume:
2860 	QDF_BUG(!hif_bus_resume(hif_ctx));
2861 
2862 	return errno;
2863 }
2864 
2865 /**
2866  * hif_fastpath_resume() - resume fastpath for runtimepm
2867  *
2868  * ensure that the fastpath write index register is up to date
2869  * since runtime pm may cause ce_send_fast to skip the register
2870  * write.
2871  *
2872  * fastpath only applicable to legacy copy engine
2873  */
2874 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
2875 {
2876 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2877 	struct CE_state *ce_state;
2878 
2879 	if (!scn)
2880 		return;
2881 
2882 	if (scn->fastpath_mode_on) {
2883 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2884 			return;
2885 
2886 		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
2887 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
2888 
2889 		/* war_ce_src_ring_write_idx_set */
2890 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
2891 				ce_state->src_ring->write_index);
2892 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
2893 		Q_TARGET_ACCESS_END(scn);
2894 	}
2895 }
2896 
2897 /**
2898  * hif_runtime_resume() - do the bus resume part of a runtime resume
2899  *
2900  *  Return: 0 for success and non-zero error code for failure
2901  */
2902 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
2903 {
2904 	/* link should always be down; skip disable wake irq */
2905 
2906 	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
2907 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
2908 	QDF_BUG(!hif_bus_resume(hif_ctx));
2909 	return 0;
2910 }
2911 #endif /* #ifdef FEATURE_RUNTIME_PM */
2912 
2913 #if CONFIG_PCIE_64BIT_MSI
2914 static void hif_free_msi_ctx(struct hif_softc *scn)
2915 {
2916 	struct hif_pci_softc *sc = scn->hif_sc;
2917 	struct hif_msi_info *info = &sc->msi_info;
2918 	struct device *dev = scn->qdf_dev->dev;
2919 
2920 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
2921 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
2922 	info->magic = NULL;
2923 	info->magic_dma = 0;
2924 }
2925 #else
2926 static void hif_free_msi_ctx(struct hif_softc *scn)
2927 {
2928 }
2929 #endif
2930 
2931 void hif_pci_disable_isr(struct hif_softc *scn)
2932 {
2933 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2934 
2935 	hif_exec_kill(&scn->osc);
2936 	hif_nointrs(scn);
2937 	hif_free_msi_ctx(scn);
2938 	/* Cancel the pending tasklet */
2939 	ce_tasklet_kill(scn);
2940 	tasklet_kill(&sc->intr_tq);
2941 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
2942 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
2943 }
2944 
2945 /* Function to reset SoC */
2946 void hif_pci_reset_soc(struct hif_softc *hif_sc)
2947 {
2948 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
2949 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
2950 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
2951 
2952 #if defined(CPU_WARM_RESET_WAR)
2953 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2.
2954 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2955 	 * verified for AR9888_REV1
2956 	 */
2957 	if (tgt_info->target_version == AR9888_REV2_VERSION)
2958 		hif_pci_device_warm_reset(sc);
2959 	else
2960 		hif_pci_device_reset(sc);
2961 #else
2962 	hif_pci_device_reset(sc);
2963 #endif
2964 }
2965 
2966 #ifdef CONFIG_PCI_MSM
2967 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
2968 {
2969 	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
2970 	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
2971 }
2972 #else
2973 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}
2974 #endif
2975 
2976 /**
2977  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
2978  * @sc: HIF PCIe Context
2979  *
2980  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
2981  *
2982  * Return: Failure to caller
2983  */
2984 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
2985 {
2986 	uint16_t val = 0;
2987 	uint32_t bar = 0;
2988 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
2989 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2990 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
2991 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
2992 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2993 	A_target_id_t pci_addr = scn->mem;
2994 
2995 	HIF_ERROR("%s: keep_awake_count = %d",
2996 			__func__, hif_state->keep_awake_count);
2997 
2998 	pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
2999 
3000 	HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
3001 
3002 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
3003 
3004 	HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
3005 
3006 	pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
3007 
3008 	HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
3009 
3010 	pci_read_config_word(sc->pdev, PCI_STATUS, &val);
3011 
3012 	HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
3013 
3014 	pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
3015 
3016 	HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
3017 
3018 	HIF_ERROR("%s: SOC_WAKE_ADDR 0x%08x", __func__,
3019 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3020 						PCIE_SOC_WAKE_ADDRESS));
3021 
3022 	HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
3023 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3024 							RTC_STATE_ADDRESS));
3025 
3026 	HIF_ERROR("%s:error, wakeup target", __func__);
3027 	hif_msm_pcie_debug_info(sc);
3028 
3029 	if (!cfg->enable_self_recovery)
3030 		QDF_BUG(0);
3031 
3032 	scn->recovery = true;
3033 
3034 	if (cbk->set_recovery_in_progress)
3035 		cbk->set_recovery_in_progress(cbk->context, true);
3036 
3037 	pld_is_pci_link_down(sc->dev);
3038 	return -EACCES;
3039 }
3040 
3041 /*
3042  * For now, we use simple on-demand sleep/wake.
3043  * Some possible improvements:
3044  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
3045  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
3046  *   Careful, though, these functions may be used by
3047  *  interrupt handlers ("atomic")
3048  *  -Don't use host_reg_table for this code; instead use values directly
3049  *  -Use a separate timer to track activity and allow Target to sleep only
3050  *   if it hasn't done anything for a while; may even want to delay some
3051  *   processing for a short while in order to "batch" (e.g.) transmit
3052  *   requests with completion processing into "windows of up time".  Costs
3053  *   some performance, but improves power utilization.
3054  *  -On some platforms, it might be possible to eliminate explicit
3055  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
3056  *   recover from the failure by forcing the Target awake.
3057  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
3058  *   overhead in some cases. Perhaps this makes more sense when
3059  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
3060  *   disabled.
3061  *  -It is possible to compile this code out and simply force the Target
3062  *   to remain awake.  That would yield optimal performance at the cost of
3063  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
3064  *
3065  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
3066  */
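/*
 * Typical call pattern (a sketch mirroring how callers in this file use
 * the API): wake and wait before touching Target registers, then allow
 * sleep again when done:
 *
 *	if (hif_pci_target_sleep_state_adjust(scn, false, true) == 0) {
 *		... access Target registers ...
 *		hif_pci_target_sleep_state_adjust(scn, true, false);
 *	}
 */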
3067 /**
3068  * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
3069  * @scn: hif_softc pointer.
3070  * @sleep_ok: if true, allow the Target to sleep; if false, keep it awake
3071  * @wait_for_it: when waking, spin until the Target is verified awake
3072  *
3073  * Adjust the Target sleep state, tracking nested keep-awake requests
3074  * via keep_awake_count.
3075  *
3076  * Return: 0 on success, -EACCES on failure
3076  */
3077 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
3078 			      bool sleep_ok, bool wait_for_it)
3079 {
3080 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3081 	A_target_id_t pci_addr = scn->mem;
3082 	static int max_delay;
3083 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3084 	static int debug;

3085 	if (scn->recovery)
3086 		return -EACCES;
3087 
3088 	if (qdf_atomic_read(&scn->link_suspended)) {
3089 		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
3090 		debug = true;
3091 		QDF_ASSERT(0);
3092 		return -EACCES;
3093 	}
3094 
3095 	if (debug) {
3096 		wait_for_it = true;
3097 		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
3098 				__func__);
3099 		QDF_ASSERT(0);
3100 	}
3101 
3102 	if (sleep_ok) {
3103 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3104 		hif_state->keep_awake_count--;
3105 		if (hif_state->keep_awake_count == 0) {
3106 			/* Allow sleep */
3107 			hif_state->verified_awake = false;
3108 			hif_state->sleep_ticks = qdf_system_ticks();
3109 		}
3110 		if (hif_state->fake_sleep == false) {
3111 			/* Set the Fake Sleep */
3112 			hif_state->fake_sleep = true;
3113 
3114 			/* Start the Sleep Timer */
3115 			qdf_timer_stop(&hif_state->sleep_timer);
3116 			qdf_timer_start(&hif_state->sleep_timer,
3117 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
3118 		}
3119 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3120 	} else {
3121 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3122 
3123 		if (hif_state->fake_sleep) {
3124 			hif_state->verified_awake = true;
3125 		} else {
3126 			if (hif_state->keep_awake_count == 0) {
3127 				/* Force AWAKE */
3128 				hif_write32_mb(sc, pci_addr +
3129 					      PCIE_LOCAL_BASE_ADDRESS +
3130 					      PCIE_SOC_WAKE_ADDRESS,
3131 					      PCIE_SOC_WAKE_V_MASK);
3132 			}
3133 		}
3134 		hif_state->keep_awake_count++;
3135 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3136 
3137 		if (wait_for_it && !hif_state->verified_awake) {
3138 #define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
3139 			int tot_delay = 0;
3140 			int curr_delay = 5;
3141 
3142 			for (;; ) {
3143 				if (hif_targ_is_awake(scn, pci_addr)) {
3144 					hif_state->verified_awake = true;
3145 					break;
3146 				}
3147 				if (!hif_pci_targ_is_present(scn, pci_addr))
3148 					break;
3149 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
3150 					return hif_log_soc_wakeup_timeout(sc);
3151 
3152 				OS_DELAY(curr_delay);
3153 				tot_delay += curr_delay;
3154 
3155 				if (curr_delay < 50)
3156 					curr_delay += 5;
3157 			}
3158 
3159 			/*
3160 			 * NB: If Target has to come out of Deep Sleep,
3161 			 * this may take a few msecs. Typically, though,
3162 			 * this delay should be <30us.
3163 			 */
3164 			if (tot_delay > max_delay)
3165 				max_delay = tot_delay;
3166 		}
3167 	}
3168 
3169 	if (debug && hif_state->verified_awake) {
3170 		debug = 0;
3171 		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
3172 			__func__,
3173 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3174 				PCIE_INTR_ENABLE_ADDRESS),
3175 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3176 				PCIE_INTR_CAUSE_ADDRESS),
3177 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3178 				CPU_INTR_ADDRESS),
3179 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3180 				PCIE_INTR_CLR_ADDRESS),
3181 			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
3182 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
3183 	}
3184 
3185 	return 0;
3186 }
3187 
3188 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
3189 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
3190 {
3191 	uint32_t value;
3192 	void *addr;
3193 
3194 	addr = scn->mem + offset;
3195 	value = hif_read32_mb(scn, addr);
3196 
3197 	{
3198 		unsigned long irq_flags;
3199 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3200 
3201 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3202 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3203 		pcie_access_log[idx].is_write = false;
3204 		pcie_access_log[idx].addr = addr;
3205 		pcie_access_log[idx].value = value;
3206 		pcie_access_log_seqnum++;
3207 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3208 	}
3209 
3210 	return value;
3211 }
3212 
3213 void
3214 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
3215 {
3216 	void *addr;
3217 
3218 	addr = scn->mem + (offset);
3219 	hif_write32_mb(scn, addr, value);
3220 
3221 	{
3222 		unsigned long irq_flags;
3223 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3224 
3225 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3226 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3227 		pcie_access_log[idx].is_write = true;
3228 		pcie_access_log[idx].addr = addr;
3229 		pcie_access_log[idx].value = value;
3230 		pcie_access_log_seqnum++;
3231 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3232 	}
3233 }
3234 
3235 /**
3236  * hif_target_dump_access_log() - dump access log
3237  *
3238  * dump access log
3239  *
3240  * Return: n/a
3241  */
3242 void hif_target_dump_access_log(void)
3243 {
3244 	int idx, len, start_idx, cur_idx;
3245 	unsigned long irq_flags;
3246 
3247 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3248 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
3249 		len = PCIE_ACCESS_LOG_NUM;
3250 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3251 	} else {
3252 		len = pcie_access_log_seqnum;
3253 		start_idx = 0;
3254 	}
3255 
3256 	for (idx = 0; idx < len; idx++) {
3257 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
3258 		HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
3259 		       __func__, idx,
3260 		       pcie_access_log[cur_idx].seqnum,
3261 		       pcie_access_log[cur_idx].is_write,
3262 		       pcie_access_log[cur_idx].addr,
3263 		       pcie_access_log[cur_idx].value);
3264 	}
3265 
3266 	pcie_access_log_seqnum = 0;
3267 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3268 }
3269 #endif
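/*
 * Usage sketch for the checked accessors above (compiled only under
 * CONFIG_ATH_PCIE_ACCESS_DEBUG; FW_INDICATOR_ADDRESS is just an example
 * offset already used elsewhere in this file):
 *
 *	uint32_t val = hif_target_read_checked(scn, FW_INDICATOR_ADDRESS);
 *	hif_target_write_checked(scn, FW_INDICATOR_ADDRESS, val);
 *	hif_target_dump_access_log();
 */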
3270 
3271 #ifndef HIF_AHB
3272 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
3273 {
3274 	QDF_BUG(0);
3275 	return -EINVAL;
3276 }
3277 
3278 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
3279 {
3280 	QDF_BUG(0);
3281 	return -EINVAL;
3282 }
3283 #endif
3284 
3285 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
3286 {
3287 	struct ce_tasklet_entry *tasklet_entry = context;

3288 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
3289 }

3290 extern const char *ce_name[];
3291 
3292 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3293 {
3294 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3295 
3296 	return pci_scn->ce_msi_irq_num[ce_id];
3297 }
3298 
3299 /* hif_ce_srng_msi_irq_disable() - disable the irq for msi
3300  * @hif_sc: hif context
3301  * @ce_id: which ce to disable copy complete interrupts for
3302  *
3303  * since MSI interrupts are not level based, the system can function
3304  * without disabling these interrupts.  Interrupt mitigation can be
3305  * added here for better system performance.
3306  */
3307 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3308 {
3309 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3310 }
3311 
3312 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3313 {
3314 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3315 }
3316 
3317 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3318 {
3319 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3320 }
3321 
3322 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3323 {
3324 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3325 }
3326 
3327 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
3328 {
3329 	int ret;
3330 	int ce_id, irq;
3331 	uint32_t msi_data_start;
3332 	uint32_t msi_data_count;
3333 	uint32_t msi_irq_start;
3334 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3335 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3336 
3337 	/* do wake irq assignment */
3338 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
3339 					  &msi_data_count, &msi_data_start,
3340 					  &msi_irq_start);
3341 	if (ret)
3342 		return ret;
3343 
3344 	scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_irq_start);
3345 	ret = request_irq(scn->wake_irq, hif_wake_interrupt_handler, 0,
3346 			  "wlan_wake_irq", scn);
3347 	if (ret)
3348 		return ret;
3349 
3350 	/* do ce irq assignments */
3351 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3352 					    &msi_data_count, &msi_data_start,
3353 					    &msi_irq_start);
3354 	if (ret)
3355 		goto free_wake_irq;
3356 
3357 	if (ce_srng_based(scn)) {
3358 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
3359 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
3360 	} else {
3361 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
3362 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
3363 	}
3364 
3365 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
3366 
3367 	/* needs to match the ce_id -> irq data mapping
3368 	 * used in the srng parameter configuration
3369 	 */
3370 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3371 		unsigned int msi_data = (ce_id % msi_data_count) +
3372 			msi_irq_start;
3373 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3374 		HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)",
3375 			 __func__, ce_id, msi_data, irq,
3376 			 &ce_sc->tasklets[ce_id]);
3377 
3378 		/* implies the ce is also initialized */
3379 		if (!ce_sc->tasklets[ce_id].inited)
3380 			continue;
3381 
3382 		pci_sc->ce_msi_irq_num[ce_id] = irq;
3383 		ret = request_irq(irq, hif_ce_interrupt_handler,
3384 				  IRQF_SHARED,
3385 				  ce_name[ce_id],
3386 				  &ce_sc->tasklets[ce_id]);
3387 		if (ret)
3388 			goto free_irq;
3389 	}
3390 
3391 	return ret;
3392 
3393 free_irq:
3394 	/* the request_irq for the last ce_id failed so skip it. */
3395 	while (ce_id > 0 && ce_id < scn->ce_count) {
3396 		unsigned int msi_data;
3397 
3398 		ce_id--;
3399 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
3400 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3401 		free_irq(irq, &ce_sc->tasklets[ce_id]);
3402 	}
3403 
3404 free_wake_irq:
3405 	free_irq(scn->wake_irq, scn);
3406 	scn->wake_irq = 0;
3407 
3408 	return ret;
3409 }
3410 
3411 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3412 {
3413 	int i;
3414 
3415 	for (i = 0; i < hif_ext_group->numirq; i++)
3416 		disable_irq_nosync(hif_ext_group->os_irq[i]);
3417 }
3418 
3419 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3420 {
3421 	int i;
3422 
3423 	for (i = 0; i < hif_ext_group->numirq; i++)
3424 		enable_irq(hif_ext_group->os_irq[i]);
3425 }
3426 
3427 /**
3428  * hif_pci_get_irq_name() - get irq name
3429  * This function maps an irq number to its
3430  * irq name.
3431  *
3432  * @irq_no: irq number
3433  *
3434  * Return: irq name
3435  */
3436 const char *hif_pci_get_irq_name(int irq_no)
3437 {
3438 	return "pci-dummy";
3439 }
3440 
3441 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3442 			      struct hif_exec_context *hif_ext_group)
3443 {
3444 	int ret = 0;
3445 	int irq = 0;
3446 	int j;
3447 
3448 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3449 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3450 	hif_ext_group->irq_name = &hif_pci_get_irq_name;
3451 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3452 
3453 	for (j = 0; j < hif_ext_group->numirq; j++) {
3454 		irq = hif_ext_group->irq[j];
3455 
3456 		HIF_DBG("%s: request_irq = %d for grp %d",
3457 			  __func__, irq, hif_ext_group->grp_id);
3458 		ret = request_irq(irq,
3459 				  hif_ext_group_interrupt_handler,
3460 				  IRQF_SHARED, "wlan_EXT_GRP",
3461 				  hif_ext_group);
3462 		if (ret) {
3463 			HIF_ERROR("%s: request_irq failed ret = %d",
3464 				  __func__, ret);
3465 			return -EFAULT;
3466 		}
3467 		hif_ext_group->os_irq[j] = irq;
3468 	}
3469 	hif_ext_group->irq_requested = true;
3470 	return 0;
3471 }
3472 
3473 /**
3474  * hif_configure_irq() - configure interrupt
3475  *
3476  * This function configures interrupt(s)
3477  *
3478  * @sc: PCIe control struct
3479  * @hif_hdl: struct HIF_CE_state
3480  *
3481  * Return: 0 - for success
3482  */
3483 int hif_configure_irq(struct hif_softc *scn)
3484 {
3485 	int ret = 0;
3486 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3487 
3488 	HIF_TRACE("%s: E", __func__);
3489 
3490 	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
3491 		scn->request_irq_done = false;
3492 		return 0;
3493 	}
3494 
3495 	hif_init_reschedule_tasklet_work(sc);
3496 
3497 	ret = hif_ce_msi_configure_irq(scn);
3498 	if (ret == 0) {
3499 	if (ret == 0)
3500 		goto end;
3501 
3502 	switch (scn->target_info.target_type) {
3503 	case TARGET_TYPE_IPQ4019:
3504 		ret = hif_ahb_configure_legacy_irq(sc);
3505 		break;
3506 	case TARGET_TYPE_QCA8074:
3507 	case TARGET_TYPE_QCA8074V2:
3508 	case TARGET_TYPE_QCA6018:
3509 		ret = hif_ahb_configure_irq(sc);
3510 		break;
3511 	default:
3512 		ret = hif_pci_configure_legacy_irq(sc);
3513 		break;
3514 	}
3515 	if (ret < 0) {
3516 		HIF_ERROR("%s: failed to configure legacy irq, error = %d",
3517 			__func__, ret);
3518 		return ret;
3519 	}
3520 end:
3521 	scn->request_irq_done = true;
3522 	return 0;
3523 }
3524 
3525 /**
3526  * hif_trigger_timer_irq(): trigger interrupt on LF_TIMER 0
3527  * @scn: hif control structure
3528  *
3529  * Sets IRQ bit in LF Timer Status Address to wake peregrine/swift
3530  * stuck at a polling loop in pcie_address_config in FW
3531  *
3532  * Return: none
3533  */
3534 static void hif_trigger_timer_irq(struct hif_softc *scn)
3535 {
3536 	int tmp;
3537 	/* Trigger IRQ on Peregrine/Swift by setting
3538 	 * IRQ Bit of LF_TIMER 0
3539 	 */
3540 	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3541 						SOC_LF_TIMER_STATUS0_ADDRESS));
3542 	/* Set Raw IRQ Bit */
3543 	tmp |= 1;
3544 	/* SOC_LF_TIMER_STATUS0 */
3545 	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3546 		       SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
3547 }
3548 
3549 /**
3550  * hif_target_sync() : ensure the target is ready
3551  * @scn: hif control structure
3552  *
3553  * Informs fw that we plan to use legacy interrupts so that
3554  * it can begin booting. Ensures that the fw finishes booting
3555  * before continuing. Should be called before trying to write
3556  * to the targets other registers for the first time.
3557  *
3558  * Return: none
3559  */
3560 static void hif_target_sync(struct hif_softc *scn)
3561 {
3562 	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3563 			    PCIE_INTR_ENABLE_ADDRESS),
3564 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3565 	/* read to flush pcie write */
3566 	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3567 			PCIE_INTR_ENABLE_ADDRESS));
3568 
3569 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3570 			PCIE_SOC_WAKE_ADDRESS,
3571 			PCIE_SOC_WAKE_V_MASK);
3572 	while (!hif_targ_is_awake(scn, scn->mem))
3573 		;
3574 
3575 	if (HAS_FW_INDICATOR) {
3576 		int wait_limit = 500;
3577 		int fw_ind = 0;
3578 		int retry_count = 0;
3579 		uint32_t target_type = scn->target_info.target_type;
3580 fw_retry:
3581 		HIF_TRACE("%s: Loop checking FW signal", __func__);
3582 		while (1) {
3583 			fw_ind = hif_read32_mb(scn, scn->mem +
3584 					FW_INDICATOR_ADDRESS);
3585 			if (fw_ind & FW_IND_INITIALIZED)
3586 				break;
3587 			if (wait_limit-- < 0)
3588 				break;
3589 			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3590 			    PCIE_INTR_ENABLE_ADDRESS),
3591 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3592 			    /* read to flush pcie write */
3593 			(void)hif_read32_mb(scn, scn->mem +
3594 			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
3595 
3596 			qdf_mdelay(10);
3597 		}
3598 		if (wait_limit < 0) {
3599 			if (target_type == TARGET_TYPE_AR9888 &&
3600 			    retry_count++ < 2) {
3601 				hif_trigger_timer_irq(scn);
3602 				wait_limit = 500;
3603 				goto fw_retry;
3604 			}
3605 			HIF_TRACE("%s: FW signal timed out",
3606 					__func__);
3607 			qdf_assert_always(0);
3608 		} else {
3609 			HIF_TRACE("%s: Got FW signal, retries = %x",
3610 					__func__, 500-wait_limit);
3611 		}
3612 	}
3613 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3614 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3615 }
3616 
3617 static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
3618 				     struct device *dev)
3619 {
3620 	struct pld_soc_info info;
3621 
3622 	pld_get_soc_info(dev, &info);
3623 	sc->mem = info.v_addr;
3624 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3625 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3626 }
3627 
3628 static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
3629 				       struct device *dev)
3630 {}
3631 
3632 static bool hif_is_pld_based_target(int device_id)
3633 {
3634 	switch (device_id) {
3635 	case QCA6290_DEVICE_ID:
3636 	case QCA6290_EMULATION_DEVICE_ID:
3637 #ifdef QCA_WIFI_QCA6390
3638 	case QCA6390_DEVICE_ID:
3639 #endif
3640 	case AR6320_DEVICE_ID:
3641 	case QCN7605_DEVICE_ID:
3642 		return true;
3643 	}
3644 	return false;
3645 }
3646 
3647 static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
3648 					   int device_id)
3649 {
3650 	if (hif_is_pld_based_target(device_id)) {
3651 		sc->hif_enable_pci = hif_enable_pci_pld;
3652 		sc->hif_pci_deinit = hif_pci_deinit_pld;
3653 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
3654 	} else {
3655 		sc->hif_enable_pci = hif_enable_pci_nopld;
3656 		sc->hif_pci_deinit = hif_pci_deinit_nopld;
3657 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
3658 	}
3659 }
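/*
 * Illustrative sketch (editorial addition, not driver code): once the
 * ops are attached, callers go through the function pointers rather
 * than the pld/nopld variants directly, e.g.:
 *
 *	hif_pci_init_deinit_ops_attach(sc, id->device);
 *	sc->hif_pci_get_soc_info(sc, dev);
 *	ret = sc->hif_enable_pci(sc, pdev, id);
 *	...
 *	sc->hif_pci_deinit(sc);
 *
 * This keeps the probe and teardown paths agnostic of whether the
 * target is PLD based.
 */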
3660 
3661 #ifdef HIF_REG_WINDOW_SUPPORT
3662 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3663 					       u32 target_type)
3664 {
3665 	switch (target_type) {
3666 	case TARGET_TYPE_QCN7605:
3667 		sc->use_register_windowing = true;
3668 		qdf_spinlock_create(&sc->register_access_lock);
3669 		sc->register_window = 0;
3670 		break;
3671 	default:
3672 		sc->use_register_windowing = false;
3673 	}
3674 }
3675 #else
3676 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3677 					       u32 target_type)
3678 {
3679 	sc->use_register_windowing = false;
3680 }
3681 #endif
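/*
 * Illustrative sketch (editorial addition; the WINDOW_* constants below
 * are placeholders, the real windowing helpers live in the HIF register
 * access layer): with use_register_windowing set, an access outside the
 * directly mapped range selects a window first, under
 * register_access_lock:
 *
 *	qdf_spin_lock_irqsave(&sc->register_access_lock);
 *	if ((offset >> WINDOW_SHIFT) != sc->register_window) {
 *		sc->register_window = offset >> WINDOW_SHIFT;
 *		hif_write32_mb(sc, sc->mem + WINDOW_REG_ADDRESS,
 *			       sc->register_window);
 *	}
 *	val = hif_read32_mb(sc, sc->mem + WINDOW_START +
 *			    (offset & WINDOW_RANGE_MASK));
 *	qdf_spin_unlock_irqrestore(&sc->register_access_lock);
 */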
3682 
3683 /**
3684  * hif_pci_enable_bus() - enable the PCI bus
3685  * @ol_sc: soft_sc struct
3686  * @dev: device pointer
3687  * @bdev: bus dev pointer
3688  * @bid: bus id pointer
3689  * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
3690  *
3691  * This function enables the PCI bus.
3692  *
3693  * Return: QDF_STATUS
3694  */
3695 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3696 			  struct device *dev, void *bdev,
3697 			  const struct hif_bus_id *bid,
3698 			  enum hif_enable_type type)
3699 {
3700 	int ret = 0;
3701 	uint32_t hif_type, target_type;
3702 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3703 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3704 	uint16_t revision_id = 0;
3705 	int probe_again = 0;
3706 	struct pci_dev *pdev = bdev;
3707 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3708 	struct hif_target_info *tgt_info;
3709 
3710 	if (!ol_sc) {
3711 		HIF_ERROR("%s: hif_ctx is NULL", __func__);
3712 		return QDF_STATUS_E_NOMEM;
3713 	}
3714 
3715 	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
3716 		  __func__, hif_get_conparam(ol_sc), id->device);
3717 
3718 	sc->pdev = pdev;
3719 	sc->dev = &pdev->dev;
3720 	sc->devid = id->device;
3721 	sc->cacheline_sz = dma_get_cache_alignment();
3722 	tgt_info = hif_get_target_info_handle(hif_hdl);
3723 	hif_pci_init_deinit_ops_attach(sc, id->device);
3724 	sc->hif_pci_get_soc_info(sc, dev);
3725 again:
3726 	ret = sc->hif_enable_pci(sc, pdev, id);
3727 	if (ret < 0) {
3728 		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
3729 		       __func__, ret);
3730 		goto err_enable_pci;
3731 	}
3732 	HIF_TRACE("%s: hif_enable_pci done", __func__);
3733 
3734 	/* Temporary FIX: disable ASPM on peregrine.
3735 	 * Will be removed after the OTP is programmed
3736 	 */
3737 	hif_disable_power_gating(hif_hdl);
3738 
3739 	device_disable_async_suspend(&pdev->dev);
3740 	pci_read_config_word(pdev, 0x08, &revision_id);
3741 
3742 	ret = hif_get_device_type(id->device, revision_id,
3743 						&hif_type, &target_type);
3744 	if (ret < 0) {
3745 		HIF_ERROR("%s: invalid device id/revision_id", __func__);
3746 		goto err_tgtstate;
3747 	}
3748 	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
3749 		  __func__, hif_type, target_type);
3750 
3751 	hif_register_tbl_attach(ol_sc, hif_type);
3752 	hif_target_register_tbl_attach(ol_sc, target_type);
3753 
3754 	hif_pci_init_reg_windowing_support(sc, target_type);
3755 
3756 	tgt_info->target_type = target_type;
3757 
3758 	if (ce_srng_based(ol_sc)) {
3759 		HIF_TRACE("%s: Skip target wakeup for SRNG devices", __func__);
3760 	} else {
3761 		ret = hif_pci_probe_tgt_wakeup(sc);
3762 		if (ret < 0) {
3763 			HIF_ERROR("%s: ERROR - hif_pci_probe_tgt_wakeup error = %d",
3764 					__func__, ret);
3765 			if (ret == -EAGAIN)
3766 				probe_again++;
3767 			goto err_tgtstate;
3768 		}
3769 		HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
3770 	}
3771 
3772 	if (!ol_sc->mem_pa) {
3773 		HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
3774 		ret = -EIO;
3775 		goto err_tgtstate;
3776 	}
3777 
3778 	if (!ce_srng_based(ol_sc)) {
3779 		hif_target_sync(ol_sc);
3780 
3781 		if (ADRASTEA_BU)
3782 			hif_vote_link_up(hif_hdl);
3783 	}
3784 
3785 	return QDF_STATUS_SUCCESS;
3786 
3787 err_tgtstate:
3788 	hif_disable_pci(sc);
3789 	sc->pci_enabled = false;
3790 	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
3791 	return QDF_STATUS_E_ABORTED;
3792 
3793 err_enable_pci:
3794 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3795 		int delay_time;
3796 
3797 		HIF_INFO("%s: pci reprobe", __func__);
3798 		/* at least 100 ms, growing quadratically: 100, 100, 100, 160, ... */
3799 		delay_time = max(100, 10 * (probe_again * probe_again));
3800 		qdf_mdelay(delay_time);
3801 		goto again;
3802 	}
3803 	return ret;
3804 }
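/*
 * Illustrative usage (editorial addition; the probe callback shown is
 * an assumption, not a call site in this file): the bus layer invokes
 * hif_pci_enable_bus() from PCI probe, passing the pci_dev and
 * pci_device_id through the opaque bus parameters:
 *
 *	status = hif_pci_enable_bus(ol_sc, &pdev->dev, pdev,
 *				    (const struct hif_bus_id *)id,
 *				    HIF_ENABLE_TYPE_PROBE);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		goto probe_fail;
 */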
3805 
3806 /**
3807  * hif_pci_irq_enable() - re-enable the interrupt for a copy engine
3808  * @scn: hif_softc
3809  * @ce_id: copy engine id
3810  *
3811  * Return: void
3812  */
3813 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3814 {
3815 	uint32_t tmp = 1 << ce_id;
3816 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3817 
3818 	qdf_spin_lock_irqsave(&sc->irq_lock);
3819 	scn->ce_irq_summary &= ~tmp;
3820 	if (scn->ce_irq_summary == 0) {
3821 		/* Enable Legacy PCI line interrupts */
3822 		if (LEGACY_INTERRUPTS(sc) &&
3823 			(scn->target_status != TARGET_STATUS_RESET) &&
3824 			(!qdf_atomic_read(&scn->link_suspended))) {
3825 
3826 			hif_write32_mb(scn, scn->mem +
3827 				(SOC_CORE_BASE_ADDRESS |
3828 				PCIE_INTR_ENABLE_ADDRESS),
3829 				HOST_GROUP0_MASK);
3830 
3831 			hif_read32_mb(scn, scn->mem +
3832 					(SOC_CORE_BASE_ADDRESS |
3833 					PCIE_INTR_ENABLE_ADDRESS));
3834 		}
3835 	}
3836 	if (scn->hif_init_done == true)
3837 		Q_TARGET_ACCESS_END(scn);
3838 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
3839 
3840 	/* check for missed firmware crash */
3841 	hif_fw_interrupt_handler(0, scn);
3842 }
3843 
3844 /**
3845  * hif_pci_irq_disable() - disable the interrupt for a copy engine
3846  * @scn: hif_softc
3847  * @ce_id: copy engine id
3848  *
3849  * only applicable to legacy copy engine...
3850  *
3851  * Return: void
3852  */
3853 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3854 {
3855 	/* For Rome only need to wake up target */
3856 	/* target access is maintained until interrupts are re-enabled */
3857 	Q_TARGET_ACCESS_BEGIN(scn);
3858 }
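/*
 * Illustrative pairing (editorial addition, simplified): on the legacy
 * interrupt path the CE tasklet disables a copy engine's interrupt on
 * entry and re-arms it once completions are drained:
 *
 *	hif_pci_irq_disable(scn, ce_id);
 *	... process copy engine completions ...
 *	hif_pci_irq_enable(scn, ce_id);
 *
 * hif_pci_irq_enable() only re-enables the PCI line interrupt once
 * ce_irq_summary shows that every CE has been re-armed.
 */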
3859 
3860 #ifdef FEATURE_RUNTIME_PM
3861 
3862 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
3863 {
3864 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3865 
3866 	if (!sc)
3867 		return;
3868 
3869 	sc->pm_stats.runtime_get++;
3870 	pm_runtime_get_noresume(sc->dev);
3871 }
3872 
3873 /**
3874  * hif_pm_runtime_get() - do a get operation on the device
3875  * @hif_ctx: opaque HIF context
3876  *
3877  * A get operation prevents a runtime suspend until a corresponding
3878  * put is done. This API should be used when sending data.
3879  *
3880  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
3881  * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
3882  *
3883  * Return: 0 if the bus is up and a get has been issued,
3884  *   otherwise an error code.
3885  */
3886 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
3887 {
3888 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3889 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3890 	int ret;
3891 	int pm_state;
3892 
3893 	if (!scn) {
3894 		HIF_ERROR("%s: Could not do runtime get, scn is null",
3895 				__func__);
3896 		return -EFAULT;
3897 	}
3898 
3899 	pm_state = qdf_atomic_read(&sc->pm_state);
3900 
3901 	if (pm_state  == HIF_PM_RUNTIME_STATE_ON ||
3902 			pm_state == HIF_PM_RUNTIME_STATE_NONE) {
3903 		sc->pm_stats.runtime_get++;
3904 		ret = __hif_pm_runtime_get(sc->dev);
3905 
3906 		/* Get can return 1 if the device is already active, just return
3907 		 * success in that case
3908 		 */
3909 		if (ret > 0)
3910 			ret = 0;
3911 
3912 		if (ret)
3913 			hif_pm_runtime_put(hif_ctx);
3914 
3915 		if (ret && ret != -EINPROGRESS) {
3916 			sc->pm_stats.runtime_get_err++;
3917 			HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d",
3918 				__func__, qdf_atomic_read(&sc->pm_state), ret);
3919 		}
3920 
3921 		return ret;
3922 	}
3923 
3924 	sc->pm_stats.request_resume++;
3925 	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
3926 	ret = hif_pm_request_resume(sc->dev);
3927 
3928 	return -EAGAIN;
3929 }
3930 
3931 /**
3932  * hif_pm_runtime_put() - do a put operation on the device
3933  * @hif_ctx: opaque HIF context
3934  *
3935  * A put operation allows a runtime suspend after a corresponding get
3936  * was done. This API should be used when sending data. It returns a
3937  * failure if runtime pm is stopped, or if the put would decrement the
3938  * usage count below 0.
3939  *
3940  * Return: 0 if the put is performed
3941  */
3942 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
3943 {
3944 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3945 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3946 	int pm_state, usage_count;
3947 	char *error = NULL;
3948 
3949 	if (!scn) {
3950 		HIF_ERROR("%s: Could not do runtime put, scn is null",
3951 				__func__);
3952 		return -EFAULT;
3953 	}
3954 	usage_count = atomic_read(&sc->dev->power.usage_count);
3955 
3956 	if (usage_count == 1) {
3957 		pm_state = qdf_atomic_read(&sc->pm_state);
3958 
3959 		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
3960 			error = "Ignoring unexpected put when runtime pm is disabled";
3961 
3962 	} else if (usage_count == 0) {
3963 		error = "PUT Without a Get Operation";
3964 	}
3965 
3966 	if (error) {
3967 		hif_pci_runtime_pm_warn(sc, error);
3968 		return -EINVAL;
3969 	}
3970 
3971 	sc->pm_stats.runtime_put++;
3972 
3973 	hif_pm_runtime_mark_last_busy(sc->dev);
3974 	hif_pm_runtime_put_auto(sc->dev);
3975 
3976 	return 0;
3977 }
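/*
 * Illustrative usage (editorial addition): a data path brackets bus
 * access with a get/put pair. While the bus is runtime suspended, the
 * get only requests a resume and returns -EAGAIN, so the caller should
 * queue the work and retry later:
 *
 *	ret = hif_pm_runtime_get(hif_ctx);
 *	if (ret)
 *		return ret;	(-EAGAIN means a resume was requested)
 *	... send data over the bus ...
 *	hif_pm_runtime_put(hif_ctx);
 */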
3978 
3979 
3980 /**
3981  * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
3982  *                                      reason
3983  * @hif_sc: pci context
3984  * @lock: runtime_pm lock being acquired
3985  *
3986  * Return 0 if successful.
3987  */
3988 static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
3989 		*hif_sc, struct hif_pm_runtime_lock *lock)
3990 {
3991 	int ret = 0;
3992 
3993 	/*
3994 	 * We shouldn't set context->timeout to zero here when the context
3995 	 * is active, as the timeout API may be called back to back for
3996 	 * the same context,
3997 	 * e.g.: echo "1=T:10:T:20" > /d/cnss_runtime_pm
3998 	 * context->timeout is set to zero in hif_pm_runtime_prevent_suspend
3999 	 * so that the timeout version is no longer active and the list
4000 	 * entry of this context is deleted during allow suspend.
4001 	 */
4002 	if (lock->active)
4003 		return 0;
4004 
4005 	ret = __hif_pm_runtime_get(hif_sc->dev);
4006 
4007 	/*
4008 	 * The ret can be -EINPROGRESS if the runtime status is
4009 	 * RPM_RESUMING or RPM_SUSPENDING; any other negative value is an
4010 	 * error. We shouldn't do a runtime_put here: allow suspend will
4011 	 * be called later with this context, and the usage count is
4012 	 * decremented there, so suspend remains prevented until then.
4013 	 */
4014 
4015 	if (ret < 0 && ret != -EINPROGRESS) {
4016 		hif_sc->pm_stats.runtime_get_err++;
4017 		hif_pci_runtime_pm_warn(hif_sc,
4018 				"Prevent Suspend Runtime PM Error");
4019 	}
4020 
4021 	hif_sc->prevent_suspend_cnt++;
4022 
4023 	lock->active = true;
4024 
4025 	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);
4026 
4027 	hif_sc->pm_stats.prevent_suspend++;
4028 
4029 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4030 		hif_pm_runtime_state_to_string(
4031 			qdf_atomic_read(&hif_sc->pm_state)),
4032 					ret);
4033 
4034 	return ret;
4035 }
4036 
4037 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
4038 		struct hif_pm_runtime_lock *lock)
4039 {
4040 	int ret = 0;
4041 	int usage_count;
4042 
4043 	if (hif_sc->prevent_suspend_cnt == 0)
4044 		return ret;
4045 
4046 	if (!lock->active)
4047 		return ret;
4048 
4049 	usage_count = atomic_read(&hif_sc->dev->power.usage_count);
4050 
4051 	/*
4052 	 * During driver unload, the platform driver increments the usage
4053 	 * count to prevent any runtime suspend from being invoked.
4054 	 * So during driver load, in the HIF_PM_RUNTIME_STATE_NONE state,
4055 	 * the usage_count should be one. Ideally this shouldn't happen,
4056 	 * as context->active should be set for allow suspend to proceed;
4057 	 * handle this case here to prevent any failures.
4058 	 */
4059 	if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
4060 				&& usage_count == 1) || usage_count == 0) {
4061 		hif_pci_runtime_pm_warn(hif_sc,
4062 				"Allow without a prevent suspend");
4063 		return -EINVAL;
4064 	}
4065 
4066 	list_del(&lock->list);
4067 
4068 	hif_sc->prevent_suspend_cnt--;
4069 
4070 	lock->active = false;
4071 	lock->timeout = 0;
4072 
4073 	hif_pm_runtime_mark_last_busy(hif_sc->dev);
4074 	ret = hif_pm_runtime_put_auto(hif_sc->dev);
4075 
4076 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4077 		hif_pm_runtime_state_to_string(
4078 			qdf_atomic_read(&hif_sc->pm_state)),
4079 					ret);
4080 
4081 	hif_sc->pm_stats.allow_suspend++;
4082 	return ret;
4083 }
4084 
4085 /**
4086  * hif_pm_runtime_lock_timeout_fn() - callback for the runtime lock timeout
4087  * @data: callback data that is the pci context
4088  *
4089  * If runtime locks are acquired with a timeout, this function releases
4090  * the expired locks once the timer fires.
4091  *
4092  * Return: none
4093  */
4094 static void hif_pm_runtime_lock_timeout_fn(void *data)
4095 {
4096 	struct hif_pci_softc *hif_sc = data;
4097 	unsigned long timer_expires;
4098 	struct hif_pm_runtime_lock *context, *temp;
4099 
4100 	spin_lock_bh(&hif_sc->runtime_lock);
4101 
4102 	timer_expires = hif_sc->runtime_timer_expires;
4103 
4104 	/* Make sure we are not called too early, this should take care of
4105 	 * following case
4106 	 *
4107 	 * CPU0                         CPU1 (timeout function)
4108 	 * ----                         ----------------------
4109 	 * spin_lock_irq
4110 	 *                              timeout function called
4111 	 *
4112 	 * mod_timer()
4113 	 *
4114 	 * spin_unlock_irq
4115 	 *                              spin_lock_irq
4116 	 */
4117 	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
4118 		hif_sc->runtime_timer_expires = 0;
4119 		list_for_each_entry_safe(context, temp,
4120 				&hif_sc->prevent_suspend_list, list) {
4121 			if (context->timeout) {
4122 				__hif_pm_runtime_allow_suspend(hif_sc, context);
4123 				hif_sc->pm_stats.allow_suspend_timeout++;
4124 			}
4125 		}
4126 	}
4127 
4128 	spin_unlock_bh(&hif_sc->runtime_lock);
4129 }
4130 
4131 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
4132 		struct hif_pm_runtime_lock *data)
4133 {
4134 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4135 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4136 	struct hif_pm_runtime_lock *context = data;
4137 
4138 	if (!sc->hif_config.enable_runtime_pm)
4139 		return 0;
4140 
4141 	if (!context)
4142 		return -EINVAL;
4143 
4144 	if (in_irq())
4145 		WARN_ON(1);
4146 
4147 	spin_lock_bh(&hif_sc->runtime_lock);
4148 	context->timeout = 0;
4149 	__hif_pm_runtime_prevent_suspend(hif_sc, context);
4150 	spin_unlock_bh(&hif_sc->runtime_lock);
4151 
4152 	return 0;
4153 }
4154 
4155 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
4156 				struct hif_pm_runtime_lock *data)
4157 {
4158 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4159 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4160 	struct hif_pm_runtime_lock *context = data;
4161 
4162 	if (!sc->hif_config.enable_runtime_pm)
4163 		return 0;
4164 
4165 	if (!context)
4166 		return -EINVAL;
4167 
4168 	if (in_irq())
4169 		WARN_ON(1);
4170 
4171 	spin_lock_bh(&hif_sc->runtime_lock);
4172 
4173 	__hif_pm_runtime_allow_suspend(hif_sc, context);
4174 
4175 	/* The list can also be empty, e.g. in cases where we have one
4176 	 * context in the list and the allow suspend arrived before the
4177 	 * timer expired, so the context was already deleted from the
4178 	 * list above.
4179 	 * When the list is empty, the prevent_suspend count is zero.
4180 	 */
4181 	if (hif_sc->prevent_suspend_cnt == 0 &&
4182 			hif_sc->runtime_timer_expires > 0) {
4183 		qdf_timer_free(&hif_sc->runtime_timer);
4184 		hif_sc->runtime_timer_expires = 0;
4185 	}
4186 
4187 	spin_unlock_bh(&hif_sc->runtime_lock);
4188 
4189 	return 0;
4190 }
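/*
 * Illustrative pairing (editorial addition): a client that must keep
 * the bus awake across an operation brackets it with prevent/allow on
 * a previously initialized runtime lock:
 *
 *	hif_pm_runtime_prevent_suspend(hif_ctx, lock);
 *	... operation that needs the bus to stay awake ...
 *	hif_pm_runtime_allow_suspend(hif_ctx, lock);
 *
 * Both calls are no-ops when runtime PM is disabled in hif_config, and
 * neither may be used from hard interrupt context.
 */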
4191 
4192 /**
4193  * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
4194  * @ol_sc: HIF context
4195  * @lock: which lock is being acquired
4196  * @delay: Timeout in milliseconds
4197  *
4198  * Prevent runtime suspend with a timeout after which runtime suspend would be
4199  * allowed. This API uses a single timer to allow the suspend, and the
4200  * timer is modified if the timeout changes before it fires.
4201  * If the timeout is less than the autosuspend_delay, mark_last_busy is
4202  * used instead of starting the timer.
4203  *
4204  * It is wise to try not to use this API and correct the design if possible.
4205  *
4206  * Return: 0 on success and negative error code on failure
4207  */
4208 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
4209 		struct hif_pm_runtime_lock *lock, unsigned int delay)
4210 {
4211 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4212 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);
4213 
4214 	int ret = 0;
4215 	unsigned long expires;
4216 	struct hif_pm_runtime_lock *context = lock;
4217 
4218 	if (hif_is_load_or_unload_in_progress(sc)) {
4219 		HIF_ERROR("%s: Load/unload in progress, ignore!",
4220 				__func__);
4221 		return -EINVAL;
4222 	}
4223 
4224 	if (hif_is_recovery_in_progress(sc)) {
4225 		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
4226 		return -EINVAL;
4227 	}
4228 
4229 	if (!sc->hif_config.enable_runtime_pm)
4230 		return 0;
4231 
4232 	if (!context)
4233 		return -EINVAL;
4234 
4235 	if (in_irq())
4236 		WARN_ON(1);
4237 
4238 	/*
4239 	 * Don't use internal timer if the timeout is less than auto suspend
4240 	 * delay.
4241 	 */
4242 	if (delay <= hif_sc->dev->power.autosuspend_delay) {
4243 		hif_pm_request_resume(hif_sc->dev);
4244 		hif_pm_runtime_mark_last_busy(hif_sc->dev);
4245 		return ret;
4246 	}
4247 
4248 	expires = jiffies + msecs_to_jiffies(delay);
4249 	expires += !expires; /* avoid 0; it is reserved to mean "no timer" */
4250 
4251 	spin_lock_bh(&hif_sc->runtime_lock);
4252 
4253 	context->timeout = delay;
4254 	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
4255 	hif_sc->pm_stats.prevent_suspend_timeout++;
4256 
4257 	/* Modify the timer only if new timeout is after already configured
4258 	 * timeout
4259 	 */
4260 	if (time_after(expires, hif_sc->runtime_timer_expires)) {
4261 		qdf_timer_mod(&hif_sc->runtime_timer, delay);
4262 		hif_sc->runtime_timer_expires = expires;
4263 	}
4264 
4265 	spin_unlock_bh(&hif_sc->runtime_lock);
4266 
4267 	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d", __func__,
4268 		hif_pm_runtime_state_to_string(
4269 			qdf_atomic_read(&hif_sc->pm_state)),
4270 					delay, ret);
4271 
4272 	return ret;
4273 }
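/*
 * Illustrative usage (editorial addition): granting the bus a bounded
 * wake window of 500 ms instead of an open-ended prevent/allow pair:
 *
 *	hif_pm_runtime_prevent_suspend_timeout(hif_ctx, lock, 500);
 *
 * If 500 ms is within the autosuspend delay this only marks the device
 * busy; otherwise the shared runtime timer is (re)armed and the lock
 * is released by hif_pm_runtime_lock_timeout_fn() when it fires.
 */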
4274 
4275 /**
4276  * hif_runtime_lock_init() - API to initialize Runtime PM context
4277  * @lock: QDF runtime lock to attach the new context to
4278  * @name: Context name
4279  *
4280  * Allocates and initializes the caller's Runtime PM context in @lock.
4281  *
4282  * Return: 0 on success, -ENOMEM on allocation failure
4283  */
4284 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
4285 {
4286 	struct hif_pm_runtime_lock *context;
4287 
4288 	HIF_INFO("Initializing Runtime PM wakelock %s", name);
4289 
4290 	context = qdf_mem_malloc(sizeof(*context));
4291 	if (!context)
4292 		return -ENOMEM;
4293 
4294 	context->name = name ? name : "Default";
4295 	lock->lock = context;
4296 
4297 	return 0;
4298 }
4299 
4300 /**
4301  * hif_runtime_lock_deinit() - This API frees the runtime pm context
4302  * @hif_ctx: HIF context
4303  * @data: Runtime PM context
4304  *
4305  * Return: void
4305  */
4306 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
4307 			     struct hif_pm_runtime_lock *data)
4308 {
4309 	struct hif_pm_runtime_lock *context = data;
4310 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4311 
4312 	if (!context) {
4313 		HIF_ERROR("Runtime PM wakelock context is NULL");
4314 		return;
4315 	}
4316 
4317 	HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);
4318 
4319 	/*
4320 	 * Ensure to delete the context list entry and reduce the usage count
4321 	 * before freeing the context if context is active.
4322 	 */
4323 	if (sc) {
4324 		spin_lock_bh(&sc->runtime_lock);
4325 		__hif_pm_runtime_allow_suspend(sc, context);
4326 		spin_unlock_bh(&sc->runtime_lock);
4327 	}
4328 
4329 	qdf_mem_free(context);
4330 }
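/*
 * Illustrative lifecycle (editorial addition; "my_feature" is a made-up
 * name): callers hold a qdf_runtime_lock_t whose ->lock member carries
 * the hif_pm_runtime_lock context allocated by hif_runtime_lock_init():
 *
 *	qdf_runtime_lock_t rtpm_lock;
 *
 *	if (hif_runtime_lock_init(&rtpm_lock, "my_feature"))
 *		return;
 *	hif_pm_runtime_prevent_suspend(hif_ctx, rtpm_lock.lock);
 *	...
 *	hif_pm_runtime_allow_suspend(hif_ctx, rtpm_lock.lock);
 *	hif_runtime_lock_deinit(hif_ctx, rtpm_lock.lock);
 *
 * Deinit is safe on a still-active lock: it drops the list entry and
 * usage count before freeing the context.
 */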
4331 #endif /* FEATURE_RUNTIME_PM */
4332 
4333 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
4334 {
4335 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4336 
4337 	/* legacy case only has one irq */
4338 	return pci_scn->irq;
4339 }
4340 
4341 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
4342 {
4343 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
4344 	struct hif_target_info *tgt_info;
4345 
4346 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
4347 
4348 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
4349 	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
4350 	    tgt_info->target_type == TARGET_TYPE_QCA8074) {
4351 		/*
4352 		 * Need to consider offset's memtype for QCA6290/QCA6390/QCA8074,
4353 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
4354 		 * well initialized/defined.
4355 		 */
4356 		return 0;
4357 	}
4358 
4359 	if ((offset >= DRAM_BASE_ADDRESS &&
4360 	     offset <= DRAM_BASE_ADDRESS + DRAM_SIZE) ||
4361 	    (offset + sizeof(unsigned int) <= sc->mem_len))
4362 		return 0;
4363 
4364 	HIF_TRACE("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)\n",
4365 		  offset, (uint32_t)(offset + sizeof(unsigned int)),
4366 		  sc->mem_len);
4367 
4368 	return -EINVAL;
4369 }
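/*
 * Illustrative usage (editorial addition): diagnostic reads gate on the
 * boundary check before touching the mapped BAR:
 *
 *	if (hif_pci_addr_in_boundary(scn, offset) < 0)
 *		return -EINVAL;
 *	val = hif_read32_mb(scn, sc->mem + offset);
 */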
4370 
4371 /**
4372  * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
4373  * @scn: hif context
4374  *
4375  * Return: true if soc needs driver bmi otherwise false
4376  */
4377 bool hif_pci_needs_bmi(struct hif_softc *scn)
4378 {
4379 	return !ce_srng_based(scn);
4380 }
4381