xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/pcie/if_pci.c (revision 1f55ed1a9f5050d8da228aa8dd3fff7c0242aa71)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <linux/pci.h>
20 #include <linux/slab.h>
21 #include <linux/interrupt.h>
22 #include <linux/if_arp.h>
23 #ifdef CONFIG_PCI_MSM
24 #include <linux/msm_pcie.h>
25 #endif
26 #include "hif_io32.h"
27 #include "if_pci.h"
28 #include "hif.h"
29 #include "target_type.h"
30 #include "hif_main.h"
31 #include "ce_main.h"
32 #include "ce_api.h"
33 #include "ce_internal.h"
34 #include "ce_reg.h"
35 #include "ce_bmi.h"
36 #include "regtable.h"
37 #include "hif_hw_version.h"
38 #include <linux/debugfs.h>
39 #include <linux/seq_file.h>
40 #include "qdf_status.h"
41 #include "qdf_atomic.h"
42 #include "pld_common.h"
43 #include "mp_dev.h"
44 #include "hif_debug.h"
45 
46 #include "if_pci_internal.h"
47 #include "ce_tasklet.h"
48 #include "targaddrs.h"
49 #include "hif_exec.h"
50 
51 #include "pci_api.h"
52 #include "ahb_api.h"
53 
54 /* Maximum ms timeout for host to wake up target */
55 #define PCIE_WAKE_TIMEOUT 1000
56 #define RAMDUMP_EVENT_TIMEOUT 2500
57 
58 /* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
59  * PCIe data bus errors.
60  * As a workaround for this issue, the reset sequence was changed to
61  * use a Target CPU warm reset instead of SOC_GLOBAL_RESET.
62  */
63 #define CPU_WARM_RESET_WAR
64 
65 #ifdef CONFIG_WIN
66 extern int32_t frac, intval, ar900b_20_targ_clk, qca9888_20_targ_clk;
67 #endif
68 
69 /*
70  * Top-level interrupt handler for all PCI interrupts from a Target.
71  * When a block of MSI interrupts is allocated, this top-level handler
72  * is not used; instead, we directly call the correct sub-handler.
73  */
74 struct ce_irq_reg_table {
75 	uint32_t irq_enable;
76 	uint32_t irq_status;
77 };
78 
79 #ifndef QCA_WIFI_3_0_ADRASTEA
80 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
81 {
82 }
83 #else
84 static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
85 {
86 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
87 	unsigned int target_enable0, target_enable1;
88 	unsigned int target_cause0, target_cause1;
89 
90 	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
91 	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
92 	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
93 	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);
94 
95 	if ((target_enable0 & target_cause0) ||
96 	    (target_enable1 & target_cause1)) {
97 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
98 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);
99 
100 		if (scn->notice_send)
101 			pld_intr_notify_q6(sc->dev);
102 	}
103 }
104 #endif
105 
106 
107 /**
108  * pci_dispatch_interrupt() - dispatch pending CE interrupts to their tasklets
109  * @scn: hif context
110  *
111  * Return: N/A
112  */
113 static void pci_dispatch_interrupt(struct hif_softc *scn)
114 {
115 	uint32_t intr_summary;
116 	int id;
117 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
118 
119 	if (scn->hif_init_done != true)
120 		return;
121 
122 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
123 		return;
124 
125 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
126 
127 	if (intr_summary == 0) {
128 		if ((scn->target_status != TARGET_STATUS_RESET) &&
129 			(!qdf_atomic_read(&scn->link_suspended))) {
130 
131 			hif_write32_mb(scn, scn->mem +
132 				(SOC_CORE_BASE_ADDRESS |
133 				PCIE_INTR_ENABLE_ADDRESS),
134 				HOST_GROUP0_MASK);
135 
136 			hif_read32_mb(scn, scn->mem +
137 					(SOC_CORE_BASE_ADDRESS |
138 					PCIE_INTR_ENABLE_ADDRESS));
139 		}
140 		Q_TARGET_ACCESS_END(scn);
141 		return;
142 	}
143 	Q_TARGET_ACCESS_END(scn);
144 
145 	scn->ce_irq_summary = intr_summary;
146 	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
147 		if (intr_summary & (1 << id)) {
148 			intr_summary &= ~(1 << id);
149 			ce_dispatch_interrupt(id,  &hif_state->tasklets[id]);
150 		}
151 	}
152 }
153 
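/**
 * hif_pci_legacy_ce_interrupt_handler() - top-level handler for legacy irqs
 * @irq: irq number
 * @arg: hif_pci_softc context
 *
 * For legacy (non-MSI-block) interrupts this disables and clears the PCI
 * line interrupt, checks the firmware indicator for a pending firmware
 * event (scheduling the wlan tasklet if one is found), and otherwise
 * dispatches the pending copy engine interrupts.
 *
 * Return: IRQ_HANDLED
 */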
154 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
155 {
156 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
157 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
158 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
159 
160 	volatile int tmp;
161 	uint16_t val = 0;
162 	uint32_t bar0 = 0;
163 	uint32_t fw_indicator_address, fw_indicator;
164 	bool ssr_irq = false;
165 	unsigned int host_cause, host_enable;
166 
167 	if (LEGACY_INTERRUPTS(sc)) {
168 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
169 			return IRQ_HANDLED;
170 
171 		if (ADRASTEA_BU) {
172 			host_enable = hif_read32_mb(sc, sc->mem +
173 						    PCIE_INTR_ENABLE_ADDRESS);
174 			host_cause = hif_read32_mb(sc, sc->mem +
175 						   PCIE_INTR_CAUSE_ADDRESS);
176 			if (!(host_enable & host_cause)) {
177 				hif_pci_route_adrastea_interrupt(sc);
178 				return IRQ_HANDLED;
179 			}
180 		}
181 
182 		/* Clear Legacy PCI line interrupts
183 		 * IMPORTANT: INTR_CLR register has to be set
184 		 * after INTR_ENABLE is set to 0,
185 		 * otherwise the interrupt cannot actually be cleared
186 		 */
187 		hif_write32_mb(sc, sc->mem +
188 			      (SOC_CORE_BASE_ADDRESS |
189 			       PCIE_INTR_ENABLE_ADDRESS), 0);
190 
191 		hif_write32_mb(sc, sc->mem +
192 			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
193 			       ADRASTEA_BU ?
194 			       (host_enable & host_cause) :
195 			      HOST_GROUP0_MASK);
196 
197 		if (ADRASTEA_BU)
198 			hif_write32_mb(sc, sc->mem + 0x2f100c,
199 				       (host_cause >> 1));
200 
201 		/* IMPORTANT: this extra read transaction is required to
202 		 * flush the posted write buffer
203 		 */
204 		if (!ADRASTEA_BU) {
205 		tmp =
206 			hif_read32_mb(sc, sc->mem +
207 				     (SOC_CORE_BASE_ADDRESS |
208 				      PCIE_INTR_ENABLE_ADDRESS));
209 
210 		if (tmp == 0xdeadbeef) {
211 			HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
212 			       __func__);
213 
214 			pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
215 			HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
216 			       __func__, val);
217 
218 			pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
219 			HIF_ERROR("%s: PCI Device ID = 0x%04x",
220 			       __func__, val);
221 
222 			pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
223 			HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
224 			       val);
225 
226 			pci_read_config_word(sc->pdev, PCI_STATUS, &val);
227 			HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
228 			       val);
229 
230 			pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
231 					      &bar0);
232 			HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
233 			       bar0);
234 
235 			HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
236 				  __func__,
237 				  hif_read32_mb(sc, sc->mem +
238 						PCIE_LOCAL_BASE_ADDRESS
239 						+ RTC_STATE_ADDRESS));
240 			HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
241 				  __func__,
242 				  hif_read32_mb(sc, sc->mem +
243 						PCIE_LOCAL_BASE_ADDRESS
244 						+ PCIE_SOC_WAKE_ADDRESS));
245 			HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
246 				  __func__,
247 				  hif_read32_mb(sc, sc->mem + 0x80008),
248 				  hif_read32_mb(sc, sc->mem + 0x8000c));
249 			HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
250 				  __func__,
251 				  hif_read32_mb(sc, sc->mem + 0x80010),
252 				  hif_read32_mb(sc, sc->mem + 0x80014));
253 			HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
254 				  __func__,
255 				  hif_read32_mb(sc, sc->mem + 0x80018),
256 				  hif_read32_mb(sc, sc->mem + 0x8001c));
257 			QDF_BUG(0);
258 		}
259 
260 		PCI_CLR_CAUSE0_REGISTER(sc);
261 		}
262 
263 		if (HAS_FW_INDICATOR) {
264 			fw_indicator_address = hif_state->fw_indicator_address;
265 			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
266 			if ((fw_indicator != ~0) &&
267 			   (fw_indicator & FW_IND_EVENT_PENDING))
268 				ssr_irq = true;
269 		}
270 
271 		if (Q_TARGET_ACCESS_END(scn) < 0)
272 			return IRQ_HANDLED;
273 	}
274 	/* TBDXXX: Add support for WMAC */
275 
276 	if (ssr_irq) {
277 		sc->irq_event = irq;
278 		qdf_atomic_set(&scn->tasklet_from_intr, 1);
279 
280 		qdf_atomic_inc(&scn->active_tasklet_cnt);
281 		tasklet_schedule(&sc->intr_tq);
282 	} else {
283 		pci_dispatch_interrupt(scn);
284 	}
285 
286 	return IRQ_HANDLED;
287 }
288 
289 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
290 {
291 	return 1;               /* FIX THIS */
292 }
293 
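/**
 * hif_get_irq_num() - fill a caller buffer with the irq numbers in use
 * @scn: hif context
 * @irq: buffer to be filled with irq numbers
 * @size: number of entries available in @irq
 *
 * With no MSI or a single interrupt only the base irq is returned;
 * otherwise one entry is filled in per CE MSI vector.
 *
 * Return: number of irqs filled in, or -EINVAL on invalid arguments or
 *         insufficient buffer space
 */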
294 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
295 {
296 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
297 	int i = 0;
298 
299 	if (!irq || !size) {
300 		return -EINVAL;
301 	}
302 
303 	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
304 		irq[0] = sc->irq;
305 		return 1;
306 	}
307 
308 	if (sc->num_msi_intrs > size) {
309 		qdf_print("Not enough space in irq buffer to return irqs");
310 		return -EINVAL;
311 	}
312 
313 	for (i = 0; i < sc->num_msi_intrs; i++) {
314 		irq[i] = sc->irq +  i + MSI_ASSIGN_CE_INITIAL;
315 	}
316 
317 	return sc->num_msi_intrs;
318 }
319 
320 
321 /**
322  * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
323  * @scn: hif_softc
324  *
325  * Return: void
326  */
327 #if CONFIG_ATH_PCIE_MAX_PERF == 0
328 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
329 {
330 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
331 	A_target_id_t pci_addr = scn->mem;
332 
333 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
334 	/*
335 	 * If the deferred sleep timer is running cancel it
336 	 * and put the soc into sleep.
337 	 */
338 	if (hif_state->fake_sleep == true) {
339 		qdf_timer_stop(&hif_state->sleep_timer);
340 		if (hif_state->verified_awake == false) {
341 			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
342 				      PCIE_SOC_WAKE_ADDRESS,
343 				      PCIE_SOC_WAKE_RESET);
344 		}
345 		hif_state->fake_sleep = false;
346 	}
347 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
348 }
349 #else
350 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
351 {
352 }
353 #endif
354 
355 #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
356 	hif_read32_mb(sc, (char *)(mem) + \
357 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
358 
359 #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
360 	hif_write32_mb(sc, ((char *)(mem) + \
361 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
362 
363 #ifdef QCA_WIFI_3_0
364 /**
365  * hif_targ_is_awake() - check to see if the target is awake
366  * @hif_ctx: hif context
367  *
368  * emulation never goes to sleep
369  *
370  * Return: true if target is awake
371  */
372 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
373 {
374 	return true;
375 }
376 #else
377 /**
378  * hif_targ_is_awake() - check to see if the target is awake
379  * @scn: hif context
380  *
381  * Return: true if the target's clocks are on
382  */
383 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
384 {
385 	uint32_t val;
386 
387 	if (scn->recovery)
388 		return false;
389 	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
390 		+ RTC_STATE_ADDRESS);
391 	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
392 }
393 #endif
394 
395 #define ATH_PCI_RESET_WAIT_MAX 10       /* Ms */
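/**
 * hif_pci_device_reset() - cold reset the target, including its PCIe core
 * @sc: pci context
 *
 * Wakes the target, asserts SOC_GLOBAL_RESET, waits for the cold reset
 * to be reflected in RTC_STATE, de-asserts the reset and finally lets
 * the target go back to sleep.
 */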
396 static void hif_pci_device_reset(struct hif_pci_softc *sc)
397 {
398 	void __iomem *mem = sc->mem;
399 	int i;
400 	uint32_t val;
401 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
402 
403 	if (!scn->hostdef)
404 		return;
405 
406 	/* NB: Don't check resetok here.  This form of reset
407 	 * is integral to correct operation.
408 	 */
409 
410 	if (!SOC_GLOBAL_RESET_ADDRESS)
411 		return;
412 
413 	if (!mem)
414 		return;
415 
416 	HIF_ERROR("%s: Reset Device", __func__);
417 
418 	/*
419 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
420 	 * writing WAKE_V, the Target may scribble over Host memory!
421 	 */
422 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
423 			       PCIE_SOC_WAKE_V_MASK);
424 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
425 		if (hif_targ_is_awake(scn, mem))
426 			break;
427 
428 		qdf_mdelay(1);
429 	}
430 
431 	/* Put Target, including PCIe, into RESET. */
432 	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
433 	val |= 1;
434 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
435 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
436 		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
437 		    RTC_STATE_COLD_RESET_MASK)
438 			break;
439 
440 		qdf_mdelay(1);
441 	}
442 
443 	/* Pull Target, including PCIe, out of RESET. */
444 	val &= ~1;
445 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
446 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
447 		if (!
448 		    (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
449 		     RTC_STATE_COLD_RESET_MASK))
450 			break;
451 
452 		qdf_mdelay(1);
453 	}
454 
455 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
456 			       PCIE_SOC_WAKE_RESET);
457 }
458 
459 /* CPU warm reset function
460  * Steps:
461  * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
462  * 2. Clear the FW_INDICATOR_ADDRESS - so Target CPU initializes FW
463  *    correctly on WARM reset
464  * 3. Clear TARGET CPU LF timer interrupt
465  * 4. Reset all CEs to clear any pending CE transactions
466  * 5. Warm reset CPU
467  */
468 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
469 {
470 	void __iomem *mem = sc->mem;
471 	int i;
472 	uint32_t val;
473 	uint32_t fw_indicator;
474 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
475 
476 	/* NB: Don't check resetok here.  This form of reset is
477 	 * integral to correct operation.
478 	 */
479 
480 	if (!mem)
481 		return;
482 
483 	HIF_INFO_MED("%s: Target Warm Reset", __func__);
484 
485 	/*
486 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
487 	 * writing WAKE_V, the Target may scribble over Host memory!
488 	 */
489 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
490 			       PCIE_SOC_WAKE_V_MASK);
491 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
492 		if (hif_targ_is_awake(scn, mem))
493 			break;
494 		qdf_mdelay(1);
495 	}
496 
497 	/*
498 	 * Disable Pending interrupts
499 	 */
500 	val =
501 		hif_read32_mb(sc, mem +
502 			     (SOC_CORE_BASE_ADDRESS |
503 			      PCIE_INTR_CAUSE_ADDRESS));
504 	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
505 		    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
506 	/* Target CPU Intr Cause */
507 	val = hif_read32_mb(sc, mem +
508 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
509 	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);
510 
511 	val =
512 		hif_read32_mb(sc, mem +
513 			     (SOC_CORE_BASE_ADDRESS |
514 			      PCIE_INTR_ENABLE_ADDRESS));
515 	hif_write32_mb(sc, (mem +
516 		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
517 	hif_write32_mb(sc, (mem +
518 		       (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
519 		       HOST_GROUP0_MASK);
520 
521 	qdf_mdelay(100);
522 
523 	/* Clear FW_INDICATOR_ADDRESS */
524 	if (HAS_FW_INDICATOR) {
525 		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
526 		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
527 	}
528 
529 	/* Clear Target LF Timer interrupts */
530 	val =
531 		hif_read32_mb(sc, mem +
532 			     (RTC_SOC_BASE_ADDRESS +
533 			      SOC_LF_TIMER_CONTROL0_ADDRESS));
534 	HIF_INFO_MED("%s: addr 0x%x :  0x%x", __func__,
535 	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
536 	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
537 	hif_write32_mb(sc, mem +
538 		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
539 		      val);
540 
541 	/* Reset CE */
542 	val =
543 		hif_read32_mb(sc, mem +
544 			     (RTC_SOC_BASE_ADDRESS |
545 			      SOC_RESET_CONTROL_ADDRESS));
546 	val |= SOC_RESET_CONTROL_CE_RST_MASK;
547 	hif_write32_mb(sc, (mem +
548 		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
549 		      val);
550 	val =
551 		hif_read32_mb(sc, mem +
552 			     (RTC_SOC_BASE_ADDRESS |
553 			      SOC_RESET_CONTROL_ADDRESS));
554 	qdf_mdelay(10);
555 
556 	/* CE unreset */
557 	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
558 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
559 		       SOC_RESET_CONTROL_ADDRESS), val);
560 	val =
561 		hif_read32_mb(sc, mem +
562 			     (RTC_SOC_BASE_ADDRESS |
563 			      SOC_RESET_CONTROL_ADDRESS));
564 	qdf_mdelay(10);
565 
566 	/* Read Target CPU Intr Cause */
567 	val = hif_read32_mb(sc, mem +
568 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
569 	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
570 		    __func__, val);
571 
572 	/* CPU warm RESET */
573 	val =
574 		hif_read32_mb(sc, mem +
575 			     (RTC_SOC_BASE_ADDRESS |
576 			      SOC_RESET_CONTROL_ADDRESS));
577 	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
578 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
579 		       SOC_RESET_CONTROL_ADDRESS), val);
580 	val =
581 		hif_read32_mb(sc, mem +
582 			     (RTC_SOC_BASE_ADDRESS |
583 			      SOC_RESET_CONTROL_ADDRESS));
584 	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
585 		    __func__, val);
586 
587 	qdf_mdelay(100);
588 	HIF_INFO_MED("%s: Target Warm reset complete", __func__);
589 
590 }
591 
592 #ifndef QCA_WIFI_3_0
593 /* only applicable to legacy ce */
594 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
595 {
596 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
597 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
598 	void __iomem *mem = sc->mem;
599 	uint32_t val;
600 
601 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
602 		return ATH_ISR_NOSCHED;
603 	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
604 	if (Q_TARGET_ACCESS_END(scn) < 0)
605 		return ATH_ISR_SCHED;
606 
607 	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);
608 
609 	if (val & FW_IND_HELPER)
610 		return 0;
611 
612 	return 1;
613 }
614 #endif
615 
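/**
 * hif_check_soc_status() - check that the SoC is reachable over PCIe
 * @hif_ctx: hif context
 *
 * Verifies the link by reading the device ID from config space, reads
 * RTC_STATE, tries to wake the target within PCIE_WAKE_TIMEOUT and
 * dumps the SoC power register.
 *
 * Return: 0 on success, -EACCES if the device ID does not match or the
 *         target cannot be woken up
 */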
616 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
617 {
618 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
619 	uint16_t device_id = 0;
620 	uint32_t val;
621 	uint16_t timeout_count = 0;
622 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
623 
624 	/* Check device ID from PCIe configuration space for link status */
625 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
626 	if (device_id != sc->devid) {
627 		HIF_ERROR("%s: device ID does not match (read 0x%x, expect 0x%x)",
628 			  __func__, device_id, sc->devid);
629 		return -EACCES;
630 	}
631 
632 	/* Check PCIe local register for bar/memory access */
633 	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
634 			   RTC_STATE_ADDRESS);
635 	HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);
636 
637 	/* Try to wake up target if it sleeps */
638 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
639 		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
640 	HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
641 		hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
642 		PCIE_SOC_WAKE_ADDRESS));
643 
644 	/* Check if target can be woken up */
645 	while (!hif_targ_is_awake(scn, sc->mem)) {
646 		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
647 			HIF_ERROR("%s: wake up timeout, %08x, %08x",
648 				__func__,
649 				hif_read32_mb(sc, sc->mem +
650 					     PCIE_LOCAL_BASE_ADDRESS +
651 					     RTC_STATE_ADDRESS),
652 				hif_read32_mb(sc, sc->mem +
653 					     PCIE_LOCAL_BASE_ADDRESS +
654 					PCIE_SOC_WAKE_ADDRESS));
655 			return -EACCES;
656 		}
657 
658 		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
659 			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
660 
661 		qdf_mdelay(100);
662 		timeout_count += 100;
663 	}
664 
665 	/* Check Power register for SoC internal bus issues */
666 	val =
667 		hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
668 			     SOC_POWER_REG_OFFSET);
669 	HIF_INFO_MED("%s: Power register is %08x", __func__, val);
670 
671 	return 0;
672 }
673 
674 /**
675  * __hif_pci_dump_registers(): dump other PCI debug registers
676  * @scn: struct hif_softc
677  *
678  * This function dumps pci debug registers.  The parent function
679  * dumps the copy engine registers before calling this function.
680  *
681  * Return: void
682  */
683 static void __hif_pci_dump_registers(struct hif_softc *scn)
684 {
685 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
686 	void __iomem *mem = sc->mem;
687 	uint32_t val, i, j;
688 	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
689 	uint32_t ce_base;
690 
691 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
692 		return;
693 
694 	/* DEBUG_INPUT_SEL_SRC = 0x6 */
695 	val =
696 		hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
697 			     WLAN_DEBUG_INPUT_SEL_OFFSET);
698 	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
699 	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
700 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
701 		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);
702 
703 	/* DEBUG_CONTROL_ENABLE = 0x1 */
704 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
705 			   WLAN_DEBUG_CONTROL_OFFSET);
706 	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
707 	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
708 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
709 		      WLAN_DEBUG_CONTROL_OFFSET, val);
710 
711 	HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
712 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
713 			    WLAN_DEBUG_INPUT_SEL_OFFSET),
714 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
715 			    WLAN_DEBUG_CONTROL_OFFSET));
716 
717 	HIF_INFO_MED("%s: Debug CE", __func__);
718 	/* Loop CE debug output */
719 	/* AMBA_DEBUG_BUS_SEL = 0xc */
720 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
721 			    AMBA_DEBUG_BUS_OFFSET);
722 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
723 	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
724 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
725 		       val);
726 
727 	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
728 		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
729 		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
730 				   CE_WRAPPER_DEBUG_OFFSET);
731 		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
732 		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
733 		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
734 			      CE_WRAPPER_DEBUG_OFFSET, val);
735 
736 		HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
737 			    __func__, wrapper_idx[i],
738 			    hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
739 				AMBA_DEBUG_BUS_OFFSET),
740 			    hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
741 				CE_WRAPPER_DEBUG_OFFSET));
742 
743 		if (wrapper_idx[i] <= 7) {
744 			for (j = 0; j <= 5; j++) {
745 				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
746 				/* For (j=0~5) write CE_DEBUG_SEL = j */
747 				val =
748 					hif_read32_mb(sc, mem + ce_base +
749 						     CE_DEBUG_OFFSET);
750 				val &= ~CE_DEBUG_SEL_MASK;
751 				val |= CE_DEBUG_SEL_SET(j);
752 				hif_write32_mb(sc, mem + ce_base +
753 					       CE_DEBUG_OFFSET, val);
754 
755 				/* read (@gpio_athr_wlan_reg)
756 				 * WLAN_DEBUG_OUT_DATA
757 				 */
758 				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
759 						    + WLAN_DEBUG_OUT_OFFSET);
760 				val = WLAN_DEBUG_OUT_DATA_GET(val);
761 
762 				HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
763 					    __func__, j,
764 					    hif_read32_mb(sc, mem + ce_base +
765 						    CE_DEBUG_OFFSET), val);
766 			}
767 		} else {
768 			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
769 			val =
770 				hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
771 					     WLAN_DEBUG_OUT_OFFSET);
772 			val = WLAN_DEBUG_OUT_DATA_GET(val);
773 
774 			HIF_INFO_MED("%s: out: %x", __func__, val);
775 		}
776 	}
777 
778 	HIF_INFO_MED("%s: Debug PCIe:", __func__);
779 	/* Loop PCIe debug output */
780 	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
781 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
782 			    AMBA_DEBUG_BUS_OFFSET);
783 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
784 	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
785 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
786 		       AMBA_DEBUG_BUS_OFFSET, val);
787 
788 	for (i = 0; i <= 8; i++) {
789 		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
790 		val =
791 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
792 				     AMBA_DEBUG_BUS_OFFSET);
793 		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
794 		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
795 		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
796 			       AMBA_DEBUG_BUS_OFFSET, val);
797 
798 		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
799 		val =
800 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
801 				     WLAN_DEBUG_OUT_OFFSET);
802 		val = WLAN_DEBUG_OUT_DATA_GET(val);
803 
804 		HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
805 		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
806 				    WLAN_DEBUG_OUT_OFFSET), val,
807 		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
808 				    WLAN_DEBUG_OUT_OFFSET));
809 	}
810 
811 	Q_TARGET_ACCESS_END(scn);
812 }
813 
814 /**
815  * hif_pci_dump_registers(): dump bus debug registers
816  * @hif_ctx: struct hif_softc
817  *
818  * This function dumps hif bus debug registers
819  *
820  * Return: 0 for success or error code
821  */
822 int hif_pci_dump_registers(struct hif_softc *hif_ctx)
823 {
824 	int status;
825 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
826 
827 	status = hif_dump_ce_registers(scn);
828 
829 	if (status)
830 		HIF_ERROR("%s: Dump CE Registers Failed", __func__);
831 
832 	/* dump non copy engine pci registers */
833 	__hif_pci_dump_registers(scn);
834 
835 	return 0;
836 }
837 
838 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
839 
840 /* worker thread to schedule wlan_tasklet in SLUB debug build */
841 static void reschedule_tasklet_work_handler(void *arg)
842 {
843 	struct hif_pci_softc *sc = arg;
844 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
845 
846 	if (!scn) {
847 		HIF_ERROR("%s: hif_softc is NULL\n", __func__);
848 		return;
849 	}
850 
851 	if (scn->hif_init_done == false) {
852 		HIF_ERROR("%s: wlan driver is unloaded", __func__);
853 		return;
854 	}
855 
856 	tasklet_schedule(&sc->intr_tq);
857 }
858 
859 /**
860  * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
861  * work
862  * @sc: HIF PCI Context
863  *
864  * Return: void
865  */
866 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
867 {
868 	qdf_create_work(0, &sc->reschedule_tasklet_work,
869 				reschedule_tasklet_work_handler, NULL);
870 }
871 #else
872 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
873 #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
874 
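/**
 * wlan_tasklet() - tasklet servicing firmware events
 * @data: hif_pci_softc context
 *
 * For non-Adrastea targets this runs the firmware interrupt handler;
 * in all cases it clears the tasklet-from-interrupt flag and drops the
 * active tasklet count.
 */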
875 void wlan_tasklet(unsigned long data)
876 {
877 	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
878 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
879 
880 	if (scn->hif_init_done == false)
881 		goto end;
882 
883 	if (qdf_atomic_read(&scn->link_suspended))
884 		goto end;
885 
886 	if (!ADRASTEA_BU) {
887 		(irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
888 		if (scn->target_status == TARGET_STATUS_RESET)
889 			goto end;
890 	}
891 
892 end:
893 	qdf_atomic_set(&scn->tasklet_from_intr, 0);
894 	qdf_atomic_dec(&scn->active_tasklet_cnt);
895 }
896 
897 #ifdef FEATURE_RUNTIME_PM
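/**
 * hif_pm_runtime_state_to_string() - convert a runtime pm state to a name
 * @state: one of the HIF_PM_RUNTIME_STATE_* values
 *
 * Return: human readable state name, used in runtime pm log messages
 */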
898 static const char *hif_pm_runtime_state_to_string(uint32_t state)
899 {
900 	switch (state) {
901 	case HIF_PM_RUNTIME_STATE_NONE:
902 		return "INIT_STATE";
903 	case HIF_PM_RUNTIME_STATE_ON:
904 		return "ON";
905 	case HIF_PM_RUNTIME_STATE_INPROGRESS:
906 		return "INPROGRESS";
907 	case HIF_PM_RUNTIME_STATE_SUSPENDED:
908 		return "SUSPENDED";
909 	default:
910 		return "INVALID STATE";
911 	}
912 }
913 
914 #define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
915 	seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
916 /**
917  * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
918  * @sc: hif_pci_softc context
919  * @msg: log message
920  *
921  * log runtime pm stats when something seems off.
922  *
923  * Return: void
924  */
925 static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
926 {
927 	struct hif_pm_runtime_lock *ctx;
928 
929 	HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
930 			msg, atomic_read(&sc->dev->power.usage_count),
931 			hif_pm_runtime_state_to_string(
932 					atomic_read(&sc->pm_state)),
933 			sc->prevent_suspend_cnt);
934 
935 	HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
936 			sc->dev->power.runtime_status,
937 			sc->dev->power.runtime_error,
938 			sc->dev->power.disable_depth,
939 			sc->dev->power.autosuspend_delay);
940 
941 	HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
942 			sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
943 			sc->pm_stats.request_resume);
944 
945 	HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
946 			sc->pm_stats.allow_suspend,
947 			sc->pm_stats.prevent_suspend);
948 
949 	HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
950 			sc->pm_stats.prevent_suspend_timeout,
951 			sc->pm_stats.allow_suspend_timeout);
952 
953 	HIF_ERROR("Suspended: %u, resumed: %u count",
954 			sc->pm_stats.suspended,
955 			sc->pm_stats.resumed);
956 
957 	HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
958 			sc->pm_stats.suspend_err,
959 			sc->pm_stats.runtime_get_err);
960 
961 	HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");
962 
963 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
964 		HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
965 	}
966 
967 	WARN_ON(1);
968 }
969 
970 /**
971  * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
972  * @s: file to print to
973  * @data: unused
974  *
975  * debugging tool added to the debug fs for displaying runtimepm stats
976  *
977  * Return: 0
978  */
979 static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
980 {
981 	struct hif_pci_softc *sc = s->private;
982 	static const char * const autopm_state[] = {"NONE", "ON", "INPROGRESS",
983 		"SUSPENDED"};
984 	unsigned int msecs_age;
985 	int pm_state = atomic_read(&sc->pm_state);
986 	unsigned long timer_expires;
987 	struct hif_pm_runtime_lock *ctx;
988 
989 	seq_printf(s, "%30s: %s\n", "Runtime PM state",
990 			autopm_state[pm_state]);
991 	seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
992 			sc->pm_stats.last_resume_caller);
993 
994 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
995 		msecs_age = jiffies_to_msecs(
996 				jiffies - sc->pm_stats.suspend_jiffies);
997 		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
998 				msecs_age / 1000, msecs_age % 1000);
999 	}
1000 
1001 	seq_printf(s, "%30s: %d\n", "PM Usage count",
1002 			atomic_read(&sc->dev->power.usage_count));
1003 
1004 	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
1005 			sc->prevent_suspend_cnt);
1006 
1007 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
1008 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
1009 	HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
1010 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
1011 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
1012 	HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
1013 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
1014 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
1015 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
1016 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
1017 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);
1018 
1019 	timer_expires = sc->runtime_timer_expires;
1020 	if (timer_expires > 0) {
1021 		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
1022 		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
1023 				msecs_age / 1000, msecs_age % 1000);
1024 	}
1025 
1026 	spin_lock_bh(&sc->runtime_lock);
1027 	if (list_empty(&sc->prevent_suspend_list)) {
1028 		spin_unlock_bh(&sc->runtime_lock);
1029 		return 0;
1030 	}
1031 
1032 	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
1033 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
1034 		seq_printf(s, "%s", ctx->name);
1035 		if (ctx->timeout)
1036 			seq_printf(s, "(%d ms)", ctx->timeout);
1037 		seq_puts(s, " ");
1038 	}
1039 	seq_puts(s, "\n");
1040 	spin_unlock_bh(&sc->runtime_lock);
1041 
1042 	return 0;
1043 }
1044 #undef HIF_PCI_RUNTIME_PM_STATS
1045 
1046 /**
1047  * hif_pci_runtime_pm_open() - open a debugfs file to access the runtime pm stats
1048  * @inode: debugfs inode
1049  * @file: debugfs file handle
1050  *
1051  * Return: linux error code of single_open.
1052  */
1053 static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
1054 {
1055 	return single_open(file, hif_pci_pm_runtime_debugfs_show,
1056 			inode->i_private);
1057 }
1058 
1059 static const struct file_operations hif_pci_runtime_pm_fops = {
1060 	.owner          = THIS_MODULE,
1061 	.open           = hif_pci_runtime_pm_open,
1062 	.release        = single_release,
1063 	.read           = seq_read,
1064 	.llseek         = seq_lseek,
1065 };
1066 
1067 /**
1068  * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
1069  * @sc: pci context
1070  *
1071  * creates a debugfs entry to debug the runtime pm feature.
1072  */
1073 static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
1074 {
1075 	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
1076 					0400, NULL, sc,
1077 					&hif_pci_runtime_pm_fops);
1078 }
1079 
1080 /**
1081  * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
1082  * @sc: pci context
1083  *
1084  * removes the debugfs entry to debug the runtime pm feature.
1085  */
1086 static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
1087 {
1088 	debugfs_remove(sc->pm_dentry);
1089 }
1090 
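/**
 * hif_runtime_init() - prepare a device for runtime pm
 * @dev: device to configure
 * @delay: autosuspend delay in milliseconds
 *
 * Enables autosuspend with the given delay, allows runtime pm, drops a
 * usage count reference without idling and ignores children when making
 * suspend decisions.
 */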
1091 static void hif_runtime_init(struct device *dev, int delay)
1092 {
1093 	pm_runtime_set_autosuspend_delay(dev, delay);
1094 	pm_runtime_use_autosuspend(dev);
1095 	pm_runtime_allow(dev);
1096 	pm_runtime_mark_last_busy(dev);
1097 	pm_runtime_put_noidle(dev);
1098 	pm_suspend_ignore_children(dev, true);
1099 }
1100 
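/**
 * hif_runtime_exit() - undo the runtime pm configuration
 * @dev: device being torn down
 *
 * Takes a usage count reference without resuming, preventing further
 * runtime suspends, and marks the device status as active.
 */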
1101 static void hif_runtime_exit(struct device *dev)
1102 {
1103 	pm_runtime_get_noresume(dev);
1104 	pm_runtime_set_active(dev);
1105 }
1106 
1107 static void hif_pm_runtime_lock_timeout_fn(unsigned long data);
1108 
1109 /**
1110  * hif_pm_runtime_start(): start the runtime pm
1111  * @sc: pci context
1112  *
1113  * After this call, runtime pm will be active.
1114  */
1115 static void hif_pm_runtime_start(struct hif_pci_softc *sc)
1116 {
1117 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1118 	uint32_t mode = hif_get_conparam(ol_sc);
1119 
1120 	if (!ol_sc->hif_config.enable_runtime_pm) {
1121 		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
1122 		return;
1123 	}
1124 
1125 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode)) {
1126 		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
1127 				__func__);
1128 		return;
1129 	}
1130 
1131 	setup_timer(&sc->runtime_timer, hif_pm_runtime_lock_timeout_fn,
1132 			(unsigned long)sc);
1133 
1134 	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
1135 			ol_sc->hif_config.runtime_pm_delay);
1136 
1137 	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
1138 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
1139 	hif_runtime_pm_debugfs_create(sc);
1140 }
1141 
1142 /**
1143  * hif_pm_runtime_stop(): stop runtime pm
1144  * @sc: pci context
1145  *
1146  * Turns off runtime pm and frees corresponding resources
1147  * that were acquired by hif_pm_runtime_start().
1148  */
1149 static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
1150 {
1151 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1152 	uint32_t mode = hif_get_conparam(ol_sc);
1153 
1154 	if (!ol_sc->hif_config.enable_runtime_pm)
1155 		return;
1156 
1157 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode))
1158 		return;
1159 
1160 	hif_runtime_exit(sc->dev);
1161 	hif_pm_runtime_resume(sc->dev);
1162 
1163 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1164 
1165 	hif_runtime_pm_debugfs_remove(sc);
1166 	del_timer_sync(&sc->runtime_timer);
1167 	/* doesn't wait for pending traffic unlike cld-2.0 */
1168 }
1169 
1170 /**
1171  * hif_pm_runtime_open(): initialize runtime pm
1172  * @sc: pci data structure
1173  *
1174  * Early initialization
1175  */
1176 static void hif_pm_runtime_open(struct hif_pci_softc *sc)
1177 {
1178 	spin_lock_init(&sc->runtime_lock);
1179 
1180 	qdf_atomic_init(&sc->pm_state);
1181 	qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
1182 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1183 	INIT_LIST_HEAD(&sc->prevent_suspend_list);
1184 }
1185 
1186 /**
1187  * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
1188  * @sc: pci context
1189  *
1190  * Ensure we have only one vote against runtime suspend before closing
1191  * the runtime suspend feature.
1192  *
1193  * All gets by the wlan driver should have been returned;
1194  * one vote should remain as part of cnss_runtime_exit.
1195  *
1196  * This needs to be revisited if we share the root complex.
1197  */
1198 static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
1199 {
1200 	struct hif_pm_runtime_lock *ctx, *tmp;
1201 
1202 	if (atomic_read(&sc->dev->power.usage_count) != 1)
1203 		hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
1204 	else
1205 		return;
1206 
1207 	spin_lock_bh(&sc->runtime_lock);
1208 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1209 		spin_unlock_bh(&sc->runtime_lock);
1210 		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
1211 		spin_lock_bh(&sc->runtime_lock);
1212 	}
1213 	spin_unlock_bh(&sc->runtime_lock);
1214 
1215 	/* Ensure exactly one usage count remains so that runtime pm is
1216 	 * not left disabled when the wlan driver is insmodded again.
1217 	 * This also ensures runtime pm does not get broken by the usage
1218 	 * count dropping below 1.
1219 	 */
1220 	if (atomic_read(&sc->dev->power.usage_count) <= 0)
1221 		atomic_set(&sc->dev->power.usage_count, 1);
1222 	while (atomic_read(&sc->dev->power.usage_count) > 1)
1223 		hif_pm_runtime_put_auto(sc->dev);
1224 }
1225 
1226 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
1227 					  struct hif_pm_runtime_lock *lock);
1228 
1229 /**
1230  * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
1231  * @sc: PCIe Context
1232  *
1233  * API is used to empty the runtime pm prevent suspend list.
1234  *
1235  * Return: void
1236  */
1237 static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
1238 {
1239 	struct hif_pm_runtime_lock *ctx, *tmp;
1240 
1241 	spin_lock_bh(&sc->runtime_lock);
1242 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1243 		__hif_pm_runtime_allow_suspend(sc, ctx);
1244 	}
1245 	spin_unlock_bh(&sc->runtime_lock);
1246 }
1247 
1248 /**
1249  * hif_pm_runtime_close(): close runtime pm
1250  * @sc: pci bus handle
1251  *
1252  * ensure runtime_pm is stopped before closing the driver
1253  */
1254 static void hif_pm_runtime_close(struct hif_pci_softc *sc)
1255 {
1256 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1257 
1258 	qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
1259 	if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
1260 		return;
1261 
1262 	hif_pm_runtime_stop(sc);
1263 
1264 	hif_is_recovery_in_progress(scn) ?
1265 		hif_pm_runtime_sanitize_on_ssr_exit(sc) :
1266 		hif_pm_runtime_sanitize_on_exit(sc);
1267 }
1268 #else
1269 static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
1270 static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
1271 static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
1272 static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
1273 #endif
1274 
1275 /**
1276  * hif_disable_power_gating() - disable HW power gating
1277  * @hif_ctx: hif context
1278  *
1279  * disables pcie L1 power states
1280  */
1281 static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1282 {
1283 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1284 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1285 
1286 	if (NULL == scn) {
1287 		HIF_ERROR("%s: Could not disable ASPM scn is null",
1288 		       __func__);
1289 		return;
1290 	}
1291 
1292 	/* Disable ASPM when pkt log is enabled */
1293 	pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1294 	pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1295 }
1296 
1297 /**
1298  * hif_enable_power_gating() - enable HW power gating
1299  * @sc: pci context
1300  *
1301  * enables pcie L1 power states
1302  */
1303 static void hif_enable_power_gating(struct hif_pci_softc *sc)
1304 {
1305 	if (NULL == sc) {
1306 		HIF_ERROR("%s: Could not re-enable ASPM, sc is null",
1307 		       __func__);
1308 		return;
1309 	}
1310 
1311 	/* Re-enable ASPM after firmware/OTP download is complete */
1312 	pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1313 }
1314 
1315 /**
1316  * hif_pci_enable_power_management() - enable power management
1317  * @hif_sc: hif context
1318  * @is_packet_log_enabled: true if packet log is enabled
1319  * Enables runtime pm, ASPM (via hif_enable_power_gating) and re-enables
1320  * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
1321  *
1322  * note: epping mode does not call this function as it does not
1323  *       care about saving power.
1324  */
1325 void hif_pci_enable_power_management(struct hif_softc *hif_sc,
1326 				 bool is_packet_log_enabled)
1327 {
1328 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
1329 
1330 	if (pci_ctx == NULL) {
1331 		HIF_ERROR("%s, hif_ctx null", __func__);
1332 		return;
1333 	}
1334 
1335 	hif_pm_runtime_start(pci_ctx);
1336 
1337 	if (!is_packet_log_enabled)
1338 		hif_enable_power_gating(pci_ctx);
1339 
1340 	if (!CONFIG_ATH_PCIE_MAX_PERF &&
1341 	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
1342 	    !ce_srng_based(hif_sc)) {
1343 		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
1344 		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
1345 			HIF_ERROR("%s, failed to set target to sleep",
1346 				  __func__);
1347 	}
1348 }
1349 
1350 /**
1351  * hif_pci_disable_power_management() - disable power management
1352  * @hif_ctx: hif context
1353  *
1354  * Currently disables runtime pm. Should be updated to behave
1355  * correctly if runtime pm is not started. Should be updated to take
1356  * care of aspm and soc sleep for driver load.
1357  */
1358 void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
1359 {
1360 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1361 
1362 	if (pci_ctx == NULL) {
1363 		HIF_ERROR("%s, hif_ctx null", __func__);
1364 		return;
1365 	}
1366 
1367 	hif_pm_runtime_stop(pci_ctx);
1368 }
1369 
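/**
 * hif_pci_display_stats() - print the copy engine stats for this bus
 * @hif_ctx: hif context
 */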
1370 void hif_pci_display_stats(struct hif_softc *hif_ctx)
1371 {
1372 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1373 
1374 	if (pci_ctx == NULL) {
1375 		HIF_ERROR("%s, hif_ctx null", __func__);
1376 		return;
1377 	}
1378 	hif_display_ce_stats(&pci_ctx->ce_sc);
1379 }
1380 
1381 void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1382 {
1383 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1384 
1385 	if (pci_ctx == NULL) {
1386 		HIF_ERROR("%s, hif_ctx null", __func__);
1387 		return;
1388 	}
1389 	hif_clear_ce_stats(&pci_ctx->ce_sc);
1390 }
1391 
1392 #define ATH_PCI_PROBE_RETRY_MAX 3
1393 /**
1394  * hif_pci_open(): open the pci layer of the hif bus
1395  * @hif_ctx: hif context
1396  * @bus_type: bus type
1397  *
1398  * Return: QDF_STATUS
1399  */
1400 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
1401 {
1402 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1403 
1404 	hif_ctx->bus_type = bus_type;
1405 	hif_pm_runtime_open(sc);
1406 
1407 	qdf_spinlock_create(&sc->irq_lock);
1408 
1409 	return hif_ce_open(hif_ctx);
1410 }
1411 
1412 /**
1413  * hif_wake_target_cpu() - wake the target's cpu
1414  * @scn: hif context
1415  *
1416  * Send an interrupt to the device to wake up the Target CPU
1417  * so it has an opportunity to notice any changed state.
1418  */
1419 static void hif_wake_target_cpu(struct hif_softc *scn)
1420 {
1421 	QDF_STATUS rv;
1422 	uint32_t core_ctrl;
1423 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1424 
1425 	rv = hif_diag_read_access(hif_hdl,
1426 				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1427 				  &core_ctrl);
1428 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1429 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1430 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1431 
1432 	rv = hif_diag_write_access(hif_hdl,
1433 				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1434 				   core_ctrl);
1435 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1436 }
1437 
1438 /**
1439  * soc_wake_reset() - allow the target to go to sleep
1440  * @scn: hif_softc
1441  *
1442  * Clear the force wake register.  This is done by
1443  * hif_sleep_entry and by the cancel deferred target sleep path.
1444  */
1445 static void soc_wake_reset(struct hif_softc *scn)
1446 {
1447 	hif_write32_mb(scn, scn->mem +
1448 		PCIE_LOCAL_BASE_ADDRESS +
1449 		PCIE_SOC_WAKE_ADDRESS,
1450 		PCIE_SOC_WAKE_RESET);
1451 }
1452 
1453 /**
1454  * hif_sleep_entry() - gate target sleep
1455  * @arg: hif context
1456  *
1457  * This function is the callback for the sleep timer.
1458  * Check if the last force awake critical section was at least
1459  * HIF_MIN_SLEEP_INACTIVITY_TIME_MS ago.  If it was,
1460  * allow the target to go to sleep and cancel the sleep timer;
1461  * otherwise, reschedule the sleep timer.
1462  */
1463 static void hif_sleep_entry(void *arg)
1464 {
1465 	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1466 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1467 	uint32_t idle_ms;
1468 
1469 	if (scn->recovery)
1470 		return;
1471 
1472 	if (hif_is_driver_unloading(scn))
1473 		return;
1474 
1475 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1476 	if (hif_state->verified_awake == false) {
1477 		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1478 						    - hif_state->sleep_ticks);
1479 		if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1480 			if (!qdf_atomic_read(&scn->link_suspended)) {
1481 				soc_wake_reset(scn);
1482 				hif_state->fake_sleep = false;
1483 			}
1484 		} else {
1485 			qdf_timer_stop(&hif_state->sleep_timer);
1486 			qdf_timer_start(&hif_state->sleep_timer,
1487 				    HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1488 		}
1489 	} else {
1490 		qdf_timer_stop(&hif_state->sleep_timer);
1491 		qdf_timer_start(&hif_state->sleep_timer,
1492 					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1493 	}
1494 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1495 }
1496 
1497 #define HIF_HIA_MAX_POLL_LOOP    1000000
1498 #define HIF_HIA_POLLING_DELAY_MS 10
1499 
1500 #ifdef CONFIG_WIN
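/**
 * hif_set_hia_extnd() - extended host interest area setup (CONFIG_WIN)
 * @scn: hif context
 *
 * For AR900B/QCA9984/QCA9888 targets, reads the chip revision and, when
 * the frac/intval/target clock module parameters are supplied, writes
 * the clock override and desired CPU speed into the host interest area.
 */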
1501 static void hif_set_hia_extnd(struct hif_softc *scn)
1502 {
1503 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1504 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1505 	uint32_t target_type = tgt_info->target_type;
1506 
1507 	HIF_TRACE("%s: E", __func__);
1508 
1509 	if ((target_type == TARGET_TYPE_AR900B) ||
1510 			target_type == TARGET_TYPE_QCA9984 ||
1511 			target_type == TARGET_TYPE_QCA9888) {
1512 		/* CHIP revision is bits 8-11 of the CHIP_ID register 0xec
1513 		 * in RTC space
1514 		 */
1515 		tgt_info->target_revision
1516 			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
1517 					+ CHIP_ID_ADDRESS));
1518 		qdf_print("chip_id 0x%x chip_revision 0x%x",
1519 			  target_type, tgt_info->target_revision);
1520 	}
1521 
1522 	{
1523 		uint32_t flag2_value = 0;
1524 		uint32_t flag2_targ_addr =
1525 			host_interest_item_address(target_type,
1526 			offsetof(struct host_interest_s, hi_skip_clock_init));
1527 
1528 		if ((ar900b_20_targ_clk != -1) &&
1529 			(frac != -1) && (intval != -1)) {
1530 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1531 				&flag2_value);
1532 			qdf_print("\n Setting clk_override");
1533 			flag2_value |= CLOCK_OVERRIDE;
1534 
1535 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1536 					flag2_value);
1537 			qdf_print("\n CLOCK PLL val set %d", flag2_value);
1538 		} else {
1539 			qdf_print("\n CLOCK PLL skipped");
1540 		}
1541 	}
1542 
1543 	if (target_type == TARGET_TYPE_AR900B
1544 			|| target_type == TARGET_TYPE_QCA9984
1545 			|| target_type == TARGET_TYPE_QCA9888) {
1546 
1547 		/* For AR9980_2.0, a 300 MHz clock is used; right now we assume
1548 		 * this would be supplied through module parameters.
1549 		 * If not supplied, assume the default or the same behavior as 1.0.
1550 		 * Assume the 1.0 clock can't be tuned; reset to defaults.
1551 		 */
1552 
1553 		qdf_print(KERN_INFO
1554 			  "%s: setting the target pll frac %x intval %x",
1555 			  __func__, frac, intval);
1556 
1557 		/* do not touch frac and intval; let them be default -1,
1558 		 * if desired, host can supply these through module params
1559 		 */
1560 		if (frac != -1 || intval != -1) {
1561 			uint32_t flag2_value = 0;
1562 			uint32_t flag2_targ_addr;
1563 
1564 			flag2_targ_addr =
1565 				host_interest_item_address(target_type,
1566 				offsetof(struct host_interest_s,
1567 					hi_clock_info));
1568 			hif_diag_read_access(hif_hdl,
1569 				flag2_targ_addr, &flag2_value);
1570 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1571 				  flag2_value);
1572 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1573 			qdf_print("\n INT Val %x  Address %x",
1574 				  intval, flag2_value + 4);
1575 			hif_diag_write_access(hif_hdl,
1576 					flag2_value + 4, intval);
1577 		} else {
1578 			qdf_print(KERN_INFO
1579 				  "%s: no frac provided, skipping pre-configuring PLL",
1580 				  __func__);
1581 		}
1582 
1583 		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
1584 		if ((target_type == TARGET_TYPE_AR900B)
1585 			&& (tgt_info->target_revision == AR900B_REV_2)
1586 			&& ar900b_20_targ_clk != -1) {
1587 			uint32_t flag2_value = 0;
1588 			uint32_t flag2_targ_addr;
1589 
1590 			flag2_targ_addr
1591 				= host_interest_item_address(target_type,
1592 					offsetof(struct host_interest_s,
1593 					hi_desired_cpu_speed_hz));
1594 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1595 							&flag2_value);
1596 			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
1597 				  flag2_value);
1598 			hif_diag_write_access(hif_hdl, flag2_value,
1599 				ar900b_20_targ_clk/*300000000u*/);
1600 		} else if (target_type == TARGET_TYPE_QCA9888) {
1601 			uint32_t flag2_targ_addr;
1602 
1603 			if (200000000u != qca9888_20_targ_clk) {
1604 				qca9888_20_targ_clk = 300000000u;
1605 				/* Setting the target clock speed to 300 mhz */
1606 			}
1607 
1608 			flag2_targ_addr
1609 				= host_interest_item_address(target_type,
1610 					offsetof(struct host_interest_s,
1611 					hi_desired_cpu_speed_hz));
1612 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1613 				qca9888_20_targ_clk);
1614 		} else {
1615 			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
1616 				  __func__);
1617 		}
1618 	} else {
1619 		if (frac != -1 || intval != -1) {
1620 			uint32_t flag2_value = 0;
1621 			uint32_t flag2_targ_addr =
1622 				host_interest_item_address(target_type,
1623 					offsetof(struct host_interest_s,
1624 							hi_clock_info));
1625 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1626 						&flag2_value);
1627 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1628 				  flag2_value);
1629 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1630 			qdf_print("\n INT Val %x  Address %x", intval,
1631 				  flag2_value + 4);
1632 			hif_diag_write_access(hif_hdl, flag2_value + 4,
1633 					      intval);
1634 		}
1635 	}
1636 }
1637 
1638 #else
1639 
1640 static void hif_set_hia_extnd(struct hif_softc *scn)
1641 {
1642 }
1643 
1644 #endif
1645 
1646 /**
1647  * hif_set_hia() - fill out the host interest area
1648  * @scn: hif context
1649  *
1650  * This is replaced by hif_wlan_enable for integrated targets.
1651  * This fills out the host interest area.  The firmware will
1652  * process these memory addresses when it is first brought out
1653  * of reset.
1654  *
1655  * Return: 0 for success.
1656  */
1657 static int hif_set_hia(struct hif_softc *scn)
1658 {
1659 	QDF_STATUS rv;
1660 	uint32_t interconnect_targ_addr = 0;
1661 	uint32_t pcie_state_targ_addr = 0;
1662 	uint32_t pipe_cfg_targ_addr = 0;
1663 	uint32_t svc_to_pipe_map = 0;
1664 	uint32_t pcie_config_flags = 0;
1665 	uint32_t flag2_value = 0;
1666 	uint32_t flag2_targ_addr = 0;
1667 #ifdef QCA_WIFI_3_0
1668 	uint32_t host_interest_area = 0;
1669 	uint8_t i;
1670 #else
1671 	uint32_t ealloc_value = 0;
1672 	uint32_t ealloc_targ_addr = 0;
1673 	uint8_t banks_switched = 1;
1674 	uint32_t chip_id;
1675 #endif
1676 	uint32_t pipe_cfg_addr;
1677 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1678 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1679 	uint32_t target_type = tgt_info->target_type;
1680 	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
1681 	static struct CE_pipe_config *target_ce_config;
1682 	struct service_to_pipe *target_service_to_ce_map;
1683 
1684 	HIF_TRACE("%s: E", __func__);
1685 
1686 	hif_get_target_ce_config(scn,
1687 				 &target_ce_config, &target_ce_config_sz,
1688 				 &target_service_to_ce_map,
1689 				 &target_service_to_ce_map_sz,
1690 				 NULL, NULL);
1691 
1692 	if (ADRASTEA_BU)
1693 		return QDF_STATUS_SUCCESS;
1694 
1695 #ifdef QCA_WIFI_3_0
1696 	i = 0;
1697 	while (i < HIF_HIA_MAX_POLL_LOOP) {
1698 		host_interest_area = hif_read32_mb(scn, scn->mem +
1699 						A_SOC_CORE_SCRATCH_0_ADDRESS);
1700 		if ((host_interest_area & 0x01) == 0) {
1701 			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1702 			host_interest_area = 0;
1703 			i++;
1704 			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1705 				HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1706 		} else {
1707 			host_interest_area &= (~0x01);
1708 			hif_write32_mb(scn, scn->mem + 0x113014, 0);
1709 			break;
1710 		}
1711 	}
1712 
1713 	if (i >= HIF_HIA_MAX_POLL_LOOP) {
1714 		HIF_ERROR("%s: hia polling timeout", __func__);
1715 		return -EIO;
1716 	}
1717 
1718 	if (host_interest_area == 0) {
1719 		HIF_ERROR("%s: host_interest_area = 0", __func__);
1720 		return -EIO;
1721 	}
1722 
1723 	interconnect_targ_addr = host_interest_area +
1724 			offsetof(struct host_interest_area_t,
1725 			hi_interconnect_state);
1726 
1727 	flag2_targ_addr = host_interest_area +
1728 			offsetof(struct host_interest_area_t, hi_option_flag2);
1729 
1730 #else
1731 	interconnect_targ_addr = hif_hia_item_address(target_type,
1732 		offsetof(struct host_interest_s, hi_interconnect_state));
1733 	ealloc_targ_addr = hif_hia_item_address(target_type,
1734 		offsetof(struct host_interest_s, hi_early_alloc));
1735 	flag2_targ_addr = hif_hia_item_address(target_type,
1736 		offsetof(struct host_interest_s, hi_option_flag2));
1737 #endif
1738 	/* Supply Target-side CE configuration */
1739 	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1740 			  &pcie_state_targ_addr);
1741 	if (rv != QDF_STATUS_SUCCESS) {
1742 		HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1743 			  __func__, interconnect_targ_addr, rv);
1744 		goto done;
1745 	}
1746 	if (pcie_state_targ_addr == 0) {
1747 		rv = QDF_STATUS_E_FAILURE;
1748 		HIF_ERROR("%s: pcie state addr is 0", __func__);
1749 		goto done;
1750 	}
1751 	pipe_cfg_addr = pcie_state_targ_addr +
1752 			  offsetof(struct pcie_state_s,
1753 			  pipe_cfg_addr);
1754 	rv = hif_diag_read_access(hif_hdl,
1755 			  pipe_cfg_addr,
1756 			  &pipe_cfg_targ_addr);
1757 	if (rv != QDF_STATUS_SUCCESS) {
1758 		HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1759 			__func__, pipe_cfg_addr, rv);
1760 		goto done;
1761 	}
1762 	if (pipe_cfg_targ_addr == 0) {
1763 		rv = QDF_STATUS_E_FAILURE;
1764 		HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1765 		goto done;
1766 	}
1767 
1768 	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1769 			(uint8_t *) target_ce_config,
1770 			target_ce_config_sz);
1771 
1772 	if (rv != QDF_STATUS_SUCCESS) {
1773 		HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1774 		goto done;
1775 	}
1776 
1777 	rv = hif_diag_read_access(hif_hdl,
1778 			  pcie_state_targ_addr +
1779 			  offsetof(struct pcie_state_s,
1780 			   svc_to_pipe_map),
1781 			  &svc_to_pipe_map);
1782 	if (rv != QDF_STATUS_SUCCESS) {
1783 		HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1784 		goto done;
1785 	}
1786 	if (svc_to_pipe_map == 0) {
1787 		rv = QDF_STATUS_E_FAILURE;
1788 		HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1789 		goto done;
1790 	}
1791 
1792 	rv = hif_diag_write_mem(hif_hdl,
1793 			svc_to_pipe_map,
1794 			(uint8_t *) target_service_to_ce_map,
1795 			target_service_to_ce_map_sz);
1796 	if (rv != QDF_STATUS_SUCCESS) {
1797 		HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1798 		goto done;
1799 	}
1800 
1801 	rv = hif_diag_read_access(hif_hdl,
1802 			pcie_state_targ_addr +
1803 			offsetof(struct pcie_state_s,
1804 			config_flags),
1805 			&pcie_config_flags);
1806 	if (rv != QDF_STATUS_SUCCESS) {
1807 		HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1808 		goto done;
1809 	}
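	/* Adjust the PCIe config flags read back from the target: optionally
	 * enable L1 clock gating and AXI clock gating per the build config,
	 * and always request a wait on clock switch.
	 */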
1810 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1811 	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1812 #else
1813 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1814 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1815 	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1816 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1817 	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1818 #endif
1819 	rv = hif_diag_write_mem(hif_hdl,
1820 			pcie_state_targ_addr +
1821 			offsetof(struct pcie_state_s,
1822 			config_flags),
1823 			(uint8_t *) &pcie_config_flags,
1824 			sizeof(pcie_config_flags));
1825 	if (rv != QDF_STATUS_SUCCESS) {
1826 		HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1827 		goto done;
1828 	}
1829 
1830 #ifndef QCA_WIFI_3_0
1831 	/* configure early allocation */
1832 	ealloc_targ_addr = hif_hia_item_address(target_type,
1833 						offsetof(
1834 						struct host_interest_s,
1835 						hi_early_alloc));
1836 
1837 	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1838 			&ealloc_value);
1839 	if (rv != QDF_STATUS_SUCCESS) {
1840 		HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1841 		goto done;
1842 	}
1843 
1844 	/* 1 bank is switched to IRAM, except ROME 1.0 */
1845 	ealloc_value |=
1846 		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1847 		 HI_EARLY_ALLOC_MAGIC_MASK);
1848 
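	/* Read the chip id to decide how many IRAM banks to switch for this
	 * Rome revision (see the switch statement below).
	 */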
1849 	rv = hif_diag_read_access(hif_hdl,
1850 			  CHIP_ID_ADDRESS |
1851 			  RTC_SOC_BASE_ADDRESS, &chip_id);
1852 	if (rv != QDF_STATUS_SUCCESS) {
1853 		HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1854 		goto done;
1855 	}
1856 	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1857 		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1858 		switch (CHIP_ID_REVISION_GET(chip_id)) {
1859 		case 0x2:       /* ROME 1.3 */
1860 			/* 2 banks are switched to IRAM */
1861 			banks_switched = 2;
1862 			break;
1863 		case 0x4:       /* ROME 2.1 */
1864 		case 0x5:       /* ROME 2.2 */
1865 			banks_switched = 6;
1866 			break;
1867 		case 0x8:       /* ROME 3.0 */
1868 		case 0x9:       /* ROME 3.1 */
1869 		case 0xA:       /* ROME 3.2 */
1870 			banks_switched = 9;
1871 			break;
1872 		case 0x0:       /* ROME 1.0 */
1873 		case 0x1:       /* ROME 1.1 */
1874 		default:
1875 			/* 3 banks are switched to IRAM */
1876 			banks_switched = 3;
1877 			break;
1878 		}
1879 	}
1880 
1881 	ealloc_value |=
1882 		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1883 		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1884 
1885 	rv = hif_diag_write_access(hif_hdl,
1886 				ealloc_targ_addr,
1887 				ealloc_value);
1888 	if (rv != QDF_STATUS_SUCCESS) {
1889 		HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1890 		goto done;
1891 	}
1892 #endif
1893 	if ((target_type == TARGET_TYPE_AR900B)
1894 			|| (target_type == TARGET_TYPE_QCA9984)
1895 			|| (target_type == TARGET_TYPE_QCA9888)
1896 			|| (target_type == TARGET_TYPE_AR9888)) {
1897 		hif_set_hia_extnd(scn);
1898 	}
1899 
1900 	/* Tell Target to proceed with initialization */
1901 	flag2_targ_addr = hif_hia_item_address(target_type,
1902 						offsetof(
1903 						struct host_interest_s,
1904 						hi_option_flag2));
1905 
1906 	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1907 			  &flag2_value);
1908 	if (rv != QDF_STATUS_SUCCESS) {
1909 		HIF_ERROR("%s: get option val (%d)", __func__, rv);
1910 		goto done;
1911 	}
1912 
1913 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1914 	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1915 			   flag2_value);
1916 	if (rv != QDF_STATUS_SUCCESS) {
1917 		HIF_ERROR("%s: set option val (%d)", __func__, rv);
1918 		goto done;
1919 	}
1920 
1921 	hif_wake_target_cpu(scn);
1922 
1923 done:
1924 
1925 	return rv;
1926 }
1927 
1928 /**
1929  * hif_pci_bus_configure() - configure the PCIe bus
1930  * @hif_sc: pointer to the hif context.
1931  *
1932  * Return: 0 for success; nonzero for failure.
1933  */
1934 int hif_pci_bus_configure(struct hif_softc *hif_sc)
1935 {
1936 	int status = 0;
1937 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1938 	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
1939 
1940 	hif_ce_prepare_config(hif_sc);
1941 
1942 	/* initialize sleep state adjust variables */
1943 	hif_state->sleep_timer_init = true;
1944 	hif_state->keep_awake_count = 0;
1945 	hif_state->fake_sleep = false;
1946 	hif_state->sleep_ticks = 0;
1947 
1948 	qdf_timer_init(NULL, &hif_state->sleep_timer,
1949 			       hif_sleep_entry, (void *)hif_state,
1950 			       QDF_TIMER_TYPE_WAKE_APPS);
1951 	hif_state->sleep_timer_init = true;
1952 
1953 	status = hif_wlan_enable(hif_sc);
1954 	if (status) {
1955 		HIF_ERROR("%s: hif_wlan_enable error = %d",
1956 			  __func__, status);
1957 		goto timer_free;
1958 	}
1959 
1960 	A_TARGET_ACCESS_LIKELY(hif_sc);
1961 
1962 	if ((CONFIG_ATH_PCIE_MAX_PERF ||
1963 	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
1964 	    !ce_srng_based(hif_sc)) {
1965 		/*
1966 		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
1967 		 * prevent sleep when we want to keep firmware always awake
1968 		 * note: when we want to keep firmware always awake,
1969 		 *       hif_target_sleep_state_adjust will point to a dummy
1970 		 *       function, and hif_pci_target_sleep_state_adjust must
1971 		 *       be called instead.
1972 		 * note: bus type check is here because AHB bus is reusing
1973 		 *       hif_pci_bus_configure code.
1974 		 */
1975 		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
1976 			if (hif_pci_target_sleep_state_adjust(hif_sc,
1977 					false, true) < 0) {
1978 				status = -EACCES;
1979 				goto disable_wlan;
1980 			}
1981 		}
1982 	}
1983 
1984 	/* todo: consider replacing this with an srng field */
1985 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
1986 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
1987 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
1988 	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
1989 		hif_sc->per_ce_irq = true;
1990 	}
1991 
1992 	status = hif_config_ce(hif_sc);
1993 	if (status)
1994 		goto disable_wlan;
1995 
1996 	/* QCA_WIFI_QCA8074_VP: Should not be executed on 8074 VP platform */
1997 	if (hif_needs_bmi(hif_osc)) {
1998 		status = hif_set_hia(hif_sc);
1999 		if (status)
2000 			goto unconfig_ce;
2001 
2002 		HIF_INFO_MED("%s: hif_set_hia done", __func__);
2003 
2004 	}
2005 
2006 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
2007 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
2008 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
2009 	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
2010 		HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
2011 						__func__);
2012 	else {
2013 		status = hif_configure_irq(hif_sc);
2014 		if (status < 0)
2015 			goto unconfig_ce;
2016 	}
2017 
2018 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2019 
2020 	return status;
2021 
2022 unconfig_ce:
2023 	hif_unconfig_ce(hif_sc);
2024 disable_wlan:
2025 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2026 	hif_wlan_disable(hif_sc);
2027 
2028 timer_free:
2029 	qdf_timer_stop(&hif_state->sleep_timer);
2030 	qdf_timer_free(&hif_state->sleep_timer);
2031 	hif_state->sleep_timer_init = false;
2032 
2033 	HIF_ERROR("%s: failed, status = %d", __func__, status);
2034 	return status;
2035 }
2036 
2037 /**
2038  * hif_pci_close(): close the hif layer for the PCIe bus
2039  *
2040  * Return: n/a
2041  */
2042 void hif_pci_close(struct hif_softc *hif_sc)
2043 {
2044 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
2045 
2046 	hif_pm_runtime_close(hif_pci_sc);
2047 	hif_ce_close(hif_sc);
2048 }
2049 
2050 #define BAR_NUM 0
2051 
2052 static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
2053 				struct pci_dev *pdev,
2054 				const struct pci_device_id *id)
2055 {
2056 	void __iomem *mem;
2057 	int ret = 0;
2058 	uint16_t device_id = 0;
2059 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2060 
2061 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
2062 	if (device_id != id->device) {
2063 		HIF_ERROR(
2064 		   "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
2065 		   __func__, device_id, id->device);
2066 		/* pci link is down, so returning with error code */
2067 		return -EIO;
2068 	}
2069 
2070 	/* FIXME: temp. commenting out assign_resource
2071 	 * call for dev_attach to work on 2.6.38 kernel
2072 	 */
2073 #if (!defined(__LINUX_ARM_ARCH__))
2074 	if (pci_assign_resource(pdev, BAR_NUM)) {
2075 		HIF_ERROR("%s: pci_assign_resource error", __func__);
2076 		return -EIO;
2077 	}
2078 #endif
2079 	if (pci_enable_device(pdev)) {
2080 		HIF_ERROR("%s: pci_enable_device error",
2081 			   __func__);
2082 		return -EIO;
2083 	}
2084 
2085 	/* Request MMIO resources */
2086 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2087 	if (ret) {
2088 		HIF_ERROR("%s: PCI MMIO reservation error", __func__);
2089 		ret = -EIO;
2090 		goto err_region;
2091 	}
2092 
2093 #ifdef CONFIG_ARM_LPAE
2094 	/* if CONFIG_ARM_LPAE is enabled, the 64-bit DMA mask must be set
2095 	 * even for 32-bit devices.
2096 	 */
2097 	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2098 	if (ret) {
2099 		HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
2100 		goto err_dma;
2101 	}
2102 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2103 	if (ret) {
2104 		HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
2105 		goto err_dma;
2106 	}
2107 #else
2108 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2109 	if (ret) {
2110 		HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
2111 		goto err_dma;
2112 	}
2113 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2114 	if (ret) {
2115 		HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
2116 			   __func__);
2117 		goto err_dma;
2118 	}
2119 #endif
2120 
2121 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2122 
2123 	/* Set bus master bit in PCI_COMMAND to enable DMA */
2124 	pci_set_master(pdev);
2125 
2126 	/* Arrange for access to Target SoC registers. */
2127 	mem = pci_iomap(pdev, BAR_NUM, 0);
2128 	if (!mem) {
2129 		HIF_ERROR("%s: PCI iomap error", __func__);
2130 		ret = -EIO;
2131 		goto err_iomap;
2132 	}
2133 
2134 	HIF_INFO("*****BAR is %pK\n", (void *)mem);
2135 
2136 	sc->mem = mem;
2137 
2138 	/* Hawkeye emulation specific change */
2139 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
2140 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
2141 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
2142 		(device_id == RUMIM2M_DEVICE_ID_NODE3) ||
2143 		(device_id == RUMIM2M_DEVICE_ID_NODE4) ||
2144 		(device_id == RUMIM2M_DEVICE_ID_NODE5)) {
2145 		mem = mem + 0x0c000000;
2146 		sc->mem = mem;
2147 		HIF_INFO("%s: Changing PCI mem base to %pK\n",
2148 			__func__, sc->mem);
2149 	}
2150 
2151 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
2152 	ol_sc->mem = mem;
2153 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
2154 	sc->pci_enabled = true;
2155 	return ret;
2156 
2157 err_iomap:
2158 	pci_clear_master(pdev);
2159 err_dma:
2160 	pci_release_region(pdev, BAR_NUM);
2161 err_region:
2162 	pci_disable_device(pdev);
2163 	return ret;
2164 }
2165 
2166 static int hif_enable_pci_pld(struct hif_pci_softc *sc,
2167 			      struct pci_dev *pdev,
2168 			      const struct pci_device_id *id)
2169 {
2170 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2171 	sc->pci_enabled = true;
2172 	return 0;
2173 }
2174 
2175 
2176 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
2177 {
2178 	pci_disable_msi(sc->pdev);
2179 	pci_iounmap(sc->pdev, sc->mem);
2180 	pci_clear_master(sc->pdev);
2181 	pci_release_region(sc->pdev, BAR_NUM);
2182 	pci_disable_device(sc->pdev);
2183 }
2184 
2185 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
2186 
2187 static void hif_disable_pci(struct hif_pci_softc *sc)
2188 {
2189 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2190 
2191 	if (ol_sc == NULL) {
2192 		HIF_ERROR("%s: ol_sc = NULL", __func__);
2193 		return;
2194 	}
2195 	hif_pci_device_reset(sc);
2196 	sc->hif_pci_deinit(sc);
2197 
2198 	sc->mem = NULL;
2199 	ol_sc->mem = NULL;
2200 }
2201 
2202 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
2203 {
2204 	int ret = 0;
2205 	int targ_awake_limit = 500;
2206 #ifndef QCA_WIFI_3_0
2207 	uint32_t fw_indicator;
2208 #endif
2209 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2210 
2211 	/*
2212 	 * Verify that the Target was started cleanly.
2213 	 * The case where this is most likely is with an AUX-powered
2214 	 * Target and a Host in WoW mode. If the Host crashes,
2215 	 * loses power, or is restarted (without unloading the driver)
2216 	 * then the Target is left (aux) powered and running.  On a
2217 	 * subsequent driver load, the Target is in an unexpected state.
2218 	 * We try to catch that here in order to reset the Target and
2219 	 * retry the probe.
2220 	 */
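	/* Force the target awake via the SOC wake register, then poll until
	 * it reports awake or targ_awake_limit expires.
	 */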
2221 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2222 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2223 	while (!hif_targ_is_awake(scn, sc->mem)) {
2224 		if (0 == targ_awake_limit) {
2225 			HIF_ERROR("%s: target awake timeout", __func__);
2226 			ret = -EAGAIN;
2227 			goto end;
2228 		}
2229 		qdf_mdelay(1);
2230 		targ_awake_limit--;
2231 	}
2232 
2233 #if PCIE_BAR0_READY_CHECKING
2234 	{
2235 		int wait_limit = 200;
2236 		/* Synchronization point: wait the BAR0 is configured */
2237 		while (wait_limit-- &&
2238 			   !(hif_read32_mb(sc, sc->mem +
2239 					  PCIE_LOCAL_BASE_ADDRESS +
2240 					  PCIE_SOC_RDY_STATUS_ADDRESS)
2241 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
2242 			qdf_mdelay(10);
2243 		}
2244 		if (wait_limit < 0) {
2245 			/* AR6320v1 doesn't support checking of BAR0
2246 			 * configuration, takes one sec to wait BAR0 ready
2247 			 */
2248 			HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
2249 				    __func__);
2250 		}
2251 	}
2252 #endif
2253 
2254 #ifndef QCA_WIFI_3_0
2255 	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
2256 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2257 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2258 
2259 	if (fw_indicator & FW_IND_INITIALIZED) {
2260 		HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
2261 			   __func__);
2262 		ret = -EAGAIN;
2263 		goto end;
2264 	}
2265 #endif
2266 
2267 end:
2268 	return ret;
2269 }
2270 
2271 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2272 {
2273 	int ret = 0;
2274 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2275 	uint32_t target_type = scn->target_info.target_type;
2276 
2277 	HIF_TRACE("%s: E", __func__);
2278 
2279 	/* MSI is not supported or MSI IRQ setup failed; use legacy interrupts */
2280 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2281 	ret = request_irq(sc->pdev->irq,
2282 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2283 			  "wlan_pci", sc);
2284 	if (ret) {
2285 		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
2286 		goto end;
2287 	}
2288 	scn->wake_irq = sc->pdev->irq;
2289 	/* Use sc->irq instead of sc->pdev->irq
2290 	 * platform_device pdev doesn't have an irq field
2291 	 */
2292 	sc->irq = sc->pdev->irq;
2293 	/* Use Legacy PCI Interrupts */
2294 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2295 		  PCIE_INTR_ENABLE_ADDRESS),
2296 		  HOST_GROUP0_MASK);
2297 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2298 			       PCIE_INTR_ENABLE_ADDRESS));
2299 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2300 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2301 
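	/* For these legacy targets, force the SoC awake again via the wake
	 * register before proceeding.
	 */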
2302 	if ((target_type == TARGET_TYPE_IPQ4019) ||
2303 			(target_type == TARGET_TYPE_AR900B)  ||
2304 			(target_type == TARGET_TYPE_QCA9984) ||
2305 			(target_type == TARGET_TYPE_AR9888) ||
2306 			(target_type == TARGET_TYPE_QCA9888) ||
2307 			(target_type == TARGET_TYPE_AR6320V1) ||
2308 			(target_type == TARGET_TYPE_AR6320V2) ||
2309 			(target_type == TARGET_TYPE_AR6320V3)) {
2310 		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2311 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2312 	}
2313 end:
2314 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2315 			  "%s: X, ret = %d", __func__, ret);
2316 	return ret;
2317 }
2318 
2319 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
2320 {
2321 	int ret;
2322 	int ce_id, irq;
2323 	uint32_t msi_data_start;
2324 	uint32_t msi_data_count;
2325 	uint32_t msi_irq_start;
2326 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2327 
2328 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2329 					    &msi_data_count, &msi_data_start,
2330 					    &msi_irq_start);
2331 	if (ret)
2332 		return ret;
2333 
2334 	/* needs to match the ce_id -> irq data mapping
2335 	 * used in the srng parameter configuration
2336 	 */
2337 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2338 		unsigned int msi_data;
2339 
2340 		if (!ce_sc->tasklets[ce_id].inited)
2341 			continue;
2342 
2343 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
2344 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2345 
2346 		HIF_INFO("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
2347 			  ce_id, msi_data, irq);
2348 
2349 		free_irq(irq, &ce_sc->tasklets[ce_id]);
2350 	}
2351 
2352 	return ret;
2353 }
2354 
2355 static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2356 {
2357 	int i, j, irq;
2358 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2359 	struct hif_exec_context *hif_ext_group;
2360 
2361 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2362 		hif_ext_group = hif_state->hif_ext_group[i];
2363 		if (hif_ext_group->irq_requested) {
2364 			hif_ext_group->irq_requested = false;
2365 			for (j = 0; j < hif_ext_group->numirq; j++) {
2366 				irq = hif_ext_group->os_irq[j];
2367 				free_irq(irq, hif_ext_group);
2368 			}
2369 			hif_ext_group->numirq = 0;
2370 		}
2371 	}
2372 }
2373 
2374 /**
2375  * hif_pci_nointrs(): disable IRQs
2376  *
2377  * This function stops interrupt(s) and frees the requested IRQs
2378  *
2379  * @scn: struct hif_softc
2380  *
2381  * Return: none
2382  */
2383 void hif_pci_nointrs(struct hif_softc *scn)
2384 {
2385 	int i, ret;
2386 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2387 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2388 
2389 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2390 
2391 	if (scn->request_irq_done == false)
2392 		return;
2393 
2394 	hif_pci_deconfigure_grp_irq(scn);
2395 
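	/* Try MSI teardown first; a return of -EINVAL from
	 * hif_ce_srng_msi_free_irq() is treated as "MSI not in use" and we
	 * fall back to per-vector MSI or the shared legacy line below.
	 */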
2396 	ret = hif_ce_srng_msi_free_irq(scn);
2397 	if (ret != -EINVAL) {
2398 		/* ce irqs freed in hif_ce_srng_msi_free_irq */
2399 
2400 		if (scn->wake_irq)
2401 			free_irq(scn->wake_irq, scn);
2402 		scn->wake_irq = 0;
2403 	} else if (sc->num_msi_intrs > 0) {
2404 		/* MSI interrupt(s) */
2405 		for (i = 0; i < sc->num_msi_intrs; i++)
2406 			free_irq(sc->irq + i, sc);
2407 		sc->num_msi_intrs = 0;
2408 	} else {
2409 		/* Legacy PCI line interrupt
2410 		 * Use sc->irq instead of sc->pdev->irq
2411 		 * platform_device pdev doesn't have an irq field
2412 		 */
2413 		free_irq(sc->irq, sc);
2414 	}
2415 	scn->request_irq_done = false;
2416 }
2417 
2418 /**
2419  * hif_pci_disable_bus(): disable the PCIe bus
2420  *
2421  * This function resets the device and disables the bus
2422  *
2423  * @scn: hif context
2424  *
2425  * Return: none
2426  */
2427 void hif_pci_disable_bus(struct hif_softc *scn)
2428 {
2429 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2430 	struct pci_dev *pdev;
2431 	void __iomem *mem;
2432 	struct hif_target_info *tgt_info = &scn->target_info;
2433 
2434 	/* Attach did not succeed, all resources have been
2435 	 * freed in error handler
2436 	 */
2437 	if (!sc)
2438 		return;
2439 
2440 	pdev = sc->pdev;
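	/* On Adrastea, vote the PCIe link down and mask/clear the host
	 * interrupt group before resetting the device.
	 */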
2441 	if (ADRASTEA_BU) {
2442 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2443 
2444 		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2445 		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
2446 			       HOST_GROUP0_MASK);
2447 	}
2448 
2449 #if defined(CPU_WARM_RESET_WAR)
2450 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2451 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2452 	 * verified for AR9888_REV1
2453 	 */
2454 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2455 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2456 		hif_pci_device_warm_reset(sc);
2457 	else
2458 		hif_pci_device_reset(sc);
2459 #else
2460 	hif_pci_device_reset(sc);
2461 #endif
2462 	mem = (void __iomem *)sc->mem;
2463 	if (mem) {
2464 		hif_dump_pipe_debug_count(scn);
2465 		if (scn->athdiag_procfs_inited) {
2466 			athdiag_procfs_remove();
2467 			scn->athdiag_procfs_inited = false;
2468 		}
2469 		sc->hif_pci_deinit(sc);
2470 		scn->mem = NULL;
2471 	}
2472 	HIF_INFO("%s: X", __func__);
2473 }
2474 
2475 #define OL_ATH_PCI_PM_CONTROL 0x44
2476 
2477 #ifdef FEATURE_RUNTIME_PM
2478 /**
2479  * hif_runtime_prevent_linkdown() - prevent or allow runtime suspend from occurring
2480  * @scn: hif context
2481  * @flag: prevent linkdown if true otherwise allow
2482  *
2483  * this api should only be called as part of bus prevent linkdown
2484  */
2485 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2486 {
2487 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2488 
2489 	if (flag)
2490 		qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
2491 	else
2492 		qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
2493 }
2494 #else
2495 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2496 {
2497 }
2498 #endif
2499 
2500 #if defined(CONFIG_PCI_MSM)
2501 /**
2502  * hif_pci_prevent_linkdown(): prevent or allow pcie linkdown
2503  * @flag: true prevents linkdown, false allows
2504  *
2505  * Calls into the platform driver to vote against taking down the
2506  * pcie link.
2507  *
2508  * Return: n/a
2509  */
2510 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2511 {
2512 	int errno;
2513 
2514 	HIF_INFO("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2515 	hif_runtime_prevent_linkdown(scn, flag);
2516 
2517 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2518 	if (errno)
2519 		HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
2520 			  __func__, errno);
2521 }
2522 #else
2523 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2524 {
2525 	HIF_INFO("wlan: %s pcie power collapse", (flag ? "disable" : "enable"));
2526 	hif_runtime_prevent_linkdown(scn, flag);
2527 }
2528 #endif
2529 
2530 /**
2531  * hif_pci_bus_suspend(): prepare hif for suspend
2532  *
2533  * Return: Errno
2534  */
2535 int hif_pci_bus_suspend(struct hif_softc *scn)
2536 {
2537 	return 0;
2538 }
2539 
2540 /**
2541  * __hif_check_link_status() - API to check if PCIe link is active/not
2542  * @scn: HIF Context
2543  *
2544  * API reads the PCIe config space to verify if PCIe link training is
2545  * successful or not.
2546  *
2547  * Return: Success/Failure
2548  */
2549 static int __hif_check_link_status(struct hif_softc *scn)
2550 {
2551 	uint16_t dev_id = 0;
2552 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2553 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2554 
2555 	if (!sc) {
2556 		HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
2557 		return -EINVAL;
2558 	}
2559 
2560 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2561 
2562 	if (dev_id == sc->devid)
2563 		return 0;
2564 
2565 	HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2566 	       __func__, dev_id);
2567 
2568 	scn->recovery = true;
2569 
2570 	if (cbk && cbk->set_recovery_in_progress)
2571 		cbk->set_recovery_in_progress(cbk->context, true);
2572 	else
2573 		HIF_ERROR("%s: Driver Global Recovery is not set", __func__);
2574 
2575 	pld_is_pci_link_down(sc->dev);
2576 	return -EACCES;
2577 }
2578 
2579 /**
2580  * hif_pci_bus_resume(): prepare hif for resume
2581  *
2582  * Return: Errno
2583  */
2584 int hif_pci_bus_resume(struct hif_softc *scn)
2585 {
2586 	return __hif_check_link_status(scn);
2587 }
2588 
2589 /**
2590  * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2591  * @scn: hif context
2592  *
2593  * Ensure that if we received the wakeup message before the irq
2594  * was disabled that the message is processed before suspending.
2595  *
2596  * Return: -EBUSY if we fail to flush the tasklets.
2597  */
2598 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2599 {
2600 	if (hif_drain_tasklets(scn) != 0)
2601 		return -EBUSY;
2602 
2603 	/* Stop the HIF Sleep Timer */
2604 	hif_cancel_deferred_target_sleep(scn);
2605 
2606 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2607 		qdf_atomic_set(&scn->link_suspended, 1);
2608 
2609 	return 0;
2610 }
2611 
2612 /**
2613  * hif_pci_bus_resume_noirq() - re-enable link access after noirq resume
2614  * @scn: hif context
2615  *
2616  * Clear the link_suspended flag once the noirq resume phase runs so that
2617  * register accesses over the PCIe link are permitted again.
2618  *
2619  * Return: 0 (success)
2620  */
2621 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2622 {
2623 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2624 		qdf_atomic_set(&scn->link_suspended, 0);
2625 
2626 	return 0;
2627 }
2628 
2629 #ifdef FEATURE_RUNTIME_PM
2630 /**
2631  * __hif_runtime_pm_set_state(): utility function
2632  * @state: state to set
2633  *
2634  * atomically sets the runtime pm state.
2635  */
2636 static void __hif_runtime_pm_set_state(struct hif_softc *scn,
2637 				enum hif_pm_runtime_state state)
2638 {
2639 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2640 
2641 	if (NULL == sc) {
2642 		HIF_ERROR("%s: HIF_CTX not initialized",
2643 		       __func__);
2644 		return;
2645 	}
2646 
2647 	qdf_atomic_set(&sc->pm_state, state);
2648 }
2649 
2650 /**
2651  * hif_runtime_pm_set_state_inprogress(): adjust runtime pm state
2652  *
2653  * Notify hif that a runtime pm operation has started
2654  */
2655 static void hif_runtime_pm_set_state_inprogress(struct hif_softc *scn)
2656 {
2657 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_INPROGRESS);
2658 }
2659 
2660 /**
2661  * hif_runtime_pm_set_state_on(): adjust runtime pm state
2662  *
2663  * Notify hif that the runtime pm state should be on
2664  */
2665 static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
2666 {
2667 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
2668 }
2669 
2670 /**
2671  * hif_runtime_pm_set_state_suspended():  adjust runtime pm state
2672  *
2673  * Notify hif that a runtime suspend attempt has been completed successfully
2674  */
2675 static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
2676 {
2677 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
2678 }
2679 
2680 /**
2681  * hif_log_runtime_suspend_success() - log a successful runtime suspend
2682  */
2683 static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
2684 {
2685 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2686 
2687 	if (sc == NULL)
2688 		return;
2689 
2690 	sc->pm_stats.suspended++;
2691 	sc->pm_stats.suspend_jiffies = jiffies;
2692 }
2693 
2694 /**
2695  * hif_log_runtime_suspend_failure() - log a failed runtime suspend
2696  *
2697  * record a failed runtime suspend attempt in the pm stats; the caller
2698  * marks last busy to prevent an immediate runtime suspend retry
2699  */
2700 static void hif_log_runtime_suspend_failure(void *hif_ctx)
2701 {
2702 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2703 
2704 	if (sc == NULL)
2705 		return;
2706 
2707 	sc->pm_stats.suspend_err++;
2708 }
2709 
2710 /**
2711  * hif_log_runtime_resume_success() - log a successful runtime resume
2712  *
2713  * record a successful runtime resume in the pm stats; the caller
2714  * marks last busy to prevent an immediate runtime suspend
2715  */
2716 static void hif_log_runtime_resume_success(void *hif_ctx)
2717 {
2718 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2719 
2720 	if (sc == NULL)
2721 		return;
2722 
2723 	sc->pm_stats.resumed++;
2724 }
2725 
2726 /**
2727  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
2728  *
2729  * Record the failure.
2730  * mark last busy to delay a retry.
2731  * adjust the runtime_pm state.
2732  */
2733 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
2734 {
2735 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2736 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2737 
2738 	hif_log_runtime_suspend_failure(hif_ctx);
2739 	if (hif_pci_sc != NULL)
2740 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2741 	hif_runtime_pm_set_state_on(scn);
2742 }
2743 
2744 /**
2745  * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
2746  *
2747  * Makes sure that the pci link will be taken down by the suspend operation.
2748  * If the hif layer is configured to leave the bus on, runtime suspend will
2749  * not save any power.
2750  *
2751  * Set the runtime suspend state to in progress.
2752  *
2753  * return -EINVAL if the bus won't go down.  otherwise return 0
2754  */
2755 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
2756 {
2757 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2758 
2759 	if (!hif_can_suspend_link(hif_ctx)) {
2760 		HIF_ERROR("Runtime PM not supported for link up suspend");
2761 		return -EINVAL;
2762 	}
2763 
2764 	hif_runtime_pm_set_state_inprogress(scn);
2765 	return 0;
2766 }
2767 
2768 /**
2769  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
2770  *
2771  * Record the success.
2772  * adjust the runtime_pm state
2773  */
2774 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
2775 {
2776 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2777 
2778 	hif_runtime_pm_set_state_suspended(scn);
2779 	hif_log_runtime_suspend_success(scn);
2780 }
2781 
2782 /**
2783  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
2784  *
2785  * update the runtime pm state.
2786  */
2787 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
2788 {
2789 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2790 
2791 	hif_runtime_pm_set_state_inprogress(scn);
2792 }
2793 
2794 /**
2795  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
2796  *
2797  * record the success.
2798  * adjust the runtime_pm state
2799  */
2800 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
2801 {
2802 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2803 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2804 
2805 	hif_log_runtime_resume_success(hif_ctx);
2806 	if (hif_pci_sc != NULL)
2807 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2808 	hif_runtime_pm_set_state_on(scn);
2809 }
2810 
2811 /**
2812  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
2813  *
2814  * Return: 0 for success and non-zero error code for failure
2815  */
2816 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
2817 {
2818 	int errno;
2819 
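	/* Mirror the system suspend sequence: bus suspend, disable apps irqs,
	 * then the noirq phase; unwind in reverse order on failure.
	 */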
2820 	errno = hif_bus_suspend(hif_ctx);
2821 	if (errno) {
2822 		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
2823 		return errno;
2824 	}
2825 
2826 	errno = hif_apps_irqs_disable(hif_ctx);
2827 	if (errno) {
2828 		HIF_ERROR("%s: failed disable irqs: %d", __func__, errno);
2829 		goto bus_resume;
2830 	}
2831 
2832 	errno = hif_bus_suspend_noirq(hif_ctx);
2833 	if (errno) {
2834 		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
2835 		goto irqs_enable;
2836 	}
2837 
2838 	/* link should always be down; skip enable wake irq */
2839 
2840 	return 0;
2841 
2842 irqs_enable:
2843 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
2844 
2845 bus_resume:
2846 	QDF_BUG(!hif_bus_resume(hif_ctx));
2847 
2848 	return errno;
2849 }
2850 
2851 /**
2852  * hif_fastpath_resume() - resume fastpath for runtime pm
2853  *
2854  * ensure that the fastpath write index register is up to date
2855  * since runtime pm may cause ce_send_fast to skip the register
2856  * write.
2857  *
2858  * fastpath only applicable to legacy copy engine
2859  */
2860 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
2861 {
2862 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2863 	struct CE_state *ce_state;
2864 
2865 	if (!scn)
2866 		return;
2867 
2868 	if (scn->fastpath_mode_on) {
2869 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2870 			return;
2871 
2872 		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
2873 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
2874 
2875 		/* war_ce_src_ring_write_idx_set */
2876 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
2877 				ce_state->src_ring->write_index);
2878 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
2879 		Q_TARGET_ACCESS_END(scn);
2880 	}
2881 }
2882 
2883 /**
2884  * hif_runtime_resume() - do the bus resume part of a runtime resume
2885  *
2886  *  Return: 0 for success and non-zero error code for failure
2887  */
2888 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
2889 {
2890 	/* link should always be down; skip disable wake irq */
2891 
2892 	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
2893 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
2894 	QDF_BUG(!hif_bus_resume(hif_ctx));
2895 	return 0;
2896 }
2897 #endif /* #ifdef FEATURE_RUNTIME_PM */
2898 
2899 #if CONFIG_PCIE_64BIT_MSI
2900 static void hif_free_msi_ctx(struct hif_softc *scn)
2901 {
2902 	struct hif_pci_softc *sc = scn->hif_sc;
2903 	struct hif_msi_info *info = &sc->msi_info;
2904 	struct device *dev = scn->qdf_dev->dev;
2905 
2906 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
2907 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
2908 	info->magic = NULL;
2909 	info->magic_dma = 0;
2910 }
2911 #else
2912 static void hif_free_msi_ctx(struct hif_softc *scn)
2913 {
2914 }
2915 #endif
2916 
2917 void hif_pci_disable_isr(struct hif_softc *scn)
2918 {
2919 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2920 
2921 	hif_exec_kill(&scn->osc);
2922 	hif_nointrs(scn);
2923 	hif_free_msi_ctx(scn);
2924 	/* Cancel the pending tasklet */
2925 	ce_tasklet_kill(scn);
2926 	tasklet_kill(&sc->intr_tq);
2927 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
2928 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
2929 }
2930 
2931 /* Function to reset SoC */
2932 void hif_pci_reset_soc(struct hif_softc *hif_sc)
2933 {
2934 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
2935 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
2936 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
2937 
2938 #if defined(CPU_WARM_RESET_WAR)
2939 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2940 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2941 	 * verified for AR9888_REV1
2942 	 */
2943 	if (tgt_info->target_version == AR9888_REV2_VERSION)
2944 		hif_pci_device_warm_reset(sc);
2945 	else
2946 		hif_pci_device_reset(sc);
2947 #else
2948 	hif_pci_device_reset(sc);
2949 #endif
2950 }
2951 
2952 #ifdef CONFIG_PCI_MSM
2953 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
2954 {
2955 	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
2956 	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
2957 }
2958 #else
2959 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}
2960 #endif
2961 
2962 /**
2963  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
2964  * @sc: HIF PCIe Context
2965  *
2966  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
2967  *
2968  * Return: Failure to caller
2969  */
2970 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
2971 {
2972 	uint16_t val = 0;
2973 	uint32_t bar = 0;
2974 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
2975 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2976 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
2977 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
2978 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2979 	A_target_id_t pci_addr = scn->mem;
2980 
2981 	HIF_ERROR("%s: keep_awake_count = %d",
2982 			__func__, hif_state->keep_awake_count);
2983 
2984 	pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
2985 
2986 	HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
2987 
2988 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
2989 
2990 	HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
2991 
2992 	pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
2993 
2994 	HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
2995 
2996 	pci_read_config_word(sc->pdev, PCI_STATUS, &val);
2997 
2998 	HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
2999 
3000 	pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
3001 
3002 	HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
3003 
3004 	HIF_ERROR("%s: SOC_WAKE_ADDR 0x%08x", __func__,
3005 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3006 						PCIE_SOC_WAKE_ADDRESS));
3007 
3008 	HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
3009 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3010 							RTC_STATE_ADDRESS));
3011 
3012 	HIF_ERROR("%s:error, wakeup target", __func__);
3013 	hif_msm_pcie_debug_info(sc);
3014 
3015 	if (!cfg->enable_self_recovery)
3016 		QDF_BUG(0);
3017 
3018 	scn->recovery = true;
3019 
3020 	if (cbk->set_recovery_in_progress)
3021 		cbk->set_recovery_in_progress(cbk->context, true);
3022 
3023 	pld_is_pci_link_down(sc->dev);
3024 	return -EACCES;
3025 }
3026 
3027 /*
3028  * For now, we use simple on-demand sleep/wake.
3029  * Some possible improvements:
3030  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
3031  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
3032  *   Careful, though, these functions may be used by
3033  *  interrupt handlers ("atomic")
3034  *  -Don't use host_reg_table for this code; instead use values directly
3035  *  -Use a separate timer to track activity and allow Target to sleep only
3036  *   if it hasn't done anything for a while; may even want to delay some
3037  *   processing for a short while in order to "batch" (e.g.) transmit
3038  *   requests with completion processing into "windows of up time".  Costs
3039  *   some performance, but improves power utilization.
3040  *  -On some platforms, it might be possible to eliminate explicit
3041  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
3042  *   recover from the failure by forcing the Target awake.
3043  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
3044  *   overhead in some cases. Perhaps this makes more sense when
3045  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
3046  *   disabled.
3047  *  -It is possible to compile this code out and simply force the Target
3048  *   to remain awake.  That would yield optimal performance at the cost of
3049  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
3050  *
3051  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
3052  */
3053 /**
3054  * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
3055  * @scn: hif_softc pointer.
3056  * @sleep_ok: allow the target to go to sleep when true
3057  * @wait_for_it: when waking, wait until the target is verified awake
3058  *
3059  * Track keep-awake references and force the target awake when required
3060  *
3061  * Return: int
3062  */
3063 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
3064 			      bool sleep_ok, bool wait_for_it)
3065 {
3066 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3067 	A_target_id_t pci_addr = scn->mem;
3068 	static int max_delay;
3069 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3070 	static int debug;
3071 	if (scn->recovery)
3072 		return -EACCES;
3073 
3074 	if (qdf_atomic_read(&scn->link_suspended)) {
3075 		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
3076 		debug = true;
3077 		QDF_ASSERT(0);
3078 		return -EACCES;
3079 	}
3080 
3081 	if (debug) {
3082 		wait_for_it = true;
3083 		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
3084 				__func__);
3085 		QDF_ASSERT(0);
3086 	}
3087 
3088 	if (sleep_ok) {
3089 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3090 		hif_state->keep_awake_count--;
3091 		if (hif_state->keep_awake_count == 0) {
3092 			/* Allow sleep */
3093 			hif_state->verified_awake = false;
3094 			hif_state->sleep_ticks = qdf_system_ticks();
3095 		}
3096 		if (hif_state->fake_sleep == false) {
3097 			/* Set the Fake Sleep */
3098 			hif_state->fake_sleep = true;
3099 
3100 			/* Start the Sleep Timer */
3101 			qdf_timer_stop(&hif_state->sleep_timer);
3102 			qdf_timer_start(&hif_state->sleep_timer,
3103 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
3104 		}
3105 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3106 	} else {
3107 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3108 
3109 		if (hif_state->fake_sleep) {
3110 			hif_state->verified_awake = true;
3111 		} else {
3112 			if (hif_state->keep_awake_count == 0) {
3113 				/* Force AWAKE */
3114 				hif_write32_mb(sc, pci_addr +
3115 					      PCIE_LOCAL_BASE_ADDRESS +
3116 					      PCIE_SOC_WAKE_ADDRESS,
3117 					      PCIE_SOC_WAKE_V_MASK);
3118 			}
3119 		}
3120 		hif_state->keep_awake_count++;
3121 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3122 
3123 		if (wait_for_it && !hif_state->verified_awake) {
3124 #define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
3125 			int tot_delay = 0;
3126 			int curr_delay = 5;
3127 
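			/* Poll until the target reports awake, increasing the
			 * per-iteration delay, and give up once the total
			 * delay exceeds PCIE_SLEEP_ADJUST_TIMEOUT.
			 */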
3128 			for (;;) {
3129 				if (hif_targ_is_awake(scn, pci_addr)) {
3130 					hif_state->verified_awake = true;
3131 					break;
3132 				}
3133 				if (!hif_pci_targ_is_present(scn, pci_addr))
3134 					break;
3135 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
3136 					return hif_log_soc_wakeup_timeout(sc);
3137 
3138 				OS_DELAY(curr_delay);
3139 				tot_delay += curr_delay;
3140 
3141 				if (curr_delay < 50)
3142 					curr_delay += 5;
3143 			}
3144 
3145 			/*
3146 			 * NB: If Target has to come out of Deep Sleep,
3147 			 * this may take a few msecs. Typically, though,
3148 			 * this delay should be <30us.
3149 			 */
3150 			if (tot_delay > max_delay)
3151 				max_delay = tot_delay;
3152 		}
3153 	}
3154 
3155 	if (debug && hif_state->verified_awake) {
3156 		debug = 0;
3157 		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
3158 			__func__,
3159 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3160 				PCIE_INTR_ENABLE_ADDRESS),
3161 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3162 				PCIE_INTR_CAUSE_ADDRESS),
3163 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3164 				CPU_INTR_ADDRESS),
3165 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3166 				PCIE_INTR_CLR_ADDRESS),
3167 			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
3168 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
3169 	}
3170 
3171 	return 0;
3172 }
3173 
3174 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
3175 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
3176 {
3177 	uint32_t value;
3178 	void *addr;
3179 
3180 	addr = scn->mem + offset;
3181 	value = hif_read32_mb(scn, addr);
3182 
3183 	{
3184 		unsigned long irq_flags;
3185 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3186 
3187 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3188 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3189 		pcie_access_log[idx].is_write = false;
3190 		pcie_access_log[idx].addr = addr;
3191 		pcie_access_log[idx].value = value;
3192 		pcie_access_log_seqnum++;
3193 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3194 	}
3195 
3196 	return value;
3197 }
3198 
3199 void
3200 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
3201 {
3202 	void *addr;
3203 
3204 	addr = scn->mem + (offset);
3205 	hif_write32_mb(scn, addr, value);
3206 
3207 	{
3208 		unsigned long irq_flags;
3209 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3210 
3211 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3212 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3213 		pcie_access_log[idx].is_write = true;
3214 		pcie_access_log[idx].addr = addr;
3215 		pcie_access_log[idx].value = value;
3216 		pcie_access_log_seqnum++;
3217 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3218 	}
3219 }
3220 
3221 /**
3222  * hif_target_dump_access_log() - dump access log
3223  *
3224  * dump access log
3225  *
3226  * Return: n/a
3227  */
3228 void hif_target_dump_access_log(void)
3229 {
3230 	int idx, len, start_idx, cur_idx;
3231 	unsigned long irq_flags;
3232 
3233 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3234 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
3235 		len = PCIE_ACCESS_LOG_NUM;
3236 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3237 	} else {
3238 		len = pcie_access_log_seqnum;
3239 		start_idx = 0;
3240 	}
3241 
3242 	for (idx = 0; idx < len; idx++) {
3243 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
3244 		HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
3245 		       __func__, idx,
3246 		       pcie_access_log[cur_idx].seqnum,
3247 		       pcie_access_log[cur_idx].is_write,
3248 		       pcie_access_log[cur_idx].addr,
3249 		       pcie_access_log[cur_idx].value);
3250 	}
3251 
3252 	pcie_access_log_seqnum = 0;
3253 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3254 }
3255 #endif
3256 
3257 #ifndef HIF_AHB
3258 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
3259 {
3260 	QDF_BUG(0);
3261 	return -EINVAL;
3262 }
3263 
3264 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
3265 {
3266 	QDF_BUG(0);
3267 	return -EINVAL;
3268 }
3269 #endif
3270 
3271 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
3272 {
3273 	struct ce_tasklet_entry *tasklet_entry = context;
3274 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
3275 }
3276 extern const char *ce_name[];
3277 
3278 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3279 {
3280 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3281 
3282 	return pci_scn->ce_msi_irq_num[ce_id];
3283 }
3284 
3285 /* hif_ce_srng_msi_irq_disable() - disable the irq for msi
3286  * @hif_sc: hif context
3287  * @ce_id: which ce to disable copy complete interrupts for
3288  *
3289  * since MSI interrupts are not level based, the system can function
3290  * without disabling these interrupts.  Interrupt mitigation can be
3291  * added here for better system performance.
3292  */
3293 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3294 {
3295 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3296 }
3297 
3298 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3299 {
3300 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3301 }
3302 
3303 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3304 {
3305 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3306 }
3307 
3308 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3309 {
3310 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3311 }
3312 
3313 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
3314 {
3315 	int ret;
3316 	int ce_id, irq;
3317 	uint32_t msi_data_start;
3318 	uint32_t msi_data_count;
3319 	uint32_t msi_irq_start;
3320 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3321 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3322 
3323 	/* do wake irq assignment */
3324 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
3325 					  &msi_data_count, &msi_data_start,
3326 					  &msi_irq_start);
3327 	if (ret)
3328 		return ret;
3329 
3330 	scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_irq_start);
3331 	ret = request_irq(scn->wake_irq, hif_wake_interrupt_handler, 0,
3332 			  "wlan_wake_irq", scn);
3333 	if (ret)
3334 		return ret;
3335 
3336 	/* do ce irq assignments */
3337 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3338 					    &msi_data_count, &msi_data_start,
3339 					    &msi_irq_start);
3340 	if (ret)
3341 		goto free_wake_irq;
3342 
3343 	if (ce_srng_based(scn)) {
3344 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
3345 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
3346 	} else {
3347 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
3348 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
3349 	}
3350 
3351 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
3352 
3353 	/* needs to match the ce_id -> irq data mapping
3354 	 * used in the srng parameter configuration
3355 	 */
3356 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3357 		unsigned int msi_data = (ce_id % msi_data_count) +
3358 			msi_irq_start;
3359 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3360 		HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)",
3361 			 __func__, ce_id, msi_data, irq,
3362 			 &ce_sc->tasklets[ce_id]);
3363 
3364 		/* implies the ce is also initialized */
3365 		if (!ce_sc->tasklets[ce_id].inited)
3366 			continue;
3367 
3368 		pci_sc->ce_msi_irq_num[ce_id] = irq;
3369 		ret = request_irq(irq, hif_ce_interrupt_handler,
3370 				  IRQF_SHARED,
3371 				  ce_name[ce_id],
3372 				  &ce_sc->tasklets[ce_id]);
3373 		if (ret)
3374 			goto free_irq;
3375 	}
3376 
3377 	return ret;
3378 
3379 free_irq:
3380 	/* the request_irq for the last ce_id failed so skip it. */
3381 	while (ce_id > 0 && ce_id < scn->ce_count) {
3382 		unsigned int msi_data;
3383 
3384 		ce_id--;
3385 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
3386 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3387 		free_irq(irq, &ce_sc->tasklets[ce_id]);
3388 	}
3389 
3390 free_wake_irq:
3391 	free_irq(scn->wake_irq, scn);
3392 	scn->wake_irq = 0;
3393 
3394 	return ret;
3395 }
3396 
3397 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3398 {
3399 	int i;
3400 
3401 	for (i = 0; i < hif_ext_group->numirq; i++)
3402 		disable_irq_nosync(hif_ext_group->os_irq[i]);
3403 }
3404 
3405 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3406 {
3407 	int i;
3408 
3409 	for (i = 0; i < hif_ext_group->numirq; i++)
3410 		enable_irq(hif_ext_group->os_irq[i]);
3411 }
3412 
3413 
3414 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3415 			      struct hif_exec_context *hif_ext_group)
3416 {
3417 	int ret = 0;
3418 	int irq = 0;
3419 	int j;
3420 
3421 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3422 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3423 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3424 
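	/* Request a shared IRQ for each line in the group and record the OS
	 * irq numbers so they can be freed in hif_pci_deconfigure_grp_irq().
	 */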
3425 	for (j = 0; j < hif_ext_group->numirq; j++) {
3426 		irq = hif_ext_group->irq[j];
3427 
3428 		HIF_DBG("%s: request_irq = %d for grp %d",
3429 			  __func__, irq, hif_ext_group->grp_id);
3430 		ret = request_irq(irq,
3431 				  hif_ext_group_interrupt_handler,
3432 				  IRQF_SHARED, "wlan_EXT_GRP",
3433 				  hif_ext_group);
3434 		if (ret) {
3435 			HIF_ERROR("%s: request_irq failed ret = %d",
3436 				  __func__, ret);
3437 			return -EFAULT;
3438 		}
3439 		hif_ext_group->os_irq[j] = irq;
3440 	}
3441 	hif_ext_group->irq_requested = true;
3442 	return 0;
3443 }
3444 
3445 /**
3446  * hif_configure_irq() - configure interrupt
3447  *
3448  * This function configures interrupt(s)
3449  *
3450  * @scn: hif context
3452  *
3453  * Return: 0 - for success
3454  */
3455 int hif_configure_irq(struct hif_softc *scn)
3456 {
3457 	int ret = 0;
3458 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3459 
3460 	HIF_TRACE("%s: E", __func__);
3461 
3462 	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
3463 		scn->request_irq_done = false;
3464 		return 0;
3465 	}
3466 
3467 	hif_init_reschedule_tasklet_work(sc);
3468 
3469 	ret = hif_ce_msi_configure_irq(scn);
3470 	if (ret == 0) {
3471 	if (ret == 0)
3472 		goto end;
3474 	switch (scn->target_info.target_type) {
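	/* MSI configuration failed or is unavailable; fall back to AHB or
	 * legacy PCI line interrupts based on the target type.
	 */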
3475 	case TARGET_TYPE_IPQ4019:
3476 		ret = hif_ahb_configure_legacy_irq(sc);
3477 		break;
3478 	case TARGET_TYPE_QCA8074:
3479 	case TARGET_TYPE_QCA8074V2:
3480 	case TARGET_TYPE_QCA6018:
3481 		ret = hif_ahb_configure_irq(sc);
3482 		break;
3483 	default:
3484 		ret = hif_pci_configure_legacy_irq(sc);
3485 		break;
3486 	}
3487 	if (ret < 0) {
3488 		HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
3489 			__func__, ret);
3490 		return ret;
3491 	}
3492 end:
3493 	scn->request_irq_done = true;
3494 	return 0;
3495 }
3496 
3497 /**
3498  * hif_trigger_timer_irq() : Triggers interrupt on LF_Timer 0
3499  * @scn: hif control structure
3500  *
3501  * Sets the IRQ bit in the LF Timer Status Address to wake Peregrine/Swift
3502  * when it is stuck in a polling loop in pcie_address_config in FW
3503  *
3504  * Return: none
3505  */
3506 static void hif_trigger_timer_irq(struct hif_softc *scn)
3507 {
3508 	int tmp;
3509 	/* Trigger IRQ on Peregrine/Swift by setting
3510 	 * IRQ Bit of LF_TIMER 0
3511 	 */
3512 	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3513 						SOC_LF_TIMER_STATUS0_ADDRESS));
3514 	/* Set Raw IRQ Bit */
3515 	tmp |= 1;
3516 	/* SOC_LF_TIMER_STATUS0 */
3517 	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3518 		       SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
3519 }
3520 
3521 /**
3522  * hif_target_sync() : ensure the target is ready
3523  * @scn: hif control structure
3524  *
3525  * Informs fw that we plan to use legacy interrupts so that
3526  * it can begin booting. Ensures that the fw finishes booting
3527  * before continuing. Should be called before trying to write
3528  * to the targets other registers for the first time.
3529  *
3530  * Return: none
3531  */
3532 static void hif_target_sync(struct hif_softc *scn)
3533 {
3534 	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3535 			    PCIE_INTR_ENABLE_ADDRESS),
3536 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3537 	/* read to flush pcie write */
3538 	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3539 			PCIE_INTR_ENABLE_ADDRESS));
3540 
3541 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3542 			PCIE_SOC_WAKE_ADDRESS,
3543 			PCIE_SOC_WAKE_V_MASK);
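	/* Busy-wait until the target reports awake. */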
3544 	while (!hif_targ_is_awake(scn, scn->mem))
3545 		;
3546 
3547 	if (HAS_FW_INDICATOR) {
3548 		int wait_limit = 500;
3549 		int fw_ind = 0;
3550 		int retry_count = 0;
3551 		uint32_t target_type = scn->target_info.target_type;
3552 fw_retry:
3553 		HIF_TRACE("%s: Loop checking FW signal", __func__);
3554 		while (1) {
3555 			fw_ind = hif_read32_mb(scn, scn->mem +
3556 					FW_INDICATOR_ADDRESS);
3557 			if (fw_ind & FW_IND_INITIALIZED)
3558 				break;
3559 			if (wait_limit-- < 0)
3560 				break;
3561 			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3562 			    PCIE_INTR_ENABLE_ADDRESS),
3563 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3564 			    /* read to flush pcie write */
3565 			(void)hif_read32_mb(scn, scn->mem +
3566 			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
3567 
3568 			qdf_mdelay(10);
3569 		}
3570 		if (wait_limit < 0) {
3571 			if (target_type == TARGET_TYPE_AR9888 &&
3572 			    retry_count++ < 2) {
3573 				hif_trigger_timer_irq(scn);
3574 				wait_limit = 500;
3575 				goto fw_retry;
3576 			}
3577 			HIF_TRACE("%s: FW signal timed out",
3578 					__func__);
3579 			qdf_assert_always(0);
3580 		} else {
3581 			HIF_TRACE("%s: Got FW signal, retries = %x",
3582 					__func__, 500-wait_limit);
3583 		}
3584 	}
3585 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3586 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3587 }
3588 
3589 static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
3590 				     struct device *dev)
3591 {
3592 	struct pld_soc_info info;
3593 
3594 	pld_get_soc_info(dev, &info);
3595 	sc->mem = info.v_addr;
3596 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3597 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3598 }
3599 
3600 static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
3601 				       struct device *dev)
3602 {}
3603 
3604 static bool hif_is_pld_based_target(int device_id)
3605 {
3606 	switch (device_id) {
3607 	case QCA6290_DEVICE_ID:
3608 	case QCA6290_EMULATION_DEVICE_ID:
3609 #ifdef QCA_WIFI_QCA6390
3610 	case QCA6390_DEVICE_ID:
3611 #endif
3612 	case AR6320_DEVICE_ID:
3613 	case QCN7605_DEVICE_ID:
3614 		return true;
3615 	}
3616 	return false;
3617 }
3618 
3619 static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
3620 					   int device_id)
3621 {
3622 	if (hif_is_pld_based_target(device_id)) {
3623 		sc->hif_enable_pci = hif_enable_pci_pld;
3624 		sc->hif_pci_deinit = hif_pci_deinit_pld;
3625 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
3626 	} else {
3627 		sc->hif_enable_pci = hif_enable_pci_nopld;
3628 		sc->hif_pci_deinit = hif_pci_deinit_nopld;
3629 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
3630 	}
3631 }
3632 
3633 #ifdef HIF_REG_WINDOW_SUPPORT
3634 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3635 					       u32 target_type)
3636 {
3637 	switch (target_type) {
3638 	case TARGET_TYPE_QCN7605:
3639 		sc->use_register_windowing = true;
3640 		qdf_spinlock_create(&sc->register_access_lock);
3641 		sc->register_window = 0;
3642 		break;
3643 	default:
3644 		sc->use_register_windowing = false;
3645 	}
3646 }
3647 #else
3648 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3649 					       u32 target_type)
3650 {
3651 	sc->use_register_windowing = false;
3652 }
3653 #endif
3654 
3655 /**
3656  * hif_pci_enable_bus(): enable bus
3657  *
3658  * This function enables the PCI bus for the HIF device
3659  *
3660  * @ol_sc: soft_sc struct
3661  * @dev: device pointer
3662  * @bdev: bus dev pointer
3663  * @bid: bus id pointer
3664  * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
3665  * Return: QDF_STATUS
3666  */
3667 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3668 			  struct device *dev, void *bdev,
3669 			  const struct hif_bus_id *bid,
3670 			  enum hif_enable_type type)
3671 {
3672 	int ret = 0;
3673 	uint32_t hif_type, target_type;
3674 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3675 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3676 	uint16_t revision_id = 0;
3677 	int probe_again = 0;
3678 	struct pci_dev *pdev = bdev;
3679 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3680 	struct hif_target_info *tgt_info;
3681 
3682 	if (!ol_sc) {
3683 		HIF_ERROR("%s: hif_ctx is NULL", __func__);
3684 		return QDF_STATUS_E_NOMEM;
3685 	}
3686 
3687 	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
3688 		  __func__, hif_get_conparam(ol_sc), id->device);
3689 
3690 	sc->pdev = pdev;
3691 	sc->dev = &pdev->dev;
3692 	sc->devid = id->device;
3693 	sc->cacheline_sz = dma_get_cache_alignment();
3694 	tgt_info = hif_get_target_info_handle(hif_hdl);
3695 	hif_pci_init_deinit_ops_attach(sc, id->device);
3696 	sc->hif_pci_get_soc_info(sc, dev);
3697 again:
3698 	ret = sc->hif_enable_pci(sc, pdev, id);
3699 	if (ret < 0) {
3700 		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
3701 		       __func__, ret);
3702 		goto err_enable_pci;
3703 	}
3704 	HIF_TRACE("%s: hif_enable_pci done", __func__);
3705 
3706 	/* Temporary FIX: disable ASPM on peregrine.
3707 	 * Will be removed after the OTP is programmed
3708 	 */
3709 	hif_disable_power_gating(hif_hdl);
3710 
3711 	device_disable_async_suspend(&pdev->dev);
3712 	pci_read_config_word(pdev, 0x08, &revision_id);
3713 
3714 	ret = hif_get_device_type(id->device, revision_id,
3715 						&hif_type, &target_type);
3716 	if (ret < 0) {
3717 		HIF_ERROR("%s: invalid device id/revision_id", __func__);
3718 		goto err_tgtstate;
3719 	}
3720 	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
3721 		  __func__, hif_type, target_type);
3722 
3723 	hif_register_tbl_attach(ol_sc, hif_type);
3724 	hif_target_register_tbl_attach(ol_sc, target_type);
3725 
3726 	hif_pci_init_reg_windowing_support(sc, target_type);
3727 
3728 	tgt_info->target_type = target_type;
3729 
3730 	if (ce_srng_based(ol_sc)) {
3731 		HIF_TRACE("%s: Skip tgt wakeup for srng devices", __func__);
3732 	} else {
3733 		ret = hif_pci_probe_tgt_wakeup(sc);
3734 		if (ret < 0) {
3735 			HIF_ERROR("%s: ERROR - hif_pci_probe_tgt_wakeup error = %d",
3736 					__func__, ret);
3737 			if (ret == -EAGAIN)
3738 				probe_again++;
3739 			goto err_tgtstate;
3740 		}
3741 		HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
3742 	}
3743 
3744 	if (!ol_sc->mem_pa) {
3745 		HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
3746 		ret = -EIO;
3747 		goto err_tgtstate;
3748 	}
3749 
3750 	if (!ce_srng_based(ol_sc)) {
3751 		hif_target_sync(ol_sc);
3752 
3753 		if (ADRASTEA_BU)
3754 			hif_vote_link_up(hif_hdl);
3755 	}
3756 
3757 	return 0;
3758 
3759 err_tgtstate:
3760 	hif_disable_pci(sc);
3761 	sc->pci_enabled = false;
3762 	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
3763 	return QDF_STATUS_E_ABORTED;
3764 
3765 err_enable_pci:
3766 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3767 		int delay_time;
3768 
3769 		HIF_INFO("%s: pci reprobe", __func__);
3770 		/* at least 100 ms: 100, 100, 100, 160, 250, ... */
3771 		delay_time = max(100, 10 * (probe_again * probe_again));
3772 		qdf_mdelay(delay_time);
3773 		goto again;
3774 	}
3775 	return ret;
3776 }
3777 
3778 /**
3779  * hif_pci_irq_enable() - ce_irq_enable
3780  * @scn: hif_softc
3781  * @ce_id: ce_id
3782  *
3783  * Return: void
3784  */
3785 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3786 {
3787 	uint32_t tmp = 1 << ce_id;
3788 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3789 
3790 	qdf_spin_lock_irqsave(&sc->irq_lock);
3791 	scn->ce_irq_summary &= ~tmp;
3792 	if (scn->ce_irq_summary == 0) {
3793 		/* Enable Legacy PCI line interrupts */
3794 		if (LEGACY_INTERRUPTS(sc) &&
3795 			(scn->target_status != TARGET_STATUS_RESET) &&
3796 			(!qdf_atomic_read(&scn->link_suspended))) {
3797 
3798 			hif_write32_mb(scn, scn->mem +
3799 				(SOC_CORE_BASE_ADDRESS |
3800 				PCIE_INTR_ENABLE_ADDRESS),
3801 				HOST_GROUP0_MASK);
3802 
3803 			hif_read32_mb(scn, scn->mem +
3804 					(SOC_CORE_BASE_ADDRESS |
3805 					PCIE_INTR_ENABLE_ADDRESS));
3806 		}
3807 	}
3808 	if (scn->hif_init_done == true)
3809 		Q_TARGET_ACCESS_END(scn);
3810 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
3811 
3812 	/* check for missed firmware crash */
3813 	hif_fw_interrupt_handler(0, scn);
3814 }
3815 
3816 /**
3817  * hif_pci_irq_disable() - ce_irq_disable
3818  * @scn: hif_softc
3819  * @ce_id: ce_id
3820  *
3821  * only applicable to legacy copy engine...
3822  *
3823  * Return: void
3824  */
3825 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3826 {
3827 	/* For Rome only need to wake up target */
3828 	/* target access is maintained until interrupts are re-enabled */
3829 	Q_TARGET_ACCESS_BEGIN(scn);
3830 }
3831 
3832 #ifdef FEATURE_RUNTIME_PM
3833 
3834 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
3835 {
3836 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3837 
3838 	if (NULL == sc)
3839 		return;
3840 
3841 	sc->pm_stats.runtime_get++;
3842 	pm_runtime_get_noresume(sc->dev);
3843 }
3844 
3845 /**
3846  * hif_pm_runtime_get() - do a get operation on the device
3847  *
3848  * A get operation will prevent a runtime suspend until a
3849  * corresponding put is done.  This API should be used when sending
3850  * data.
3851  *
3852  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
3853  * THIS API WILL ONLY REQUEST A RESUME AND WILL NOT DO A GET!!!
3854  *
3855  * Return: success if the bus is up and a get has been issued,
3856  *   otherwise an error code.
3857  */
3858 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
3859 {
3860 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3861 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3862 	int ret;
3863 	int pm_state;
3864 
3865 	if (NULL == scn) {
3866 		HIF_ERROR("%s: Could not do runtime get, scn is null",
3867 				__func__);
3868 		return -EFAULT;
3869 	}
3870 
3871 	pm_state = qdf_atomic_read(&sc->pm_state);
3872 
3873 	if (pm_state  == HIF_PM_RUNTIME_STATE_ON ||
3874 			pm_state == HIF_PM_RUNTIME_STATE_NONE) {
3875 		sc->pm_stats.runtime_get++;
3876 		ret = __hif_pm_runtime_get(sc->dev);
3877 
3878 		/* Get can return 1 if the device is already active, just return
3879 		 * success in that case
3880 		 */
3881 		if (ret > 0)
3882 			ret = 0;
3883 
3884 		if (ret)
3885 			hif_pm_runtime_put(hif_ctx);
3886 
3887 		if (ret && ret != -EINPROGRESS) {
3888 			sc->pm_stats.runtime_get_err++;
3889 			HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d",
3890 				__func__, qdf_atomic_read(&sc->pm_state), ret);
3891 		}
3892 
3893 		return ret;
3894 	}
3895 
3896 	sc->pm_stats.request_resume++;
3897 	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
3898 	ret = hif_pm_request_resume(sc->dev);
3899 
3900 	return -EAGAIN;
3901 }
3902 
3903 /**
3904  * hif_pm_runtime_put() - do a put operation on the device
3905  *
3906  * A put operation will allow a runtime suspend after a corresponding
3907  * get was done.  This API should be used when sending data.
3908  *
3909  * This API will return a failure if runtime PM is stopped.
3910  * This API will return a failure if it would decrement the usage count below 0.
3911  *
3912  * Return: 0 if the put is performed, otherwise a negative error code
3913  */
3914 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
3915 {
3916 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3917 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3918 	int pm_state, usage_count;
3919 	char *error = NULL;
3920 
3921 	if (NULL == scn) {
3922 		HIF_ERROR("%s: Could not do runtime put, scn is null",
3923 				__func__);
3924 		return -EFAULT;
3925 	}
3926 	usage_count = atomic_read(&sc->dev->power.usage_count);
3927 
3928 	if (usage_count == 1) {
3929 		pm_state = qdf_atomic_read(&sc->pm_state);
3930 
3931 		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
3932 			error = "Ignoring unexpected put when runtime pm is disabled";
3933 
3934 	} else if (usage_count == 0) {
3935 		error = "PUT Without a Get Operation";
3936 	}
3937 
3938 	if (error) {
3939 		hif_pci_runtime_pm_warn(sc, error);
3940 		return -EINVAL;
3941 	}
3942 
3943 	sc->pm_stats.runtime_put++;
3944 
3945 	hif_pm_runtime_mark_last_busy(sc->dev);
3946 	hif_pm_runtime_put_auto(sc->dev);
3947 
3948 	return 0;
3949 }
3950 
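/*
 * Illustrative caller sketch (hypothetical, not part of this driver): a
 * transmit path is expected to bracket the transfer with a get/put pair so
 * that runtime suspend is held off while the transfer is in flight.  The
 * names my_tx_example() and send_frame() are placeholders.  On a non-zero
 * return from hif_pm_runtime_get() (e.g. -EAGAIN while the bus is suspended
 * or resuming) only a resume has been requested, so the caller may queue
 * the frame and retry later.
 *
 *	static int my_tx_example(struct hif_opaque_softc *hif_ctx, void *pkt)
 *	{
 *		int ret = hif_pm_runtime_get(hif_ctx);
 *
 *		if (ret)
 *			return ret;
 *
 *		ret = send_frame(hif_ctx, pkt);
 *		hif_pm_runtime_put(hif_ctx);
 *		return ret;
 *	}
 */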
3951 
3952 /**
3953  * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
3954  *                                      reason
3955  * @hif_sc: pci context
3956  * @lock: runtime_pm lock being acquired
3957  *
3958  * Return: 0 if successful.
3959  */
3960 static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
3961 		*hif_sc, struct hif_pm_runtime_lock *lock)
3962 {
3963 	int ret = 0;
3964 
3965 	/*
3966 	 * We shouldn't set context->timeout to zero here while the context
3967 	 * is active, since the timeout APIs can be called back to back for
3968 	 * the same context,
3969 	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
3970 	 * hif_pm_runtime_prevent_suspend() sets context->timeout to zero to
3971 	 * ensure the timeout variant is no longer active, so this context's
3972 	 * list entry is deleted during allow suspend.
3973 	 */
3974 	if (lock->active)
3975 		return 0;
3976 
3977 	ret = __hif_pm_runtime_get(hif_sc->dev);
3978 
3979 	/*
3980 	 * ret can be -EINPROGRESS if the runtime PM status is RPM_RESUMING
3981 	 * or RPM_SUSPENDING. Any other negative value is an error.
3982 	 * We shouldn't do a runtime_put here: allow suspend is called later
3983 	 * with this context and decrements the usage count there, so until
3984 	 * then suspend stays prevented.
3985 	 */
3986 
3987 	if (ret < 0 && ret != -EINPROGRESS) {
3988 		hif_sc->pm_stats.runtime_get_err++;
3989 		hif_pci_runtime_pm_warn(hif_sc,
3990 				"Prevent Suspend Runtime PM Error");
3991 	}
3992 
3993 	hif_sc->prevent_suspend_cnt++;
3994 
3995 	lock->active = true;
3996 
3997 	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);
3998 
3999 	hif_sc->pm_stats.prevent_suspend++;
4000 
4001 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4002 		hif_pm_runtime_state_to_string(
4003 			qdf_atomic_read(&hif_sc->pm_state)),
4004 					ret);
4005 
4006 	return ret;
4007 }
4008 
4009 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
4010 		struct hif_pm_runtime_lock *lock)
4011 {
4012 	int ret = 0;
4013 	int usage_count;
4014 
4015 	if (hif_sc->prevent_suspend_cnt == 0)
4016 		return ret;
4017 
4018 	if (!lock->active)
4019 		return ret;
4020 
4021 	usage_count = atomic_read(&hif_sc->dev->power.usage_count);
4022 
4023 	/*
4024 	 * During driver unload, the platform driver increments the usage
4025 	 * count to prevent any runtime suspend from being invoked.
4026 	 * So during driver load, in the HIF_PM_RUNTIME_STATE_NONE state,
4027 	 * the usage_count should be one. Ideally this shouldn't happen, as
4028 	 * context->active should be set for allow suspend to be called.
4029 	 * Handle this case here to prevent any failures.
4030 	 */
4031 	if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
4032 				&& usage_count == 1) || usage_count == 0) {
4033 		hif_pci_runtime_pm_warn(hif_sc,
4034 				"Allow without a prevent suspend");
4035 		return -EINVAL;
4036 	}
4037 
4038 	list_del(&lock->list);
4039 
4040 	hif_sc->prevent_suspend_cnt--;
4041 
4042 	lock->active = false;
4043 	lock->timeout = 0;
4044 
4045 	hif_pm_runtime_mark_last_busy(hif_sc->dev);
4046 	ret = hif_pm_runtime_put_auto(hif_sc->dev);
4047 
4048 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4049 		hif_pm_runtime_state_to_string(
4050 			qdf_atomic_read(&hif_sc->pm_state)),
4051 					ret);
4052 
4053 	hif_sc->pm_stats.allow_suspend++;
4054 	return ret;
4055 }
4056 
4057 /**
4058  * hif_pm_runtime_lock_timeout_fn() - handle expiry of timed runtime locks
4059  * @data: callback data, the pci context
4060  *
4061  * If runtime locks were acquired with a timeout, this function releases
4062  * those locks once the timer expires.
4065  */
4066 static void hif_pm_runtime_lock_timeout_fn(unsigned long data)
4067 {
4068 	struct hif_pci_softc *hif_sc = (struct hif_pci_softc *)data;
4069 	unsigned long timer_expires;
4070 	struct hif_pm_runtime_lock *context, *temp;
4071 
4072 	spin_lock_bh(&hif_sc->runtime_lock);
4073 
4074 	timer_expires = hif_sc->runtime_timer_expires;
4075 
4076 	/* Make sure we are not called too early, this should take care of
4077 	 * following case
4078 	 *
4079 	 * CPU0                         CPU1 (timeout function)
4080 	 * ----                         ----------------------
4081 	 * spin_lock_irq
4082 	 *                              timeout function called
4083 	 *
4084 	 * mod_timer()
4085 	 *
4086 	 * spin_unlock_irq
4087 	 *                              spin_lock_irq
4088 	 */
4089 	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
4090 		hif_sc->runtime_timer_expires = 0;
4091 		list_for_each_entry_safe(context, temp,
4092 				&hif_sc->prevent_suspend_list, list) {
4093 			if (context->timeout) {
4094 				__hif_pm_runtime_allow_suspend(hif_sc, context);
4095 				hif_sc->pm_stats.allow_suspend_timeout++;
4096 			}
4097 		}
4098 	}
4099 
4100 	spin_unlock_bh(&hif_sc->runtime_lock);
4101 }
4102 
4103 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
4104 		struct hif_pm_runtime_lock *data)
4105 {
4106 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4107 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4108 	struct hif_pm_runtime_lock *context = data;
4109 
4110 	if (!sc->hif_config.enable_runtime_pm)
4111 		return 0;
4112 
4113 	if (!context)
4114 		return -EINVAL;
4115 
4116 	if (in_irq())
4117 		WARN_ON(1);
4118 
4119 	spin_lock_bh(&hif_sc->runtime_lock);
4120 	context->timeout = 0;
4121 	__hif_pm_runtime_prevent_suspend(hif_sc, context);
4122 	spin_unlock_bh(&hif_sc->runtime_lock);
4123 
4124 	return 0;
4125 }
4126 
4127 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
4128 				struct hif_pm_runtime_lock *data)
4129 {
4130 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4131 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4132 	struct hif_pm_runtime_lock *context = data;
4133 
4134 	if (!sc->hif_config.enable_runtime_pm)
4135 		return 0;
4136 
4137 	if (!context)
4138 		return -EINVAL;
4139 
4140 	if (in_irq())
4141 		WARN_ON(1);
4142 
4143 	spin_lock_bh(&hif_sc->runtime_lock);
4144 
4145 	__hif_pm_runtime_allow_suspend(hif_sc, context);
4146 
4147 	/* The list can also be empty when there was only one context in it
4148 	 * and the allow suspend arrived before the timer expired, in which
4149 	 * case that context was already deleted from the list above.
4150 	 * When the list is empty, prevent_suspend_cnt will be zero.
4152 	 */
4153 	if (hif_sc->prevent_suspend_cnt == 0 &&
4154 			hif_sc->runtime_timer_expires > 0) {
4155 		del_timer(&hif_sc->runtime_timer);
4156 		hif_sc->runtime_timer_expires = 0;
4157 	}
4158 
4159 	spin_unlock_bh(&hif_sc->runtime_lock);
4160 
4161 	return 0;
4162 }
4163 
4164 /**
4165  * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
4166  * @ol_sc: HIF context
4167  * @lock: which lock is being acquired
4168  * @delay: Timeout in milliseconds
4169  *
4170  * Prevent runtime suspend, with a timeout after which runtime suspend is
4171  * allowed again. This API uses a single timer to re-allow suspend; the
4172  * timer is re-armed if the timeout is changed before it fires.
4173  * If the timeout is less than the autosuspend delay, mark_last_busy is
4174  * used instead of starting the timer.
4175  *
4176  * It is wise to avoid this API and correct the design if possible.
4177  *
4178  * Return: 0 on success and negative error code on failure
4179  */
4180 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
4181 		struct hif_pm_runtime_lock *lock, unsigned int delay)
4182 {
4183 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4184 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);
4185 
4186 	int ret = 0;
4187 	unsigned long expires;
4188 	struct hif_pm_runtime_lock *context = lock;
4189 
4190 	if (hif_is_load_or_unload_in_progress(sc)) {
4191 		HIF_ERROR("%s: Load/unload in progress, ignore!",
4192 				__func__);
4193 		return -EINVAL;
4194 	}
4195 
4196 	if (hif_is_recovery_in_progress(sc)) {
4197 		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
4198 		return -EINVAL;
4199 	}
4200 
4201 	if (!sc->hif_config.enable_runtime_pm)
4202 		return 0;
4203 
4204 	if (!context)
4205 		return -EINVAL;
4206 
4207 	if (in_irq())
4208 		WARN_ON(1);
4209 
4210 	/*
4211 	 * Don't use internal timer if the timeout is less than auto suspend
4212 	 * delay.
4213 	 */
4214 	if (delay <= hif_sc->dev->power.autosuspend_delay) {
4215 		hif_pm_request_resume(hif_sc->dev);
4216 		hif_pm_runtime_mark_last_busy(hif_sc->dev);
4217 		return ret;
4218 	}
4219 
4220 	expires = jiffies + msecs_to_jiffies(delay);
4221 	expires += !expires;
4222 
4223 	spin_lock_bh(&hif_sc->runtime_lock);
4224 
4225 	context->timeout = delay;
4226 	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
4227 	hif_sc->pm_stats.prevent_suspend_timeout++;
4228 
4229 	/* Modify the timer only if the new timeout is later than the
4230 	 * currently configured timeout
4231 	 */
4232 	if (time_after(expires, hif_sc->runtime_timer_expires)) {
4233 		mod_timer(&hif_sc->runtime_timer, expires);
4234 		hif_sc->runtime_timer_expires = expires;
4235 	}
4236 
4237 	spin_unlock_bh(&hif_sc->runtime_lock);
4238 
4239 	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
4240 		hif_pm_runtime_state_to_string(
4241 			qdf_atomic_read(&hif_sc->pm_state)),
4242 					delay, ret);
4243 
4244 	return ret;
4245 }
4246 
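/*
 * Illustrative sketch (hypothetical caller, not part of this driver):
 * holding off runtime suspend for a bounded time around an operation whose
 * completion event may be the only thing that would otherwise keep the bus
 * awake.  The roc_lock pointer, start_remain_on_channel() and the 200 ms
 * value are placeholders; the internal timer re-allows suspend if the
 * matching hif_pm_runtime_allow_suspend() call never arrives.
 *
 *	hif_pm_runtime_prevent_suspend_timeout(hif_ctx, roc_lock, 200);
 *	start_remain_on_channel();
 *	...
 *	hif_pm_runtime_allow_suspend(hif_ctx, roc_lock);
 */
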
4247 /**
4248  * hif_runtime_lock_init() - API to initialize a Runtime PM context
4249  * @lock: runtime PM lock handle to initialize
4250  * @name: Context name
4251  *
4252  * This API allocates a Runtime PM context and stores it in @lock.
4253  *
4254  * Return: 0 on success, negative error code on failure
4255  */
4256 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
4257 {
4258 	struct hif_pm_runtime_lock *context;
4259 
4260 	HIF_INFO("Initializing Runtime PM wakelock %s", name);
4261 
4262 	context = qdf_mem_malloc(sizeof(*context));
4263 	if (!context) {
4264 		HIF_ERROR("%s: No memory for Runtime PM wakelock context",
4265 			  __func__);
4266 		return -ENOMEM;
4267 	}
4268 
4269 	context->name = name ? name : "Default";
4270 	lock->lock = context;
4271 
4272 	return 0;
4273 }
4274 
4275 /**
4276  * hif_runtime_lock_deinit() - This API frees the runtime PM context
4277  * @data: Runtime PM context to free
4278  *
4279  * Return: void
4280  */
4281 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
4282 			     struct hif_pm_runtime_lock *data)
4283 {
4284 	struct hif_pm_runtime_lock *context = data;
4285 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4286 
4287 	if (!context) {
4288 		HIF_ERROR("Runtime PM wakelock context is NULL");
4289 		return;
4290 	}
4291 
4292 	HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);
4293 
4294 	/*
4295 	 * Ensure to delete the context list entry and reduce the usage count
4296 	 * before freeing the context if context is active.
4297 	 */
4298 	if (sc) {
4299 		spin_lock_bh(&sc->runtime_lock);
4300 		__hif_pm_runtime_allow_suspend(sc, context);
4301 		spin_unlock_bh(&sc->runtime_lock);
4302 	}
4303 
4304 	qdf_mem_free(context);
4305 }
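
/*
 * Illustrative lifecycle sketch (hypothetical caller, not part of this
 * driver), assuming the caller uses the hif_pm_runtime_lock pointer that
 * hif_runtime_lock_init() stores in the qdf_runtime_lock_t ->lock member;
 * "scan_lock" and the "scan" name are placeholders.
 *
 *	qdf_runtime_lock_t scan_lock;
 *
 *	if (hif_runtime_lock_init(&scan_lock, "scan") == 0) {
 *		hif_pm_runtime_prevent_suspend(hif_ctx, scan_lock.lock);
 *		...issue the scan and wait for completion...
 *		hif_pm_runtime_allow_suspend(hif_ctx, scan_lock.lock);
 *		hif_runtime_lock_deinit(hif_ctx, scan_lock.lock);
 *	}
 */
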
4306 #endif /* FEATURE_RUNTIME_PM */
4307 
4308 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
4309 {
4310 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4311 
4312 	/* legacy case only has one irq */
4313 	return pci_scn->irq;
4314 }
4315 
4316 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
4317 {
4318 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
4319 	struct hif_target_info *tgt_info;
4320 
4321 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
4322 
4323 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
4324 	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
4325 	    tgt_info->target_type == TARGET_TYPE_QCA8074) {
4326 		/*
4327 		 * Need to consider offset's memtype for QCA6290/QCA8074,
4328 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
4329 		 * well initialized/defined.
4330 		 */
4331 		return 0;
4332 	}
4333 
4334 	if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE)
4335 		 || (offset + sizeof(unsigned int) <= sc->mem_len)) {
4336 		return 0;
4337 	}
4338 
4339 	HIF_TRACE("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)\n",
4340 		  offset, (uint32_t)(offset + sizeof(unsigned int)),
4341 		  sc->mem_len);
4342 
4343 	return -EINVAL;
4344 }
4345 
4346 /**
4347  * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
4348  * @scn: hif context
4349  *
4350  * Return: true if soc needs driver bmi otherwise false
4351  */
4352 bool hif_pci_needs_bmi(struct hif_softc *scn)
4353 {
4354 	return !ce_srng_based(scn);
4355 }
4356