xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/pcie/if_pci.c (revision 1b9674e21e24478fba4530f5ae7396b9555e9c6a)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <linux/pci.h>
20 #include <linux/slab.h>
21 #include <linux/interrupt.h>
22 #include <linux/if_arp.h>
23 #ifdef CONFIG_PCI_MSM
24 #include <linux/msm_pcie.h>
25 #endif
26 #include "hif_io32.h"
27 #include "if_pci.h"
28 #include "hif.h"
29 #include "target_type.h"
30 #include "hif_main.h"
31 #include "ce_main.h"
32 #include "ce_api.h"
33 #include "ce_internal.h"
34 #include "ce_reg.h"
35 #include "ce_bmi.h"
36 #include "regtable.h"
37 #include "hif_hw_version.h"
38 #include <linux/debugfs.h>
39 #include <linux/seq_file.h>
40 #include "qdf_status.h"
41 #include "qdf_atomic.h"
42 #include "pld_common.h"
43 #include "mp_dev.h"
44 #include "hif_debug.h"
45 
46 #include "if_pci_internal.h"
47 #include "ce_tasklet.h"
48 #include "targaddrs.h"
49 #include "hif_exec.h"
50 
51 #include "pci_api.h"
52 #include "ahb_api.h"
53 
54 /* Maximum ms timeout for host to wake up target */
55 #define PCIE_WAKE_TIMEOUT 1000
56 #define RAMDUMP_EVENT_TIMEOUT 2500
57 
58 /* Setting SOC_GLOBAL_RESET during driver unload causes an intermittent
59  * PCIe data bus error.
60  * As a workaround for this issue, the reset sequence is changed to use
61  * a Target CPU warm reset instead of SOC_GLOBAL_RESET.
62  */
63 #define CPU_WARM_RESET_WAR
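/* With this macro defined, the driver's unload path is expected to use
 * the Target CPU warm reset (hif_pci_device_warm_reset() below) instead
 * of asserting SOC_GLOBAL_RESET.
 */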
64 
65 #ifdef CONFIG_WIN
66 extern int32_t frac, intval, ar900b_20_targ_clk, qca9888_20_targ_clk;
67 #endif
68 
69 /*
70  * Top-level interrupt handler for all PCI interrupts from a Target.
71  * When a block of MSI interrupts is allocated, this top-level handler
72  * is not used; instead, we directly call the correct sub-handler.
73  */
74 struct ce_irq_reg_table {
75 	uint32_t irq_enable;
76 	uint32_t irq_status;
77 };
78 
79 #ifndef QCA_WIFI_3_0_ADRASTEA
80 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
81 {
82 }
83 #else
84 static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
85 {
86 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
87 	unsigned int target_enable0, target_enable1;
88 	unsigned int target_cause0, target_cause1;
89 
90 	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
91 	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
92 	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
93 	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);
94 
95 	if ((target_enable0 & target_cause0) ||
96 	    (target_enable1 & target_cause1)) {
97 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
98 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);
99 
100 		if (scn->notice_send)
101 			pld_intr_notify_q6(sc->dev);
102 	}
103 }
104 #endif
105 
106 
107 /**
108  * pci_dispatch_interrupt() - dispatch pending CE interrupts to tasklets
109  * @scn: hif context
110  *
111  * Return: N/A
112  */
113 static void pci_dispatch_interrupt(struct hif_softc *scn)
114 {
115 	uint32_t intr_summary;
116 	int id;
117 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
118 
119 	if (scn->hif_init_done != true)
120 		return;
121 
122 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
123 		return;
124 
125 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
126 
127 	if (intr_summary == 0) {
128 		if ((scn->target_status != TARGET_STATUS_RESET) &&
129 			(!qdf_atomic_read(&scn->link_suspended))) {
130 
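			/* No CE interrupt is pending: re-enable the
			 * HOST_GROUP0 interrupts; the read back below
			 * flushes the posted write.
			 */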
131 			hif_write32_mb(scn, scn->mem +
132 				(SOC_CORE_BASE_ADDRESS |
133 				PCIE_INTR_ENABLE_ADDRESS),
134 				HOST_GROUP0_MASK);
135 
136 			hif_read32_mb(scn, scn->mem +
137 					(SOC_CORE_BASE_ADDRESS |
138 					PCIE_INTR_ENABLE_ADDRESS));
139 		}
140 		Q_TARGET_ACCESS_END(scn);
141 		return;
142 	}
143 	Q_TARGET_ACCESS_END(scn);
144 
145 	scn->ce_irq_summary = intr_summary;
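	/* Walk the summary bitmap and hand each pending copy engine off
	 * to its per-CE tasklet handler.
	 */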
146 	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
147 		if (intr_summary & (1 << id)) {
148 			intr_summary &= ~(1 << id);
149 			ce_dispatch_interrupt(id,  &hif_state->tasklets[id]);
150 		}
151 	}
152 }
153 
154 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
155 {
156 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
157 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
158 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
159 
160 	volatile int tmp;
161 	uint16_t val = 0;
162 	uint32_t bar0 = 0;
163 	uint32_t fw_indicator_address, fw_indicator;
164 	bool ssr_irq = false;
165 	unsigned int host_cause, host_enable;
166 
167 	if (LEGACY_INTERRUPTS(sc)) {
168 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
169 			return IRQ_HANDLED;
170 
171 		if (ADRASTEA_BU) {
172 			host_enable = hif_read32_mb(sc, sc->mem +
173 						    PCIE_INTR_ENABLE_ADDRESS);
174 			host_cause = hif_read32_mb(sc, sc->mem +
175 						   PCIE_INTR_CAUSE_ADDRESS);
176 			if (!(host_enable & host_cause)) {
177 				hif_pci_route_adrastea_interrupt(sc);
178 				return IRQ_HANDLED;
179 			}
180 		}
181 
182 		/* Clear Legacy PCI line interrupts
183 		 * IMPORTANT: the INTR_CLR register has to be set
184 		 * after INTR_ENABLE is set to 0,
185 		 * otherwise the interrupt cannot actually be cleared
186 		 */
187 		hif_write32_mb(sc, sc->mem +
188 			      (SOC_CORE_BASE_ADDRESS |
189 			       PCIE_INTR_ENABLE_ADDRESS), 0);
190 
191 		hif_write32_mb(sc, sc->mem +
192 			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
193 			       ADRASTEA_BU ?
194 			       (host_enable & host_cause) :
195 			      HOST_GROUP0_MASK);
196 
197 		if (ADRASTEA_BU)
198 			hif_write32_mb(sc, sc->mem + 0x2f100c,
199 				       (host_cause >> 1));
200 
201 		/* IMPORTANT: this extra read transaction is required to
202 		 * flush the posted write buffer
203 		 */
204 		if (!ADRASTEA_BU) {
205 		tmp =
206 			hif_read32_mb(sc, sc->mem +
207 				     (SOC_CORE_BASE_ADDRESS |
208 				      PCIE_INTR_ENABLE_ADDRESS));
209 
210 		if (tmp == 0xdeadbeef) {
211 			HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
212 			       __func__);
213 
214 			pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
215 			HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
216 			       __func__, val);
217 
218 			pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
219 			HIF_ERROR("%s: PCI Device ID = 0x%04x",
220 			       __func__, val);
221 
222 			pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
223 			HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
224 			       val);
225 
226 			pci_read_config_word(sc->pdev, PCI_STATUS, &val);
227 			HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
228 			       val);
229 
230 			pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
231 					      &bar0);
232 			HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
233 			       bar0);
234 
235 			HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
236 				  __func__,
237 				  hif_read32_mb(sc, sc->mem +
238 						PCIE_LOCAL_BASE_ADDRESS
239 						+ RTC_STATE_ADDRESS));
240 			HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
241 				  __func__,
242 				  hif_read32_mb(sc, sc->mem +
243 						PCIE_LOCAL_BASE_ADDRESS
244 						+ PCIE_SOC_WAKE_ADDRESS));
245 			HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
246 				  __func__,
247 				  hif_read32_mb(sc, sc->mem + 0x80008),
248 				  hif_read32_mb(sc, sc->mem + 0x8000c));
249 			HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
250 				  __func__,
251 				  hif_read32_mb(sc, sc->mem + 0x80010),
252 				  hif_read32_mb(sc, sc->mem + 0x80014));
253 			HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
254 				  __func__,
255 				  hif_read32_mb(sc, sc->mem + 0x80018),
256 				  hif_read32_mb(sc, sc->mem + 0x8001c));
257 			QDF_BUG(0);
258 		}
259 
260 		PCI_CLR_CAUSE0_REGISTER(sc);
261 		}
262 
263 		if (HAS_FW_INDICATOR) {
264 			fw_indicator_address = hif_state->fw_indicator_address;
265 			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
266 			if ((fw_indicator != ~0) &&
267 			   (fw_indicator & FW_IND_EVENT_PENDING))
268 				ssr_irq = true;
269 		}
270 
271 		if (Q_TARGET_ACCESS_END(scn) < 0)
272 			return IRQ_HANDLED;
273 	}
274 	/* TBDXXX: Add support for WMAC */
275 
276 	if (ssr_irq) {
277 		sc->irq_event = irq;
278 		qdf_atomic_set(&scn->tasklet_from_intr, 1);
279 
280 		qdf_atomic_inc(&scn->active_tasklet_cnt);
281 		tasklet_schedule(&sc->intr_tq);
282 	} else {
283 		pci_dispatch_interrupt(scn);
284 	}
285 
286 	return IRQ_HANDLED;
287 }
288 
289 static irqreturn_t hif_pci_msi_fw_handler(int irq, void *arg)
290 {
291 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
292 
293 	(irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, arg);
294 
295 	return IRQ_HANDLED;
296 }
297 
298 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
299 {
300 	return 1;               /* FIX THIS */
301 }
302 
303 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
304 {
305 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
306 	int i = 0;
307 
308 	if (!irq || !size) {
309 		return -EINVAL;
310 	}
311 
312 	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
313 		irq[0] = sc->irq;
314 		return 1;
315 	}
316 
317 	if (sc->num_msi_intrs > size) {
318 		qdf_print("Not enough space in irq buffer to return irqs");
319 		return -EINVAL;
320 	}
321 
322 	for (i = 0; i < sc->num_msi_intrs; i++) {
323 		irq[i] = sc->irq +  i + MSI_ASSIGN_CE_INITIAL;
324 	}
325 
326 	return sc->num_msi_intrs;
327 }
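
/*
 * Illustrative caller (hypothetical, not taken from this driver): with
 * an opaque hif handle in hand, the IRQ list can be retrieved as
 *
 *	int irqs[CE_COUNT_MAX];
 *	int nr = hif_get_irq_num(hif_hdl, irqs, QDF_ARRAY_SIZE(irqs));
 *
 * where a negative return means the buffer was too small or the
 * arguments were invalid, and a positive return is the number of IRQs
 * filled in.
 */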
328 
329 
330 /**
331  * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
332  * @scn: hif_softc
333  *
334  * Return: void
335  */
336 #if CONFIG_ATH_PCIE_MAX_PERF == 0
337 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
338 {
339 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
340 	A_target_id_t pci_addr = scn->mem;
341 
342 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
343 	/*
344 	 * If the deferred sleep timer is running cancel it
345 	 * and put the soc into sleep.
346 	 */
347 	if (hif_state->fake_sleep == true) {
348 		qdf_timer_stop(&hif_state->sleep_timer);
349 		if (hif_state->verified_awake == false) {
350 			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
351 				      PCIE_SOC_WAKE_ADDRESS,
352 				      PCIE_SOC_WAKE_RESET);
353 		}
354 		hif_state->fake_sleep = false;
355 	}
356 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
357 }
358 #else
359 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
360 {
361 }
362 #endif
363 
364 #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
365 	hif_read32_mb(sc, (char *)(mem) + \
366 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
367 
368 #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
369 	hif_write32_mb(sc, ((char *)(mem) + \
370 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
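
/*
 * Both helpers access registers in the PCIe local register space: the
 * supplied offset is applied on top of PCIE_LOCAL_BASE_ADDRESS within
 * the mapped BAR.
 */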
371 
372 #ifdef QCA_WIFI_3_0
373 /**
374  * hif_targ_is_awake() - check to see if the target is awake
375  * @hif_ctx: hif context
376  *
377  * emulation never goes to sleep
378  *
379  * Return: true if target is awake
380  */
381 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
382 {
383 	return true;
384 }
385 #else
386 /**
387  * hif_targ_is_awake() - check to see if the target is awake
388  * @hif_ctx: hif context
389  *
390  * Return: true if the targets clocks are on
391  */
392 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
393 {
394 	uint32_t val;
395 
396 	if (scn->recovery)
397 		return false;
398 	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
399 		+ RTC_STATE_ADDRESS);
400 	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
401 }
402 #endif
403 
404 #define ATH_PCI_RESET_WAIT_MAX 10       /* ms */
405 static void hif_pci_device_reset(struct hif_pci_softc *sc)
406 {
407 	void __iomem *mem = sc->mem;
408 	int i;
409 	uint32_t val;
410 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
411 
412 	if (!scn->hostdef)
413 		return;
414 
415 	/* NB: Don't check resetok here.  This form of reset
416 	 * is integral to correct operation.
417 	 */
418 
419 	if (!SOC_GLOBAL_RESET_ADDRESS)
420 		return;
421 
422 	if (!mem)
423 		return;
424 
425 	HIF_ERROR("%s: Reset Device", __func__);
426 
427 	/*
428 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
429 	 * writing WAKE_V, the Target may scribble over Host memory!
430 	 */
431 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
432 			       PCIE_SOC_WAKE_V_MASK);
433 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
434 		if (hif_targ_is_awake(scn, mem))
435 			break;
436 
437 		qdf_mdelay(1);
438 	}
439 
440 	/* Put Target, including PCIe, into RESET. */
441 	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
442 	val |= 1;
443 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
444 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
445 		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
446 		    RTC_STATE_COLD_RESET_MASK)
447 			break;
448 
449 		qdf_mdelay(1);
450 	}
451 
452 	/* Pull Target, including PCIe, out of RESET. */
453 	val &= ~1;
454 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
455 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
456 		if (!
457 		    (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
458 		     RTC_STATE_COLD_RESET_MASK))
459 			break;
460 
461 		qdf_mdelay(1);
462 	}
463 
464 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
465 			       PCIE_SOC_WAKE_RESET);
466 }
467 
468 /* CPU warm reset function
469  * Steps:
470  * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
471  * 2. Clear the FW_INDICATOR_ADDRESS - so the Target CPU initializes FW
472  *    correctly on WARM reset
473  * 3. Clear TARGET CPU LF timer interrupt
474  * 4. Reset all CEs to clear any pending CE transactions
475  * 5. Warm reset CPU
476  */
477 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
478 {
479 	void __iomem *mem = sc->mem;
480 	int i;
481 	uint32_t val;
482 	uint32_t fw_indicator;
483 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
484 
485 	/* NB: Don't check resetok here.  This form of reset is
486 	 * integral to correct operation.
487 	 */
488 
489 	if (!mem)
490 		return;
491 
492 	HIF_INFO_MED("%s: Target Warm Reset", __func__);
493 
494 	/*
495 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
496 	 * writing WAKE_V, the Target may scribble over Host memory!
497 	 */
498 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
499 			       PCIE_SOC_WAKE_V_MASK);
500 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
501 		if (hif_targ_is_awake(scn, mem))
502 			break;
503 		qdf_mdelay(1);
504 	}
505 
506 	/*
507 	 * Disable Pending interrupts
508 	 */
509 	val =
510 		hif_read32_mb(sc, mem +
511 			     (SOC_CORE_BASE_ADDRESS |
512 			      PCIE_INTR_CAUSE_ADDRESS));
513 	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
514 		    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
515 	/* Target CPU Intr Cause */
516 	val = hif_read32_mb(sc, mem +
517 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
518 	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);
519 
520 	val =
521 		hif_read32_mb(sc, mem +
522 			     (SOC_CORE_BASE_ADDRESS |
523 			      PCIE_INTR_ENABLE_ADDRESS));
524 	hif_write32_mb(sc, (mem +
525 		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
526 	hif_write32_mb(sc, (mem +
527 		       (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
528 		       HOST_GROUP0_MASK);
529 
530 	qdf_mdelay(100);
531 
532 	/* Clear FW_INDICATOR_ADDRESS */
533 	if (HAS_FW_INDICATOR) {
534 		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
535 		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
536 	}
537 
538 	/* Clear Target LF Timer interrupts */
539 	val =
540 		hif_read32_mb(sc, mem +
541 			     (RTC_SOC_BASE_ADDRESS +
542 			      SOC_LF_TIMER_CONTROL0_ADDRESS));
543 	HIF_INFO_MED("%s: addr 0x%x :  0x%x", __func__,
544 	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
545 	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
546 	hif_write32_mb(sc, mem +
547 		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
548 		      val);
549 
550 	/* Reset CE */
551 	val =
552 		hif_read32_mb(sc, mem +
553 			     (RTC_SOC_BASE_ADDRESS |
554 			      SOC_RESET_CONTROL_ADDRESS));
555 	val |= SOC_RESET_CONTROL_CE_RST_MASK;
556 	hif_write32_mb(sc, (mem +
557 		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
558 		      val);
559 	val =
560 		hif_read32_mb(sc, mem +
561 			     (RTC_SOC_BASE_ADDRESS |
562 			      SOC_RESET_CONTROL_ADDRESS));
563 	qdf_mdelay(10);
564 
565 	/* CE unreset */
566 	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
567 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
568 		       SOC_RESET_CONTROL_ADDRESS), val);
569 	val =
570 		hif_read32_mb(sc, mem +
571 			     (RTC_SOC_BASE_ADDRESS |
572 			      SOC_RESET_CONTROL_ADDRESS));
573 	qdf_mdelay(10);
574 
575 	/* Read Target CPU Intr Cause */
576 	val = hif_read32_mb(sc, mem +
577 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
578 	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
579 		    __func__, val);
580 
581 	/* CPU warm RESET */
582 	val =
583 		hif_read32_mb(sc, mem +
584 			     (RTC_SOC_BASE_ADDRESS |
585 			      SOC_RESET_CONTROL_ADDRESS));
586 	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
587 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
588 		       SOC_RESET_CONTROL_ADDRESS), val);
589 	val =
590 		hif_read32_mb(sc, mem +
591 			     (RTC_SOC_BASE_ADDRESS |
592 			      SOC_RESET_CONTROL_ADDRESS));
593 	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
594 		    __func__, val);
595 
596 	qdf_mdelay(100);
597 	HIF_INFO_MED("%s: Target Warm reset complete", __func__);
598 
599 }
600 
601 #ifndef QCA_WIFI_3_0
602 /* only applicable to legacy ce */
603 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
604 {
605 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
606 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
607 	void __iomem *mem = sc->mem;
608 	uint32_t val;
609 
610 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
611 		return ATH_ISR_NOSCHED;
612 	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
613 	if (Q_TARGET_ACCESS_END(scn) < 0)
614 		return ATH_ISR_SCHED;
615 
616 	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);
617 
618 	if (val & FW_IND_HELPER)
619 		return 0;
620 
621 	return 1;
622 }
623 #endif
624 
625 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
626 {
627 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
628 	uint16_t device_id = 0;
629 	uint32_t val;
630 	uint16_t timeout_count = 0;
631 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
632 
633 	/* Check device ID from PCIe configuration space for link status */
634 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
635 	if (device_id != sc->devid) {
636 		HIF_ERROR("%s: device ID does not match (read 0x%x, expect 0x%x)",
637 			  __func__, device_id, sc->devid);
638 		return -EACCES;
639 	}
640 
641 	/* Check PCIe local register for bar/memory access */
642 	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
643 			   RTC_STATE_ADDRESS);
644 	HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);
645 
646 	/* Try to wake up the target if it sleeps */
647 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
648 		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
649 	HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
650 		hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
651 		PCIE_SOC_WAKE_ADDRESS));
652 
653 	/* Check if the target can be woken up */
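	/* The wake request is retried every 100 ms, for up to
	 * PCIE_WAKE_TIMEOUT (1000 ms) before giving up with -EACCES.
	 */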
654 	while (!hif_targ_is_awake(scn, sc->mem)) {
655 		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
656 			HIF_ERROR("%s: wake up timeout, %08x, %08x",
657 				__func__,
658 				hif_read32_mb(sc, sc->mem +
659 					     PCIE_LOCAL_BASE_ADDRESS +
660 					     RTC_STATE_ADDRESS),
661 				hif_read32_mb(sc, sc->mem +
662 					     PCIE_LOCAL_BASE_ADDRESS +
663 					PCIE_SOC_WAKE_ADDRESS));
664 			return -EACCES;
665 		}
666 
667 		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
668 			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
669 
670 		qdf_mdelay(100);
671 		timeout_count += 100;
672 	}
673 
674 	/* Check Power register for SoC internal bus issues */
675 	val =
676 		hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
677 			     SOC_POWER_REG_OFFSET);
678 	HIF_INFO_MED("%s: Power register is %08x", __func__, val);
679 
680 	return 0;
681 }
682 
683 /**
684  * __hif_pci_dump_registers(): dump other PCI debug registers
685  * @scn: struct hif_softc
686  *
687  * This function dumps pci debug registers.  The parent function
688  * dumps the copy engine registers before calling this function.
689  *
690  * Return: void
691  */
692 static void __hif_pci_dump_registers(struct hif_softc *scn)
693 {
694 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
695 	void __iomem *mem = sc->mem;
696 	uint32_t val, i, j;
697 	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
698 	uint32_t ce_base;
699 
700 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
701 		return;
702 
703 	/* DEBUG_INPUT_SEL_SRC = 0x6 */
704 	val =
705 		hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
706 			     WLAN_DEBUG_INPUT_SEL_OFFSET);
707 	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
708 	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
709 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
710 		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);
711 
712 	/* DEBUG_CONTROL_ENABLE = 0x1 */
713 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
714 			   WLAN_DEBUG_CONTROL_OFFSET);
715 	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
716 	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
717 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
718 		      WLAN_DEBUG_CONTROL_OFFSET, val);
719 
720 	HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
721 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
722 			    WLAN_DEBUG_INPUT_SEL_OFFSET),
723 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
724 			    WLAN_DEBUG_CONTROL_OFFSET));
725 
726 	HIF_INFO_MED("%s: Debug CE", __func__);
727 	/* Loop CE debug output */
728 	/* AMBA_DEBUG_BUS_SEL = 0xc */
729 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
730 			    AMBA_DEBUG_BUS_OFFSET);
731 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
732 	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
733 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
734 		       val);
735 
736 	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
737 		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
738 		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
739 				   CE_WRAPPER_DEBUG_OFFSET);
740 		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
741 		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
742 		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
743 			      CE_WRAPPER_DEBUG_OFFSET, val);
744 
745 		HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
746 			    __func__, wrapper_idx[i],
747 			    hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
748 				AMBA_DEBUG_BUS_OFFSET),
749 			    hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
750 				CE_WRAPPER_DEBUG_OFFSET));
751 
752 		if (wrapper_idx[i] <= 7) {
753 			for (j = 0; j <= 5; j++) {
754 				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
755 				/* For (j=0~5) write CE_DEBUG_SEL = j */
756 				val =
757 					hif_read32_mb(sc, mem + ce_base +
758 						     CE_DEBUG_OFFSET);
759 				val &= ~CE_DEBUG_SEL_MASK;
760 				val |= CE_DEBUG_SEL_SET(j);
761 				hif_write32_mb(sc, mem + ce_base +
762 					       CE_DEBUG_OFFSET, val);
763 
764 				/* read (@gpio_athr_wlan_reg)
765 				 * WLAN_DEBUG_OUT_DATA
766 				 */
767 				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
768 						    + WLAN_DEBUG_OUT_OFFSET);
769 				val = WLAN_DEBUG_OUT_DATA_GET(val);
770 
771 				HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
772 					    __func__, j,
773 					    hif_read32_mb(sc, mem + ce_base +
774 						    CE_DEBUG_OFFSET), val);
775 			}
776 		} else {
777 			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
778 			val =
779 				hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
780 					     WLAN_DEBUG_OUT_OFFSET);
781 			val = WLAN_DEBUG_OUT_DATA_GET(val);
782 
783 			HIF_INFO_MED("%s: out: %x", __func__, val);
784 		}
785 	}
786 
787 	HIF_INFO_MED("%s: Debug PCIe:", __func__);
788 	/* Loop PCIe debug output */
789 	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
790 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
791 			    AMBA_DEBUG_BUS_OFFSET);
792 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
793 	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
794 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
795 		       AMBA_DEBUG_BUS_OFFSET, val);
796 
797 	for (i = 0; i <= 8; i++) {
798 		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
799 		val =
800 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
801 				     AMBA_DEBUG_BUS_OFFSET);
802 		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
803 		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
804 		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
805 			       AMBA_DEBUG_BUS_OFFSET, val);
806 
807 		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
808 		val =
809 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
810 				     WLAN_DEBUG_OUT_OFFSET);
811 		val = WLAN_DEBUG_OUT_DATA_GET(val);
812 
813 		HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
814 		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
815 				    WLAN_DEBUG_OUT_OFFSET), val,
816 		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
817 				    WLAN_DEBUG_OUT_OFFSET));
818 	}
819 
820 	Q_TARGET_ACCESS_END(scn);
821 }
822 
823 /**
824  * hif_pci_dump_registers(): dump bus debug registers
825  * @hif_ctx: struct hif_softc
826  *
827  * This function dumps hif bus debug registers
828  *
829  * Return: 0 for success or error code
830  */
831 int hif_pci_dump_registers(struct hif_softc *hif_ctx)
832 {
833 	int status;
834 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
835 
836 	status = hif_dump_ce_registers(scn);
837 
838 	if (status)
839 		HIF_ERROR("%s: Dump CE Registers Failed", __func__);
840 
841 	/* dump non copy engine pci registers */
842 	__hif_pci_dump_registers(scn);
843 
844 	return 0;
845 }
846 
847 /*
848  * Handler for a per-engine interrupt on a PARTICULAR CE.
849  * This is used in cases where each CE has a private
850  * MSI interrupt.
851  */
852 static irqreturn_t ce_per_engine_handler(int irq, void *arg)
853 {
854 	int CE_id = irq - MSI_ASSIGN_CE_INITIAL;
855 
856 	/*
857 	 * NOTE: We are able to derive CE_id from irq because we
858 	 * use a one-to-one mapping for CE's 0..5.
859 	 * CE's 6 & 7 do not use interrupts at all.
860 	 *
861 	 * This mapping must be kept in sync with the mapping
862 	 * used by firmware.
863 	 */
864 
865 	ce_per_engine_service(arg, CE_id);
866 
867 	return IRQ_HANDLED;
868 }
869 
870 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
871 
872 /* worker thread to schedule wlan_tasklet in SLUB debug build */
873 static void reschedule_tasklet_work_handler(void *arg)
874 {
875 	struct hif_pci_softc *sc = arg;
876 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
877 
878 	if (!scn) {
879 		HIF_ERROR("%s: hif_softc is NULL\n", __func__);
880 		return;
881 	}
882 
883 	if (scn->hif_init_done == false) {
884 		HIF_ERROR("%s: wlan driver is unloaded", __func__);
885 		return;
886 	}
887 
888 	tasklet_schedule(&sc->intr_tq);
889 }
890 
891 /**
892  * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
893  * work
894  * @sc: HIF PCI Context
895  *
896  * Return: void
897  */
898 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
899 {
900 	qdf_create_work(0, &sc->reschedule_tasklet_work,
901 				reschedule_tasklet_work_handler, NULL);
902 }
903 #else
904 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
905 #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
906 
907 void wlan_tasklet(unsigned long data)
908 {
909 	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
910 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
911 
912 	if (scn->hif_init_done == false)
913 		goto end;
914 
915 	if (qdf_atomic_read(&scn->link_suspended))
916 		goto end;
917 
918 	if (!ADRASTEA_BU) {
919 		(irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
920 		if (scn->target_status == TARGET_STATUS_RESET)
921 			goto end;
922 	}
923 
924 end:
925 	qdf_atomic_set(&scn->tasklet_from_intr, 0);
926 	qdf_atomic_dec(&scn->active_tasklet_cnt);
927 }
928 
929 #ifdef FEATURE_RUNTIME_PM
930 static const char *hif_pm_runtime_state_to_string(uint32_t state)
931 {
932 	switch (state) {
933 	case HIF_PM_RUNTIME_STATE_NONE:
934 		return "INIT_STATE";
935 	case HIF_PM_RUNTIME_STATE_ON:
936 		return "ON";
937 	case HIF_PM_RUNTIME_STATE_INPROGRESS:
938 		return "INPROGRESS";
939 	case HIF_PM_RUNTIME_STATE_SUSPENDED:
940 		return "SUSPENDED";
941 	default:
942 		return "INVALID STATE";
943 	}
944 }
945 
946 #define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
947 	seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
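/* Prints one pm_stats counter as "<field name>: <value>"; #_name
 * stringizes the member name so the label always matches the field.
 */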
948 /**
949  * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
950  * @sc: hif_pci_softc context
951  * @msg: log message
952  *
953  * log runtime pm stats when something seems off.
954  *
955  * Return: void
956  */
957 static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
958 {
959 	struct hif_pm_runtime_lock *ctx;
960 
961 	HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
962 			msg, atomic_read(&sc->dev->power.usage_count),
963 			hif_pm_runtime_state_to_string(
964 					atomic_read(&sc->pm_state)),
965 			sc->prevent_suspend_cnt);
966 
967 	HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
968 			sc->dev->power.runtime_status,
969 			sc->dev->power.runtime_error,
970 			sc->dev->power.disable_depth,
971 			sc->dev->power.autosuspend_delay);
972 
973 	HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
974 			sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
975 			sc->pm_stats.request_resume);
976 
977 	HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
978 			sc->pm_stats.allow_suspend,
979 			sc->pm_stats.prevent_suspend);
980 
981 	HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
982 			sc->pm_stats.prevent_suspend_timeout,
983 			sc->pm_stats.allow_suspend_timeout);
984 
985 	HIF_ERROR("Suspended count: %u, resumed count: %u",
986 			sc->pm_stats.suspended,
987 			sc->pm_stats.resumed);
988 
989 	HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
990 			sc->pm_stats.suspend_err,
991 			sc->pm_stats.runtime_get_err);
992 
993 	HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");
994 
995 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
996 		HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
997 	}
998 
999 	WARN_ON(1);
1000 }
1001 
1002 /**
1003  * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
1004  * @s: file to print to
1005  * @data: unused
1006  *
1007  * debugging tool added to the debug fs for displaying runtimepm stats
1008  *
1009  * Return: 0
1010  */
1011 static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
1012 {
1013 	struct hif_pci_softc *sc = s->private;
1014 	static const char * const autopm_state[] = {"NONE", "ON", "INPROGRESS",
1015 		"SUSPENDED"};
1016 	unsigned int msecs_age;
1017 	int pm_state = atomic_read(&sc->pm_state);
1018 	unsigned long timer_expires;
1019 	struct hif_pm_runtime_lock *ctx;
1020 
1021 	seq_printf(s, "%30s: %s\n", "Runtime PM state",
1022 			autopm_state[pm_state]);
1023 	seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
1024 			sc->pm_stats.last_resume_caller);
1025 
1026 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
1027 		msecs_age = jiffies_to_msecs(
1028 				jiffies - sc->pm_stats.suspend_jiffies);
1029 		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
1030 				msecs_age / 1000, msecs_age % 1000);
1031 	}
1032 
1033 	seq_printf(s, "%30s: %d\n", "PM Usage count",
1034 			atomic_read(&sc->dev->power.usage_count));
1035 
1036 	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
1037 			sc->prevent_suspend_cnt);
1038 
1039 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
1040 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
1041 	HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
1042 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
1043 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
1044 	HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
1045 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
1046 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
1047 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
1048 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
1049 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);
1050 
1051 	timer_expires = sc->runtime_timer_expires;
1052 	if (timer_expires > 0) {
1053 		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
1054 		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
1055 				msecs_age / 1000, msecs_age % 1000);
1056 	}
1057 
1058 	spin_lock_bh(&sc->runtime_lock);
1059 	if (list_empty(&sc->prevent_suspend_list)) {
1060 		spin_unlock_bh(&sc->runtime_lock);
1061 		return 0;
1062 	}
1063 
1064 	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
1065 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
1066 		seq_printf(s, "%s", ctx->name);
1067 		if (ctx->timeout)
1068 			seq_printf(s, "(%d ms)", ctx->timeout);
1069 		seq_puts(s, " ");
1070 	}
1071 	seq_puts(s, "\n");
1072 	spin_unlock_bh(&sc->runtime_lock);
1073 
1074 	return 0;
1075 }
1076 #undef HIF_PCI_RUNTIME_PM_STATS
1077 
1078 /**
1079  * hif_pci_runtime_pm_open() - open a debugfs file to access the runtime pm stats
1080  * @inode: inode of the debugfs entry
1081  * @file: file being opened
1082  *
1083  * Return: linux error code of single_open.
1084  */
1085 static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
1086 {
1087 	return single_open(file, hif_pci_pm_runtime_debugfs_show,
1088 			inode->i_private);
1089 }
1090 
1091 static const struct file_operations hif_pci_runtime_pm_fops = {
1092 	.owner          = THIS_MODULE,
1093 	.open           = hif_pci_runtime_pm_open,
1094 	.release        = single_release,
1095 	.read           = seq_read,
1096 	.llseek         = seq_lseek,
1097 };
1098 
1099 /**
1100  * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
1101  * @sc: pci context
1102  *
1103  * creates a debugfs entry to debug the runtime pm feature.
1104  */
1105 static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
1106 {
1107 	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
1108 					0400, NULL, sc,
1109 					&hif_pci_runtime_pm_fops);
1110 }
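
/* The entry is created directly under the debugfs root, so with debugfs
 * mounted at its usual location it can be read from
 * /sys/kernel/debug/cnss_runtime_pm (path assumes the default mount).
 */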
1111 
1112 /**
1113  * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
1114  * @sc: pci context
1115  *
1116  * removes the debugfs entry to debug the runtime pm feature.
1117  */
1118 static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
1119 {
1120 	debugfs_remove(sc->pm_dentry);
1121 }
1122 
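/**
 * hif_runtime_init() - move the device into runtime-pm autosuspend mode
 * @dev: device to enable runtime pm on
 * @delay: autosuspend delay in milliseconds
 *
 * Note: pm_runtime_put_noidle() drops a usage-count reference without
 * requesting idle, so the device can autosuspend once the remaining
 * references are released.
 */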
1123 static void hif_runtime_init(struct device *dev, int delay)
1124 {
1125 	pm_runtime_set_autosuspend_delay(dev, delay);
1126 	pm_runtime_use_autosuspend(dev);
1127 	pm_runtime_allow(dev);
1128 	pm_runtime_mark_last_busy(dev);
1129 	pm_runtime_put_noidle(dev);
1130 	pm_suspend_ignore_children(dev, true);
1131 }
1132 
1133 static void hif_runtime_exit(struct device *dev)
1134 {
1135 	pm_runtime_get_noresume(dev);
1136 	pm_runtime_set_active(dev);
1137 }
1138 
1139 static void hif_pm_runtime_lock_timeout_fn(unsigned long data);
1140 
1141 /**
1142  * hif_pm_runtime_start(): start the runtime pm
1143  * @sc: pci context
1144  *
1145  * After this call, runtime pm will be active.
1146  */
1147 static void hif_pm_runtime_start(struct hif_pci_softc *sc)
1148 {
1149 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1150 	uint32_t mode = hif_get_conparam(ol_sc);
1151 
1152 	if (!ol_sc->hif_config.enable_runtime_pm) {
1153 		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
1154 		return;
1155 	}
1156 
1157 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode)) {
1158 		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
1159 				__func__);
1160 		return;
1161 	}
1162 
1163 	setup_timer(&sc->runtime_timer, hif_pm_runtime_lock_timeout_fn,
1164 			(unsigned long)sc);
1165 
1166 	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
1167 			ol_sc->hif_config.runtime_pm_delay);
1168 
1169 	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
1170 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
1171 	hif_runtime_pm_debugfs_create(sc);
1172 }
1173 
1174 /**
1175  * hif_pm_runtime_stop(): stop runtime pm
1176  * @sc: pci context
1177  *
1178  * Turns off runtime pm and frees corresponding resources
1179  * that were acquired by hif_runtime_pm_start().
1180  */
1181 static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
1182 {
1183 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1184 	uint32_t mode = hif_get_conparam(ol_sc);
1185 
1186 	if (!ol_sc->hif_config.enable_runtime_pm)
1187 		return;
1188 
1189 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode))
1190 		return;
1191 
1192 	hif_runtime_exit(sc->dev);
1193 	hif_pm_runtime_resume(sc->dev);
1194 
1195 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1196 
1197 	hif_runtime_pm_debugfs_remove(sc);
1198 	del_timer_sync(&sc->runtime_timer);
1199 	/* doesn't wait for pending traffic, unlike cld-2.0 */
1200 }
1201 
1202 /**
1203  * hif_pm_runtime_open(): initialize runtime pm
1204  * @sc: pci data structure
1205  *
1206  * Early initialization
1207  */
1208 static void hif_pm_runtime_open(struct hif_pci_softc *sc)
1209 {
1210 	spin_lock_init(&sc->runtime_lock);
1211 
1212 	qdf_atomic_init(&sc->pm_state);
1213 	qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
1214 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1215 	INIT_LIST_HEAD(&sc->prevent_suspend_list);
1216 }
1217 
1218 /**
1219  * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
1220  * @sc: pci context
1221  *
1222  * Ensure we have only one vote against runtime suspend before closing
1223  * the runtime suspend feature.
1224  *
1225  * all gets by the wlan driver should have been returned
1226  * one vote should remain as part of cnss_runtime_exit
1227  *
1228  * needs to be revisited if we share the root complex.
1229  */
1230 static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
1231 {
1232 	struct hif_pm_runtime_lock *ctx, *tmp;
1233 
1234 	if (atomic_read(&sc->dev->power.usage_count) != 1)
1235 		hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
1236 	else
1237 		return;
1238 
1239 	spin_lock_bh(&sc->runtime_lock);
1240 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1241 		spin_unlock_bh(&sc->runtime_lock);
1242 		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
1243 		spin_lock_bh(&sc->runtime_lock);
1244 	}
1245 	spin_unlock_bh(&sc->runtime_lock);
1246 
1247 	/* Ensure one and only one usage count remains, so that runtime
1248 	 * pm is not left disabled when the wlan driver is insmodded
1249 	 * again, and so that the count never drops below 1 and breaks
1250 	 * runtime pm.
1251 	 */
1252 	if (atomic_read(&sc->dev->power.usage_count) <= 0)
1253 		atomic_set(&sc->dev->power.usage_count, 1);
1254 	while (atomic_read(&sc->dev->power.usage_count) > 1)
1255 		hif_pm_runtime_put_auto(sc->dev);
1256 }
1257 
1258 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
1259 					  struct hif_pm_runtime_lock *lock);
1260 
1261 /**
1262  * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
1263  * @sc: PCIe Context
1264  *
1265  * API is used to empty the runtime pm prevent suspend list.
1266  *
1267  * Return: void
1268  */
1269 static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
1270 {
1271 	struct hif_pm_runtime_lock *ctx, *tmp;
1272 
1273 	spin_lock_bh(&sc->runtime_lock);
1274 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1275 		__hif_pm_runtime_allow_suspend(sc, ctx);
1276 	}
1277 	spin_unlock_bh(&sc->runtime_lock);
1278 }
1279 
1280 /**
1281  * hif_pm_runtime_close(): close runtime pm
1282  * @sc: pci bus handle
1283  *
1284  * ensure runtime_pm is stopped before closing the driver
1285  */
1286 static void hif_pm_runtime_close(struct hif_pci_softc *sc)
1287 {
1288 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1289 
1290 	qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
1291 	if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
1292 		return;
1293 
1294 	hif_pm_runtime_stop(sc);
1295 
1296 	hif_is_recovery_in_progress(scn) ?
1297 		hif_pm_runtime_sanitize_on_ssr_exit(sc) :
1298 		hif_pm_runtime_sanitize_on_exit(sc);
1299 }
1300 #else
1301 static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
1302 static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
1303 static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
1304 static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
1305 #endif
1306 
1307 /**
1308  * hif_disable_power_gating() - disable HW power gating
1309  * @hif_ctx: hif context
1310  *
1311  * disables pcie L1 power states
1312  */
1313 static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1314 {
1315 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1316 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1317 
1318 	if (NULL == scn) {
1319 		HIF_ERROR("%s: Could not disable ASPM, scn is null",
1320 		       __func__);
1321 		return;
1322 	}
1323 
1324 	/* Disable ASPM when pkt log is enabled */
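	/* Offset 0x80 appears to be this device's PCIe Link Control
	 * register (hence lcr_val); the saved value is restored by
	 * hif_enable_power_gating() once firmware/OTP download completes.
	 */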
1325 	pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1326 	pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1327 }
1328 
1329 /**
1330  * hif_enable_power_gating() - enable HW power gating
1331  * @hif_ctx: hif context
1332  *
1333  * enables pcie L1 power states
1334  */
1335 static void hif_enable_power_gating(struct hif_pci_softc *sc)
1336 {
1337 	if (NULL == sc) {
1338 		HIF_ERROR("%s: Could not re-enable ASPM, sc is null",
1339 		       __func__);
1340 		return;
1341 	}
1342 
1343 	/* Re-enable ASPM after firmware/OTP download is complete */
1344 	pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1345 }
1346 
1347 /**
1348  * hif_pci_enable_power_management() - enable power management
1349  * @hif_sc: hif context
1350  *
1351  * Enables runtime pm, ASPM (via hif_enable_power_gating) and re-enables
1352  * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
1353  *
1354  * note: epping mode does not call this function as it does not
1355  *       care about saving power.
1356  */
1357 void hif_pci_enable_power_management(struct hif_softc *hif_sc,
1358 				 bool is_packet_log_enabled)
1359 {
1360 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
1361 
1362 	if (pci_ctx == NULL) {
1363 		HIF_ERROR("%s, hif_ctx null", __func__);
1364 		return;
1365 	}
1366 
1367 	hif_pm_runtime_start(pci_ctx);
1368 
1369 	if (!is_packet_log_enabled)
1370 		hif_enable_power_gating(pci_ctx);
1371 
1372 	if (!CONFIG_ATH_PCIE_MAX_PERF &&
1373 	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
1374 	    !ce_srng_based(hif_sc)) {
1375 		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
1376 		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
1377 			HIF_ERROR("%s, failed to set target to sleep",
1378 				  __func__);
1379 	}
1380 }
1381 
1382 /**
1383  * hif_pci_disable_power_management() - disable power management
1384  * @hif_ctx: hif context
1385  *
1386  * Currently only disables runtime pm. Should be updated to behave
1387  * gracefully if runtime pm was never started, and to take care of
1388  * the aspm and soc-sleep settings applied during driver load.
1389  */
1390 void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
1391 {
1392 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1393 
1394 	if (pci_ctx == NULL) {
1395 		HIF_ERROR("%s, hif_ctx null", __func__);
1396 		return;
1397 	}
1398 
1399 	hif_pm_runtime_stop(pci_ctx);
1400 }
1401 
1402 void hif_pci_display_stats(struct hif_softc *hif_ctx)
1403 {
1404 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1405 
1406 	if (pci_ctx == NULL) {
1407 		HIF_ERROR("%s, hif_ctx null", __func__);
1408 		return;
1409 	}
1410 	hif_display_ce_stats(&pci_ctx->ce_sc);
1411 }
1412 
1413 void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1414 {
1415 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1416 
1417 	if (pci_ctx == NULL) {
1418 		HIF_ERROR("%s, hif_ctx null", __func__);
1419 		return;
1420 	}
1421 	hif_clear_ce_stats(&pci_ctx->ce_sc);
1422 }
1423 
1424 #define ATH_PCI_PROBE_RETRY_MAX 3
1425 /**
1426  * hif_pci_open(): open the hif pci bus
1427  * @hif_ctx: hif context
1428  * @bus_type: bus type
1429  *
1430  * Return: QDF_STATUS from hif_ce_open()
1431  */
1432 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
1433 {
1434 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1435 
1436 	hif_ctx->bus_type = bus_type;
1437 	hif_pm_runtime_open(sc);
1438 
1439 	qdf_spinlock_create(&sc->irq_lock);
1440 
1441 	return hif_ce_open(hif_ctx);
1442 }
1443 
1444 /**
1445  * hif_wake_target_cpu() - wake the target's cpu
1446  * @scn: hif context
1447  *
1448  * Send an interrupt to the device to wake up the Target CPU
1449  * so it has an opportunity to notice any changed state.
1450  */
1451 static void hif_wake_target_cpu(struct hif_softc *scn)
1452 {
1453 	QDF_STATUS rv;
1454 	uint32_t core_ctrl;
1455 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1456 
1457 	rv = hif_diag_read_access(hif_hdl,
1458 				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1459 				  &core_ctrl);
1460 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1461 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1462 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1463 
1464 	rv = hif_diag_write_access(hif_hdl,
1465 				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1466 				   core_ctrl);
1467 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1468 }
1469 
1470 /**
1471  * soc_wake_reset() - allow the target to go to sleep
1472  * @scn: hif_softc
1473  *
1474  * Clear the force wake register.  This is done from
1475  * hif_sleep_entry() and when the deferred sleep timer is cancelled.
1476  */
1477 static void soc_wake_reset(struct hif_softc *scn)
1478 {
1479 	hif_write32_mb(scn, scn->mem +
1480 		PCIE_LOCAL_BASE_ADDRESS +
1481 		PCIE_SOC_WAKE_ADDRESS,
1482 		PCIE_SOC_WAKE_RESET);
1483 }
1484 
1485 /**
1486  * hif_sleep_entry() - gate target sleep
1487  * @arg: hif context
1488  *
1489  * This function is the callback for the sleep timer.
1490  * Check if the last force-awake critical section ended at least
1491  * HIF_MIN_SLEEP_INACTIVITY_TIME_MS ago.  If it did, allow the
1492  * target to go to sleep and cancel the sleep timer;
1493  * otherwise reschedule the sleep timer.
1494  */
1495 static void hif_sleep_entry(void *arg)
1496 {
1497 	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1498 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1499 	uint32_t idle_ms;
1500 
1501 	if (scn->recovery)
1502 		return;
1503 
1504 	if (hif_is_driver_unloading(scn))
1505 		return;
1506 
1507 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1508 	if (hif_state->verified_awake == false) {
1509 		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1510 						    - hif_state->sleep_ticks);
1511 		if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1512 			if (!qdf_atomic_read(&scn->link_suspended)) {
1513 				soc_wake_reset(scn);
1514 				hif_state->fake_sleep = false;
1515 			}
1516 		} else {
1517 			qdf_timer_stop(&hif_state->sleep_timer);
1518 			qdf_timer_start(&hif_state->sleep_timer,
1519 				    HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1520 		}
1521 	} else {
1522 		qdf_timer_stop(&hif_state->sleep_timer);
1523 		qdf_timer_start(&hif_state->sleep_timer,
1524 					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1525 	}
1526 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1527 }
1528 
1529 #define HIF_HIA_MAX_POLL_LOOP    1000000
1530 #define HIF_HIA_POLLING_DELAY_MS 10
1531 
1532 #ifdef CONFIG_WIN
1533 static void hif_set_hia_extnd(struct hif_softc *scn)
1534 {
1535 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1536 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1537 	uint32_t target_type = tgt_info->target_type;
1538 
1539 	HIF_TRACE("%s: E", __func__);
1540 
1541 	if ((target_type == TARGET_TYPE_AR900B) ||
1542 			target_type == TARGET_TYPE_QCA9984 ||
1543 			target_type == TARGET_TYPE_QCA9888) {
1544 		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
1545 		 * in RTC space
1546 		 */
1547 		tgt_info->target_revision
1548 			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
1549 					+ CHIP_ID_ADDRESS));
1550 		qdf_print("chip_id 0x%x chip_revision 0x%x",
1551 			  target_type, tgt_info->target_revision);
1552 	}
1553 
1554 	{
1555 		uint32_t flag2_value = 0;
1556 		uint32_t flag2_targ_addr =
1557 			host_interest_item_address(target_type,
1558 			offsetof(struct host_interest_s, hi_skip_clock_init));
1559 
1560 		if ((ar900b_20_targ_clk != -1) &&
1561 			(frac != -1) && (intval != -1)) {
1562 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1563 				&flag2_value);
1564 			qdf_print("\n Setting clk_override");
1565 			flag2_value |= CLOCK_OVERRIDE;
1566 
1567 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1568 					flag2_value);
1569 			qdf_print("\n CLOCK PLL val set %d", flag2_value);
1570 		} else {
1571 			qdf_print("\n CLOCK PLL skipped");
1572 		}
1573 	}
1574 
1575 	if (target_type == TARGET_TYPE_AR900B
1576 			|| target_type == TARGET_TYPE_QCA9984
1577 			|| target_type == TARGET_TYPE_QCA9888) {
1578 
1579 		/* for AR9980_2.0, 300 mhz clock is used, right now we assume
1580 		 * this would be supplied through module parameters,
1581 		 * if not supplied assumed default or same behavior as 1.0.
1582 		 * Assume 1.0 clock can't be tuned, reset to defaults
1583 		 */
1584 
1585 		qdf_print(KERN_INFO
1586 			  "%s: setting the target pll frac %x intval %x",
1587 			  __func__, frac, intval);
1588 
1589 		/* do not touch frac, and int val, let them be default -1,
1590 		 * if desired, host can supply these through module params
1591 		 */
1592 		if (frac != -1 || intval != -1) {
1593 			uint32_t flag2_value = 0;
1594 			uint32_t flag2_targ_addr;
1595 
1596 			flag2_targ_addr =
1597 				host_interest_item_address(target_type,
1598 				offsetof(struct host_interest_s,
1599 					hi_clock_info));
1600 			hif_diag_read_access(hif_hdl,
1601 				flag2_targ_addr, &flag2_value);
1602 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1603 				  flag2_value);
1604 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1605 			qdf_print("\n INT Val %x  Address %x",
1606 				  intval, flag2_value + 4);
1607 			hif_diag_write_access(hif_hdl,
1608 					flag2_value + 4, intval);
1609 		} else {
1610 			qdf_print(KERN_INFO
1611 				  "%s: no frac provided, skipping pre-configuring PLL",
1612 				  __func__);
1613 		}
1614 
1615 		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
1616 		if ((target_type == TARGET_TYPE_AR900B)
1617 			&& (tgt_info->target_revision == AR900B_REV_2)
1618 			&& ar900b_20_targ_clk != -1) {
1619 			uint32_t flag2_value = 0;
1620 			uint32_t flag2_targ_addr;
1621 
1622 			flag2_targ_addr
1623 				= host_interest_item_address(target_type,
1624 					offsetof(struct host_interest_s,
1625 					hi_desired_cpu_speed_hz));
1626 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1627 							&flag2_value);
1628 			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
1629 				  flag2_value);
1630 			hif_diag_write_access(hif_hdl, flag2_value,
1631 				ar900b_20_targ_clk/*300000000u*/);
1632 		} else if (target_type == TARGET_TYPE_QCA9888) {
1633 			uint32_t flag2_targ_addr;
1634 
1635 			if (200000000u != qca9888_20_targ_clk) {
1636 				qca9888_20_targ_clk = 300000000u;
1637 				/* Setting the target clock speed to 300 mhz */
1638 			}
1639 
1640 			flag2_targ_addr
1641 				= host_interest_item_address(target_type,
1642 					offsetof(struct host_interest_s,
1643 					hi_desired_cpu_speed_hz));
1644 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1645 				qca9888_20_targ_clk);
1646 		} else {
1647 			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
1648 				  __func__);
1649 		}
1650 	} else {
1651 		if (frac != -1 || intval != -1) {
1652 			uint32_t flag2_value = 0;
1653 			uint32_t flag2_targ_addr =
1654 				host_interest_item_address(target_type,
1655 					offsetof(struct host_interest_s,
1656 							hi_clock_info));
1657 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1658 						&flag2_value);
1659 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1660 				  flag2_value);
1661 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1662 			qdf_print("\n INT Val %x  Address %x", intval,
1663 				  flag2_value + 4);
1664 			hif_diag_write_access(hif_hdl, flag2_value + 4,
1665 					      intval);
1666 		}
1667 	}
1668 }
1669 
1670 #else
1671 
1672 static void hif_set_hia_extnd(struct hif_softc *scn)
1673 {
1674 }
1675 
1676 #endif
1677 
1678 /**
1679  * hif_set_hia() - fill out the host interest area
1680  * @scn: hif context
1681  *
1682  * This is replaced by hif_wlan_enable for integrated targets.
1683  * This fills out the host interest area.  The firmware will
1684  * process these memory addresses when it is first brought out
1685  * of reset.
1686  *
1687  * Return: 0 for success.
1688  */
1689 static int hif_set_hia(struct hif_softc *scn)
1690 {
1691 	QDF_STATUS rv;
1692 	uint32_t interconnect_targ_addr = 0;
1693 	uint32_t pcie_state_targ_addr = 0;
1694 	uint32_t pipe_cfg_targ_addr = 0;
1695 	uint32_t svc_to_pipe_map = 0;
1696 	uint32_t pcie_config_flags = 0;
1697 	uint32_t flag2_value = 0;
1698 	uint32_t flag2_targ_addr = 0;
1699 #ifdef QCA_WIFI_3_0
1700 	uint32_t host_interest_area = 0;
1701 	uint8_t i;
1702 #else
1703 	uint32_t ealloc_value = 0;
1704 	uint32_t ealloc_targ_addr = 0;
1705 	uint8_t banks_switched = 1;
1706 	uint32_t chip_id;
1707 #endif
1708 	uint32_t pipe_cfg_addr;
1709 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1710 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1711 	uint32_t target_type = tgt_info->target_type;
1712 	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
1713 	static struct CE_pipe_config *target_ce_config;
1714 	struct service_to_pipe *target_service_to_ce_map;
1715 
1716 	HIF_TRACE("%s: E", __func__);
1717 
1718 	hif_get_target_ce_config(scn,
1719 				 &target_ce_config, &target_ce_config_sz,
1720 				 &target_service_to_ce_map,
1721 				 &target_service_to_ce_map_sz,
1722 				 NULL, NULL);
1723 
1724 	if (ADRASTEA_BU)
1725 		return QDF_STATUS_SUCCESS;
1726 
1727 #ifdef QCA_WIFI_3_0
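	/* Poll SCRATCH_0 until the target publishes the host interest
	 * area address with bit 0 set as a valid flag; bit 0 is then
	 * masked off to recover the address (the 0x113014 write below is
	 * kept as-is, presumably a handshake acknowledgement).
	 */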
1728 	i = 0;
1729 	while (i < HIF_HIA_MAX_POLL_LOOP) {
1730 		host_interest_area = hif_read32_mb(scn, scn->mem +
1731 						A_SOC_CORE_SCRATCH_0_ADDRESS);
1732 		if ((host_interest_area & 0x01) == 0) {
1733 			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1734 			host_interest_area = 0;
1735 			i++;
1736 			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1737 				HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1738 		} else {
1739 			host_interest_area &= (~0x01);
1740 			hif_write32_mb(scn, scn->mem + 0x113014, 0);
1741 			break;
1742 		}
1743 	}
1744 
1745 	if (i >= HIF_HIA_MAX_POLL_LOOP) {
1746 		HIF_ERROR("%s: hia polling timeout", __func__);
1747 		return -EIO;
1748 	}
1749 
1750 	if (host_interest_area == 0) {
1751 		HIF_ERROR("%s: host_interest_area = 0", __func__);
1752 		return -EIO;
1753 	}
1754 
1755 	interconnect_targ_addr = host_interest_area +
1756 			offsetof(struct host_interest_area_t,
1757 			hi_interconnect_state);
1758 
1759 	flag2_targ_addr = host_interest_area +
1760 			offsetof(struct host_interest_area_t, hi_option_flag2);
1761 
1762 #else
1763 	interconnect_targ_addr = hif_hia_item_address(target_type,
1764 		offsetof(struct host_interest_s, hi_interconnect_state));
1765 	ealloc_targ_addr = hif_hia_item_address(target_type,
1766 		offsetof(struct host_interest_s, hi_early_alloc));
1767 	flag2_targ_addr = hif_hia_item_address(target_type,
1768 		offsetof(struct host_interest_s, hi_option_flag2));
1769 #endif
1770 	/* Supply Target-side CE configuration */
1771 	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1772 			  &pcie_state_targ_addr);
1773 	if (rv != QDF_STATUS_SUCCESS) {
1774 		HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1775 			  __func__, interconnect_targ_addr, rv);
1776 		goto done;
1777 	}
1778 	if (pcie_state_targ_addr == 0) {
1779 		rv = QDF_STATUS_E_FAILURE;
1780 		HIF_ERROR("%s: pcie state addr is 0", __func__);
1781 		goto done;
1782 	}
1783 	pipe_cfg_addr = pcie_state_targ_addr +
1784 			  offsetof(struct pcie_state_s,
1785 			  pipe_cfg_addr);
1786 	rv = hif_diag_read_access(hif_hdl,
1787 			  pipe_cfg_addr,
1788 			  &pipe_cfg_targ_addr);
1789 	if (rv != QDF_STATUS_SUCCESS) {
1790 		HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1791 			__func__, pipe_cfg_addr, rv);
1792 		goto done;
1793 	}
1794 	if (pipe_cfg_targ_addr == 0) {
1795 		rv = QDF_STATUS_E_FAILURE;
1796 		HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1797 		goto done;
1798 	}
1799 
1800 	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1801 			(uint8_t *) target_ce_config,
1802 			target_ce_config_sz);
1803 
1804 	if (rv != QDF_STATUS_SUCCESS) {
1805 		HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1806 		goto done;
1807 	}
1808 
1809 	rv = hif_diag_read_access(hif_hdl,
1810 			  pcie_state_targ_addr +
1811 			  offsetof(struct pcie_state_s,
1812 			   svc_to_pipe_map),
1813 			  &svc_to_pipe_map);
1814 	if (rv != QDF_STATUS_SUCCESS) {
1815 		HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1816 		goto done;
1817 	}
1818 	if (svc_to_pipe_map == 0) {
1819 		rv = QDF_STATUS_E_FAILURE;
1820 		HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1821 		goto done;
1822 	}
1823 
1824 	rv = hif_diag_write_mem(hif_hdl,
1825 			svc_to_pipe_map,
1826 			(uint8_t *) target_service_to_ce_map,
1827 			target_service_to_ce_map_sz);
1828 	if (rv != QDF_STATUS_SUCCESS) {
1829 		HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1830 		goto done;
1831 	}
1832 
1833 	rv = hif_diag_read_access(hif_hdl,
1834 			pcie_state_targ_addr +
1835 			offsetof(struct pcie_state_s,
1836 			config_flags),
1837 			&pcie_config_flags);
1838 	if (rv != QDF_STATUS_SUCCESS) {
1839 		HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1840 		goto done;
1841 	}
1842 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1843 	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1844 #else
1845 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1846 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1847 	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1848 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1849 	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1850 #endif
1851 	rv = hif_diag_write_mem(hif_hdl,
1852 			pcie_state_targ_addr +
1853 			offsetof(struct pcie_state_s,
1854 			config_flags),
1855 			(uint8_t *) &pcie_config_flags,
1856 			sizeof(pcie_config_flags));
1857 	if (rv != QDF_STATUS_SUCCESS) {
1858 		HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1859 		goto done;
1860 	}
1861 
1862 #ifndef QCA_WIFI_3_0
1863 	/* configure early allocation */
1864 	ealloc_targ_addr = hif_hia_item_address(target_type,
1865 						offsetof(
1866 						struct host_interest_s,
1867 						hi_early_alloc));
1868 
1869 	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1870 			&ealloc_value);
1871 	if (rv != QDF_STATUS_SUCCESS) {
1872 		HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1873 		goto done;
1874 	}
1875 
1876 	/* 1 bank is switched to IRAM, except ROME 1.0 */
1877 	ealloc_value |=
1878 		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1879 		 HI_EARLY_ALLOC_MAGIC_MASK);
1880 
1881 	rv = hif_diag_read_access(hif_hdl,
1882 			  CHIP_ID_ADDRESS |
1883 			  RTC_SOC_BASE_ADDRESS, &chip_id);
1884 	if (rv != QDF_STATUS_SUCCESS) {
1885 		HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1886 		goto done;
1887 	}
1888 	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1889 		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1890 		switch (CHIP_ID_REVISION_GET(chip_id)) {
1891 		case 0x2:       /* ROME 1.3 */
1892 			/* 2 banks are switched to IRAM */
1893 			banks_switched = 2;
1894 			break;
1895 		case 0x4:       /* ROME 2.1 */
1896 		case 0x5:       /* ROME 2.2 */
1897 			banks_switched = 6;
1898 			break;
1899 		case 0x8:       /* ROME 3.0 */
1900 		case 0x9:       /* ROME 3.1 */
1901 		case 0xA:       /* ROME 3.2 */
1902 			banks_switched = 9;
1903 			break;
1904 		case 0x0:       /* ROME 1.0 */
1905 		case 0x1:       /* ROME 1.1 */
1906 		default:
1907 			/* 3 banks are switched to IRAM */
1908 			banks_switched = 3;
1909 			break;
1910 		}
1911 	}
1912 
1913 	ealloc_value |=
1914 		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1915 		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1916 
1917 	rv = hif_diag_write_access(hif_hdl,
1918 				ealloc_targ_addr,
1919 				ealloc_value);
1920 	if (rv != QDF_STATUS_SUCCESS) {
1921 		HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1922 		goto done;
1923 	}
1924 #endif
1925 	if ((target_type == TARGET_TYPE_AR900B)
1926 			|| (target_type == TARGET_TYPE_QCA9984)
1927 			|| (target_type == TARGET_TYPE_QCA9888)
1928 			|| (target_type == TARGET_TYPE_AR9888)) {
1929 		hif_set_hia_extnd(scn);
1930 	}
1931 
1932 	/* Tell Target to proceed with initialization */
1933 	flag2_targ_addr = hif_hia_item_address(target_type,
1934 						offsetof(
1935 						struct host_interest_s,
1936 						hi_option_flag2));
1937 
1938 	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1939 			  &flag2_value);
1940 	if (rv != QDF_STATUS_SUCCESS) {
1941 		HIF_ERROR("%s: get option val (%d)", __func__, rv);
1942 		goto done;
1943 	}
1944 
1945 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1946 	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1947 			   flag2_value);
1948 	if (rv != QDF_STATUS_SUCCESS) {
1949 		HIF_ERROR("%s: set option val (%d)", __func__, rv);
1950 		goto done;
1951 	}
1952 
1953 	hif_wake_target_cpu(scn);
1954 
1955 done:
1956 
1957 	return rv;
1958 }
1959 
1960 /**
1961  * hif_pci_bus_configure() - configure the pcie bus
1962  * @hif_sc: pointer to the hif context.
1963  *
1964  * Return: 0 for success. nonzero for failure.
1965  */
1966 int hif_pci_bus_configure(struct hif_softc *hif_sc)
1967 {
1968 	int status = 0;
1969 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1970 	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
1971 
1972 	hif_ce_prepare_config(hif_sc);
1973 
1974 	/* initialize sleep state adjust variables */
1975 	hif_state->sleep_timer_init = true;
1976 	hif_state->keep_awake_count = 0;
1977 	hif_state->fake_sleep = false;
1978 	hif_state->sleep_ticks = 0;
1979 
1980 	qdf_timer_init(NULL, &hif_state->sleep_timer,
1981 			       hif_sleep_entry, (void *)hif_state,
1982 			       QDF_TIMER_TYPE_WAKE_APPS);
1983 	hif_state->sleep_timer_init = true;
1984 
1985 	status = hif_wlan_enable(hif_sc);
1986 	if (status) {
1987 		HIF_ERROR("%s: hif_wlan_enable error = %d",
1988 			  __func__, status);
1989 		goto timer_free;
1990 	}
1991 
1992 	A_TARGET_ACCESS_LIKELY(hif_sc);
1993 
1994 	if ((CONFIG_ATH_PCIE_MAX_PERF ||
1995 	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
1996 	    !ce_srng_based(hif_sc)) {
1997 		/*
1998 		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
1999 		 * prevent sleep when we want to keep firmware always awake
2000 		 * note: when we want to keep firmware always awake,
2001 		 *       hif_target_sleep_state_adjust will point to a dummy
2002 		 *       function, and hif_pci_target_sleep_state_adjust must
2003 		 *       be called instead.
2004 		 * note: bus type check is here because AHB bus is reusing
2005 		 *       hif_pci_bus_configure code.
2006 		 */
2007 		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
2008 			if (hif_pci_target_sleep_state_adjust(hif_sc,
2009 					false, true) < 0) {
2010 				status = -EACCES;
2011 				goto disable_wlan;
2012 			}
2013 		}
2014 	}
2015 
2016 	/* todo: consider replacing this with an srng field */
2017 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
2018 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2)) &&
2019 	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
2020 		hif_sc->per_ce_irq = true;
2021 	}
2022 
2023 	status = hif_config_ce(hif_sc);
2024 	if (status)
2025 		goto disable_wlan;
2026 
2027 	/* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
2028 	if (hif_needs_bmi(hif_osc)) {
2029 		status = hif_set_hia(hif_sc);
2030 		if (status)
2031 			goto unconfig_ce;
2032 
2033 		HIF_INFO_MED("%s: hif_set_hia done", __func__);
2034 
2035 		hif_register_bmi_callbacks(hif_sc);
2036 	}
2037 
2038 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
2039 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2)) &&
2040 	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
2041 		HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
2042 						__func__);
2043 	else {
2044 		status = hif_configure_irq(hif_sc);
2045 		if (status < 0)
2046 			goto unconfig_ce;
2047 	}
2048 
2049 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2050 
2051 	return status;
2052 
2053 unconfig_ce:
2054 	hif_unconfig_ce(hif_sc);
2055 disable_wlan:
2056 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2057 	hif_wlan_disable(hif_sc);
2058 
2059 timer_free:
2060 	qdf_timer_stop(&hif_state->sleep_timer);
2061 	qdf_timer_free(&hif_state->sleep_timer);
2062 	hif_state->sleep_timer_init = false;
2063 
2064 	HIF_ERROR("%s: failed, status = %d", __func__, status);
2065 	return status;
2066 }
2067 
2068 /**
2069  * hif_pci_close(): close the pci bus
2070  *
2071  * Return: n/a
2072  */
2073 void hif_pci_close(struct hif_softc *hif_sc)
2074 {
2075 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
2076 
2077 	hif_pm_runtime_close(hif_pci_sc);
2078 	hif_ce_close(hif_sc);
2079 }
2080 
2081 #define BAR_NUM 0
2082 
2083 static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
2084 				struct pci_dev *pdev,
2085 				const struct pci_device_id *id)
2086 {
2087 	void __iomem *mem;
2088 	int ret = 0;
2089 	uint16_t device_id = 0;
2090 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2091 
2092 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
2093 	if (device_id != id->device)  {
2094 		HIF_ERROR(
2095 		   "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
2096 		   __func__, device_id, id->device);
2097 		/* pci link is down, so returning with error code */
2098 		return -EIO;
2099 	}
2100 
2101 	/* FIXME: temp. commenting out assign_resource
2102 	 * call for dev_attach to work on 2.6.38 kernel
2103 	 */
2104 #if (!defined(__LINUX_ARM_ARCH__))
2105 	if (pci_assign_resource(pdev, BAR_NUM)) {
2106 		HIF_ERROR("%s: pci_assign_resource error", __func__);
2107 		return -EIO;
2108 	}
2109 #endif
2110 	if (pci_enable_device(pdev)) {
2111 		HIF_ERROR("%s: pci_enable_device error",
2112 			   __func__);
2113 		return -EIO;
2114 	}
2115 
2116 	/* Request MMIO resources */
2117 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2118 	if (ret) {
2119 		HIF_ERROR("%s: PCI MMIO reservation error", __func__);
2120 		ret = -EIO;
2121 		goto err_region;
2122 	}
2123 
2124 #ifdef CONFIG_ARM_LPAE
2125 	/* if CONFIG_ARM_LPAE is enabled, we have to set 64 bits mask
2126 	 * for 32 bits device also.
2127 	 */
2128 	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2129 	if (ret) {
2130 		HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
2131 		goto err_dma;
2132 	}
2133 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2134 	if (ret) {
2135 		HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
2136 		goto err_dma;
2137 	}
2138 #else
2139 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2140 	if (ret) {
2141 		HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
2142 		goto err_dma;
2143 	}
2144 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2145 	if (ret) {
2146 		HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
2147 			   __func__);
2148 		goto err_dma;
2149 	}
2150 #endif
2151 
2152 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2153 
2154 	/* Set bus master bit in PCI_COMMAND to enable DMA */
2155 	pci_set_master(pdev);
2156 
2157 	/* Arrange for access to Target SoC registers. */
2158 	mem = pci_iomap(pdev, BAR_NUM, 0);
2159 	if (!mem) {
2160 		HIF_ERROR("%s: PCI iomap error", __func__);
2161 		ret = -EIO;
2162 		goto err_iomap;
2163 	}
2164 
2165 	HIF_INFO("*****BAR is %pK\n", (void *)mem);
2166 
2167 	sc->mem = mem;
2168 
2169 	/* Hawkeye emulation specific change */
2170 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
2171 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
2172 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
2173 		(device_id == RUMIM2M_DEVICE_ID_NODE3)) {
2174 		mem = mem + 0x0c000000;
2175 		sc->mem = mem;
2176 		HIF_INFO("%s: Changing PCI mem base to %pK\n",
2177 			__func__, sc->mem);
2178 	}
2179 
2180 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
2181 	ol_sc->mem = mem;
2182 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
2183 	sc->pci_enabled = true;
2184 	return ret;
2185 
2186 err_iomap:
2187 	pci_clear_master(pdev);
2188 err_dma:
2189 	pci_release_region(pdev, BAR_NUM);
2190 err_region:
2191 	pci_disable_device(pdev);
2192 	return ret;
2193 }
2194 
2195 static int hif_enable_pci_pld(struct hif_pci_softc *sc,
2196 			      struct pci_dev *pdev,
2197 			      const struct pci_device_id *id)
2198 {
2199 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2200 	sc->pci_enabled = true;
2201 	return 0;
2202 }
2203 
2204 
2205 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
2206 {
2207 	pci_disable_msi(sc->pdev);
2208 	pci_iounmap(sc->pdev, sc->mem);
2209 	pci_clear_master(sc->pdev);
2210 	pci_release_region(sc->pdev, BAR_NUM);
2211 	pci_disable_device(sc->pdev);
2212 }
2213 
2214 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
2215 
2216 static void hif_disable_pci(struct hif_pci_softc *sc)
2217 {
2218 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2219 
2220 	if (ol_sc == NULL) {
2221 		HIF_ERROR("%s: ol_sc = NULL", __func__);
2222 		return;
2223 	}
2224 	hif_pci_device_reset(sc);
2225 	sc->hif_pci_deinit(sc);
2226 
2227 	sc->mem = NULL;
2228 	ol_sc->mem = NULL;
2229 }
2230 
2231 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
2232 {
2233 	int ret = 0;
2234 	int targ_awake_limit = 500;
2235 #ifndef QCA_WIFI_3_0
2236 	uint32_t fw_indicator;
2237 #endif
2238 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2239 
2240 	/*
2241 	 * Verify that the Target was started cleanly.
2242 	 * The case where this is most likely is with an AUX-powered
2243 	 * Target and a Host in WoW mode. If the Host crashes,
2244 	 * loses power, or is restarted (without unloading the driver)
2245 	 * then the Target is left (aux) powered and running.  On a
2246 	 * subsequent driver load, the Target is in an unexpected state.
2247 	 * We try to catch that here in order to reset the Target and
2248 	 * retry the probe.
2249 	 */
2250 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2251 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2252 	while (!hif_targ_is_awake(scn, sc->mem)) {
2253 		if (0 == targ_awake_limit) {
2254 			HIF_ERROR("%s: target awake timeout", __func__);
2255 			ret = -EAGAIN;
2256 			goto end;
2257 		}
2258 		qdf_mdelay(1);
2259 		targ_awake_limit--;
2260 	}
2261 
2262 #if PCIE_BAR0_READY_CHECKING
2263 	{
2264 		int wait_limit = 200;
2265 		/* Synchronization point: wait the BAR0 is configured */
2266 		while (wait_limit-- &&
2267 			   !(hif_read32_mb(sc, sc->mem +
2268 					  PCIE_LOCAL_BASE_ADDRESS +
2269 					  PCIE_SOC_RDY_STATUS_ADDRESS)
2270 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
2271 			qdf_mdelay(10);
2272 		}
2273 		if (wait_limit < 0) {
2274 			/* AR6320v1 doesn't support checking of BAR0
2275 			 * configuration, takes one sec to wait BAR0 ready
2276 			 * configuration; wait up to two sec for BAR0 ready
2277 			HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
2278 				    __func__);
2279 		}
2280 	}
2281 #endif
2282 
2283 #ifndef QCA_WIFI_3_0
2284 	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
2285 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2286 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2287 
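	/*
	 * If the firmware already reports itself initialized here, the
	 * target was not reset cleanly; return -EAGAIN so the caller can
	 * reset the target and retry the probe.
	 */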
2288 	if (fw_indicator & FW_IND_INITIALIZED) {
2289 		HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
2290 			   __func__);
2291 		ret = -EAGAIN;
2292 		goto end;
2293 	}
2294 #endif
2295 
2296 end:
2297 	return ret;
2298 }
2299 
2300 static void wlan_tasklet_msi(unsigned long data)
2301 {
2302 	struct hif_tasklet_entry *entry = (struct hif_tasklet_entry *)data;
2303 	struct hif_pci_softc *sc = (struct hif_pci_softc *) entry->hif_handler;
2304 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2305 
2306 	if (scn->hif_init_done == false)
2307 		goto irq_handled;
2308 
2309 	if (qdf_atomic_read(&scn->link_suspended))
2310 		goto irq_handled;
2311 
2312 	qdf_atomic_inc(&scn->active_tasklet_cnt);
2313 
2314 	if (entry->id == HIF_MAX_TASKLET_NUM) {
2315 		/* the last tasklet is for fw IRQ */
2316 		(irqreturn_t)hif_fw_interrupt_handler(sc->irq_event, scn);
2317 		if (scn->target_status == TARGET_STATUS_RESET)
2318 			goto irq_handled;
2319 	} else if (entry->id < scn->ce_count) {
2320 		ce_per_engine_service(scn, entry->id);
2321 	} else {
2322 		HIF_ERROR("%s: ERROR - invalid CE_id = %d",
2323 		       __func__, entry->id);
2324 	}
2325 	return;
2326 
2327 irq_handled:
2328 	qdf_atomic_dec(&scn->active_tasklet_cnt);
2329 
2330 }
2331 
2332 /* deprecated */
2333 static int hif_configure_msi(struct hif_pci_softc *sc)
2334 {
2335 	int ret = 0;
2336 	int num_msi_desired;
2337 	int rv = -1;
2338 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2339 
2340 	HIF_TRACE("%s: E", __func__);
2341 
2342 	num_msi_desired = MSI_NUM_REQUEST; /* Multiple MSI */
2343 	if (num_msi_desired < 1) {
2344 		HIF_ERROR("%s: MSI is not configured", __func__);
2345 		return -EINVAL;
2346 	}
2347 
2348 	if (num_msi_desired > 1) {
2349 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
2350 		rv = pci_enable_msi_range(sc->pdev, num_msi_desired,
2351 						num_msi_desired);
2352 #else
2353 		rv = pci_enable_msi_block(sc->pdev, num_msi_desired);
2354 #endif
2355 	}
2356 	HIF_TRACE("%s: num_msi_desired = %d, available_msi = %d",
2357 		  __func__, num_msi_desired, rv);
2358 
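	/*
	 * Multi-MSI path: the last tasklet entry is reserved for the
	 * firmware interrupt and each copy engine gets its own vector;
	 * otherwise fall back to a single MSI or legacy line interrupts.
	 */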
2359 	if (rv == 0 || rv >= HIF_MAX_TASKLET_NUM) {
2360 		int i;
2361 
2362 		sc->num_msi_intrs = HIF_MAX_TASKLET_NUM;
2363 		sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].hif_handler =
2364 			(void *)sc;
2365 		sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].id =
2366 			HIF_MAX_TASKLET_NUM;
2367 		tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
2368 			 (unsigned long)&sc->tasklet_entries[
2369 			 HIF_MAX_TASKLET_NUM-1]);
2370 		ret = request_irq(sc->pdev->irq + MSI_ASSIGN_FW,
2371 				  hif_pci_msi_fw_handler,
2372 				  IRQF_SHARED, "wlan_pci", sc);
2373 		if (ret) {
2374 			HIF_ERROR("%s: request_irq failed", __func__);
2375 			goto err_intr;
2376 		}
2377 		for (i = 0; i <= scn->ce_count; i++) {
2378 			sc->tasklet_entries[i].hif_handler = (void *)sc;
2379 			sc->tasklet_entries[i].id = i;
2380 			tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
2381 				 (unsigned long)&sc->tasklet_entries[i]);
2382 			ret = request_irq((sc->pdev->irq +
2383 					   i + MSI_ASSIGN_CE_INITIAL),
2384 					  ce_per_engine_handler, IRQF_SHARED,
2385 					  "wlan_pci", sc);
2386 			if (ret) {
2387 				HIF_ERROR("%s: request_irq failed", __func__);
2388 				goto err_intr;
2389 			}
2390 		}
2391 	} else if (rv > 0) {
2392 		HIF_TRACE("%s: use single msi", __func__);
2393 
2394 		ret = pci_enable_msi(sc->pdev);
2395 		if (ret < 0) {
2396 			HIF_ERROR("%s: single MSI allocation failed",
2397 				  __func__);
2398 			/* Try for legacy PCI line interrupts */
2399 			sc->num_msi_intrs = 0;
2400 		} else {
2401 			sc->num_msi_intrs = 1;
2402 			tasklet_init(&sc->intr_tq,
2403 				wlan_tasklet, (unsigned long)sc);
2404 			ret = request_irq(sc->pdev->irq,
2405 					 hif_pci_legacy_ce_interrupt_handler,
2406 					  IRQF_SHARED, "wlan_pci", sc);
2407 			if (ret) {
2408 				HIF_ERROR("%s: request_irq failed", __func__);
2409 				goto err_intr;
2410 			}
2411 		}
2412 	} else {
2413 		sc->num_msi_intrs = 0;
2414 		ret = -EIO;
2415 		HIF_ERROR("%s: do not support MSI, rv = %d", __func__, rv);
2416 	}
2417 	ret = pci_enable_msi(sc->pdev);
2418 	if (ret < 0) {
2419 		HIF_ERROR("%s: single MSI interrupt allocation failed",
2420 			  __func__);
2421 		/* Try for legacy PCI line interrupts */
2422 		sc->num_msi_intrs = 0;
2423 	} else {
2424 		sc->num_msi_intrs = 1;
2425 		tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2426 		ret = request_irq(sc->pdev->irq,
2427 				  hif_pci_legacy_ce_interrupt_handler,
2428 				  IRQF_SHARED, "wlan_pci", sc);
2429 		if (ret) {
2430 			HIF_ERROR("%s: request_irq failed", __func__);
2431 			goto err_intr;
2432 		}
2433 	}
2434 
2435 	if (ret == 0) {
2436 		hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2437 			  PCIE_INTR_ENABLE_ADDRESS),
2438 			  HOST_GROUP0_MASK);
2439 		hif_write32_mb(sc, sc->mem +
2440 			  PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
2441 			  PCIE_SOC_WAKE_RESET);
2442 	}
2443 	HIF_TRACE("%s: X, ret = %d", __func__, ret);
2444 
2445 	return ret;
2446 
2447 err_intr:
2448 	if (sc->num_msi_intrs >= 1)
2449 		pci_disable_msi(sc->pdev);
2450 	return ret;
2451 }
2452 
2453 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2454 {
2455 	int ret = 0;
2456 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2457 	uint32_t target_type = scn->target_info.target_type;
2458 
2459 	HIF_TRACE("%s: E", __func__);
2460 
2461 	/* MSI not supported or MSI IRQ setup failed; use legacy line interrupt */
2462 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2463 	ret = request_irq(sc->pdev->irq,
2464 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2465 			  "wlan_pci", sc);
2466 	if (ret) {
2467 		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
2468 		goto end;
2469 	}
2470 	scn->wake_irq = sc->pdev->irq;
2471 	/* Use sc->irq instead of sc->pdev->irq;
2472 	 * platform_device pdev doesn't have an irq field
2473 	 */
2474 	sc->irq = sc->pdev->irq;
2475 	/* Use Legacy PCI Interrupts */
2476 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2477 		  PCIE_INTR_ENABLE_ADDRESS),
2478 		  HOST_GROUP0_MASK);
2479 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2480 			       PCIE_INTR_ENABLE_ADDRESS));
2481 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2482 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2483 
2484 	if ((target_type == TARGET_TYPE_IPQ4019) ||
2485 			(target_type == TARGET_TYPE_AR900B)  ||
2486 			(target_type == TARGET_TYPE_QCA9984) ||
2487 			(target_type == TARGET_TYPE_AR9888) ||
2488 			(target_type == TARGET_TYPE_QCA9888) ||
2489 			(target_type == TARGET_TYPE_AR6320V1) ||
2490 			(target_type == TARGET_TYPE_AR6320V2) ||
2491 			(target_type == TARGET_TYPE_AR6320V3)) {
2492 		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2493 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2494 	}
2495 end:
2496 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2497 			  "%s: X, ret = %d", __func__, ret);
2498 	return ret;
2499 }
2500 
2501 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
2502 {
2503 	int ret;
2504 	int ce_id, irq;
2505 	uint32_t msi_data_start;
2506 	uint32_t msi_data_count;
2507 	uint32_t msi_irq_start;
2508 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2509 
2510 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2511 					    &msi_data_count, &msi_data_start,
2512 					    &msi_irq_start);
2513 	if (ret)
2514 		return ret;
2515 
2516 	/* needs to match the ce_id -> irq data mapping
2517 	 * used in the srng parameter configuration
2518 	 */
2519 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2520 		unsigned int msi_data;
2521 
2522 		if (!ce_sc->tasklets[ce_id].inited)
2523 			continue;
2524 
2525 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
2526 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2527 
2528 		HIF_INFO("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
2529 			  ce_id, msi_data, irq);
2530 
2531 		free_irq(irq, &ce_sc->tasklets[ce_id]);
2532 	}
2533 
2534 	return ret;
2535 }
2536 
2537 static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2538 {
2539 	int i, j, irq;
2540 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2541 	struct hif_exec_context *hif_ext_group;
2542 
2543 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2544 		hif_ext_group = hif_state->hif_ext_group[i];
2545 		if (hif_ext_group->irq_requested) {
2546 			hif_ext_group->irq_requested = false;
2547 			for (j = 0; j < hif_ext_group->numirq; j++) {
2548 				irq = hif_ext_group->os_irq[j];
2549 				free_irq(irq, hif_ext_group);
2550 			}
2551 			hif_ext_group->numirq = 0;
2552 		}
2553 	}
2554 }
2555 
2556 /**
2557  * hif_pci_nointrs(): disable IRQ
2558  *
2559  * This function stops interrupt(s)
2560  *
2561  * @scn: struct hif_softc
2562  *
2563  * Return: none
2564  */
2565 void hif_pci_nointrs(struct hif_softc *scn)
2566 {
2567 	int i, ret;
2568 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2569 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2570 
2571 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2572 
2573 	if (scn->request_irq_done == false)
2574 		return;
2575 
2576 	hif_pci_deconfigure_grp_irq(scn);
2577 
2578 	ret = hif_ce_srng_msi_free_irq(scn);
2579 	if (ret != -EINVAL) {
2580 		/* ce irqs freed in hif_ce_srng_msi_free_irq */
2581 
2582 		if (scn->wake_irq)
2583 			free_irq(scn->wake_irq, scn);
2584 		scn->wake_irq = 0;
2585 	} else if (sc->num_msi_intrs > 0) {
2586 		/* MSI interrupt(s) */
2587 		for (i = 0; i < sc->num_msi_intrs; i++)
2588 			free_irq(sc->irq + i, sc);
2589 		sc->num_msi_intrs = 0;
2590 	} else {
2591 		/* Legacy PCI line interrupt
2592 		 * Use sc->irq instead of sc->pdev-irq
2593 		 * Use sc->irq instead of sc->pdev->irq;
2594 		 */
2595 		free_irq(sc->irq, sc);
2596 	}
2597 	scn->request_irq_done = false;
2598 }
2599 
2600 /**
2601  * hif_pci_disable_bus(): disable the pci bus
2602  *
2603  * This function disables the bus
2604  *
2605  * @scn: hif context
2606  *
2607  * Return: none
2608  */
2609 void hif_pci_disable_bus(struct hif_softc *scn)
2610 {
2611 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2612 	struct pci_dev *pdev;
2613 	void __iomem *mem;
2614 	struct hif_target_info *tgt_info = &scn->target_info;
2615 
2616 	/* Attach did not succeed, all resources have been
2617 	 * freed in error handler
2618 	 */
2619 	if (!sc)
2620 		return;
2621 
2622 	pdev = sc->pdev;
2623 	if (ADRASTEA_BU) {
2624 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2625 
2626 		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2627 		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
2628 			       HOST_GROUP0_MASK);
2629 	}
2630 
2631 #if defined(CPU_WARM_RESET_WAR)
2632 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2633 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2634 	 * verified for AR9888_REV1
2635 	 */
2636 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2637 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2638 		hif_pci_device_warm_reset(sc);
2639 	else
2640 		hif_pci_device_reset(sc);
2641 #else
2642 	hif_pci_device_reset(sc);
2643 #endif
2644 	mem = (void __iomem *)sc->mem;
2645 	if (mem) {
2646 		hif_dump_pipe_debug_count(scn);
2647 		if (scn->athdiag_procfs_inited) {
2648 			athdiag_procfs_remove();
2649 			scn->athdiag_procfs_inited = false;
2650 		}
2651 		sc->hif_pci_deinit(sc);
2652 		scn->mem = NULL;
2653 	}
2654 	HIF_INFO("%s: X", __func__);
2655 }
2656 
2657 #define OL_ATH_PCI_PM_CONTROL 0x44
2658 
2659 #ifdef FEATURE_RUNTIME_PM
2660 /**
2661  * hif_runtime_prevent_linkdown() - prevent or allow runtime suspend from occurring
2662  * @scn: hif context
2663  * @flag: prevent linkdown if true otherwise allow
2664  *
2665  * this api should only be called as part of bus prevent linkdown
2666  */
2667 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2668 {
2669 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2670 
2671 	if (flag)
2672 		qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
2673 	else
2674 		qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
2675 }
2676 #else
2677 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2678 {
2679 }
2680 #endif
2681 
2682 #if defined(CONFIG_PCI_MSM)
2683 /**
2684  * hif_pci_prevent_linkdown(): prevent or allow pcie linkdown
2685  * @flag: true prevents linkdown, false allows
2686  *
2687  * Calls into the platform driver to vote against taking down the
2688  * pcie link.
2689  *
2690  * Return: n/a
2691  */
2692 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2693 {
2694 	int errno;
2695 
2696 	HIF_DBG("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2697 	hif_runtime_prevent_linkdown(scn, flag);
2698 
2699 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2700 	if (errno)
2701 		HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
2702 			  __func__, errno);
2703 }
2704 #else
2705 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2706 {
2707 	HIF_DBG("wlan: %s pcie power collapse",
2708 			(flag ? "disable" : "enable"));
2709 	hif_runtime_prevent_linkdown(scn, flag);
2710 }
2711 #endif
2712 
2713 static int hif_mark_wake_irq_wakeable(struct hif_softc *scn)
2714 {
2715 	int errno;
2716 
2717 	errno = enable_irq_wake(scn->wake_irq);
2718 	if (errno) {
2719 		HIF_ERROR("%s: Failed to mark wake IRQ: %d", __func__, errno);
2720 		return errno;
2721 	}
2722 
2723 	return 0;
2724 }
2725 
2726 /**
2727  * hif_pci_bus_suspend(): prepare hif for suspend
2728  *
2729  * Enables pci bus wake irq based on link suspend voting.
2730  *
2731  * Return: 0 for success and non-zero error code for failure
2732  */
2733 int hif_pci_bus_suspend(struct hif_softc *scn)
2734 {
2735 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2736 		return 0;
2737 
2738 	/* pci link is staying up; enable wake irq */
2739 	return hif_mark_wake_irq_wakeable(scn);
2740 }
2741 
2742 /**
2743  * __hif_check_link_status() - check whether the PCIe link is active
2744  * @scn: HIF Context
2745  *
2746  * API reads the PCIe config space to verify if PCIe link training is
2747  * successful or not.
2748  *
2749  * Return: Success/Failure
2750  */
2751 static int __hif_check_link_status(struct hif_softc *scn)
2752 {
2753 	uint16_t dev_id = 0;
2754 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2755 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2756 
2757 	if (!sc) {
2758 		HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
2759 		return -EINVAL;
2760 	}
2761 
2762 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2763 
2764 	if (dev_id == sc->devid)
2765 		return 0;
2766 
2767 	HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2768 	       __func__, dev_id);
2769 
2770 	scn->recovery = true;
2771 
2772 	if (cbk && cbk->set_recovery_in_progress)
2773 		cbk->set_recovery_in_progress(cbk->context, true);
2774 	else
2775 		HIF_ERROR("%s: Driver Global Recovery is not set", __func__);
2776 
2777 	pld_is_pci_link_down(sc->dev);
2778 	return -EACCES;
2779 }
2780 
2781 static int hif_unmark_wake_irq_wakeable(struct hif_softc *scn)
2782 {
2783 	int errno;
2784 
2785 	errno = disable_irq_wake(scn->wake_irq);
2786 	if (errno) {
2787 		HIF_ERROR("%s: Failed to unmark wake IRQ: %d", __func__, errno);
2788 		return errno;
2789 	}
2790 
2791 	return 0;
2792 }
2793 
2794 /**
2795  * hif_pci_bus_resume(): prepare hif for resume
2796  *
2797  * Disables pci bus wake irq based on link suspend voting.
2798  *
2799  * Return: 0 for success and non-zero error code for failure
2800  */
2801 int hif_pci_bus_resume(struct hif_softc *scn)
2802 {
2803 	int ret;
2804 
2805 	ret = __hif_check_link_status(scn);
2806 	if (ret)
2807 		return ret;
2808 
2809 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2810 		return 0;
2811 
2812 	/* pci link is up; disable wake irq */
2813 	return hif_unmark_wake_irq_wakeable(scn);
2814 }
2815 
2816 /**
2817  * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2818  * @scn: hif context
2819  *
2820  * Ensure that if we received the wakeup message before the irq
2821  * was disabled that the message is processed before suspending.
2822  *
2823  * Return: -EBUSY if we fail to flush the tasklets.
2824  */
2825 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2826 {
2827 	if (hif_drain_tasklets(scn) != 0)
2828 		return -EBUSY;
2829 
2830 	/* Stop the HIF Sleep Timer */
2831 	hif_cancel_deferred_target_sleep(scn);
2832 
2833 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2834 		qdf_atomic_set(&scn->link_suspended, 1);
2835 
2836 	return 0;
2837 }
2838 
2839 /**
2840  * hif_pci_bus_resume_noirq() - clear the pcie link suspended flag
2841  * @scn: hif context
2842  *
2843  * Mark the pcie link as no longer suspended now that the bus has
2844  * resumed and interrupts can be serviced again.
2845  *
2846  * Return: 0 (always succeeds)
2847  */
2848 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2849 {
2850 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2851 		qdf_atomic_set(&scn->link_suspended, 0);
2852 
2853 	return 0;
2854 }
2855 
2856 #ifdef FEATURE_RUNTIME_PM
2857 /**
2858  * __hif_runtime_pm_set_state(): utility function
2859  * @state: state to set
2860  *
2861  * indexes into the runtime pm state and sets it.
2862  */
2863 static void __hif_runtime_pm_set_state(struct hif_softc *scn,
2864 				enum hif_pm_runtime_state state)
2865 {
2866 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2867 
2868 	if (NULL == sc) {
2869 		HIF_ERROR("%s: HIF_CTX not initialized",
2870 		       __func__);
2871 		return;
2872 	}
2873 
2874 	qdf_atomic_set(&sc->pm_state, state);
2875 }
2876 
2877 /**
2878  * hif_runtime_pm_set_state_inprogress(): adjust runtime pm state
2879  *
2880  * Notify hif that a runtime pm operation has started
2881  */
2882 static void hif_runtime_pm_set_state_inprogress(struct hif_softc *scn)
2883 {
2884 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_INPROGRESS);
2885 }
2886 
2887 /**
2888  * hif_runtime_pm_set_state_on():  adjust runtime pm state
2889  *
2890  * Notify hif that the runtime pm state should be on
2891  */
2892 static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
2893 {
2894 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
2895 }
2896 
2897 /**
2898  * hif_runtime_pm_set_state_suspended():  adjust runtime pm state
2899  *
2900  * Notify hif that a runtime suspend attempt has been completed successfully
2901  */
2902 static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
2903 {
2904 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
2905 }
2906 
2907 /**
2908  * hif_log_runtime_suspend_success() - log a successful runtime suspend
2909  */
2910 static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
2911 {
2912 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2913 
2914 	if (sc == NULL)
2915 		return;
2916 
2917 	sc->pm_stats.suspended++;
2918 	sc->pm_stats.suspend_jiffies = jiffies;
2919 }
2920 
2921 /**
2922  * hif_log_runtime_suspend_failure() - log a failed runtime suspend
2923  *
2924  * log a failed runtime suspend; the caller marks last busy to
2925  * prevent an immediate runtime suspend retry
2926  */
2927 static void hif_log_runtime_suspend_failure(void *hif_ctx)
2928 {
2929 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2930 
2931 	if (sc == NULL)
2932 		return;
2933 
2934 	sc->pm_stats.suspend_err++;
2935 }
2936 
2937 /**
2938  * hif_log_runtime_resume_success() - log a successful runtime resume
2939  *
2940  * log a successful runtime resume
2941  * the caller marks last busy to prevent an immediate runtime suspend
2942  */
2943 static void hif_log_runtime_resume_success(void *hif_ctx)
2944 {
2945 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2946 
2947 	if (sc == NULL)
2948 		return;
2949 
2950 	sc->pm_stats.resumed++;
2951 }
2952 
2953 /**
2954  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
2955  *
2956  * Record the failure.
2957  * mark last busy to delay a retry.
2958  * adjust the runtime_pm state.
2959  */
2960 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
2961 {
2962 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2963 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2964 
2965 	hif_log_runtime_suspend_failure(hif_ctx);
2966 	if (hif_pci_sc != NULL)
2967 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2968 	hif_runtime_pm_set_state_on(scn);
2969 }
2970 
2971 /**
2972  * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
2973  *
2974  * Makes sure that the pci link will be taken down by the suspend operation.
2975  * If the hif layer is configured to leave the bus on, runtime suspend will
2976  * not save any power.
2977  *
2978  * Set the runtime suspend state to in progress.
2979  *
2980  * return -EINVAL if the bus won't go down.  otherwise return 0
2981  */
2982 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
2983 {
2984 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2985 
2986 	if (!hif_can_suspend_link(hif_ctx)) {
2987 		HIF_ERROR("Runtime PM not supported for link up suspend");
2988 		return -EINVAL;
2989 	}
2990 
2991 	hif_runtime_pm_set_state_inprogress(scn);
2992 	return 0;
2993 }
2994 
2995 /**
2996  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
2997  *
2998  * Record the success.
2999  * adjust the runtime_pm state
3000  */
3001 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
3002 {
3003 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3004 
3005 	hif_runtime_pm_set_state_suspended(scn);
3006 	hif_log_runtime_suspend_success(scn);
3007 }
3008 
3009 /**
3010  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
3011  *
3012  * update the runtime pm state.
3013  */
3014 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
3015 {
3016 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3017 
3018 	hif_runtime_pm_set_state_inprogress(scn);
3019 }
3020 
3021 /**
3022  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
3023  *
3024  * record the success.
3025  * adjust the runtime_pm state
3026  */
3027 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
3028 {
3029 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
3030 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3031 
3032 	hif_log_runtime_resume_success(hif_ctx);
3033 	if (hif_pci_sc != NULL)
3034 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
3035 	hif_runtime_pm_set_state_on(scn);
3036 }
3037 
3038 /**
3039  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
3040  *
3041  * Return: 0 for success and non-zero error code for failure
3042  */
3043 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
3044 {
3045 	int errno;
3046 
3047 	errno = hif_bus_suspend(hif_ctx);
3048 	if (errno) {
3049 		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
3050 		return errno;
3051 	}
3052 
3053 	errno = hif_apps_irqs_disable(hif_ctx);
3054 	if (errno) {
3055 		HIF_ERROR("%s: failed disable irqs: %d", __func__, errno);
3056 		goto bus_resume;
3057 	}
3058 
3059 	errno = hif_bus_suspend_noirq(hif_ctx);
3060 	if (errno) {
3061 		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
3062 		goto irqs_enable;
3063 	}
3064 
3065 	/* link should always be down; skip enable wake irq */
3066 
3067 	return 0;
3068 
3069 irqs_enable:
3070 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
3071 
3072 bus_resume:
3073 	QDF_BUG(!hif_bus_resume(hif_ctx));
3074 
3075 	return errno;
3076 }
3077 
3078 /**
3079  * hif_fastpath_resume() - resume fastpath for runtimepm
3080  *
3081  * ensure that the fastpath write index register is up to date
3082  * since runtime pm may cause ce_send_fast to skip the register
3083  * write.
3084  *
3085  * fastpath only applicable to legacy copy engine
3086  */
3087 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
3088 {
3089 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3090 	struct CE_state *ce_state;
3091 
3092 	if (!scn)
3093 		return;
3094 
3095 	if (scn->fastpath_mode_on) {
3096 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
3097 			return;
3098 
3099 		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
3100 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
3101 
3102 		/* war_ce_src_ring_write_idx_set */
3103 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
3104 				ce_state->src_ring->write_index);
3105 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
3106 		Q_TARGET_ACCESS_END(scn);
3107 	}
3108 }
3109 
3110 /**
3111  * hif_runtime_resume() - do the bus resume part of a runtime resume
3112  *
3113  *  Return: 0 for success and non-zero error code for failure
3114  */
3115 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
3116 {
3117 	/* link should always be down; skip disable wake irq */
3118 
3119 	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
3120 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
3121 	QDF_BUG(!hif_bus_resume(hif_ctx));
3122 	return 0;
3123 }
3124 #endif /* #ifdef FEATURE_RUNTIME_PM */
3125 
3126 #if CONFIG_PCIE_64BIT_MSI
3127 static void hif_free_msi_ctx(struct hif_softc *scn)
3128 {
3129 	struct hif_pci_softc *sc = scn->hif_sc;
3130 	struct hif_msi_info *info = &sc->msi_info;
3131 	struct device *dev = scn->qdf_dev->dev;
3132 
3133 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
3134 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
3135 	info->magic = NULL;
3136 	info->magic_dma = 0;
3137 }
3138 #else
3139 static void hif_free_msi_ctx(struct hif_softc *scn)
3140 {
3141 }
3142 #endif
3143 
3144 void hif_pci_disable_isr(struct hif_softc *scn)
3145 {
3146 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3147 
3148 	hif_exec_kill(&scn->osc);
3149 	hif_nointrs(scn);
3150 	hif_free_msi_ctx(scn);
3151 	/* Cancel the pending tasklet */
3152 	ce_tasklet_kill(scn);
3153 	tasklet_kill(&sc->intr_tq);
3154 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
3155 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
3156 }
3157 
3158 /* Function to reset SoC */
3159 void hif_pci_reset_soc(struct hif_softc *hif_sc)
3160 {
3161 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
3162 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
3163 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
3164 
3165 #if defined(CPU_WARM_RESET_WAR)
3166 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
3167 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
3168 	 * verified for AR9888_REV1
3169 	 */
3170 	if (tgt_info->target_version == AR9888_REV2_VERSION)
3171 		hif_pci_device_warm_reset(sc);
3172 	else
3173 		hif_pci_device_reset(sc);
3174 #else
3175 	hif_pci_device_reset(sc);
3176 #endif
3177 }
3178 
3179 #ifdef CONFIG_PCI_MSM
3180 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
3181 {
3182 	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
3183 	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
3184 }
3185 #else
3186 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}
3187 #endif
3188 
3189 /**
3190  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
3191  * @sc: HIF PCIe Context
3192  *
3193  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
3194  *
3195  * Return: -EACCES to indicate failure to the caller
3196  */
3197 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
3198 {
3199 	uint16_t val = 0;
3200 	uint32_t bar = 0;
3201 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
3202 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
3203 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
3204 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
3205 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
3206 	A_target_id_t pci_addr = scn->mem;
3207 
3208 	HIF_ERROR("%s: keep_awake_count = %d",
3209 			__func__, hif_state->keep_awake_count);
3210 
3211 	pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
3212 
3213 	HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
3214 
3215 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
3216 
3217 	HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
3218 
3219 	pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
3220 
3221 	HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
3222 
3223 	pci_read_config_word(sc->pdev, PCI_STATUS, &val);
3224 
3225 	HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
3226 
3227 	pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
3228 
3229 	HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
3230 
3231 	HIF_ERROR("%s: SOC_WAKE_ADDR 0x%08x", __func__,
3232 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3233 						PCIE_SOC_WAKE_ADDRESS));
3234 
3235 	HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
3236 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3237 							RTC_STATE_ADDRESS));
3238 
3239 	HIF_ERROR("%s:error, wakeup target", __func__);
3240 	hif_msm_pcie_debug_info(sc);
3241 
3242 	if (!cfg->enable_self_recovery)
3243 		QDF_BUG(0);
3244 
3245 	scn->recovery = true;
3246 
3247 	if (cbk->set_recovery_in_progress)
3248 		cbk->set_recovery_in_progress(cbk->context, true);
3249 
3250 	pld_is_pci_link_down(sc->dev);
3251 	return -EACCES;
3252 }
3253 
3254 /*
3255  * For now, we use simple on-demand sleep/wake.
3256  * Some possible improvements:
3257  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
3258  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
3259  *   Careful, though, these functions may be used by
3260  *  interrupt handlers ("atomic")
3261  *  -Don't use host_reg_table for this code; instead use values directly
3262  *  -Use a separate timer to track activity and allow Target to sleep only
3263  *   if it hasn't done anything for a while; may even want to delay some
3264  *   processing for a short while in order to "batch" (e.g.) transmit
3265  *   requests with completion processing into "windows of up time".  Costs
3266  *   some performance, but improves power utilization.
3267  *  -On some platforms, it might be possible to eliminate explicit
3268  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
3269  *   recover from the failure by forcing the Target awake.
3270  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
3271  *   overhead in some cases. Perhaps this makes more sense when
3272  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
3273  *   disabled.
3274  *  -It is possible to compile this code out and simply force the Target
3275  *   to remain awake.  That would yield optimal performance at the cost of
3276  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
3277  *
3278  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
3279  */
3280 /**
3281  * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
3282  * @scn: hif_softc pointer.
3283  * @sleep_ok: allow the target to go to sleep when true
3284  * @wait_for_it: when waking, wait until the target reports awake
3285  *
3286  * Adjust the target sleep state, forcing the SoC awake or allowing
3287  * it to sleep based on keep_awake_count.
3288  * Return: 0 on success, negative error on failure
3289  */
3290 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
3291 			      bool sleep_ok, bool wait_for_it)
3292 {
3293 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3294 	A_target_id_t pci_addr = scn->mem;
3295 	static int max_delay;
3296 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3297 	static int debug;
3298 	if (scn->recovery)
3299 		return -EACCES;
3300 
3301 	if (qdf_atomic_read(&scn->link_suspended)) {
3302 		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
3303 		debug = true;
3304 		QDF_ASSERT(0);
3305 		return -EACCES;
3306 	}
3307 
3308 	if (debug) {
3309 		wait_for_it = true;
3310 		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
3311 				__func__);
3312 		QDF_ASSERT(0);
3313 	}
3314 
3315 	if (sleep_ok) {
3316 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3317 		hif_state->keep_awake_count--;
3318 		if (hif_state->keep_awake_count == 0) {
3319 			/* Allow sleep */
3320 			hif_state->verified_awake = false;
3321 			hif_state->sleep_ticks = qdf_system_ticks();
3322 		}
3323 		if (hif_state->fake_sleep == false) {
3324 			/* Set the Fake Sleep */
3325 			hif_state->fake_sleep = true;
3326 
3327 			/* Start the Sleep Timer */
3328 			qdf_timer_stop(&hif_state->sleep_timer);
3329 			qdf_timer_start(&hif_state->sleep_timer,
3330 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
3331 		}
3332 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3333 	} else {
3334 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3335 
3336 		if (hif_state->fake_sleep) {
3337 			hif_state->verified_awake = true;
3338 		} else {
3339 			if (hif_state->keep_awake_count == 0) {
3340 				/* Force AWAKE */
3341 				hif_write32_mb(sc, pci_addr +
3342 					      PCIE_LOCAL_BASE_ADDRESS +
3343 					      PCIE_SOC_WAKE_ADDRESS,
3344 					      PCIE_SOC_WAKE_V_MASK);
3345 			}
3346 		}
3347 		hif_state->keep_awake_count++;
3348 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3349 
3350 		if (wait_for_it && !hif_state->verified_awake) {
3351 #define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
3352 			int tot_delay = 0;
3353 			int curr_delay = 5;
3354 
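			/*
			 * Busy-wait for the target to report awake, ramping
			 * the per-iteration delay from 5 up to 50 before
			 * giving up after PCIE_SLEEP_ADJUST_TIMEOUT.
			 */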
3355 			for (;; ) {
3356 				if (hif_targ_is_awake(scn, pci_addr)) {
3357 					hif_state->verified_awake = true;
3358 					break;
3359 				}
3360 				if (!hif_pci_targ_is_present(scn, pci_addr))
3361 					break;
3362 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
3363 					return hif_log_soc_wakeup_timeout(sc);
3364 
3365 				OS_DELAY(curr_delay);
3366 				tot_delay += curr_delay;
3367 
3368 				if (curr_delay < 50)
3369 					curr_delay += 5;
3370 			}
3371 
3372 			/*
3373 			 * NB: If Target has to come out of Deep Sleep,
3374 			 * this may take a few Msecs. Typically, though
3375 			 * this delay should be <30us.
3376 			 */
3377 			if (tot_delay > max_delay)
3378 				max_delay = tot_delay;
3379 		}
3380 	}
3381 
3382 	if (debug && hif_state->verified_awake) {
3383 		debug = 0;
3384 		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
3385 			__func__,
3386 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3387 				PCIE_INTR_ENABLE_ADDRESS),
3388 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3389 				PCIE_INTR_CAUSE_ADDRESS),
3390 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3391 				CPU_INTR_ADDRESS),
3392 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3393 				PCIE_INTR_CLR_ADDRESS),
3394 			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
3395 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
3396 	}
3397 
3398 	return 0;
3399 }
3400 
3401 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
3402 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
3403 {
3404 	uint32_t value;
3405 	void *addr;
3406 
3407 	addr = scn->mem + offset;
3408 	value = hif_read32_mb(scn, addr);
3409 
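	/* record the access in the circular pcie_access_log for debug dumps */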
3410 	{
3411 		unsigned long irq_flags;
3412 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3413 
3414 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3415 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3416 		pcie_access_log[idx].is_write = false;
3417 		pcie_access_log[idx].addr = addr;
3418 		pcie_access_log[idx].value = value;
3419 		pcie_access_log_seqnum++;
3420 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3421 	}
3422 
3423 	return value;
3424 }
3425 
3426 void
3427 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
3428 {
3429 	void *addr;
3430 
3431 	addr = scn->mem + (offset);
3432 	hif_write32_mb(scn, addr, value);
3433 
3434 	{
3435 		unsigned long irq_flags;
3436 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3437 
3438 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3439 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3440 		pcie_access_log[idx].is_write = true;
3441 		pcie_access_log[idx].addr = addr;
3442 		pcie_access_log[idx].value = value;
3443 		pcie_access_log_seqnum++;
3444 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3445 	}
3446 }
3447 
3448 /**
3449  * hif_target_dump_access_log() - dump access log
3450  *
3451  * dump access log
3452  *
3453  * Return: n/a
3454  */
3455 void hif_target_dump_access_log(void)
3456 {
3457 	int idx, len, start_idx, cur_idx;
3458 	unsigned long irq_flags;
3459 
3460 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3461 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
3462 		len = PCIE_ACCESS_LOG_NUM;
3463 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3464 	} else {
3465 		len = pcie_access_log_seqnum;
3466 		start_idx = 0;
3467 	}
3468 
3469 	for (idx = 0; idx < len; idx++) {
3470 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
3471 		HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
3472 		       __func__, idx,
3473 		       pcie_access_log[cur_idx].seqnum,
3474 		       pcie_access_log[cur_idx].is_write,
3475 		       pcie_access_log[cur_idx].addr,
3476 		       pcie_access_log[cur_idx].value);
3477 	}
3478 
3479 	pcie_access_log_seqnum = 0;
3480 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3481 }
3482 #endif
3483 
3484 #ifndef HIF_AHB
3485 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
3486 {
3487 	QDF_BUG(0);
3488 	return -EINVAL;
3489 }
3490 
3491 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
3492 {
3493 	QDF_BUG(0);
3494 	return -EINVAL;
3495 }
3496 #endif
3497 
3498 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
3499 {
3500 	struct ce_tasklet_entry *tasklet_entry = context;
3501 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
3502 }
3503 extern const char *ce_name[];
3504 
3505 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3506 {
3507 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3508 
3509 	return pci_scn->ce_msi_irq_num[ce_id];
3510 }
3511 
3512 /* hif_ce_srng_msi_irq_disable() - disable the irq for msi
3513  * @hif_sc: hif context
3514  * @ce_id: which ce to disable copy complete interrupts for
3515  *
3516  * since MSI interrupts are not level based, the system can function
3517  * without disabling these interrupts.  Interrupt mitigation can be
3518  * added here for better system performance.
3519  */
3520 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3521 {
3522 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3523 }
3524 
3525 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3526 {
3527 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3528 }
3529 
3530 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3531 {}
3532 
3533 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3534 {}
3535 
3536 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
3537 {
3538 	int ret;
3539 	int ce_id, irq;
3540 	uint32_t msi_data_start;
3541 	uint32_t msi_data_count;
3542 	uint32_t msi_irq_start;
3543 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3544 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3545 
3546 	/* do wake irq assignment */
3547 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
3548 					  &msi_data_count, &msi_data_start,
3549 					  &msi_irq_start);
3550 	if (ret)
3551 		return ret;
3552 
3553 	scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_irq_start);
3554 	ret = request_irq(scn->wake_irq, hif_wake_interrupt_handler, 0,
3555 			  "wlan_wake_irq", scn);
3556 	if (ret)
3557 		return ret;
3558 
3559 	/* do ce irq assignments */
3560 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3561 					    &msi_data_count, &msi_data_start,
3562 					    &msi_irq_start);
3563 	if (ret)
3564 		goto free_wake_irq;
3565 
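	/*
	 * srng-based targets get real per-CE MSI enable/disable hooks;
	 * legacy copy-engine targets use no-op handlers and leave their
	 * MSIs enabled.
	 */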
3566 	if (ce_srng_based(scn)) {
3567 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
3568 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
3569 	} else {
3570 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
3571 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
3572 	}
3573 
3574 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
3575 
3576 	/* needs to match the ce_id -> irq data mapping
3577 	 * used in the srng parameter configuration
3578 	 */
3579 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3580 		unsigned int msi_data = (ce_id % msi_data_count) +
3581 			msi_irq_start;
3582 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3583 		HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)",
3584 			 __func__, ce_id, msi_data, irq,
3585 			 &ce_sc->tasklets[ce_id]);
3586 
3587 		/* implies the ce is also initialized */
3588 		if (!ce_sc->tasklets[ce_id].inited)
3589 			continue;
3590 
3591 		pci_sc->ce_msi_irq_num[ce_id] = irq;
3592 		ret = request_irq(irq, hif_ce_interrupt_handler,
3593 				  IRQF_SHARED,
3594 				  ce_name[ce_id],
3595 				  &ce_sc->tasklets[ce_id]);
3596 		if (ret)
3597 			goto free_irq;
3598 	}
3599 
3600 	return ret;
3601 
3602 free_irq:
3603 	/* the request_irq for the last ce_id failed so skip it. */
3604 	while (ce_id > 0 && ce_id < scn->ce_count) {
3605 		unsigned int msi_data;
3606 
3607 		ce_id--;
3608 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
3609 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3610 		free_irq(irq, &ce_sc->tasklets[ce_id]);
3611 	}
3612 
3613 free_wake_irq:
3614 	free_irq(scn->wake_irq, scn);
3615 	scn->wake_irq = 0;
3616 
3617 	return ret;
3618 }
3619 
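/* enable/disable helpers for the OS IRQs belonging to an exec (ext group)
 * context; installed as the group's irq_enable/irq_disable callbacks in
 * hif_pci_configure_grp_irq()
 */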
3620 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3621 {
3622 	int i;
3623 
3624 	for (i = 0; i < hif_ext_group->numirq; i++)
3625 		disable_irq_nosync(hif_ext_group->os_irq[i]);
3626 }
3627 
3628 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3629 {
3630 	int i;
3631 
3632 	for (i = 0; i < hif_ext_group->numirq; i++)
3633 		enable_irq(hif_ext_group->os_irq[i]);
3634 }
3635 
3636 
3637 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3638 			      struct hif_exec_context *hif_ext_group)
3639 {
3640 	int ret = 0;
3641 	int irq = 0;
3642 	int j;
3643 
3644 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3645 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3646 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3647 
3648 	for (j = 0; j < hif_ext_group->numirq; j++) {
3649 		irq = hif_ext_group->irq[j];
3650 
3651 		HIF_DBG("%s: request_irq = %d for grp %d",
3652 			  __func__, irq, hif_ext_group->grp_id);
3653 		ret = request_irq(irq,
3654 				  hif_ext_group_interrupt_handler,
3655 				  IRQF_SHARED, "wlan_EXT_GRP",
3656 				  hif_ext_group);
3657 		if (ret) {
3658 			HIF_ERROR("%s: request_irq failed ret = %d",
3659 				  __func__, ret);
3660 			return -EFAULT;
3661 		}
3662 		hif_ext_group->os_irq[j] = irq;
3663 	}
3664 	hif_ext_group->irq_requested = true;
3665 	return 0;
3666 }
3667 
3668 /**
3669  * hif_configure_irq() - configure interrupt
3670  * @scn: hif context
3671  *
3672  * This function configures the interrupt(s) for the bus: per-CE MSI
3673  * when available, falling back to a single MSI and finally to legacy
3674  * line interrupts via the target-specific AHB/PCI helpers.
3675  *
3676  * Return: 0 for success
3677  */
3678 int hif_configure_irq(struct hif_softc *scn)
3679 {
3680 	int ret = 0;
3681 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3682 
3683 	HIF_TRACE("%s: E", __func__);
3684 
3685 	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
3686 		scn->request_irq_done = false;
3687 		return 0;
3688 	}
3689 
3690 	hif_init_reschedule_tasklet_work(sc);
3691 
3692 	ret = hif_ce_msi_configure_irq(scn);
3693 	if (ret == 0)
3694 		goto end;
3696 
3697 	if (ENABLE_MSI) {
3698 		ret = hif_configure_msi(sc);
3699 		if (ret == 0)
3700 			goto end;
3701 	}
3702 	/* MSI failed. Try legacy irq */
3703 	switch (scn->target_info.target_type) {
3704 	case TARGET_TYPE_IPQ4019:
3705 		ret = hif_ahb_configure_legacy_irq(sc);
3706 		break;
3707 	case TARGET_TYPE_QCA8074:
3708 	case TARGET_TYPE_QCA8074V2:
3709 		ret = hif_ahb_configure_irq(sc);
3710 		break;
3711 	default:
3712 		ret = hif_pci_configure_legacy_irq(sc);
3713 		break;
3714 	}
3715 	if (ret < 0) {
3716 		HIF_ERROR("%s: legacy irq configuration failed, error = %d",
3717 			__func__, ret);
3718 		return ret;
3719 	}
3720 end:
3721 	scn->request_irq_done = true;
3722 	return 0;
3723 }
3724 
3725 /**
3726  * hif_target_sync() - ensure the target is ready
3727  * @scn: hif control structure
3728  *
3729  * Informs fw that we plan to use legacy interrupts so that
3730  * it can begin booting. Ensures that the fw finishes booting
3731  * before continuing. Should be called before trying to write
3732  * to the target's other registers for the first time.
3733  *
3734  * Return: none
3735  */
3736 static void hif_target_sync(struct hif_softc *scn)
3737 {
3738 	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3739 			    PCIE_INTR_ENABLE_ADDRESS),
3740 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3741 	/* read to flush pcie write */
3742 	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3743 			PCIE_INTR_ENABLE_ADDRESS));
3744 
3745 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3746 			PCIE_SOC_WAKE_ADDRESS,
3747 			PCIE_SOC_WAKE_V_MASK);
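	/* wake was just asserted above; spin until the target reports awake */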
3748 	while (!hif_targ_is_awake(scn, scn->mem))
3749 		;
3750 
3751 	if (HAS_FW_INDICATOR) {
3752 		int wait_limit = 500;
3753 		int fw_ind = 0;
3754 
3755 		HIF_TRACE("%s: Loop checking FW signal", __func__);
3756 		while (1) {
3757 			fw_ind = hif_read32_mb(scn, scn->mem +
3758 					FW_INDICATOR_ADDRESS);
3759 			if (fw_ind & FW_IND_INITIALIZED)
3760 				break;
3761 			if (wait_limit-- < 0)
3762 				break;
3763 			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3764 			    PCIE_INTR_ENABLE_ADDRESS),
3765 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3766 			    /* read to flush pcie write */
3767 			(void)hif_read32_mb(scn, scn->mem +
3768 			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
3769 
3770 			qdf_mdelay(10);
3771 		}
3772 		if (wait_limit < 0)
3773 			HIF_TRACE("%s: FW signal timed out",
3774 					__func__);
3775 		else
3776 			HIF_TRACE("%s: Got FW signal, retries = %x",
3777 					__func__, 500-wait_limit);
3778 	}
3779 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3780 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3781 }
3782 
3783 static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
3784 				     struct device *dev)
3785 {
3786 	struct pld_soc_info info;
3787 
3788 	pld_get_soc_info(dev, &info);
3789 	sc->mem = info.v_addr;
3790 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3791 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3792 }
3793 
3794 static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
3795 				       struct device *dev)
3796 {}
3797 
3798 static bool hif_is_pld_based_target(int device_id)
3799 {
3800 	switch (device_id) {
3801 	case QCA6290_DEVICE_ID:
3802 	case QCA6290_EMULATION_DEVICE_ID:
3803 #ifdef QCA_WIFI_QCA6390
3804 	case QCA6390_DEVICE_ID:
3805 #endif
3806 	case AR6320_DEVICE_ID:
3807 		return true;
3808 	}
3809 	return false;
3810 }
3811 
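/* select PLD-backed or direct PCI enable/deinit/soc-info ops based on the
 * PCI device id
 */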
3812 static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
3813 					   int device_id)
3814 {
3815 	if (hif_is_pld_based_target(device_id)) {
3816 		sc->hif_enable_pci = hif_enable_pci_pld;
3817 		sc->hif_pci_deinit = hif_pci_deinit_pld;
3818 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
3819 	} else {
3820 		sc->hif_enable_pci = hif_enable_pci_nopld;
3821 		sc->hif_pci_deinit = hif_pci_deinit_nopld;
3822 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
3823 	}
3824 }
3825 
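/* Register windowing support: for targets that need it (currently QCN7605),
 * register accesses go through a window selected under register_access_lock;
 * all other targets access registers directly.
 */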
3826 #ifdef HIF_REG_WINDOW_SUPPORT
3827 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3828 					       u32 target_type)
3829 {
3830 	switch (target_type) {
3831 	case TARGET_TYPE_QCN7605:
3832 		sc->use_register_windowing = true;
3833 		qdf_spinlock_create(&sc->register_access_lock);
3834 		sc->register_window = 0;
3835 		break;
3836 	default:
3837 		sc->use_register_windowing = false;
3838 	}
3839 }
3840 #else
3841 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3842 					       u32 target_type)
3843 {
3844 	sc->use_register_windowing = false;
3845 }
3846 #endif
3847 
3848 /**
3849  * hif_pci_enable_bus() - enable the PCI bus
3850  * @ol_sc: soft_sc struct
3851  * @dev: device pointer
3852  * @bdev: bus dev pointer
3853  * @bid: bus id pointer
3854  * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
3855  *
3856  * This function enables the PCI bus and prepares target access.
3857  *
3858  * Return: QDF_STATUS
3859  */
3860 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3861 			  struct device *dev, void *bdev,
3862 			  const struct hif_bus_id *bid,
3863 			  enum hif_enable_type type)
3864 {
3865 	int ret = 0;
3866 	uint32_t hif_type, target_type;
3867 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3868 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3869 	uint16_t revision_id;
3870 	int probe_again = 0;
3871 	struct pci_dev *pdev = bdev;
3872 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3873 	struct hif_target_info *tgt_info;
3874 
3875 	if (!ol_sc) {
3876 		HIF_ERROR("%s: hif_ctx is NULL", __func__);
3877 		return QDF_STATUS_E_NOMEM;
3878 	}
3879 
3880 	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
3881 		  __func__, hif_get_conparam(ol_sc), id->device);
3882 
3883 	sc->pdev = pdev;
3884 	sc->dev = &pdev->dev;
3885 	sc->devid = id->device;
3886 	sc->cacheline_sz = dma_get_cache_alignment();
3887 	tgt_info = hif_get_target_info_handle(hif_hdl);
3888 	hif_pci_init_deinit_ops_attach(sc, id->device);
3889 	sc->hif_pci_get_soc_info(sc, dev);
3890 again:
3891 	ret = sc->hif_enable_pci(sc, pdev, id);
3892 	if (ret < 0) {
3893 		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
3894 		       __func__, ret);
3895 		goto err_enable_pci;
3896 	}
3897 	HIF_TRACE("%s: hif_enable_pci done", __func__);
3898 
3899 	/* Temporary FIX: disable ASPM on peregrine.
3900 	 * Will be removed after the OTP is programmed
3901 	 */
3902 	hif_disable_power_gating(hif_hdl);
3903 
3904 	device_disable_async_suspend(&pdev->dev);
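	/* config space offset 0x08 holds the PCI revision ID in its low byte */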
3905 	pci_read_config_word(pdev, 0x08, &revision_id);
3906 
3907 	ret = hif_get_device_type(id->device, revision_id,
3908 						&hif_type, &target_type);
3909 	if (ret < 0) {
3910 		HIF_ERROR("%s: invalid device id/revision_id", __func__);
3911 		goto err_tgtstate;
3912 	}
3913 	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
3914 		  __func__, hif_type, target_type);
3915 
3916 	hif_register_tbl_attach(ol_sc, hif_type);
3917 	hif_target_register_tbl_attach(ol_sc, target_type);
3918 
3919 	hif_pci_init_reg_windowing_support(sc, target_type);
3920 
3921 	tgt_info->target_type = target_type;
3922 
3923 	if (ce_srng_based(ol_sc)) {
3924 		HIF_TRACE("%s: Skip target wake up for srng devices", __func__);
3925 	} else {
3926 		ret = hif_pci_probe_tgt_wakeup(sc);
3927 		if (ret < 0) {
3928 			HIF_ERROR("%s: ERROR - hif_pci_probe_tgt_wakeup error = %d",
3929 					__func__, ret);
3930 			if (ret == -EAGAIN)
3931 				probe_again++;
3932 			goto err_tgtstate;
3933 		}
3934 		HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
3935 	}
3936 
3937 	if (!ol_sc->mem_pa) {
3938 		HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
3939 		ret = -EIO;
3940 		goto err_tgtstate;
3941 	}
3942 
3943 	if (!ce_srng_based(ol_sc)) {
3944 		hif_target_sync(ol_sc);
3945 
3946 		if (ADRASTEA_BU)
3947 			hif_vote_link_up(hif_hdl);
3948 	}
3949 
3950 	return 0;
3951 
3952 err_tgtstate:
3953 	hif_disable_pci(sc);
3954 	sc->pci_enabled = false;
3955 	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
3956 	return QDF_STATUS_E_ABORTED;
3957 
3958 err_enable_pci:
3959 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3960 		int delay_time;
3961 
3962 		HIF_INFO("%s: pci reprobe", __func__);
3963 		/* at least 100 ms between reprobes: 100, 100, 100, 160, 250, ... */
3964 		delay_time = max(100, 10 * (probe_again * probe_again));
3965 		qdf_mdelay(delay_time);
3966 		goto again;
3967 	}
3968 	return ret;
3969 }
3970 
3971 /**
3972  * hif_pci_irq_enable() - enable copy complete interrupts for a copy engine
3973  * @scn: hif_softc
3974  * @ce_id: copy engine id
3975  *
3976  * Return: void
3977  */
3978 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3979 {
3980 	uint32_t tmp = 1 << ce_id;
3981 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3982 
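	/* clear this CE's bit from the pending summary; once all CEs have been
	 * serviced, re-arm the legacy line interrupt unless the target is in
	 * reset or the link is suspended
	 */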
3983 	qdf_spin_lock_irqsave(&sc->irq_lock);
3984 	scn->ce_irq_summary &= ~tmp;
3985 	if (scn->ce_irq_summary == 0) {
3986 		/* Enable Legacy PCI line interrupts */
3987 		if (LEGACY_INTERRUPTS(sc) &&
3988 			(scn->target_status != TARGET_STATUS_RESET) &&
3989 			(!qdf_atomic_read(&scn->link_suspended))) {
3990 
3991 			hif_write32_mb(scn, scn->mem +
3992 				(SOC_CORE_BASE_ADDRESS |
3993 				PCIE_INTR_ENABLE_ADDRESS),
3994 				HOST_GROUP0_MASK);
3995 
3996 			hif_read32_mb(scn, scn->mem +
3997 					(SOC_CORE_BASE_ADDRESS |
3998 					PCIE_INTR_ENABLE_ADDRESS));
3999 		}
4000 	}
4001 	if (scn->hif_init_done == true)
4002 		Q_TARGET_ACCESS_END(scn);
4003 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
4004 
4005 	/* check for missed firmware crash */
4006 	hif_fw_interrupt_handler(0, scn);
4007 }
4008 
4009 /**
4010  * hif_pci_irq_disable() - disable copy complete interrupts for a copy engine
4011  * @scn: hif_softc
4012  * @ce_id: copy engine id
4013  *
4014  * Only applicable to the legacy copy engine.
4015  *
4016  * Return: void
4017  */
4018 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
4019 {
4020 	/* For Rome only need to wake up target */
4021 	/* target access is maintained until interrupts are re-enabled */
4022 	Q_TARGET_ACCESS_BEGIN(scn);
4023 }
4024 
4025 #ifdef FEATURE_RUNTIME_PM
4026 
4027 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
4028 {
4029 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4030 
4031 	if (NULL == sc)
4032 		return;
4033 
4034 	sc->pm_stats.runtime_get++;
4035 	pm_runtime_get_noresume(sc->dev);
4036 }
4037 
4038 /**
4039  * hif_pm_runtime_get() - do a get operation on the device
4040  *
4041  * A get operation will prevent a runtime suspend until a
4042  * corresponding put is done.  This API should be used when sending
4043  * data.
4044  *
4045  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
4046  * THIS API WILL ONLY REQUEST A RESUME AND WILL NOT DO A GET!!!
4047  *
4048  * Return: 0 if the bus is up and a get has been issued,
4049  *   otherwise an error code.
4050  */
4051 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
4052 {
4053 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4054 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4055 	int ret;
4056 	int pm_state;
4057 
4058 	if (NULL == scn) {
4059 		HIF_ERROR("%s: Could not do runtime get, scn is null",
4060 				__func__);
4061 		return -EFAULT;
4062 	}
4063 
4064 	pm_state = qdf_atomic_read(&sc->pm_state);
4065 
4066 	if (pm_state  == HIF_PM_RUNTIME_STATE_ON ||
4067 			pm_state == HIF_PM_RUNTIME_STATE_NONE) {
4068 		sc->pm_stats.runtime_get++;
4069 		ret = __hif_pm_runtime_get(sc->dev);
4070 
4071 		/* Get can return 1 if the device is already active, just return
4072 		 * success in that case
4073 		 */
4074 		if (ret > 0)
4075 			ret = 0;
4076 
4077 		if (ret)
4078 			hif_pm_runtime_put(hif_ctx);
4079 
4080 		if (ret && ret != -EINPROGRESS) {
4081 			sc->pm_stats.runtime_get_err++;
4082 			HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d",
4083 				__func__, qdf_atomic_read(&sc->pm_state), ret);
4084 		}
4085 
4086 		return ret;
4087 	}
4088 
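	/* bus is suspended or in transition: only request a resume (no get is
	 * taken) and return -EAGAIN so the caller retries once the bus is up
	 */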
4089 	sc->pm_stats.request_resume++;
4090 	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
4091 	ret = hif_pm_request_resume(sc->dev);
4092 
4093 	return -EAGAIN;
4094 }
4095 
4096 /**
4097  * hif_pm_runtime_put() - do a put operation on the device
4098  *
4099  * A put operation will allow a runtime suspend after a corresponding
4100  * get was done.  This API should be used when sending data.
4101  *
4102  * This API will return a failure if runtime pm is stopped.
4103  * This API will return a failure if it would decrement the usage count below 0.
4104  *
4105  * Return: 0 if the put is performed, otherwise an error code
4106  */
4107 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
4108 {
4109 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4110 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4111 	int pm_state, usage_count;
4112 	char *error = NULL;
4113 
4114 	if (NULL == scn) {
4115 		HIF_ERROR("%s: Could not do runtime put, scn is null",
4116 				__func__);
4117 		return -EFAULT;
4118 	}
4119 	usage_count = atomic_read(&sc->dev->power.usage_count);
4120 
4121 	if (usage_count == 1) {
4122 		pm_state = qdf_atomic_read(&sc->pm_state);
4123 
4124 		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
4125 			error = "Ignoring unexpected put when runtime pm is disabled";
4126 
4127 	} else if (usage_count == 0) {
4128 		error = "PUT Without a Get Operation";
4129 	}
4130 
4131 	if (error) {
4132 		hif_pci_runtime_pm_warn(sc, error);
4133 		return -EINVAL;
4134 	}
4135 
4136 	sc->pm_stats.runtime_put++;
4137 
4138 	hif_pm_runtime_mark_last_busy(sc->dev);
4139 	hif_pm_runtime_put_auto(sc->dev);
4140 
4141 	return 0;
4142 }
4143 
4144 
4145 /**
4146  * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
4147  *                                      reason
4148  * @hif_sc: pci context
4149  * @lock: runtime_pm lock being acquired
4150  *
4151  * Return 0 if successful.
4152  */
4153 static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
4154 		*hif_sc, struct hif_pm_runtime_lock *lock)
4155 {
4156 	int ret = 0;
4157 
4158 	/*
4159 	 * We shouldn't set context->timeout to zero here when the context
4160 	 * is active, since the timeout APIs can be called back to back
4161 	 * for the same context.
4162 	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
4163 	 * context->timeout is set to zero in hif_pm_runtime_prevent_suspend
4164 	 * to ensure the timeout version is no longer active and the list
4165 	 * entry of this context is deleted during allow suspend.
4166 	 */
4167 	if (lock->active)
4168 		return 0;
4169 
4170 	ret = __hif_pm_runtime_get(hif_sc->dev);
4171 
4172 	/*
4173 	 * ret can be -EINPROGRESS if the runtime status is RPM_RESUMING or
4174 	 * RPM_SUSPENDING. Any other negative value is an error.
4175 	 * We shouldn't do a runtime put here; the usage count is decremented
4176 	 * later when allow suspend is called with this context, so suspend
4177 	 * stays prevented until then.
4178 	 */
4179 
4180 	if (ret < 0 && ret != -EINPROGRESS) {
4181 		hif_sc->pm_stats.runtime_get_err++;
4182 		hif_pci_runtime_pm_warn(hif_sc,
4183 				"Prevent Suspend Runtime PM Error");
4184 	}
4185 
4186 	hif_sc->prevent_suspend_cnt++;
4187 
4188 	lock->active = true;
4189 
4190 	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);
4191 
4192 	hif_sc->pm_stats.prevent_suspend++;
4193 
4194 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4195 		hif_pm_runtime_state_to_string(
4196 			qdf_atomic_read(&hif_sc->pm_state)),
4197 					ret);
4198 
4199 	return ret;
4200 }
4201 
4202 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
4203 		struct hif_pm_runtime_lock *lock)
4204 {
4205 	int ret = 0;
4206 	int usage_count;
4207 
4208 	if (hif_sc->prevent_suspend_cnt == 0)
4209 		return ret;
4210 
4211 	if (!lock->active)
4212 		return ret;
4213 
4214 	usage_count = atomic_read(&hif_sc->dev->power.usage_count);
4215 
4216 	/*
4217 	 * During driver unload, the platform driver increments the usage
4218 	 * count to prevent any runtime suspend from being triggered.
4219 	 * So during driver load, in the HIF_PM_RUNTIME_STATE_NONE state,
4220 	 * the usage_count should be one. Ideally this path shouldn't be hit,
4221 	 * as the lock should be active for an allow suspend to happen.
4222 	 * Handle this case here to prevent any failures.
4223 	 */
4224 	if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
4225 				&& usage_count == 1) || usage_count == 0) {
4226 		hif_pci_runtime_pm_warn(hif_sc,
4227 				"Allow without a prevent suspend");
4228 		return -EINVAL;
4229 	}
4230 
4231 	list_del(&lock->list);
4232 
4233 	hif_sc->prevent_suspend_cnt--;
4234 
4235 	lock->active = false;
4236 	lock->timeout = 0;
4237 
4238 	hif_pm_runtime_mark_last_busy(hif_sc->dev);
4239 	ret = hif_pm_runtime_put_auto(hif_sc->dev);
4240 
4241 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4242 		hif_pm_runtime_state_to_string(
4243 			qdf_atomic_read(&hif_sc->pm_state)),
4244 					ret);
4245 
4246 	hif_sc->pm_stats.allow_suspend++;
4247 	return ret;
4248 }
4249 
4250 /**
4251  * hif_pm_runtime_lock_timeout_fn() - timer callback for runtime lock timeouts
4252  * @data: callback data that is the pci context
4253  *
4254  * If runtime locks are acquired with a timeout, this function releases
4255  * the locks whose timeout has expired when the timer fires.
4258  */
4259 static void hif_pm_runtime_lock_timeout_fn(unsigned long data)
4260 {
4261 	struct hif_pci_softc *hif_sc = (struct hif_pci_softc *)data;
4262 	unsigned long timer_expires;
4263 	struct hif_pm_runtime_lock *context, *temp;
4264 
4265 	spin_lock_bh(&hif_sc->runtime_lock);
4266 
4267 	timer_expires = hif_sc->runtime_timer_expires;
4268 
4269 	/* Make sure we are not called too early; this should take care of
4270 	 * the following case:
4271 	 *
4272 	 * CPU0                         CPU1 (timeout function)
4273 	 * ----                         ----------------------
4274 	 * spin_lock_irq
4275 	 *                              timeout function called
4276 	 *
4277 	 * mod_timer()
4278 	 *
4279 	 * spin_unlock_irq
4280 	 *                              spin_lock_irq
4281 	 */
4282 	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
4283 		hif_sc->runtime_timer_expires = 0;
4284 		list_for_each_entry_safe(context, temp,
4285 				&hif_sc->prevent_suspend_list, list) {
4286 			if (context->timeout) {
4287 				__hif_pm_runtime_allow_suspend(hif_sc, context);
4288 				hif_sc->pm_stats.allow_suspend_timeout++;
4289 			}
4290 		}
4291 	}
4292 
4293 	spin_unlock_bh(&hif_sc->runtime_lock);
4294 }
4295 
4296 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
4297 		struct hif_pm_runtime_lock *data)
4298 {
4299 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4300 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4301 	struct hif_pm_runtime_lock *context = data;
4302 
4303 	if (!sc->hif_config.enable_runtime_pm)
4304 		return 0;
4305 
4306 	if (!context)
4307 		return -EINVAL;
4308 
4309 	if (in_irq())
4310 		WARN_ON(1);
4311 
4312 	spin_lock_bh(&hif_sc->runtime_lock);
4313 	context->timeout = 0;
4314 	__hif_pm_runtime_prevent_suspend(hif_sc, context);
4315 	spin_unlock_bh(&hif_sc->runtime_lock);
4316 
4317 	return 0;
4318 }
4319 
4320 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
4321 				struct hif_pm_runtime_lock *data)
4322 {
4323 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4324 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4325 	struct hif_pm_runtime_lock *context = data;
4326 
4327 	if (!sc->hif_config.enable_runtime_pm)
4328 		return 0;
4329 
4330 	if (!context)
4331 		return -EINVAL;
4332 
4333 	if (in_irq())
4334 		WARN_ON(1);
4335 
4336 	spin_lock_bh(&hif_sc->runtime_lock);
4337 
4338 	__hif_pm_runtime_allow_suspend(hif_sc, context);
4339 
4340 	/* The list can also be empty, e.g. when there was a single context
4341 	 * in the list and this allow suspend ran before its timer expired,
4342 	 * so the context was already deleted from the list above.
4343 	 * When the list is empty the prevent_suspend count is zero, so the
4344 	 * pending timer is cancelled as well.
4345 	 */
4346 	if (hif_sc->prevent_suspend_cnt == 0 &&
4347 			hif_sc->runtime_timer_expires > 0) {
4348 		del_timer(&hif_sc->runtime_timer);
4349 		hif_sc->runtime_timer_expires = 0;
4350 	}
4351 
4352 	spin_unlock_bh(&hif_sc->runtime_lock);
4353 
4354 	return 0;
4355 }
4356 
4357 /**
4358  * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
4359  * @ol_sc: HIF context
4360  * @lock: which lock is being acquired
4361  * @delay: Timeout in milliseconds
4362  *
4363  * Prevent runtime suspend with a timeout after which runtime suspend would be
4364  * allowed. This API uses a single timer to allow the suspend and timer is
4365  * modified if the timeout is changed before timer fires.
4366  * If the timeout is less than autosuspend_delay then use mark_last_busy instead
4367  * of starting the timer.
4368  *
4369  * It is wise to try not to use this API and correct the design if possible.
4370  *
4371  * Return: 0 on success and negative error code on failure
4372  */
4373 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
4374 		struct hif_pm_runtime_lock *lock, unsigned int delay)
4375 {
4376 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4377 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);
4378 
4379 	int ret = 0;
4380 	unsigned long expires;
4381 	struct hif_pm_runtime_lock *context = lock;
4382 
4383 	if (hif_is_load_or_unload_in_progress(sc)) {
4384 		HIF_ERROR("%s: Load/unload in progress, ignore!",
4385 				__func__);
4386 		return -EINVAL;
4387 	}
4388 
4389 	if (hif_is_recovery_in_progress(sc)) {
4390 		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
4391 		return -EINVAL;
4392 	}
4393 
4394 	if (!sc->hif_config.enable_runtime_pm)
4395 		return 0;
4396 
4397 	if (!context)
4398 		return -EINVAL;
4399 
4400 	if (in_irq())
4401 		WARN_ON(1);
4402 
4403 	/*
4404 	 * Don't use internal timer if the timeout is less than auto suspend
4405 	 * delay.
4406 	 */
4407 	if (delay <= hif_sc->dev->power.autosuspend_delay) {
4408 		hif_pm_request_resume(hif_sc->dev);
4409 		hif_pm_runtime_mark_last_busy(hif_sc->dev);
4410 		return ret;
4411 	}
4412 
4413 	expires = jiffies + msecs_to_jiffies(delay);
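	/* runtime_timer_expires == 0 means "no timer armed", so never use 0
	 * as a valid expiry value
	 */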
4414 	expires += !expires;
4415 
4416 	spin_lock_bh(&hif_sc->runtime_lock);
4417 
4418 	context->timeout = delay;
4419 	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
4420 	hif_sc->pm_stats.prevent_suspend_timeout++;
4421 
4422 	/* Modify the timer only if new timeout is after already configured
4423 	 * timeout
4424 	 */
4425 	if (time_after(expires, hif_sc->runtime_timer_expires)) {
4426 		mod_timer(&hif_sc->runtime_timer, expires);
4427 		hif_sc->runtime_timer_expires = expires;
4428 	}
4429 
4430 	spin_unlock_bh(&hif_sc->runtime_lock);
4431 
4432 	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
4433 		hif_pm_runtime_state_to_string(
4434 			qdf_atomic_read(&hif_sc->pm_state)),
4435 					delay, ret);
4436 
4437 	return ret;
4438 }
4439 
4440 /**
4441  * hif_runtime_lock_init() - API to initialize a Runtime PM context
4442  * @lock: wrapper that will hold the newly allocated context
4443  * @name: Context name
4444  *
4445  * This API allocates the caller's Runtime PM context, initializes it
4446  * and stores it in @lock.
4447  *
4448  * Return: 0 on success, -ENOMEM if allocation fails
4448  */
4449 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
4450 {
4451 	struct hif_pm_runtime_lock *context;
4452 
4453 	HIF_INFO("Initializing Runtime PM wakelock %s", name);
4454 
4455 	context = qdf_mem_malloc(sizeof(*context));
4456 	if (!context) {
4457 		HIF_ERROR("%s: No memory for Runtime PM wakelock context",
4458 			  __func__);
4459 		return -ENOMEM;
4460 	}
4461 
4462 	context->name = name ? name : "Default";
4463 	lock->lock = context;
4464 
4465 	return 0;
4466 }
4467 
4468 /**
4469  * hif_runtime_lock_deinit() - This API frees the runtime pm context
4470  * @hif_ctx: opaque hif context
4471  * @data: Runtime PM context to free
4471  *
4472  * Return: void
4473  */
4474 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
4475 			     struct hif_pm_runtime_lock *data)
4476 {
4477 	struct hif_pm_runtime_lock *context = data;
4478 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4479 
4480 	if (!context) {
4481 		HIF_ERROR("Runtime PM wakelock context is NULL");
4482 		return;
4483 	}
4484 
4485 	HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);
4486 
4487 	/*
4488 	 * Ensure to delete the context list entry and reduce the usage count
4489 	 * before freeing the context if context is active.
4490 	 */
4491 	if (sc) {
4492 		spin_lock_bh(&sc->runtime_lock);
4493 		__hif_pm_runtime_allow_suspend(sc, context);
4494 		spin_unlock_bh(&sc->runtime_lock);
4495 	}
4496 
4497 	qdf_mem_free(context);
4498 }
4499 #endif /* FEATURE_RUNTIME_PM */
4500 
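/**
 * hif_pci_legacy_map_ce_to_irq() - map a copy engine to the shared legacy IRQ
 * @scn: hif context
 * @ce_id: copy engine id (all CEs share the single legacy interrupt)
 *
 * Return: the legacy PCI IRQ number
 */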
4501 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
4502 {
4503 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4504 
4505 	/* legacy case only has one irq */
4506 	return pci_scn->irq;
4507 }
4508 
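/**
 * hif_pci_addr_in_boundary() - check that a register offset is safe to access
 * @scn: hif context
 * @offset: register offset from the start of the BAR
 *
 * Return: 0 if the access is within bounds, -EINVAL otherwise
 */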
4509 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
4510 {
4511 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
4512 	struct hif_target_info *tgt_info;
4513 
4514 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
4515 
4516 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
4517 	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
4518 	    tgt_info->target_type == TARGET_TYPE_QCA8074) {
4519 		/*
4520 		 * Need to consider offset's memtype for QCA6290/QCA8074,
4521 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
4522 		 * well initialized/defined.
4523 		 */
4524 		return 0;
4525 	}
4526 
4527 	if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE)
4528 		 || (offset + sizeof(unsigned int) <= sc->mem_len)) {
4529 		return 0;
4530 	}
4531 
4532 	HIF_TRACE("Refusing to read memory at 0x%x - 0x%lx (max 0x%zx)\n",
4533 		 offset, offset + sizeof(unsigned int), sc->mem_len);
4534 
4535 	return -EINVAL;
4536 }
4537 
4538 /**
4539  * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
4540  * @scn: hif context
4541  *
4542  * Return: true if soc needs driver bmi otherwise false
4543  */
4544 bool hif_pci_needs_bmi(struct hif_softc *scn)
4545 {
4546 	return !ce_srng_based(scn);
4547 }
4548