xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/pcie/if_pci.c (revision a175314c51a4ce5cec2835cc8a8c7dc0c1810915)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <linux/pci.h>
20 #include <linux/slab.h>
21 #include <linux/interrupt.h>
22 #include <linux/if_arp.h>
23 #ifdef CONFIG_PCI_MSM
24 #include <linux/msm_pcie.h>
25 #endif
26 #include "hif_io32.h"
27 #include "if_pci.h"
28 #include "hif.h"
29 #include "target_type.h"
30 #include "hif_main.h"
31 #include "ce_main.h"
32 #include "ce_api.h"
33 #include "ce_internal.h"
34 #include "ce_reg.h"
35 #include "ce_bmi.h"
36 #include "regtable.h"
37 #include "hif_hw_version.h"
38 #include <linux/debugfs.h>
39 #include <linux/seq_file.h>
40 #include "qdf_status.h"
41 #include "qdf_atomic.h"
42 #include "pld_common.h"
43 #include "mp_dev.h"
44 #include "hif_debug.h"
45 
46 #include "if_pci_internal.h"
47 #include "ce_tasklet.h"
48 #include "targaddrs.h"
49 #include "hif_exec.h"
50 
51 #include "pci_api.h"
52 #include "ahb_api.h"
53 
54 /* Maximum ms timeout for host to wake up target */
55 #define PCIE_WAKE_TIMEOUT 1000
56 #define RAMDUMP_EVENT_TIMEOUT 2500
57 
58 /* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
59  * PCIe data bus error
60  * As a workaround for this issue, change the reset sequence to
61  * use a Target CPU warm reset instead of SOC_GLOBAL_RESET
62  */
63 #define CPU_WARM_RESET_WAR
64 
65 #ifdef CONFIG_WIN
66 extern int32_t frac, intval, ar900b_20_targ_clk, qca9888_20_targ_clk;
67 #endif
68 
69 /*
70  * Top-level interrupt handler for all PCI interrupts from a Target.
71  * When a block of MSI interrupts is allocated, this top-level handler
72  * is not used; instead, we directly call the correct sub-handler.
73  */
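/**
 * struct ce_irq_reg_table - per-CE interrupt register pair
 * @irq_enable: CE interrupt enable register
 * @irq_status: CE interrupt status register
 */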
74 struct ce_irq_reg_table {
75 	uint32_t irq_enable;
76 	uint32_t irq_status;
77 };
78 
79 #ifndef QCA_WIFI_3_0_ADRASTEA
80 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
81 {
82 }
83 #else
84 static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
85 {
86 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
87 	unsigned int target_enable0, target_enable1;
88 	unsigned int target_cause0, target_cause1;
89 
90 	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
91 	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
92 	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
93 	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);
94 
95 	if ((target_enable0 & target_cause0) ||
96 	    (target_enable1 & target_cause1)) {
97 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
98 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);
99 
100 		if (scn->notice_send)
101 			pld_intr_notify_q6(sc->dev);
102 	}
103 }
104 #endif
105 
106 
107 /**
108  * pci_dispatch_interrupt() - dispatch pending copy engine interrupts
109  * @scn: hif context
110  *
111  * Return: N/A
112  */
113 static void pci_dispatch_interrupt(struct hif_softc *scn)
114 {
115 	uint32_t intr_summary;
116 	int id;
117 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
118 
119 	if (scn->hif_init_done != true)
120 		return;
121 
122 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
123 		return;
124 
125 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
126 
127 	if (intr_summary == 0) {
128 		if ((scn->target_status != TARGET_STATUS_RESET) &&
129 			(!qdf_atomic_read(&scn->link_suspended))) {
130 
131 			hif_write32_mb(scn, scn->mem +
132 				(SOC_CORE_BASE_ADDRESS |
133 				PCIE_INTR_ENABLE_ADDRESS),
134 				HOST_GROUP0_MASK);
135 
136 			hif_read32_mb(scn, scn->mem +
137 					(SOC_CORE_BASE_ADDRESS |
138 					PCIE_INTR_ENABLE_ADDRESS));
139 		}
140 		Q_TARGET_ACCESS_END(scn);
141 		return;
142 	}
143 	Q_TARGET_ACCESS_END(scn);
144 
145 	scn->ce_irq_summary = intr_summary;
146 	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
147 		if (intr_summary & (1 << id)) {
148 			intr_summary &= ~(1 << id);
149 			ce_dispatch_interrupt(id,  &hif_state->tasklets[id]);
150 		}
151 	}
152 }
153 
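/**
 * hif_pci_legacy_ce_interrupt_handler() - handler for legacy (INTx) interrupts
 * @irq: interrupt number
 * @arg: hif_pci_softc context
 *
 * Disables and clears the legacy PCI line interrupt, checks the firmware
 * indicator for a pending firmware event, and then either schedules the
 * wlan tasklet (for firmware/SSR events) or dispatches the pending copy
 * engine interrupts.
 *
 * Return: IRQ_HANDLED
 */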
154 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
155 {
156 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
157 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
158 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
159 
160 	volatile int tmp;
161 	uint16_t val = 0;
162 	uint32_t bar0 = 0;
163 	uint32_t fw_indicator_address, fw_indicator;
164 	bool ssr_irq = false;
165 	unsigned int host_cause, host_enable;
166 
167 	if (LEGACY_INTERRUPTS(sc)) {
168 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
169 			return IRQ_HANDLED;
170 
171 		if (ADRASTEA_BU) {
172 			host_enable = hif_read32_mb(sc, sc->mem +
173 						    PCIE_INTR_ENABLE_ADDRESS);
174 			host_cause = hif_read32_mb(sc, sc->mem +
175 						   PCIE_INTR_CAUSE_ADDRESS);
176 			if (!(host_enable & host_cause)) {
177 				hif_pci_route_adrastea_interrupt(sc);
178 				return IRQ_HANDLED;
179 			}
180 		}
181 
182 		/* Clear Legacy PCI line interrupts
183 		 * IMPORTANT: INTR_CLR register has to be set
184 		 * after INTR_ENABLE is set to 0,
185 		 * otherwise the interrupt cannot be cleared reliably
186 		 */
187 		hif_write32_mb(sc, sc->mem +
188 			      (SOC_CORE_BASE_ADDRESS |
189 			       PCIE_INTR_ENABLE_ADDRESS), 0);
190 
191 		hif_write32_mb(sc, sc->mem +
192 			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
193 			       ADRASTEA_BU ?
194 			       (host_enable & host_cause) :
195 			      HOST_GROUP0_MASK);
196 
197 		if (ADRASTEA_BU)
198 			hif_write32_mb(sc, sc->mem + 0x2f100c,
199 				       (host_cause >> 1));
200 
201 		/* IMPORTANT: this extra read transaction is required to
202 		 * flush the posted write buffer
203 		 */
204 		if (!ADRASTEA_BU) {
205 		tmp =
206 			hif_read32_mb(sc, sc->mem +
207 				     (SOC_CORE_BASE_ADDRESS |
208 				      PCIE_INTR_ENABLE_ADDRESS));
209 
210 		if (tmp == 0xdeadbeef) {
211 			HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
212 			       __func__);
213 
214 			pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
215 			HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
216 			       __func__, val);
217 
218 			pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
219 			HIF_ERROR("%s: PCI Device ID = 0x%04x",
220 			       __func__, val);
221 
222 			pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
223 			HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
224 			       val);
225 
226 			pci_read_config_word(sc->pdev, PCI_STATUS, &val);
227 			HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
228 			       val);
229 
230 			pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
231 					      &bar0);
232 			HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
233 			       bar0);
234 
235 			HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
236 				  __func__,
237 				  hif_read32_mb(sc, sc->mem +
238 						PCIE_LOCAL_BASE_ADDRESS
239 						+ RTC_STATE_ADDRESS));
240 			HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
241 				  __func__,
242 				  hif_read32_mb(sc, sc->mem +
243 						PCIE_LOCAL_BASE_ADDRESS
244 						+ PCIE_SOC_WAKE_ADDRESS));
245 			HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
246 				  __func__,
247 				  hif_read32_mb(sc, sc->mem + 0x80008),
248 				  hif_read32_mb(sc, sc->mem + 0x8000c));
249 			HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
250 				  __func__,
251 				  hif_read32_mb(sc, sc->mem + 0x80010),
252 				  hif_read32_mb(sc, sc->mem + 0x80014));
253 			HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
254 				  __func__,
255 				  hif_read32_mb(sc, sc->mem + 0x80018),
256 				  hif_read32_mb(sc, sc->mem + 0x8001c));
257 			QDF_BUG(0);
258 		}
259 
260 		PCI_CLR_CAUSE0_REGISTER(sc);
261 		}
262 
263 		if (HAS_FW_INDICATOR) {
264 			fw_indicator_address = hif_state->fw_indicator_address;
265 			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
266 			if ((fw_indicator != ~0) &&
267 			   (fw_indicator & FW_IND_EVENT_PENDING))
268 				ssr_irq = true;
269 		}
270 
271 		if (Q_TARGET_ACCESS_END(scn) < 0)
272 			return IRQ_HANDLED;
273 	}
274 	/* TBDXXX: Add support for WMAC */
275 
276 	if (ssr_irq) {
277 		sc->irq_event = irq;
278 		qdf_atomic_set(&scn->tasklet_from_intr, 1);
279 
280 		qdf_atomic_inc(&scn->active_tasklet_cnt);
281 		tasklet_schedule(&sc->intr_tq);
282 	} else {
283 		pci_dispatch_interrupt(scn);
284 	}
285 
286 	return IRQ_HANDLED;
287 }
288 
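/**
 * hif_pci_msi_fw_handler() - MSI handler for the firmware interrupt vector
 * @irq: interrupt number
 * @arg: hif_pci_softc context
 *
 * Forwards the event to hif_fw_interrupt_handler().
 *
 * Return: IRQ_HANDLED
 */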
289 static irqreturn_t hif_pci_msi_fw_handler(int irq, void *arg)
290 {
291 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
292 
293 	(irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, arg);
294 
295 	return IRQ_HANDLED;
296 }
297 
298 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
299 {
300 	return 1;               /* FIX THIS */
301 }
302 
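/**
 * hif_get_irq_num() - fill a caller-provided buffer with the irqs in use
 * @scn: hif context
 * @irq: buffer to fill with irq numbers
 * @size: number of entries available in @irq
 *
 * Return: number of irq numbers copied, or -EINVAL if the arguments are
 *         invalid or @irq is too small to hold all MSI vectors.
 */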
303 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
304 {
305 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
306 	int i = 0;
307 
308 	if (!irq || !size) {
309 		return -EINVAL;
310 	}
311 
312 	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
313 		irq[0] = sc->irq;
314 		return 1;
315 	}
316 
317 	if (sc->num_msi_intrs > size) {
318 		qdf_print("Not enough space in irq buffer to return irqs\n");
319 		return -EINVAL;
320 	}
321 
322 	for (i = 0; i < sc->num_msi_intrs; i++) {
323 		irq[i] = sc->irq +  i + MSI_ASSIGN_CE_INITIAL;
324 	}
325 
326 	return sc->num_msi_intrs;
327 }
328 
329 
330 /**
331  * hif_pci_cancel_deferred_target_sleep() - cancel the deferred target sleep
332  * @scn: hif_softc
333  *
334  * Return: void
335  */
336 #if CONFIG_ATH_PCIE_MAX_PERF == 0
337 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
338 {
339 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
340 	A_target_id_t pci_addr = scn->mem;
341 
342 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
343 	/*
344 	 * If the deferred sleep timer is running cancel it
345 	 * and put the soc into sleep.
346 	 */
347 	if (hif_state->fake_sleep == true) {
348 		qdf_timer_stop(&hif_state->sleep_timer);
349 		if (hif_state->verified_awake == false) {
350 			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
351 				      PCIE_SOC_WAKE_ADDRESS,
352 				      PCIE_SOC_WAKE_RESET);
353 		}
354 		hif_state->fake_sleep = false;
355 	}
356 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
357 }
358 #else
359 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
360 {
361 }
362 #endif
363 
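/*
 * Helpers for accessing registers in the Target's PCIe local register
 * space; the supplied offset is applied on top of PCIE_LOCAL_BASE_ADDRESS.
 * Illustrative usage (as in hif_pci_device_reset() below):
 *   val = A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS);
 *   A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
 *                          PCIE_SOC_WAKE_V_MASK);
 */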
364 #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
365 	hif_read32_mb(sc, (char *)(mem) + \
366 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
367 
368 #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
369 	hif_write32_mb(sc, ((char *)(mem) + \
370 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
371 
372 #ifdef QCA_WIFI_3_0
373 /**
374  * hif_targ_is_awake() - check to see if the target is awake
375  * @hif_ctx: hif context
376  *
377  * emulation never goes to sleep
378  *
379  * Return: true if target is awake
380  */
381 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
382 {
383 	return true;
384 }
385 #else
386 /**
387  * hif_targ_is_awake() - check to see if the target is awake
388  * @hif_ctx: hif context
389  *
390  * Return: true if the targets clocks are on
391  */
392 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
393 {
394 	uint32_t val;
395 
396 	if (scn->recovery)
397 		return false;
398 	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
399 		+ RTC_STATE_ADDRESS);
400 	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
401 }
402 #endif
403 
404 #define ATH_PCI_RESET_WAIT_MAX 10       /* Ms */
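/**
 * hif_pci_device_reset() - cold reset the Target via SOC_GLOBAL_RESET
 * @sc: pci softc of the device to reset
 *
 * Forces the Target awake, asserts SOC_GLOBAL_RESET, waits for the cold
 * reset to take effect, de-asserts it, and finally allows the Target to
 * sleep again.
 */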
405 static void hif_pci_device_reset(struct hif_pci_softc *sc)
406 {
407 	void __iomem *mem = sc->mem;
408 	int i;
409 	uint32_t val;
410 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
411 
412 	if (!scn->hostdef)
413 		return;
414 
415 	/* NB: Don't check resetok here.  This form of reset
416 	 * is integral to correct operation.
417 	 */
418 
419 	if (!SOC_GLOBAL_RESET_ADDRESS)
420 		return;
421 
422 	if (!mem)
423 		return;
424 
425 	HIF_ERROR("%s: Reset Device", __func__);
426 
427 	/*
428 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
429 	 * writing WAKE_V, the Target may scribble over Host memory!
430 	 */
431 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
432 			       PCIE_SOC_WAKE_V_MASK);
433 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
434 		if (hif_targ_is_awake(scn, mem))
435 			break;
436 
437 		qdf_mdelay(1);
438 	}
439 
440 	/* Put Target, including PCIe, into RESET. */
441 	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
442 	val |= 1;
443 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
444 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
445 		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
446 		    RTC_STATE_COLD_RESET_MASK)
447 			break;
448 
449 		qdf_mdelay(1);
450 	}
451 
452 	/* Pull Target, including PCIe, out of RESET. */
453 	val &= ~1;
454 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
455 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
456 		if (!
457 		    (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
458 		     RTC_STATE_COLD_RESET_MASK))
459 			break;
460 
461 		qdf_mdelay(1);
462 	}
463 
464 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
465 			       PCIE_SOC_WAKE_RESET);
466 }
467 
468 /* CPU warm reset function
469  * Steps:
470  * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
471  * 2. Clear the FW_INDICATOR_ADDRESS - so the Target CPU initializes FW
472  *    correctly on WARM reset
473  * 3. Clear TARGET CPU LF timer interrupt
474  * 4. Reset all CEs to clear any pending CE transactions
475  * 5. Warm reset CPU
476  */
477 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
478 {
479 	void __iomem *mem = sc->mem;
480 	int i;
481 	uint32_t val;
482 	uint32_t fw_indicator;
483 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
484 
485 	/* NB: Don't check resetok here.  This form of reset is
486 	 * integral to correct operation.
487 	 */
488 
489 	if (!mem)
490 		return;
491 
492 	HIF_INFO_MED("%s: Target Warm Reset", __func__);
493 
494 	/*
495 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
496 	 * writing WAKE_V, the Target may scribble over Host memory!
497 	 */
498 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
499 			       PCIE_SOC_WAKE_V_MASK);
500 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
501 		if (hif_targ_is_awake(scn, mem))
502 			break;
503 		qdf_mdelay(1);
504 	}
505 
506 	/*
507 	 * Disable Pending interrupts
508 	 */
509 	val =
510 		hif_read32_mb(sc, mem +
511 			     (SOC_CORE_BASE_ADDRESS |
512 			      PCIE_INTR_CAUSE_ADDRESS));
513 	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
514 		    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
515 	/* Target CPU Intr Cause */
516 	val = hif_read32_mb(sc, mem +
517 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
518 	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);
519 
520 	val =
521 		hif_read32_mb(sc, mem +
522 			     (SOC_CORE_BASE_ADDRESS |
523 			      PCIE_INTR_ENABLE_ADDRESS));
524 	hif_write32_mb(sc, (mem +
525 		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
526 	hif_write32_mb(sc, (mem +
527 		       (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
528 		       HOST_GROUP0_MASK);
529 
530 	qdf_mdelay(100);
531 
532 	/* Clear FW_INDICATOR_ADDRESS */
533 	if (HAS_FW_INDICATOR) {
534 		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
535 		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
536 	}
537 
538 	/* Clear Target LF Timer interrupts */
539 	val =
540 		hif_read32_mb(sc, mem +
541 			     (RTC_SOC_BASE_ADDRESS +
542 			      SOC_LF_TIMER_CONTROL0_ADDRESS));
543 	HIF_INFO_MED("%s: addr 0x%x :  0x%x", __func__,
544 	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
545 	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
546 	hif_write32_mb(sc, mem +
547 		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
548 		      val);
549 
550 	/* Reset CE */
551 	val =
552 		hif_read32_mb(sc, mem +
553 			     (RTC_SOC_BASE_ADDRESS |
554 			      SOC_RESET_CONTROL_ADDRESS));
555 	val |= SOC_RESET_CONTROL_CE_RST_MASK;
556 	hif_write32_mb(sc, (mem +
557 		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
558 		      val);
559 	val =
560 		hif_read32_mb(sc, mem +
561 			     (RTC_SOC_BASE_ADDRESS |
562 			      SOC_RESET_CONTROL_ADDRESS));
563 	qdf_mdelay(10);
564 
565 	/* CE unreset */
566 	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
567 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
568 		       SOC_RESET_CONTROL_ADDRESS), val);
569 	val =
570 		hif_read32_mb(sc, mem +
571 			     (RTC_SOC_BASE_ADDRESS |
572 			      SOC_RESET_CONTROL_ADDRESS));
573 	qdf_mdelay(10);
574 
575 	/* Read Target CPU Intr Cause */
576 	val = hif_read32_mb(sc, mem +
577 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
578 	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
579 		    __func__, val);
580 
581 	/* CPU warm RESET */
582 	val =
583 		hif_read32_mb(sc, mem +
584 			     (RTC_SOC_BASE_ADDRESS |
585 			      SOC_RESET_CONTROL_ADDRESS));
586 	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
587 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
588 		       SOC_RESET_CONTROL_ADDRESS), val);
589 	val =
590 		hif_read32_mb(sc, mem +
591 			     (RTC_SOC_BASE_ADDRESS |
592 			      SOC_RESET_CONTROL_ADDRESS));
593 	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
594 		    __func__, val);
595 
596 	qdf_mdelay(100);
597 	HIF_INFO_MED("%s: Target Warm reset complete", __func__);
598 
599 }
600 
601 #ifndef QCA_WIFI_3_0
602 /* only applicable to legacy ce */
603 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
604 {
605 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
606 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
607 	void __iomem *mem = sc->mem;
608 	uint32_t val;
609 
610 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
611 		return ATH_ISR_NOSCHED;
612 	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
613 	if (Q_TARGET_ACCESS_END(scn) < 0)
614 		return ATH_ISR_SCHED;
615 
616 	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);
617 
618 	if (val & FW_IND_HELPER)
619 		return 0;
620 
621 	return 1;
622 }
623 #endif
624 
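/**
 * hif_check_soc_status() - sanity check PCIe link and SoC accessibility
 * @hif_ctx: hif context
 *
 * Verifies the device ID is still readable from config space, wakes the
 * Target, and reads a few SoC registers useful for bus debugging.
 *
 * Return: 0 on success, -EACCES if the link is down or the wake times out.
 */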
625 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
626 {
627 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
628 	uint16_t device_id = 0;
629 	uint32_t val;
630 	uint16_t timeout_count = 0;
631 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
632 
633 	/* Check device ID from PCIe configuration space for link status */
634 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
635 	if (device_id != sc->devid) {
636 		HIF_ERROR("%s: device ID does not match (read 0x%x, expect 0x%x)",
637 			  __func__, device_id, sc->devid);
638 		return -EACCES;
639 	}
640 
641 	/* Check PCIe local register for bar/memory access */
642 	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
643 			   RTC_STATE_ADDRESS);
644 	HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);
645 
646 	/* Try to wake up the target if it sleeps */
647 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
648 		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
649 	HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
650 		hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
651 		PCIE_SOC_WAKE_ADDRESS));
652 
653 	/* Check if the target can be woken up */
654 	while (!hif_targ_is_awake(scn, sc->mem)) {
655 		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
656 			HIF_ERROR("%s: wake up timeout, %08x, %08x",
657 				__func__,
658 				hif_read32_mb(sc, sc->mem +
659 					     PCIE_LOCAL_BASE_ADDRESS +
660 					     RTC_STATE_ADDRESS),
661 				hif_read32_mb(sc, sc->mem +
662 					     PCIE_LOCAL_BASE_ADDRESS +
663 					PCIE_SOC_WAKE_ADDRESS));
664 			return -EACCES;
665 		}
666 
667 		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
668 			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
669 
670 		qdf_mdelay(100);
671 		timeout_count += 100;
672 	}
673 
674 	/* Check Power register for SoC internal bus issues */
675 	val =
676 		hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
677 			     SOC_POWER_REG_OFFSET);
678 	HIF_INFO_MED("%s: Power register is %08x", __func__, val);
679 
680 	return 0;
681 }
682 
683 /**
684  * __hif_pci_dump_registers(): dump other PCI debug registers
685  * @scn: struct hif_softc
686  *
687  * This function dumps pci debug registers.  The parent function
688  * dumps the copy engine registers before calling this function.
689  *
690  * Return: void
691  */
692 static void __hif_pci_dump_registers(struct hif_softc *scn)
693 {
694 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
695 	void __iomem *mem = sc->mem;
696 	uint32_t val, i, j;
697 	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
698 	uint32_t ce_base;
699 
700 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
701 		return;
702 
703 	/* DEBUG_INPUT_SEL_SRC = 0x6 */
704 	val =
705 		hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
706 			     WLAN_DEBUG_INPUT_SEL_OFFSET);
707 	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
708 	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
709 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
710 		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);
711 
712 	/* DEBUG_CONTROL_ENABLE = 0x1 */
713 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
714 			   WLAN_DEBUG_CONTROL_OFFSET);
715 	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
716 	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
717 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
718 		      WLAN_DEBUG_CONTROL_OFFSET, val);
719 
720 	HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
721 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
722 			    WLAN_DEBUG_INPUT_SEL_OFFSET),
723 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
724 			    WLAN_DEBUG_CONTROL_OFFSET));
725 
726 	HIF_INFO_MED("%s: Debug CE", __func__);
727 	/* Loop CE debug output */
728 	/* AMBA_DEBUG_BUS_SEL = 0xc */
729 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
730 			    AMBA_DEBUG_BUS_OFFSET);
731 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
732 	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
733 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
734 		       val);
735 
736 	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
737 		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
738 		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
739 				   CE_WRAPPER_DEBUG_OFFSET);
740 		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
741 		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
742 		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
743 			      CE_WRAPPER_DEBUG_OFFSET, val);
744 
745 		HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
746 			    __func__, wrapper_idx[i],
747 			    hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
748 				AMBA_DEBUG_BUS_OFFSET),
749 			    hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
750 				CE_WRAPPER_DEBUG_OFFSET));
751 
752 		if (wrapper_idx[i] <= 7) {
753 			for (j = 0; j <= 5; j++) {
754 				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
755 				/* For (j=0~5) write CE_DEBUG_SEL = j */
756 				val =
757 					hif_read32_mb(sc, mem + ce_base +
758 						     CE_DEBUG_OFFSET);
759 				val &= ~CE_DEBUG_SEL_MASK;
760 				val |= CE_DEBUG_SEL_SET(j);
761 				hif_write32_mb(sc, mem + ce_base +
762 					       CE_DEBUG_OFFSET, val);
763 
764 				/* read (@gpio_athr_wlan_reg)
765 				 * WLAN_DEBUG_OUT_DATA
766 				 */
767 				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
768 						    + WLAN_DEBUG_OUT_OFFSET);
769 				val = WLAN_DEBUG_OUT_DATA_GET(val);
770 
771 				HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
772 					    __func__, j,
773 					    hif_read32_mb(sc, mem + ce_base +
774 						    CE_DEBUG_OFFSET), val);
775 			}
776 		} else {
777 			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
778 			val =
779 				hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
780 					     WLAN_DEBUG_OUT_OFFSET);
781 			val = WLAN_DEBUG_OUT_DATA_GET(val);
782 
783 			HIF_INFO_MED("%s: out: %x", __func__, val);
784 		}
785 	}
786 
787 	HIF_INFO_MED("%s: Debug PCIe:", __func__);
788 	/* Loop PCIe debug output */
789 	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
790 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
791 			    AMBA_DEBUG_BUS_OFFSET);
792 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
793 	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
794 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
795 		       AMBA_DEBUG_BUS_OFFSET, val);
796 
797 	for (i = 0; i <= 8; i++) {
798 		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
799 		val =
800 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
801 				     AMBA_DEBUG_BUS_OFFSET);
802 		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
803 		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
804 		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
805 			       AMBA_DEBUG_BUS_OFFSET, val);
806 
807 		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
808 		val =
809 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
810 				     WLAN_DEBUG_OUT_OFFSET);
811 		val = WLAN_DEBUG_OUT_DATA_GET(val);
812 
813 		HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
814 		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
815 				    WLAN_DEBUG_OUT_OFFSET), val,
816 		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
817 				    WLAN_DEBUG_OUT_OFFSET));
818 	}
819 
820 	Q_TARGET_ACCESS_END(scn);
821 }
822 
823 /**
824  * hif_pci_dump_registers(): dump bus debug registers
825  * @hif_ctx: struct hif_softc
826  *
827  * This function dumps hif bus debug registers
828  *
829  * Return: 0 for success or error code
830  */
831 int hif_pci_dump_registers(struct hif_softc *hif_ctx)
832 {
833 	int status;
834 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
835 
836 	status = hif_dump_ce_registers(scn);
837 
838 	if (status)
839 		HIF_ERROR("%s: Dump CE Registers Failed", __func__);
840 
841 	/* dump non copy engine pci registers */
842 	__hif_pci_dump_registers(scn);
843 
844 	return 0;
845 }
846 
847 /*
848  * Handler for a per-engine interrupt on a PARTICULAR CE.
849  * This is used in cases where each CE has a private
850  * MSI interrupt.
851  */
852 static irqreturn_t ce_per_engine_handler(int irq, void *arg)
853 {
854 	int CE_id = irq - MSI_ASSIGN_CE_INITIAL;
855 
856 	/*
857 	 * NOTE: We are able to derive CE_id from irq because we
858 	 * use a one-to-one mapping for CE's 0..5.
859 	 * CE's 6 & 7 do not use interrupts at all.
860 	 *
861 	 * This mapping must be kept in sync with the mapping
862 	 * used by firmware.
863 	 */
864 
865 	ce_per_engine_service(arg, CE_id);
866 
867 	return IRQ_HANDLED;
868 }
869 
870 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
871 
872 /* worker thread to schedule wlan_tasklet in SLUB debug build */
873 static void reschedule_tasklet_work_handler(void *arg)
874 {
875 	struct hif_pci_softc *sc = arg;
876 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
877 
878 	if (!scn) {
879 		HIF_ERROR("%s: hif_softc is NULL\n", __func__);
880 		return;
881 	}
882 
883 	if (scn->hif_init_done == false) {
884 		HIF_ERROR("%s: wlan driver is unloaded", __func__);
885 		return;
886 	}
887 
888 	tasklet_schedule(&sc->intr_tq);
889 }
890 
891 /**
892  * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
893  * work
894  * @sc: HIF PCI Context
895  *
896  * Return: void
897  */
898 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
899 {
900 	qdf_create_work(0, &sc->reschedule_tasklet_work,
901 				reschedule_tasklet_work_handler, NULL);
902 }
903 #else
904 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
905 #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
906 
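/**
 * wlan_tasklet() - bottom half for legacy/firmware interrupts
 * @data: hif_pci_softc context (cast to unsigned long by the tasklet API)
 *
 * Runs the firmware interrupt handler on non-ADRASTEA targets and then
 * clears the tasklet accounting counters.
 */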
907 void wlan_tasklet(unsigned long data)
908 {
909 	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
910 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
911 
912 	if (scn->hif_init_done == false)
913 		goto end;
914 
915 	if (qdf_atomic_read(&scn->link_suspended))
916 		goto end;
917 
918 	if (!ADRASTEA_BU) {
919 		(irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
920 		if (scn->target_status == TARGET_STATUS_RESET)
921 			goto end;
922 	}
923 
924 end:
925 	qdf_atomic_set(&scn->tasklet_from_intr, 0);
926 	qdf_atomic_dec(&scn->active_tasklet_cnt);
927 }
928 
929 #ifdef FEATURE_RUNTIME_PM
930 static const char *hif_pm_runtime_state_to_string(uint32_t state)
931 {
932 	switch (state) {
933 	case HIF_PM_RUNTIME_STATE_NONE:
934 		return "INIT_STATE";
935 	case HIF_PM_RUNTIME_STATE_ON:
936 		return "ON";
937 	case HIF_PM_RUNTIME_STATE_INPROGRESS:
938 		return "INPROGRESS";
939 	case HIF_PM_RUNTIME_STATE_SUSPENDED:
940 		return "SUSPENDED";
941 	default:
942 		return "INVALID STATE";
943 	}
944 }
945 
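/*
 * Print one pm_stats counter as a "<name>: <value>" row of the debugfs
 * seq file; e.g. HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended) expands to
 * seq_printf(s, "%30s: %u\n", "suspended", sc->pm_stats.suspended).
 */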
946 #define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
947 	seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
948 /**
949  * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
950  * @sc: hif_pci_softc context
951  * @msg: log message
952  *
953  * log runtime pm stats when something seems off.
954  *
955  * Return: void
956  */
957 static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
958 {
959 	struct hif_pm_runtime_lock *ctx;
960 
961 	HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
962 			msg, atomic_read(&sc->dev->power.usage_count),
963 			hif_pm_runtime_state_to_string(
964 					atomic_read(&sc->pm_state)),
965 			sc->prevent_suspend_cnt);
966 
967 	HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
968 			sc->dev->power.runtime_status,
969 			sc->dev->power.runtime_error,
970 			sc->dev->power.disable_depth,
971 			sc->dev->power.autosuspend_delay);
972 
973 	HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
974 			sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
975 			sc->pm_stats.request_resume);
976 
977 	HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
978 			sc->pm_stats.allow_suspend,
979 			sc->pm_stats.prevent_suspend);
980 
981 	HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
982 			sc->pm_stats.prevent_suspend_timeout,
983 			sc->pm_stats.allow_suspend_timeout);
984 
985 	HIF_ERROR("Suspended: %u, resumed: %u count",
986 			sc->pm_stats.suspended,
987 			sc->pm_stats.resumed);
988 
989 	HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
990 			sc->pm_stats.suspend_err,
991 			sc->pm_stats.runtime_get_err);
992 
993 	HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");
994 
995 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
996 		HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
997 	}
998 
999 	WARN_ON(1);
1000 }
1001 
1002 /**
1003  * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
1004  * @s: file to print to
1005  * @data: unused
1006  *
1007  * debugging tool added to the debug fs for displaying runtimepm stats
1008  *
1009  * Return: 0
1010  */
1011 static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
1012 {
1013 	struct hif_pci_softc *sc = s->private;
1014 	static const char * const autopm_state[] = {"NONE", "ON", "INPROGRESS",
1015 		"SUSPENDED"};
1016 	unsigned int msecs_age;
1017 	int pm_state = atomic_read(&sc->pm_state);
1018 	unsigned long timer_expires;
1019 	struct hif_pm_runtime_lock *ctx;
1020 
1021 	seq_printf(s, "%30s: %s\n", "Runtime PM state",
1022 			autopm_state[pm_state]);
1023 	seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
1024 			sc->pm_stats.last_resume_caller);
1025 
1026 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
1027 		msecs_age = jiffies_to_msecs(
1028 				jiffies - sc->pm_stats.suspend_jiffies);
1029 		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
1030 				msecs_age / 1000, msecs_age % 1000);
1031 	}
1032 
1033 	seq_printf(s, "%30s: %d\n", "PM Usage count",
1034 			atomic_read(&sc->dev->power.usage_count));
1035 
1036 	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
1037 			sc->prevent_suspend_cnt);
1038 
1039 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
1040 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
1041 	HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
1042 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
1043 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
1044 	HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
1045 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
1046 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
1047 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
1048 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
1049 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);
1050 
1051 	timer_expires = sc->runtime_timer_expires;
1052 	if (timer_expires > 0) {
1053 		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
1054 		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
1055 				msecs_age / 1000, msecs_age % 1000);
1056 	}
1057 
1058 	spin_lock_bh(&sc->runtime_lock);
1059 	if (list_empty(&sc->prevent_suspend_list)) {
1060 		spin_unlock_bh(&sc->runtime_lock);
1061 		return 0;
1062 	}
1063 
1064 	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
1065 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
1066 		seq_printf(s, "%s", ctx->name);
1067 		if (ctx->timeout)
1068 			seq_printf(s, "(%d ms)", ctx->timeout);
1069 		seq_puts(s, " ");
1070 	}
1071 	seq_puts(s, "\n");
1072 	spin_unlock_bh(&sc->runtime_lock);
1073 
1074 	return 0;
1075 }
1076 #undef HIF_PCI_RUNTIME_PM_STATS
1077 
1078 /**
1079  * hif_pci_runtime_pm_open() - open a debugfs file to access the runtime pm stats
1080  * @inode: inode of the debugfs entry
1081  * @file: file handle being opened
1082  *
1083  * Return: linux error code of single_open.
1084  */
1085 static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
1086 {
1087 	return single_open(file, hif_pci_pm_runtime_debugfs_show,
1088 			inode->i_private);
1089 }
1090 
1091 static const struct file_operations hif_pci_runtime_pm_fops = {
1092 	.owner          = THIS_MODULE,
1093 	.open           = hif_pci_runtime_pm_open,
1094 	.release        = single_release,
1095 	.read           = seq_read,
1096 	.llseek         = seq_lseek,
1097 };
1098 
1099 /**
1100  * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
1101  * @sc: pci context
1102  *
1103  * creates a debugfs entry to debug the runtime pm feature.
1104  */
1105 static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
1106 {
1107 	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
1108 					0400, NULL, sc,
1109 					&hif_pci_runtime_pm_fops);
1110 }
1111 
1112 /**
1113  * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
1114  * @sc: pci context
1115  *
1116  * removes the debugfs entry to debug the runtime pm feature.
1117  */
1118 static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
1119 {
1120 	debugfs_remove(sc->pm_dentry);
1121 }
1122 
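/**
 * hif_runtime_init() - hand the device over to runtime pm
 * @dev: device to enable runtime pm on
 * @delay: autosuspend delay in milliseconds
 *
 * Configures autosuspend, allows runtime pm and drops a usage count
 * without idling the device (pm_runtime_put_noidle).
 */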
1123 static void hif_runtime_init(struct device *dev, int delay)
1124 {
1125 	pm_runtime_set_autosuspend_delay(dev, delay);
1126 	pm_runtime_use_autosuspend(dev);
1127 	pm_runtime_allow(dev);
1128 	pm_runtime_mark_last_busy(dev);
1129 	pm_runtime_put_noidle(dev);
1130 	pm_suspend_ignore_children(dev, true);
1131 }
1132 
1133 static void hif_runtime_exit(struct device *dev)
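/**
 * hif_runtime_exit() - take the device back from runtime pm
 * @dev: device to resume and mark active
 *
 * Takes a usage count without resuming (pm_runtime_get_noresume) and
 * marks the device active.
 */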
1134 {
1135 	pm_runtime_get_noresume(dev);
1136 	pm_runtime_set_active(dev);
1137 }
1138 
1139 static void hif_pm_runtime_lock_timeout_fn(unsigned long data);
1140 
1141 /**
1142  * hif_pm_runtime_start(): start the runtime pm
1143  * @sc: pci context
1144  *
1145  * After this call, runtime pm will be active.
1146  */
1147 static void hif_pm_runtime_start(struct hif_pci_softc *sc)
1148 {
1149 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1150 	uint32_t mode = hif_get_conparam(ol_sc);
1151 
1152 	if (!ol_sc->hif_config.enable_runtime_pm) {
1153 		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
1154 		return;
1155 	}
1156 
1157 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode)) {
1158 		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
1159 				__func__);
1160 		return;
1161 	}
1162 
1163 	setup_timer(&sc->runtime_timer, hif_pm_runtime_lock_timeout_fn,
1164 			(unsigned long)sc);
1165 
1166 	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
1167 			ol_sc->hif_config.runtime_pm_delay);
1168 
1169 	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
1170 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
1171 	hif_runtime_pm_debugfs_create(sc);
1172 }
1173 
1174 /**
1175  * hif_pm_runtime_stop(): stop runtime pm
1176  * @sc: pci context
1177  *
1178  * Turns off runtime pm and frees corresponding resources
1179  * that were acquired by hif_pm_runtime_start().
1180  */
1181 static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
1182 {
1183 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1184 	uint32_t mode = hif_get_conparam(ol_sc);
1185 
1186 	if (!ol_sc->hif_config.enable_runtime_pm)
1187 		return;
1188 
1189 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode))
1190 		return;
1191 
1192 	hif_runtime_exit(sc->dev);
1193 	hif_pm_runtime_resume(sc->dev);
1194 
1195 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1196 
1197 	hif_runtime_pm_debugfs_remove(sc);
1198 	del_timer_sync(&sc->runtime_timer);
1199 	/* doesn't wait for pending traffic unlike cld-2.0 */
1200 }
1201 
1202 /**
1203  * hif_pm_runtime_open(): initialize runtime pm
1204  * @sc: pci data structure
1205  *
1206  * Early initialization
1207  */
1208 static void hif_pm_runtime_open(struct hif_pci_softc *sc)
1209 {
1210 	spin_lock_init(&sc->runtime_lock);
1211 
1212 	qdf_atomic_init(&sc->pm_state);
1213 	qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
1214 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1215 	INIT_LIST_HEAD(&sc->prevent_suspend_list);
1216 }
1217 
1218 /**
1219  * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
1220  * @sc: pci context
1221  *
1222  * Ensure we have only one vote against runtime suspend before closing
1223  * the runtime suspend feature.
1224  *
1225  * All gets by the wlan driver should have been returned;
1226  * one vote should remain as part of cnss_runtime_exit.
1227  *
1228  * Needs to be revisited if we share the root complex.
1229  */
1230 static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
1231 {
1232 	struct hif_pm_runtime_lock *ctx, *tmp;
1233 
1234 	if (atomic_read(&sc->dev->power.usage_count) != 1)
1235 		hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
1236 	else
1237 		return;
1238 
1239 	spin_lock_bh(&sc->runtime_lock);
1240 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1241 		spin_unlock_bh(&sc->runtime_lock);
1242 		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
1243 		spin_lock_bh(&sc->runtime_lock);
1244 	}
1245 	spin_unlock_bh(&sc->runtime_lock);
1246 
1247 	/* Ensure one and only one usage count so that when the wlan
1248 	 * driver is insmodded again, runtime pm won't be disabled.
1249 	 * This also ensures runtime pm doesn't get broken by the
1250 	 * count dropping below 1.
1251 	 */
1252 	if (atomic_read(&sc->dev->power.usage_count) <= 0)
1253 		atomic_set(&sc->dev->power.usage_count, 1);
1254 	while (atomic_read(&sc->dev->power.usage_count) > 1)
1255 		hif_pm_runtime_put_auto(sc->dev);
1256 }
1257 
1258 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
1259 					  struct hif_pm_runtime_lock *lock);
1260 
1261 /**
1262  * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
1263  * @sc: PCIe Context
1264  *
1265  * API is used to empty the runtime pm prevent suspend list.
1266  *
1267  * Return: void
1268  */
1269 static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
1270 {
1271 	struct hif_pm_runtime_lock *ctx, *tmp;
1272 
1273 	spin_lock_bh(&sc->runtime_lock);
1274 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1275 		__hif_pm_runtime_allow_suspend(sc, ctx);
1276 	}
1277 	spin_unlock_bh(&sc->runtime_lock);
1278 }
1279 
1280 /**
1281  * hif_pm_runtime_close(): close runtime pm
1282  * @sc: pci bus handle
1283  *
1284  * ensure runtime_pm is stopped before closing the driver
1285  */
1286 static void hif_pm_runtime_close(struct hif_pci_softc *sc)
1287 {
1288 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1289 
1290 	qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
1291 	if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
1292 		return;
1293 
1294 	hif_pm_runtime_stop(sc);
1295 
1296 	hif_is_recovery_in_progress(scn) ?
1297 		hif_pm_runtime_sanitize_on_ssr_exit(sc) :
1298 		hif_pm_runtime_sanitize_on_exit(sc);
1299 }
1300 #else
1301 static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
1302 static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
1303 static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
1304 static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
1305 #endif
1306 
1307 /**
1308  * hif_disable_power_gating() - disable HW power gating
1309  * @hif_ctx: hif context
1310  *
1311  * disables pcie L1 power states
1312  */
1313 static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1314 {
1315 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1316 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1317 
1318 	if (NULL == scn) {
1319 		HIF_ERROR("%s: Could not disable ASPM scn is null",
1320 		       __func__);
1321 		return;
1322 	}
1323 
1324 	/* Disable ASPM when pkt log is enabled */
1325 	pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1326 	pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1327 }
1328 
1329 /**
1330  * hif_enable_power_gating() - enable HW power gating
1331  * @sc: pci context
1332  *
1333  * enables pcie L1 power states
1334  */
1335 static void hif_enable_power_gating(struct hif_pci_softc *sc)
1336 {
1337 	if (NULL == sc) {
1338 		HIF_ERROR("%s: Could not re-enable ASPM as sc is null",
1339 		       __func__);
1340 		return;
1341 	}
1342 
1343 	/* Re-enable ASPM after firmware/OTP download is complete */
1344 	pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1345 }
1346 
1347 /**
1348  * hif_pci_enable_power_management() - enable power management
1349  * @hif_sc: hif context
1350  *
1351  * Enables runtime pm, ASPM (see hif_enable_power_gating) and re-enables
1352  * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
1353  *
1354  * note: epping mode does not call this function as it does not
1355  *       care about saving power.
1356  */
1357 void hif_pci_enable_power_management(struct hif_softc *hif_sc,
1358 				 bool is_packet_log_enabled)
1359 {
1360 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
1361 
1362 	if (pci_ctx == NULL) {
1363 		HIF_ERROR("%s, hif_ctx null", __func__);
1364 		return;
1365 	}
1366 
1367 	hif_pm_runtime_start(pci_ctx);
1368 
1369 	if (!is_packet_log_enabled)
1370 		hif_enable_power_gating(pci_ctx);
1371 
1372 	if (!CONFIG_ATH_PCIE_MAX_PERF &&
1373 	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
1374 	    !ce_srng_based(hif_sc)) {
1375 		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
1376 		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
1377 			HIF_ERROR("%s, failed to set target to sleep",
1378 				  __func__);
1379 	}
1380 }
1381 
1382 /**
1383  * hif_pci_disable_power_management() - disable power management
1384  * @hif_ctx: hif context
1385  *
1386  * Currently disables runtime pm. Should be updated to behave gracefully
1387  * if runtime pm is not started. Should be updated to take care
1388  * of aspm and soc sleep for driver load.
1389  */
1390 void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
1391 {
1392 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1393 
1394 	if (pci_ctx == NULL) {
1395 		HIF_ERROR("%s, hif_ctx null", __func__);
1396 		return;
1397 	}
1398 
1399 	hif_pm_runtime_stop(pci_ctx);
1400 }
1401 
1402 void hif_pci_display_stats(struct hif_softc *hif_ctx)
1403 {
1404 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1405 
1406 	if (pci_ctx == NULL) {
1407 		HIF_ERROR("%s, hif_ctx null", __func__);
1408 		return;
1409 	}
1410 	hif_display_ce_stats(&pci_ctx->ce_sc);
1411 }
1412 
1413 void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1414 {
1415 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1416 
1417 	if (pci_ctx == NULL) {
1418 		HIF_ERROR("%s, hif_ctx null", __func__);
1419 		return;
1420 	}
1421 	hif_clear_ce_stats(&pci_ctx->ce_sc);
1422 }
1423 
1424 #define ATH_PCI_PROBE_RETRY_MAX 3
1425 /**
1426  * hif_pci_open(): open the PCI instance of the hif bus layer
1427  * @hif_ctx: hif context
1428  * @bus_type: bus type
1429  *
1430  * Return: QDF_STATUS
1431  */
1432 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
1433 {
1434 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1435 
1436 	hif_ctx->bus_type = bus_type;
1437 	hif_pm_runtime_open(sc);
1438 
1439 	qdf_spinlock_create(&sc->irq_lock);
1440 
1441 	return hif_ce_open(hif_ctx);
1442 }
1443 
1444 /**
1445  * hif_wake_target_cpu() - wake the target's cpu
1446  * @scn: hif context
1447  *
1448  * Send an interrupt to the device to wake up the Target CPU
1449  * so it has an opportunity to notice any changed state.
1450  */
1451 static void hif_wake_target_cpu(struct hif_softc *scn)
1452 {
1453 	QDF_STATUS rv;
1454 	uint32_t core_ctrl;
1455 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1456 
1457 	rv = hif_diag_read_access(hif_hdl,
1458 				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1459 				  &core_ctrl);
1460 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1461 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1462 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1463 
1464 	rv = hif_diag_write_access(hif_hdl,
1465 				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1466 				   core_ctrl);
1467 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1468 }
1469 
1470 /**
1471  * soc_wake_reset() - allow the target to go to sleep
1472  * @scn: hif_softc
1473  *
1474  * Clear the force wake register.  This is done by
1475  * hif_sleep_entry and by cancelling the deferred timer sleep.
1476  */
1477 static void soc_wake_reset(struct hif_softc *scn)
1478 {
1479 	hif_write32_mb(scn, scn->mem +
1480 		PCIE_LOCAL_BASE_ADDRESS +
1481 		PCIE_SOC_WAKE_ADDRESS,
1482 		PCIE_SOC_WAKE_RESET);
1483 }
1484 
1485 /**
1486  * hif_sleep_entry() - gate target sleep
1487  * @arg: hif context
1488  *
1489  * This function is the callback for the sleep timer.
1490  * Check if the last force awake critical section was at least
1491  * HIF_MIN_SLEEP_INACTIVITY_TIME_MS ago.  If it was,
1492  * allow the target to go to sleep and cancel the sleep timer;
1493  * otherwise reschedule the sleep timer.
1494  */
1495 static void hif_sleep_entry(void *arg)
1496 {
1497 	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1498 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1499 	uint32_t idle_ms;
1500 
1501 	if (scn->recovery)
1502 		return;
1503 
1504 	if (hif_is_driver_unloading(scn))
1505 		return;
1506 
1507 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1508 	if (hif_state->verified_awake == false) {
1509 		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1510 						    - hif_state->sleep_ticks);
1511 		if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1512 			if (!qdf_atomic_read(&scn->link_suspended)) {
1513 				soc_wake_reset(scn);
1514 				hif_state->fake_sleep = false;
1515 			}
1516 		} else {
1517 			qdf_timer_stop(&hif_state->sleep_timer);
1518 			qdf_timer_start(&hif_state->sleep_timer,
1519 				    HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1520 		}
1521 	} else {
1522 		qdf_timer_stop(&hif_state->sleep_timer);
1523 		qdf_timer_start(&hif_state->sleep_timer,
1524 					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1525 	}
1526 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1527 }
1528 
1529 #define HIF_HIA_MAX_POLL_LOOP    1000000
1530 #define HIF_HIA_POLLING_DELAY_MS 10
1531 
1532 #ifdef CONFIG_WIN
1533 static void hif_set_hia_extnd(struct hif_softc *scn)
1534 {
1535 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1536 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1537 	uint32_t target_type = tgt_info->target_type;
1538 
1539 	HIF_TRACE("%s: E", __func__);
1540 
1541 	if ((target_type == TARGET_TYPE_AR900B) ||
1542 			target_type == TARGET_TYPE_QCA9984 ||
1543 			target_type == TARGET_TYPE_QCA9888) {
1544 		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
1545 		 * in RTC space
1546 		 */
1547 		tgt_info->target_revision
1548 			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
1549 					+ CHIP_ID_ADDRESS));
1550 		qdf_print(KERN_INFO"chip_id 0x%x chip_revision 0x%x\n",
1551 			target_type, tgt_info->target_revision);
1552 	}
1553 
1554 	{
1555 		uint32_t flag2_value = 0;
1556 		uint32_t flag2_targ_addr =
1557 			host_interest_item_address(target_type,
1558 			offsetof(struct host_interest_s, hi_skip_clock_init));
1559 
1560 		if ((ar900b_20_targ_clk != -1) &&
1561 			(frac != -1) && (intval != -1)) {
1562 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1563 				&flag2_value);
1564 			qdf_print("\n Setting clk_override\n");
1565 			flag2_value |= CLOCK_OVERRIDE;
1566 
1567 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1568 					flag2_value);
1569 			qdf_print("\n CLOCK PLL val set %d\n", flag2_value);
1570 		} else {
1571 			qdf_print(KERN_INFO"\n CLOCK PLL skipped\n");
1572 		}
1573 	}
1574 
1575 	if (target_type == TARGET_TYPE_AR900B
1576 			|| target_type == TARGET_TYPE_QCA9984
1577 			|| target_type == TARGET_TYPE_QCA9888) {
1578 
1579 		/* for AR9980_2.0, a 300 MHz clock is used; right now we assume
1580 		 * this would be supplied through module parameters,
1581 		 * if not supplied, assume the default or the same behavior as 1.0.
1582 		 * Assume the 1.0 clock can't be tuned; reset to defaults
1583 		 */
1584 
1585 		qdf_print(KERN_INFO
1586 			  "%s: setting the target pll frac %x intval %x\n",
1587 			  __func__, frac, intval);
1588 
1589 		/* do not touch frac, and int val, let them be default -1,
1590 		 * if desired, host can supply these through module params
1591 		 */
1592 		if (frac != -1 || intval != -1) {
1593 			uint32_t flag2_value = 0;
1594 			uint32_t flag2_targ_addr;
1595 
1596 			flag2_targ_addr =
1597 				host_interest_item_address(target_type,
1598 				offsetof(struct host_interest_s,
1599 					hi_clock_info));
1600 			hif_diag_read_access(hif_hdl,
1601 				flag2_targ_addr, &flag2_value);
1602 			qdf_print("\n ====> FRAC Val %x Address %x\n", frac,
1603 				flag2_value);
1604 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1605 			qdf_print("\n INT Val %x  Address %x\n",
1606 				intval, flag2_value + 4);
1607 			hif_diag_write_access(hif_hdl,
1608 					flag2_value + 4, intval);
1609 		} else {
1610 			qdf_print(KERN_INFO
1611 				  "%s: no frac provided, skipping pre-configuring PLL\n",
1612 				  __func__);
1613 		}
1614 
1615 		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
1616 		if ((target_type == TARGET_TYPE_AR900B)
1617 			&& (tgt_info->target_revision == AR900B_REV_2)
1618 			&& ar900b_20_targ_clk != -1) {
1619 			uint32_t flag2_value = 0;
1620 			uint32_t flag2_targ_addr;
1621 
1622 			flag2_targ_addr
1623 				= host_interest_item_address(target_type,
1624 					offsetof(struct host_interest_s,
1625 					hi_desired_cpu_speed_hz));
1626 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1627 							&flag2_value);
1628 			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x\n",
1629 				  flag2_value);
1630 			hif_diag_write_access(hif_hdl, flag2_value,
1631 				ar900b_20_targ_clk/*300000000u*/);
1632 		} else if (target_type == TARGET_TYPE_QCA9888) {
1633 			uint32_t flag2_targ_addr;
1634 
1635 			if (200000000u != qca9888_20_targ_clk) {
1636 				qca9888_20_targ_clk = 300000000u;
1637 				/* Setting the target clock speed to 300 mhz */
1638 			}
1639 
1640 			flag2_targ_addr
1641 				= host_interest_item_address(target_type,
1642 					offsetof(struct host_interest_s,
1643 					hi_desired_cpu_speed_hz));
1644 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1645 				qca9888_20_targ_clk);
1646 		} else {
1647 			qdf_print(KERN_INFO"%s: targ_clk is not provided, skipping pre-configuring PLL\n",
1648 				  __func__);
1649 		}
1650 	} else {
1651 		if (frac != -1 || intval != -1) {
1652 			uint32_t flag2_value = 0;
1653 			uint32_t flag2_targ_addr =
1654 				host_interest_item_address(target_type,
1655 					offsetof(struct host_interest_s,
1656 							hi_clock_info));
1657 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1658 						&flag2_value);
1659 			qdf_print("\n ====> FRAC Val %x Address %x\n", frac,
1660 							flag2_value);
1661 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1662 			qdf_print("\n INT Val %x  Address %x\n", intval,
1663 							flag2_value + 4);
1664 			hif_diag_write_access(hif_hdl, flag2_value + 4,
1665 					intval);
1666 		}
1667 	}
1668 }
1669 
1670 #else
1671 
1672 static void hif_set_hia_extnd(struct hif_softc *scn)
1673 {
1674 }
1675 
1676 #endif
1677 
1678 /**
1679  * hif_set_hia() - fill out the host interest area
1680  * @scn: hif context
1681  *
1682  * This is replaced by hif_wlan_enable for integrated targets.
1683  * This fills out the host interest area.  The firmware will
1684  * process these memory addresses when it is first brought out
1685  * of reset.
1686  *
1687  * Return: 0 for success.
1688  */
1689 static int hif_set_hia(struct hif_softc *scn)
1690 {
1691 	QDF_STATUS rv;
1692 	uint32_t interconnect_targ_addr = 0;
1693 	uint32_t pcie_state_targ_addr = 0;
1694 	uint32_t pipe_cfg_targ_addr = 0;
1695 	uint32_t svc_to_pipe_map = 0;
1696 	uint32_t pcie_config_flags = 0;
1697 	uint32_t flag2_value = 0;
1698 	uint32_t flag2_targ_addr = 0;
1699 #ifdef QCA_WIFI_3_0
1700 	uint32_t host_interest_area = 0;
1701 	uint8_t i;
1702 #else
1703 	uint32_t ealloc_value = 0;
1704 	uint32_t ealloc_targ_addr = 0;
1705 	uint8_t banks_switched = 1;
1706 	uint32_t chip_id;
1707 #endif
1708 	uint32_t pipe_cfg_addr;
1709 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1710 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1711 	uint32_t target_type = tgt_info->target_type;
1712 	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
1713 	static struct CE_pipe_config *target_ce_config;
1714 	struct service_to_pipe *target_service_to_ce_map;
1715 
1716 	HIF_TRACE("%s: E", __func__);
1717 
1718 	hif_get_target_ce_config(scn,
1719 				 &target_ce_config, &target_ce_config_sz,
1720 				 &target_service_to_ce_map,
1721 				 &target_service_to_ce_map_sz,
1722 				 NULL, NULL);
1723 
1724 	if (ADRASTEA_BU)
1725 		return QDF_STATUS_SUCCESS;
1726 
1727 #ifdef QCA_WIFI_3_0
1728 	i = 0;
1729 	while (i < HIF_HIA_MAX_POLL_LOOP) {
1730 		host_interest_area = hif_read32_mb(scn, scn->mem +
1731 						A_SOC_CORE_SCRATCH_0_ADDRESS);
1732 		if ((host_interest_area & 0x01) == 0) {
1733 			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1734 			host_interest_area = 0;
1735 			i++;
1736 			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1737 				HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1738 		} else {
1739 			host_interest_area &= (~0x01);
1740 			hif_write32_mb(scn, scn->mem + 0x113014, 0);
1741 			break;
1742 		}
1743 	}
1744 
1745 	if (i >= HIF_HIA_MAX_POLL_LOOP) {
1746 		HIF_ERROR("%s: hia polling timeout", __func__);
1747 		return -EIO;
1748 	}
1749 
1750 	if (host_interest_area == 0) {
1751 		HIF_ERROR("%s: host_interest_area = 0", __func__);
1752 		return -EIO;
1753 	}
1754 
1755 	interconnect_targ_addr = host_interest_area +
1756 			offsetof(struct host_interest_area_t,
1757 			hi_interconnect_state);
1758 
1759 	flag2_targ_addr = host_interest_area +
1760 			offsetof(struct host_interest_area_t, hi_option_flag2);
1761 
1762 #else
1763 	interconnect_targ_addr = hif_hia_item_address(target_type,
1764 		offsetof(struct host_interest_s, hi_interconnect_state));
1765 	ealloc_targ_addr = hif_hia_item_address(target_type,
1766 		offsetof(struct host_interest_s, hi_early_alloc));
1767 	flag2_targ_addr = hif_hia_item_address(target_type,
1768 		offsetof(struct host_interest_s, hi_option_flag2));
1769 #endif
1770 	/* Supply Target-side CE configuration */
1771 	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1772 			  &pcie_state_targ_addr);
1773 	if (rv != QDF_STATUS_SUCCESS) {
1774 		HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1775 			  __func__, interconnect_targ_addr, rv);
1776 		goto done;
1777 	}
1778 	if (pcie_state_targ_addr == 0) {
1779 		rv = QDF_STATUS_E_FAILURE;
1780 		HIF_ERROR("%s: pcie state addr is 0", __func__);
1781 		goto done;
1782 	}
1783 	pipe_cfg_addr = pcie_state_targ_addr +
1784 			  offsetof(struct pcie_state_s,
1785 			  pipe_cfg_addr);
1786 	rv = hif_diag_read_access(hif_hdl,
1787 			  pipe_cfg_addr,
1788 			  &pipe_cfg_targ_addr);
1789 	if (rv != QDF_STATUS_SUCCESS) {
1790 		HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1791 			__func__, pipe_cfg_addr, rv);
1792 		goto done;
1793 	}
1794 	if (pipe_cfg_targ_addr == 0) {
1795 		rv = QDF_STATUS_E_FAILURE;
1796 		HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1797 		goto done;
1798 	}
1799 
1800 	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1801 			(uint8_t *) target_ce_config,
1802 			target_ce_config_sz);
1803 
1804 	if (rv != QDF_STATUS_SUCCESS) {
1805 		HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1806 		goto done;
1807 	}
1808 
1809 	rv = hif_diag_read_access(hif_hdl,
1810 			  pcie_state_targ_addr +
1811 			  offsetof(struct pcie_state_s,
1812 			   svc_to_pipe_map),
1813 			  &svc_to_pipe_map);
1814 	if (rv != QDF_STATUS_SUCCESS) {
1815 		HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1816 		goto done;
1817 	}
1818 	if (svc_to_pipe_map == 0) {
1819 		rv = QDF_STATUS_E_FAILURE;
1820 		HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1821 		goto done;
1822 	}
1823 
1824 	rv = hif_diag_write_mem(hif_hdl,
1825 			svc_to_pipe_map,
1826 			(uint8_t *) target_service_to_ce_map,
1827 			target_service_to_ce_map_sz);
1828 	if (rv != QDF_STATUS_SUCCESS) {
1829 		HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1830 		goto done;
1831 	}
1832 
1833 	rv = hif_diag_read_access(hif_hdl,
1834 			pcie_state_targ_addr +
1835 			offsetof(struct pcie_state_s,
1836 			config_flags),
1837 			&pcie_config_flags);
1838 	if (rv != QDF_STATUS_SUCCESS) {
1839 		HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1840 		goto done;
1841 	}
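	/*
	 * Patch the PCIe config flags before writing them back: L1 and AXI
	 * clock gating are selected at build time, and the clock-switch
	 * wait flag is always set.
	 */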
1842 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1843 	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1844 #else
1845 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1846 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1847 	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1848 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1849 	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1850 #endif
1851 	rv = hif_diag_write_mem(hif_hdl,
1852 			pcie_state_targ_addr +
1853 			offsetof(struct pcie_state_s,
1854 			config_flags),
1855 			(uint8_t *) &pcie_config_flags,
1856 			sizeof(pcie_config_flags));
1857 	if (rv != QDF_STATUS_SUCCESS) {
1858 		HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1859 		goto done;
1860 	}
1861 
1862 #ifndef QCA_WIFI_3_0
1863 	/* configure early allocation */
1864 	ealloc_targ_addr = hif_hia_item_address(target_type,
1865 						offsetof(
1866 						struct host_interest_s,
1867 						hi_early_alloc));
1868 
1869 	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1870 			&ealloc_value);
1871 	if (rv != QDF_STATUS_SUCCESS) {
1872 		HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1873 		goto done;
1874 	}
1875 
1876 	/* 1 bank is switched to IRAM, except ROME 1.0 */
1877 	ealloc_value |=
1878 		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1879 		 HI_EARLY_ALLOC_MAGIC_MASK);
1880 
1881 	rv = hif_diag_read_access(hif_hdl,
1882 			  CHIP_ID_ADDRESS |
1883 			  RTC_SOC_BASE_ADDRESS, &chip_id);
1884 	if (rv != QDF_STATUS_SUCCESS) {
1885 		HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1886 		goto done;
1887 	}
1888 	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1889 		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1890 		switch (CHIP_ID_REVISION_GET(chip_id)) {
1891 		case 0x2:       /* ROME 1.3 */
1892 			/* 2 banks are switched to IRAM */
1893 			banks_switched = 2;
1894 			break;
1895 		case 0x4:       /* ROME 2.1 */
1896 		case 0x5:       /* ROME 2.2 */
1897 			banks_switched = 6;
1898 			break;
1899 		case 0x8:       /* ROME 3.0 */
1900 		case 0x9:       /* ROME 3.1 */
1901 		case 0xA:       /* ROME 3.2 */
1902 			banks_switched = 9;
1903 			break;
1904 		case 0x0:       /* ROME 1.0 */
1905 		case 0x1:       /* ROME 1.1 */
1906 		default:
1907 			/* 3 banks are switched to IRAM */
1908 			banks_switched = 3;
1909 			break;
1910 		}
1911 	}
1912 
1913 	ealloc_value |=
1914 		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1915 		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1916 
1917 	rv = hif_diag_write_access(hif_hdl,
1918 				ealloc_targ_addr,
1919 				ealloc_value);
1920 	if (rv != QDF_STATUS_SUCCESS) {
1921 		HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1922 		goto done;
1923 	}
1924 #endif
1925 	if ((target_type == TARGET_TYPE_AR900B)
1926 			|| (target_type == TARGET_TYPE_QCA9984)
1927 			|| (target_type == TARGET_TYPE_QCA9888)
1928 			|| (target_type == TARGET_TYPE_AR9888)) {
1929 		hif_set_hia_extnd(scn);
1930 	}
1931 
1932 	/* Tell Target to proceed with initialization */
1933 	flag2_targ_addr = hif_hia_item_address(target_type,
1934 						offsetof(
1935 						struct host_interest_s,
1936 						hi_option_flag2));
1937 
1938 	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1939 			  &flag2_value);
1940 	if (rv != QDF_STATUS_SUCCESS) {
1941 		HIF_ERROR("%s: get option val (%d)", __func__, rv);
1942 		goto done;
1943 	}
1944 
1945 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1946 	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1947 			   flag2_value);
1948 	if (rv != QDF_STATUS_SUCCESS) {
1949 		HIF_ERROR("%s: set option val (%d)", __func__, rv);
1950 		goto done;
1951 	}
1952 
1953 	hif_wake_target_cpu(scn);
1954 
1955 done:
1956 
1957 	return rv;
1958 }
1959 
1960 /**
1961  * hif_pci_bus_configure() - configure the pcie bus
1962  * @hif_sc: pointer to the hif context.
1963  *
1964  * Return: 0 for success, nonzero for failure.
1965  */
1966 int hif_pci_bus_configure(struct hif_softc *hif_sc)
1967 {
1968 	int status = 0;
1969 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1970 	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
1971 
1972 	hif_ce_prepare_config(hif_sc);
1973 
1974 	/* initialize sleep state adjust variables */
1975 	hif_state->sleep_timer_init = true;
1976 	hif_state->keep_awake_count = 0;
1977 	hif_state->fake_sleep = false;
1978 	hif_state->sleep_ticks = 0;
1979 
1980 	qdf_timer_init(NULL, &hif_state->sleep_timer,
1981 			       hif_sleep_entry, (void *)hif_state,
1982 			       QDF_TIMER_TYPE_WAKE_APPS);
1983 	hif_state->sleep_timer_init = true;
1984 
1985 	status = hif_wlan_enable(hif_sc);
1986 	if (status) {
1987 		HIF_ERROR("%s: hif_wlan_enable error = %d",
1988 			  __func__, status);
1989 		goto timer_free;
1990 	}
1991 
1992 	A_TARGET_ACCESS_LIKELY(hif_sc);
1993 
1994 	if ((CONFIG_ATH_PCIE_MAX_PERF ||
1995 	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
1996 	    !ce_srng_based(hif_sc)) {
1997 		/*
1998 		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
1999 		 * prevent sleep when we want to keep firmware always awake
2000 		 * note: when we want to keep firmware always awake,
2001 		 *       hif_target_sleep_state_adjust will point to a dummy
2002 		 *       function, and hif_pci_target_sleep_state_adjust must
2003 		 *       be called instead.
2004 		 * note: bus type check is here because AHB bus is reusing
2005 		 *       hif_pci_bus_configure code.
2006 		 */
2007 		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
2008 			if (hif_pci_target_sleep_state_adjust(hif_sc,
2009 					false, true) < 0) {
2010 				status = -EACCES;
2011 				goto disable_wlan;
2012 			}
2013 		}
2014 	}
2015 
2016 	/* todo: consider replacing this with an srng field */
2017 	if ((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) &&
2018 			(hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
2019 		hif_sc->per_ce_irq = true;
2020 	}
2021 
2022 	status = hif_config_ce(hif_sc);
2023 	if (status)
2024 		goto disable_wlan;
2025 
2026 	/* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
2027 	if (hif_needs_bmi(hif_osc)) {
2028 		status = hif_set_hia(hif_sc);
2029 		if (status)
2030 			goto unconfig_ce;
2031 
2032 		HIF_INFO_MED("%s: hif_set_hia done", __func__);
2033 
2034 		hif_register_bmi_callbacks(hif_sc);
2035 	}
2036 
2037 	if ((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) &&
2038 			(hif_sc->bus_type == QDF_BUS_TYPE_PCI))
2039 		HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
2040 						__func__);
2041 	else {
2042 		status = hif_configure_irq(hif_sc);
2043 		if (status < 0)
2044 			goto unconfig_ce;
2045 	}
2046 
2047 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2048 
2049 	return status;
2050 
2051 unconfig_ce:
2052 	hif_unconfig_ce(hif_sc);
2053 disable_wlan:
2054 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2055 	hif_wlan_disable(hif_sc);
2056 
2057 timer_free:
2058 	qdf_timer_stop(&hif_state->sleep_timer);
2059 	qdf_timer_free(&hif_state->sleep_timer);
2060 	hif_state->sleep_timer_init = false;
2061 
2062 	HIF_ERROR("%s: failed, status = %d", __func__, status);
2063 	return status;
2064 }
2065 
2066 /**
2067  * hif_pci_close(): hif bus close for pci
2068  * @hif_sc: hif context
2069  * Return: n/a
2070  */
2071 void hif_pci_close(struct hif_softc *hif_sc)
2072 {
2073 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
2074 
2075 	hif_pm_runtime_close(hif_pci_sc);
2076 	hif_ce_close(hif_sc);
2077 }
2078 
2079 #define BAR_NUM 0
2080 
2081 #ifndef CONFIG_PLD_PCIE_INIT
2082 static int hif_enable_pci(struct hif_pci_softc *sc,
2083 			  struct pci_dev *pdev,
2084 			  const struct pci_device_id *id)
2085 {
2086 	void __iomem *mem;
2087 	int ret = 0;
2088 	uint16_t device_id = 0;
2089 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2090 
2091 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
2092 	if (device_id != id->device)  {
2093 		HIF_ERROR(
2094 		   "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
2095 		   __func__, device_id, id->device);
2096 		/* pci link is down, so returning with an error code */
2097 		return -EIO;
2098 	}
2099 
2100 	/* FIXME: temp. commenting out assign_resource
2101 	 * call for dev_attach to work on 2.6.38 kernel
2102 	 */
2103 #if (!defined(__LINUX_ARM_ARCH__))
2104 	if (pci_assign_resource(pdev, BAR_NUM)) {
2105 		HIF_ERROR("%s: pci_assign_resource error", __func__);
2106 		return -EIO;
2107 	}
2108 #endif
2109 	if (pci_enable_device(pdev)) {
2110 		HIF_ERROR("%s: pci_enable_device error",
2111 			   __func__);
2112 		return -EIO;
2113 	}
2114 
2115 	/* Request MMIO resources */
2116 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2117 	if (ret) {
2118 		HIF_ERROR("%s: PCI MMIO reservation error", __func__);
2119 		ret = -EIO;
2120 		goto err_region;
2121 	}
2122 
2123 #ifdef CONFIG_ARM_LPAE
2124 	/* When CONFIG_ARM_LPAE is enabled, the 64-bit DMA mask must be
2125 	 * set even for 32-bit devices.
2126 	 */
2127 	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2128 	if (ret) {
2129 		HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
2130 		goto err_dma;
2131 	}
2132 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2133 	if (ret) {
2134 		HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
2135 		goto err_dma;
2136 	}
2137 #else
2138 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2139 	if (ret) {
2140 		HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
2141 		goto err_dma;
2142 	}
2143 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2144 	if (ret) {
2145 		HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
2146 			   __func__);
2147 		goto err_dma;
2148 	}
2149 #endif
2150 
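	/*
	 * Disable PCIe L1 sub-states (per the macro name); 0x188 is the
	 * device-specific config space offset handed to this workaround.
	 */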
2151 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2152 
2153 	/* Set bus master bit in PCI_COMMAND to enable DMA */
2154 	pci_set_master(pdev);
2155 
2156 	/* Arrange for access to Target SoC registers. */
2157 	mem = pci_iomap(pdev, BAR_NUM, 0);
2158 	if (!mem) {
2159 		HIF_ERROR("%s: PCI iomap error", __func__);
2160 		ret = -EIO;
2161 		goto err_iomap;
2162 	}
2163 
2164 	pr_err("*****BAR is %pK\n", mem);
2165 
2166 	sc->mem = mem;
2167 
2168 	HIF_INFO("%s, mem after pci_iomap:%pK\n",
2169 	       __func__, sc->mem);
2170 
2171 	/* Hawkeye emulation specific change */
2172 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
2173 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
2174 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
2175 		(device_id == RUMIM2M_DEVICE_ID_NODE3)) {
2176 		mem = mem + 0x0c000000;
2177 		sc->mem = mem;
2178 		HIF_INFO("%s: Changing PCI mem base to %pK\n",
2179 			__func__, sc->mem);
2180 	}
2181 
2182 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
2183 	ol_sc->mem = mem;
2184 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
2185 	sc->pci_enabled = true;
2186 	return ret;
2187 
2188 err_iomap:
2189 	pci_clear_master(pdev);
2190 err_dma:
2191 	pci_release_region(pdev, BAR_NUM);
2192 err_region:
2193 	pci_disable_device(pdev);
2194 	return ret;
2195 }
2196 #else
2197 static int hif_enable_pci(struct hif_pci_softc *sc,
2198 			  struct pci_dev *pdev,
2199 			  const struct pci_device_id *id)
2200 {
2201 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2202 	sc->pci_enabled = true;
2203 	return 0;
2204 }
2205 #endif
2206 
2207 
2208 #ifndef CONFIG_PLD_PCIE_INIT
2209 static inline void hif_pci_deinit(struct hif_pci_softc *sc)
2210 {
2211 	pci_iounmap(sc->pdev, sc->mem);
2212 	pci_clear_master(sc->pdev);
2213 	pci_release_region(sc->pdev, BAR_NUM);
2214 	pci_disable_device(sc->pdev);
2215 }
2216 #else
2217 static inline void hif_pci_deinit(struct hif_pci_softc *sc) {}
2218 #endif
2219 
2220 static void hif_disable_pci(struct hif_pci_softc *sc)
2221 {
2222 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2223 
2224 	if (ol_sc == NULL) {
2225 		HIF_ERROR("%s: ol_sc = NULL", __func__);
2226 		return;
2227 	}
2228 	hif_pci_device_reset(sc);
2229 
2230 	hif_pci_deinit(sc);
2231 
2232 	sc->mem = NULL;
2233 	ol_sc->mem = NULL;
2234 }
2235 
2236 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
2237 {
2238 	int ret = 0;
2239 	int targ_awake_limit = 500;
2240 #ifndef QCA_WIFI_3_0
2241 	uint32_t fw_indicator;
2242 #endif
2243 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2244 
2245 	/*
2246 	 * Verify that the Target was started cleanly.
2247 	 * The case where this is most likely is with an AUX-powered
2248 	 * Target and a Host in WoW mode. If the Host crashes,
2249 	 * loses power, or is restarted (without unloading the driver)
2250 	 * then the Target is left (aux) powered and running.  On a
2251 	 * subsequent driver load, the Target is in an unexpected state.
2252 	 * We try to catch that here in order to reset the Target and
2253 	 * retry the probe.
2254 	 */
2255 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2256 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2257 	while (!hif_targ_is_awake(scn, sc->mem)) {
2258 		if (0 == targ_awake_limit) {
2259 			HIF_ERROR("%s: target awake timeout", __func__);
2260 			ret = -EAGAIN;
2261 			goto end;
2262 		}
2263 		qdf_mdelay(1);
2264 		targ_awake_limit--;
2265 	}
2266 
2267 #if PCIE_BAR0_READY_CHECKING
2268 	{
2269 		int wait_limit = 200;
2270 		/* Synchronization point: wait the BAR0 is configured */
2271 		while (wait_limit-- &&
2272 			   !(hif_read32_mb(sc, sc->mem +
2273 					  PCIE_LOCAL_BASE_ADDRESS +
2274 					  PCIE_SOC_RDY_STATUS_ADDRESS)
2275 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
2276 			qdf_mdelay(10);
2277 		}
2278 		if (wait_limit < 0) {
2279 			/* AR6320v1 doesn't support checking of BAR0
2280 			 * configuration; wait about two sec for BAR0 ready
2281 			 */
2282 			HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
2283 				    __func__);
2284 		}
2285 	}
2286 #endif
2287 
2288 #ifndef QCA_WIFI_3_0
2289 	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
2290 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2291 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2292 
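	/*
	 * If the firmware indicator already shows INITIALIZED, the target
	 * was left running from a previous session; return -EAGAIN so the
	 * caller can reset the target and retry the probe.
	 */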
2293 	if (fw_indicator & FW_IND_INITIALIZED) {
2294 		HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
2295 			   __func__);
2296 		ret = -EAGAIN;
2297 		goto end;
2298 	}
2299 #endif
2300 
2301 end:
2302 	return ret;
2303 }
2304 
2305 static void wlan_tasklet_msi(unsigned long data)
2306 {
2307 	struct hif_tasklet_entry *entry = (struct hif_tasklet_entry *)data;
2308 	struct hif_pci_softc *sc = (struct hif_pci_softc *) entry->hif_handler;
2309 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2310 
2311 	if (scn->hif_init_done == false)
2312 		goto irq_handled;
2313 
2314 	if (qdf_atomic_read(&scn->link_suspended))
2315 		goto irq_handled;
2316 
2317 	qdf_atomic_inc(&scn->active_tasklet_cnt);
2318 
2319 	if (entry->id == HIF_MAX_TASKLET_NUM) {
2320 		/* the last tasklet is for fw IRQ */
2321 		(irqreturn_t)hif_fw_interrupt_handler(sc->irq_event, scn);
2322 		if (scn->target_status == TARGET_STATUS_RESET)
2323 			goto irq_handled;
2324 	} else if (entry->id < scn->ce_count) {
2325 		ce_per_engine_service(scn, entry->id);
2326 	} else {
2327 		HIF_ERROR("%s: ERROR - invalid CE_id = %d",
2328 		       __func__, entry->id);
2329 	}
2330 	return;
2331 
2332 irq_handled:
2333 	qdf_atomic_dec(&scn->active_tasklet_cnt);
2334 
2335 }
2336 
2337 /* deprecated */
2338 static int hif_configure_msi(struct hif_pci_softc *sc)
2339 {
2340 	int ret = 0;
2341 	int num_msi_desired;
2342 	int rv = -1;
2343 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2344 
2345 	HIF_TRACE("%s: E", __func__);
2346 
2347 	num_msi_desired = MSI_NUM_REQUEST; /* Multiple MSI */
2348 	if (num_msi_desired < 1) {
2349 		HIF_ERROR("%s: MSI is not configured", __func__);
2350 		return -EINVAL;
2351 	}
2352 
2353 	if (num_msi_desired > 1) {
2354 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
2355 		rv = pci_enable_msi_range(sc->pdev, num_msi_desired,
2356 						num_msi_desired);
2357 #else
2358 		rv = pci_enable_msi_block(sc->pdev, num_msi_desired);
2359 #endif
2360 	}
2361 	HIF_TRACE("%s: num_msi_desired = %d, available_msi = %d",
2362 		  __func__, num_msi_desired, rv);
2363 
2364 	if (rv == 0 || rv >= HIF_MAX_TASKLET_NUM) {
2365 		int i;
2366 
2367 		sc->num_msi_intrs = HIF_MAX_TASKLET_NUM;
2368 		sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].hif_handler =
2369 			(void *)sc;
2370 		sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].id =
2371 			HIF_MAX_TASKLET_NUM;
2372 		tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
2373 			 (unsigned long)&sc->tasklet_entries[
2374 			 HIF_MAX_TASKLET_NUM-1]);
2375 		ret = request_irq(sc->pdev->irq + MSI_ASSIGN_FW,
2376 				  hif_pci_msi_fw_handler,
2377 				  IRQF_SHARED, "wlan_pci", sc);
2378 		if (ret) {
2379 			HIF_ERROR("%s: request_irq failed", __func__);
2380 			goto err_intr;
2381 		}
2382 		for (i = 0; i <= scn->ce_count; i++) {
2383 			sc->tasklet_entries[i].hif_handler = (void *)sc;
2384 			sc->tasklet_entries[i].id = i;
2385 			tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
2386 				 (unsigned long)&sc->tasklet_entries[i]);
2387 			ret = request_irq((sc->pdev->irq +
2388 					   i + MSI_ASSIGN_CE_INITIAL),
2389 					  ce_per_engine_handler, IRQF_SHARED,
2390 					  "wlan_pci", sc);
2391 			if (ret) {
2392 				HIF_ERROR("%s: request_irq failed", __func__);
2393 				goto err_intr;
2394 			}
2395 		}
2396 	} else if (rv > 0) {
2397 		HIF_TRACE("%s: use single msi", __func__);
2398 
2399 		ret = pci_enable_msi(sc->pdev);
2400 		if (ret < 0) {
2401 			HIF_ERROR("%s: single MSI allocation failed",
2402 				  __func__);
2403 			/* Try for legacy PCI line interrupts */
2404 			sc->num_msi_intrs = 0;
2405 		} else {
2406 			sc->num_msi_intrs = 1;
2407 			tasklet_init(&sc->intr_tq,
2408 				wlan_tasklet, (unsigned long)sc);
2409 			ret = request_irq(sc->pdev->irq,
2410 					 hif_pci_legacy_ce_interrupt_handler,
2411 					  IRQF_SHARED, "wlan_pci", sc);
2412 			if (ret) {
2413 				HIF_ERROR("%s: request_irq failed", __func__);
2414 				goto err_intr;
2415 			}
2416 		}
2417 	} else {
2418 		sc->num_msi_intrs = 0;
2419 		ret = -EIO;
2420 		HIF_ERROR("%s: do not support MSI, rv = %d", __func__, rv);
2421 	}
2422 	ret = pci_enable_msi(sc->pdev);
2423 	if (ret < 0) {
2424 		HIF_ERROR("%s: single MSI interrupt allocation failed",
2425 			  __func__);
2426 		/* Try for legacy PCI line interrupts */
2427 		sc->num_msi_intrs = 0;
2428 	} else {
2429 		sc->num_msi_intrs = 1;
2430 		tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2431 		ret = request_irq(sc->pdev->irq,
2432 				  hif_pci_legacy_ce_interrupt_handler,
2433 				  IRQF_SHARED, "wlan_pci", sc);
2434 		if (ret) {
2435 			HIF_ERROR("%s: request_irq failed", __func__);
2436 			goto err_intr;
2437 		}
2438 	}
2439 
2440 	if (ret == 0) {
2441 		hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2442 			  PCIE_INTR_ENABLE_ADDRESS),
2443 			  HOST_GROUP0_MASK);
2444 		hif_write32_mb(sc, sc->mem +
2445 			  PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
2446 			  PCIE_SOC_WAKE_RESET);
2447 	}
2448 	HIF_TRACE("%s: X, ret = %d", __func__, ret);
2449 
2450 	return ret;
2451 
2452 err_intr:
2453 	if (sc->num_msi_intrs >= 1)
2454 		pci_disable_msi(sc->pdev);
2455 	return ret;
2456 }
2457 
2458 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2459 {
2460 	int ret = 0;
2461 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2462 	uint32_t target_type = scn->target_info.target_type;
2463 
2464 	HIF_TRACE("%s: E", __func__);
2465 
2466 	/* MSI is not supported or MSI IRQ configuration failed */
2467 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2468 	ret = request_irq(sc->pdev->irq,
2469 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2470 			  "wlan_pci", sc);
2471 	if (ret) {
2472 		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
2473 		goto end;
2474 	}
2475 	scn->wake_irq = sc->pdev->irq;
2476 	/* Use sc->irq instead of sc->pdev->irq;
2477 	 * a platform_device pdev doesn't have an irq field
2478 	 */
2479 	sc->irq = sc->pdev->irq;
2480 	/* Use Legacy PCI Interrupts */
2481 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2482 		  PCIE_INTR_ENABLE_ADDRESS),
2483 		  HOST_GROUP0_MASK);
2484 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2485 			       PCIE_INTR_ENABLE_ADDRESS));
2486 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2487 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2488 
2489 	if ((target_type == TARGET_TYPE_IPQ4019) ||
2490 			(target_type == TARGET_TYPE_AR900B)  ||
2491 			(target_type == TARGET_TYPE_QCA9984) ||
2492 			(target_type == TARGET_TYPE_AR9888) ||
2493 			(target_type == TARGET_TYPE_QCA9888) ||
2494 			(target_type == TARGET_TYPE_AR6320V1) ||
2495 			(target_type == TARGET_TYPE_AR6320V2) ||
2496 			(target_type == TARGET_TYPE_AR6320V3)) {
2497 		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2498 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2499 	}
2500 end:
2501 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2502 			  "%s: X, ret = %d", __func__, ret);
2503 	return ret;
2504 }
2505 
2506 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
2507 {
2508 	int ret;
2509 	int ce_id, irq;
2510 	uint32_t msi_data_start;
2511 	uint32_t msi_data_count;
2512 	uint32_t msi_irq_start;
2513 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2514 
2515 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2516 					    &msi_data_count, &msi_data_start,
2517 					    &msi_irq_start);
2518 	if (ret)
2519 		return ret;
2520 
2521 	/* needs to match the ce_id -> irq data mapping
2522 	 * used in the srng parameter configuration
2523 	 */
2524 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2525 		unsigned int msi_data;
2526 
2527 		if (!ce_sc->tasklets[ce_id].inited)
2528 			continue;
2529 
2530 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
2531 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2532 
2533 		HIF_INFO("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
2534 			  ce_id, msi_data, irq);
2535 
2536 		free_irq(irq, &ce_sc->tasklets[ce_id]);
2537 	}
2538 
2539 	return ret;
2540 }
2541 
2542 static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2543 {
2544 	int i, j, irq;
2545 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2546 	struct hif_exec_context *hif_ext_group;
2547 
2548 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2549 		hif_ext_group = hif_state->hif_ext_group[i];
2550 		if (hif_ext_group->irq_requested) {
2551 			hif_ext_group->irq_requested = false;
2552 			for (j = 0; j < hif_ext_group->numirq; j++) {
2553 				irq = hif_ext_group->os_irq[j];
2554 				free_irq(irq, hif_ext_group);
2555 			}
2556 			hif_ext_group->numirq = 0;
2557 		}
2558 	}
2559 }
2560 
2561 /**
2562  * hif_pci_nointrs(): disable IRQs
2563  *
2564  * This function stops and frees the configured interrupt(s)
2565  *
2566  * @scn: struct hif_softc
2567  *
2568  * Return: none
2569  */
2570 void hif_pci_nointrs(struct hif_softc *scn)
2571 {
2572 	int i, ret;
2573 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2574 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2575 
2576 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2577 
2578 	if (scn->request_irq_done == false)
2579 		return;
2580 
2581 	hif_pci_deconfigure_grp_irq(scn);
2582 
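	/*
	 * -EINVAL from hif_ce_srng_msi_free_irq() is treated as "no MSI
	 * assignment available", so fall back to freeing the MSI block
	 * or legacy interrupt below in that case.
	 */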
2583 	ret = hif_ce_srng_msi_free_irq(scn);
2584 	if (ret != -EINVAL) {
2585 		/* ce irqs freed in hif_ce_srng_msi_free_irq */
2586 
2587 		if (scn->wake_irq)
2588 			free_irq(scn->wake_irq, scn);
2589 		scn->wake_irq = 0;
2590 	} else if (sc->num_msi_intrs > 0) {
2591 		/* MSI interrupt(s) */
2592 		for (i = 0; i < sc->num_msi_intrs; i++)
2593 			free_irq(sc->irq + i, sc);
2594 		sc->num_msi_intrs = 0;
2595 	} else {
2596 		/* Legacy PCI line interrupt
2597 		 * Use sc->irq instead of sc->pdev->irq
2598 		 * platform_device pdev doesn't have an irq field
2599 		 */
2600 		free_irq(sc->irq, sc);
2601 	}
2602 	scn->request_irq_done = false;
2603 }
2604 
2605 /**
2606  * hif_pci_disable_bus(): disable the PCI bus
2607  *
2608  * This function resets the device and disables the bus
2609  *
2610  * @scn: hif context
2611  *
2612  * Return: none
2613  */
2614 void hif_pci_disable_bus(struct hif_softc *scn)
2615 {
2616 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2617 	struct pci_dev *pdev;
2618 	void __iomem *mem;
2619 	struct hif_target_info *tgt_info = &scn->target_info;
2620 
2621 	/* Attach did not succeed, all resources have been
2622 	 * freed in error handler
2623 	 */
2624 	if (!sc)
2625 		return;
2626 
2627 	pdev = sc->pdev;
2628 	if (ADRASTEA_BU) {
2629 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2630 
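		/*
		 * Mask the host interrupt group and clear any bits still
		 * pending before the device is reset below.
		 */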
2631 		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2632 		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
2633 			       HOST_GROUP0_MASK);
2634 	}
2635 
2636 #if defined(CPU_WARM_RESET_WAR)
2637 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2638 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2639 	 * verified for AR9888_REV1
2640 	 */
2641 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2642 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2643 		hif_pci_device_warm_reset(sc);
2644 	else
2645 		hif_pci_device_reset(sc);
2646 #else
2647 	hif_pci_device_reset(sc);
2648 #endif
2649 	mem = (void __iomem *)sc->mem;
2650 	if (mem) {
2651 #ifndef CONFIG_PLD_PCIE_INIT
2652 		pci_disable_msi(pdev);
2653 #endif
2654 		hif_dump_pipe_debug_count(scn);
2655 		if (scn->athdiag_procfs_inited) {
2656 			athdiag_procfs_remove();
2657 			scn->athdiag_procfs_inited = false;
2658 		}
2659 		hif_pci_deinit(sc);
2660 		scn->mem = NULL;
2661 	}
2662 	HIF_INFO("%s: X", __func__);
2663 }
2664 
2665 #define OL_ATH_PCI_PM_CONTROL 0x44
2666 
2667 #ifdef FEATURE_RUNTIME_PM
2668 /**
2669  * hif_runtime_prevent_linkdown() - prevent or allow runtime suspend from occurring
2670  * @scn: hif context
2671  * @flag: prevent linkdown if true otherwise allow
2672  *
2673  * this api should only be called as part of bus prevent linkdown
2674  */
2675 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2676 {
2677 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2678 
2679 	if (flag)
2680 		qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
2681 	else
2682 		qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
2683 }
2684 #else
2685 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2686 {
2687 }
2688 #endif
2689 
2690 #if defined(CONFIG_PCI_MSM)
2691 /**
2692  * hif_pci_prevent_linkdown(): prevent or allow linkdown
2693  * @flag: true prevents linkdown, false allows
2694  *
2695  * Calls into the platform driver to vote against taking down the
2696  * pcie link.
2697  *
2698  * Return: n/a
2699  */
2700 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2701 {
2702 	int errno;
2703 
2704 	HIF_DBG("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2705 	hif_runtime_prevent_linkdown(scn, flag);
2706 
2707 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2708 	if (errno)
2709 		HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
2710 			  __func__, errno);
2711 }
2712 #else
2713 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2714 {
2715 	HIF_DBG("wlan: %s pcie power collapse",
2716 			(flag ? "disable" : "enable"));
2717 	hif_runtime_prevent_linkdown(scn, flag);
2718 }
2719 #endif
2720 
2721 static int hif_mark_wake_irq_wakeable(struct hif_softc *scn)
2722 {
2723 	int errno;
2724 
2725 	errno = enable_irq_wake(scn->wake_irq);
2726 	if (errno) {
2727 		HIF_ERROR("%s: Failed to mark wake IRQ: %d", __func__, errno);
2728 		return errno;
2729 	}
2730 
2731 	return 0;
2732 }
2733 
2734 /**
2735  * hif_pci_bus_suspend(): prepare hif for suspend
2736  *
2737  * Enables pci bus wake irq based on link suspend voting.
2738  *
2739  * Return: 0 for success and non-zero error code for failure
2740  */
2741 int hif_pci_bus_suspend(struct hif_softc *scn)
2742 {
2743 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2744 		return 0;
2745 
2746 	/* pci link is staying up; enable wake irq */
2747 	return hif_mark_wake_irq_wakeable(scn);
2748 }
2749 
2750 /**
2751  * __hif_check_link_status() - API to check if PCIe link is active/not
2752  * @scn: HIF Context
2753  *
2754  * API reads the PCIe config space to verify if PCIe link training is
2755  * successful or not.
2756  *
2757  * Return: Success/Failure
2758  */
2759 static int __hif_check_link_status(struct hif_softc *scn)
2760 {
2761 	uint16_t dev_id = 0;
2762 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2763 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2764 
2765 	if (!sc) {
2766 		HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
2767 		return -EINVAL;
2768 	}
2769 
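	/*
	 * A config space read on a downed PCIe link typically returns all
	 * 1s, so a device ID that no longer matches the probed one is
	 * treated as a link failure.
	 */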
2770 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2771 
2772 	if (dev_id == sc->devid)
2773 		return 0;
2774 
2775 	HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2776 	       __func__, dev_id);
2777 
2778 	scn->recovery = true;
2779 
2780 	if (cbk && cbk->set_recovery_in_progress)
2781 		cbk->set_recovery_in_progress(cbk->context, true);
2782 	else
2783 		HIF_ERROR("%s: Driver Global Recovery is not set", __func__);
2784 
2785 	pld_is_pci_link_down(sc->dev);
2786 	return -EACCES;
2787 }
2788 
2789 static int hif_unmark_wake_irq_wakeable(struct hif_softc *scn)
2790 {
2791 	int errno;
2792 
2793 	errno = disable_irq_wake(scn->wake_irq);
2794 	if (errno) {
2795 		HIF_ERROR("%s: Failed to unmark wake IRQ: %d", __func__, errno);
2796 		return errno;
2797 	}
2798 
2799 	return 0;
2800 }
2801 
2802 /**
2803  * hif_pci_bus_resume(): prepare hif for resume
2804  *
2805  * Disables pci bus wake irq based on link suspend voting.
2806  *
2807  * Return: 0 for success and non-zero error code for failure
2808  */
2809 int hif_pci_bus_resume(struct hif_softc *scn)
2810 {
2811 	int ret;
2812 
2813 	ret = __hif_check_link_status(scn);
2814 	if (ret)
2815 		return ret;
2816 
2817 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2818 		return 0;
2819 
2820 	/* pci link is up; disable wake irq */
2821 	return hif_unmark_wake_irq_wakeable(scn);
2822 }
2823 
2824 /**
2825  * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2826  * @scn: hif context
2827  *
2828  * Ensure that if we received the wakeup message before the irq
2829  * was disabled that the message is processed before suspending.
2830  *
2831  * Return: -EBUSY if we fail to flush the tasklets.
2832  */
2833 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2834 {
2835 	if (hif_drain_tasklets(scn) != 0)
2836 		return -EBUSY;
2837 
2838 	/* Stop the HIF Sleep Timer */
2839 	hif_cancel_deferred_target_sleep(scn);
2840 
2841 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2842 		qdf_atomic_set(&scn->link_suspended, 1);
2843 
2844 	return 0;
2845 }
2846 
2847 /**
2848  * hif_pci_bus_resume_noirq() - re-enable target access after resume
2849  * @scn: hif context
2850  *
2851  * Clear the link-suspended flag so that register accesses are allowed
2852  * again before interrupts are re-enabled.
2853  *
2854  * Return: 0 for success
2855  */
2856 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2857 {
2858 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2859 		qdf_atomic_set(&scn->link_suspended, 0);
2860 
2861 	return 0;
2862 }
2863 
2864 #ifdef FEATURE_RUNTIME_PM
2865 /**
2866  * __hif_runtime_pm_set_state(): utility function
2867  * @state: state to set
2868  *
2869  * Sets the runtime pm state tracked in the pci softc.
2870  */
2871 static void __hif_runtime_pm_set_state(struct hif_softc *scn,
2872 				enum hif_pm_runtime_state state)
2873 {
2874 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2875 
2876 	if (NULL == sc) {
2877 		HIF_ERROR("%s: HIF_CTX not initialized",
2878 		       __func__);
2879 		return;
2880 	}
2881 
2882 	qdf_atomic_set(&sc->pm_state, state);
2883 }
2884 
2885 /**
2886  * hif_runtime_pm_set_state_inprogress(): adjust runtime pm state
2887  *
2888  * Notify hif that a runtime pm operation has started
2889  */
2890 static void hif_runtime_pm_set_state_inprogress(struct hif_softc *scn)
2891 {
2892 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_INPROGRESS);
2893 }
2894 
2895 /**
2896  * hif_runtime_pm_set_state_on():  adjust runtime pm state
2897  *
2898  * Notify hif that the runtime pm state should be on
2899  */
2900 static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
2901 {
2902 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
2903 }
2904 
2905 /**
2906  * hif_runtime_pm_set_state_suspended():  adjust runtime pm state
2907  *
2908  * Notify hif that a runtime suspend attempt has been completed successfully
2909  */
2910 static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
2911 {
2912 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
2913 }
2914 
2915 /**
2916  * hif_log_runtime_suspend_success() - log a successful runtime suspend
2917  */
2918 static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
2919 {
2920 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2921 
2922 	if (sc == NULL)
2923 		return;
2924 
2925 	sc->pm_stats.suspended++;
2926 	sc->pm_stats.suspend_jiffies = jiffies;
2927 }
2928 
2929 /**
2930  * hif_log_runtime_suspend_failure() - log a failed runtime suspend
2931  *
2932  * log a failed runtime suspend
2933  * mark last busy to prevent immediate runtime suspend
2934  */
2935 static void hif_log_runtime_suspend_failure(void *hif_ctx)
2936 {
2937 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2938 
2939 	if (sc == NULL)
2940 		return;
2941 
2942 	sc->pm_stats.suspend_err++;
2943 }
2944 
2945 /**
2946  * hif_log_runtime_resume_success() - log a successful runtime resume
2947  *
2948  * log a successful runtime resume
2949  * mark last busy to prevent immediate runtime suspend
2950  */
2951 static void hif_log_runtime_resume_success(void *hif_ctx)
2952 {
2953 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2954 
2955 	if (sc == NULL)
2956 		return;
2957 
2958 	sc->pm_stats.resumed++;
2959 }
2960 
2961 /**
2962  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
2963  *
2964  * Record the failure.
2965  * mark last busy to delay a retry.
2966  * adjust the runtime_pm state.
2967  */
2968 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
2969 {
2970 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2971 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2972 
2973 	hif_log_runtime_suspend_failure(hif_ctx);
2974 	if (hif_pci_sc != NULL)
2975 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2976 	hif_runtime_pm_set_state_on(scn);
2977 }
2978 
2979 /**
2980  * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
2981  *
2982  * Makes sure that the pci link will be taken down by the suspend operation.
2983  * If the hif layer is configured to leave the bus on, runtime suspend will
2984  * not save any power.
2985  *
2986  * Set the runtime suspend state to in progress.
2987  *
2988  * return -EINVAL if the bus won't go down.  otherwise return 0
2989  */
2990 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
2991 {
2992 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2993 
2994 	if (!hif_can_suspend_link(hif_ctx)) {
2995 		HIF_ERROR("Runtime PM not supported for link up suspend");
2996 		return -EINVAL;
2997 	}
2998 
2999 	hif_runtime_pm_set_state_inprogress(scn);
3000 	return 0;
3001 }
3002 
3003 /**
3004  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
3005  *
3006  * Record the success.
3007  * adjust the runtime_pm state
3008  */
3009 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
3010 {
3011 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3012 
3013 	hif_runtime_pm_set_state_suspended(scn);
3014 	hif_log_runtime_suspend_success(scn);
3015 }
3016 
3017 /**
3018  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
3019  *
3020  * update the runtime pm state.
3021  */
3022 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
3023 {
3024 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3025 
3026 	hif_runtime_pm_set_state_inprogress(scn);
3027 }
3028 
3029 /**
3030  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
3031  *
3032  * record the success.
3033  * adjust the runtime_pm state
3034  */
3035 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
3036 {
3037 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
3038 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3039 
3040 	hif_log_runtime_resume_success(hif_ctx);
3041 	if (hif_pci_sc != NULL)
3042 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
3043 	hif_runtime_pm_set_state_on(scn);
3044 }
3045 
3046 /**
3047  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
3048  *
3049  * Return: 0 for success and non-zero error code for failure
3050  */
3051 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
3052 {
3053 	int errno;
3054 
3055 	errno = hif_bus_suspend(hif_ctx);
3056 	if (errno) {
3057 		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
3058 		return errno;
3059 	}
3060 
3061 	errno = hif_apps_irqs_disable(hif_ctx);
3062 	if (errno) {
3063 		HIF_ERROR("%s: failed disable irqs: %d", __func__, errno);
3064 		goto bus_resume;
3065 	}
3066 
3067 	errno = hif_bus_suspend_noirq(hif_ctx);
3068 	if (errno) {
3069 		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
3070 		goto irqs_enable;
3071 	}
3072 
3073 	/* link should always be down; skip enable wake irq */
3074 
3075 	return 0;
3076 
3077 irqs_enable:
3078 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
3079 
3080 bus_resume:
3081 	QDF_BUG(!hif_bus_resume(hif_ctx));
3082 
3083 	return errno;
3084 }
3085 
3086 /**
3087  * hif_fastpath_resume() - resume fastpath for runtimepm
3088  *
3089  * ensure that the fastpath write index register is up to date
3090  * since runtime pm may cause ce_send_fast to skip the register
3091  * write.
3092  *
3093  * fastpath only applicable to legacy copy engine
3094  */
3095 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
3096 {
3097 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3098 	struct CE_state *ce_state;
3099 
3100 	if (!scn)
3101 		return;
3102 
3103 	if (scn->fastpath_mode_on) {
3104 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
3105 			return;
3106 
3107 		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
3108 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
3109 
3110 		/* war_ce_src_ring_write_idx_set */
3111 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
3112 				ce_state->src_ring->write_index);
3113 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
3114 		Q_TARGET_ACCESS_END(scn);
3115 	}
3116 }
3117 
3118 /**
3119  * hif_runtime_resume() - do the bus resume part of a runtime resume
3120  *
3121  *  Return: 0 for success and non-zero error code for failure
3122  */
3123 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
3124 {
3125 	/* link should always be down; skip disable wake irq */
3126 
3127 	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
3128 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
3129 	QDF_BUG(!hif_bus_resume(hif_ctx));
3130 	return 0;
3131 }
3132 #endif /* #ifdef FEATURE_RUNTIME_PM */
3133 
3134 #if CONFIG_PCIE_64BIT_MSI
3135 static void hif_free_msi_ctx(struct hif_softc *scn)
3136 {
3137 	struct hif_pci_softc *sc = scn->hif_sc;
3138 	struct hif_msi_info *info = &sc->msi_info;
3139 	struct device *dev = scn->qdf_dev->dev;
3140 
3141 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
3142 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
3143 	info->magic = NULL;
3144 	info->magic_dma = 0;
3145 }
3146 #else
3147 static void hif_free_msi_ctx(struct hif_softc *scn)
3148 {
3149 }
3150 #endif
3151 
3152 void hif_pci_disable_isr(struct hif_softc *scn)
3153 {
3154 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3155 
3156 	hif_exec_kill(&scn->osc);
3157 	hif_nointrs(scn);
3158 	hif_free_msi_ctx(scn);
3159 	/* Cancel the pending tasklet */
3160 	ce_tasklet_kill(scn);
3161 	tasklet_kill(&sc->intr_tq);
3162 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
3163 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
3164 }
3165 
3166 /* Function to reset SoC */
3167 void hif_pci_reset_soc(struct hif_softc *hif_sc)
3168 {
3169 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
3170 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
3171 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
3172 
3173 #if defined(CPU_WARM_RESET_WAR)
3174 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
3175 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
3176 	 * verified for AR9888_REV1
3177 	 */
3178 	if (tgt_info->target_version == AR9888_REV2_VERSION)
3179 		hif_pci_device_warm_reset(sc);
3180 	else
3181 		hif_pci_device_reset(sc);
3182 #else
3183 	hif_pci_device_reset(sc);
3184 #endif
3185 }
3186 
3187 #ifdef CONFIG_PCI_MSM
3188 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
3189 {
3190 	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
3191 	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
3192 }
3193 #else
3194 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {};
3195 #endif
3196 
3197 /**
3198  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
3199  * @sc: HIF PCIe Context
3200  *
3201  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
3202  *
3203  * Return: -EACCES to report the failure to the caller
3204  */
3205 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
3206 {
3207 	uint16_t val = 0;
3208 	uint32_t bar = 0;
3209 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
3210 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
3211 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
3212 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
3213 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
3214 	A_target_id_t pci_addr = scn->mem;
3215 
3216 	HIF_ERROR("%s: keep_awake_count = %d",
3217 			__func__, hif_state->keep_awake_count);
3218 
3219 	pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
3220 
3221 	HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
3222 
3223 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
3224 
3225 	HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
3226 
3227 	pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
3228 
3229 	HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
3230 
3231 	pci_read_config_word(sc->pdev, PCI_STATUS, &val);
3232 
3233 	HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
3234 
3235 	pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
3236 
3237 	HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
3238 
3239 	HIF_ERROR("%s: SOC_WAKE_ADDR 0x%08x", __func__,
3240 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3241 						PCIE_SOC_WAKE_ADDRESS));
3242 
3243 	HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
3244 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3245 							RTC_STATE_ADDRESS));
3246 
3247 	HIF_ERROR("%s:error, wakeup target", __func__);
3248 	hif_msm_pcie_debug_info(sc);
3249 
3250 	if (!cfg->enable_self_recovery)
3251 		QDF_BUG(0);
3252 
3253 	scn->recovery = true;
3254 
3255 	if (cbk->set_recovery_in_progress)
3256 		cbk->set_recovery_in_progress(cbk->context, true);
3257 
3258 	pld_is_pci_link_down(sc->dev);
3259 	return -EACCES;
3260 }
3261 
3262 /*
3263  * For now, we use simple on-demand sleep/wake.
3264  * Some possible improvements:
3265  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
3266  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
3267  *   Careful, though, these functions may be used by
3268  *  interrupt handlers ("atomic")
3269  *  -Don't use host_reg_table for this code; instead use values directly
3270  *  -Use a separate timer to track activity and allow Target to sleep only
3271  *   if it hasn't done anything for a while; may even want to delay some
3272  *   processing for a short while in order to "batch" (e.g.) transmit
3273  *   requests with completion processing into "windows of up time".  Costs
3274  *   some performance, but improves power utilization.
3275  *  -On some platforms, it might be possible to eliminate explicit
3276  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
3277  *   recover from the failure by forcing the Target awake.
3278  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
3279  *   overhead in some cases. Perhaps this makes more sense when
3280  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
3281  *   disabled.
3282  *  -It is possible to compile this code out and simply force the Target
3283  *   to remain awake.  That would yield optimal performance at the cost of
3284  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
3285  *
3286  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
3287  */
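/*
 * Minimal usage sketch (hypothetical caller): wake the Target and wait,
 * access registers, then allow it to sleep again.
 *
 *	if (hif_pci_target_sleep_state_adjust(scn, false, true) == 0) {
 *		... register accesses ...
 *		hif_pci_target_sleep_state_adjust(scn, true, false);
 *	}
 */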
3288 /**
3289  * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
3290  * @scn: hif_softc pointer.
3291  * @sleep_ok: bool
3292  * @wait_for_it: bool
3293  *
3294  * Wake the Target for register access, or allow it to sleep again on demand
3295  *
3296  * Return: 0 on success, -EACCES on error
3297  */
3298 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
3299 			      bool sleep_ok, bool wait_for_it)
3300 {
3301 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3302 	A_target_id_t pci_addr = scn->mem;
3303 	static int max_delay;
3304 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3305 	static int debug;
3306 	if (scn->recovery)
3307 		return -EACCES;
3308 
3309 	if (qdf_atomic_read(&scn->link_suspended)) {
3310 		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
3311 		debug = true;
3312 		QDF_ASSERT(0);
3313 		return -EACCES;
3314 	}
3315 
3316 	if (debug) {
3317 		wait_for_it = true;
3318 		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
3319 				__func__);
3320 		QDF_ASSERT(0);
3321 	}
3322 
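	/*
	 * keep_awake_count is a reference count: wake requests increment
	 * it and sleep-ok requests decrement it; the target is only
	 * allowed to (fake) sleep once the count drops back to zero.
	 */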
3323 	if (sleep_ok) {
3324 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3325 		hif_state->keep_awake_count--;
3326 		if (hif_state->keep_awake_count == 0) {
3327 			/* Allow sleep */
3328 			hif_state->verified_awake = false;
3329 			hif_state->sleep_ticks = qdf_system_ticks();
3330 		}
3331 		if (hif_state->fake_sleep == false) {
3332 			/* Set the Fake Sleep */
3333 			hif_state->fake_sleep = true;
3334 
3335 			/* Start the Sleep Timer */
3336 			qdf_timer_stop(&hif_state->sleep_timer);
3337 			qdf_timer_start(&hif_state->sleep_timer,
3338 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
3339 		}
3340 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3341 	} else {
3342 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3343 
3344 		if (hif_state->fake_sleep) {
3345 			hif_state->verified_awake = true;
3346 		} else {
3347 			if (hif_state->keep_awake_count == 0) {
3348 				/* Force AWAKE */
3349 				hif_write32_mb(sc, pci_addr +
3350 					      PCIE_LOCAL_BASE_ADDRESS +
3351 					      PCIE_SOC_WAKE_ADDRESS,
3352 					      PCIE_SOC_WAKE_V_MASK);
3353 			}
3354 		}
3355 		hif_state->keep_awake_count++;
3356 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3357 
3358 		if (wait_for_it && !hif_state->verified_awake) {
3359 #define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
3360 			int tot_delay = 0;
3361 			int curr_delay = 5;
3362 
3363 			for (;; ) {
3364 				if (hif_targ_is_awake(scn, pci_addr)) {
3365 					hif_state->verified_awake = true;
3366 					break;
3367 				}
3368 				if (!hif_pci_targ_is_present(scn, pci_addr))
3369 					break;
3370 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
3371 					return hif_log_soc_wakeup_timeout(sc);
3372 
3373 				OS_DELAY(curr_delay);
3374 				tot_delay += curr_delay;
3375 
3376 				if (curr_delay < 50)
3377 					curr_delay += 5;
3378 			}
3379 
3380 			/*
3381 			 * NB: If Target has to come out of Deep Sleep,
3382 			 * this may take a few msecs. Typically, though
3383 			 * this delay should be <30us.
3384 			 */
3385 			if (tot_delay > max_delay)
3386 				max_delay = tot_delay;
3387 		}
3388 	}
3389 
3390 	if (debug && hif_state->verified_awake) {
3391 		debug = 0;
3392 		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
3393 			__func__,
3394 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3395 				PCIE_INTR_ENABLE_ADDRESS),
3396 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3397 				PCIE_INTR_CAUSE_ADDRESS),
3398 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3399 				CPU_INTR_ADDRESS),
3400 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3401 				PCIE_INTR_CLR_ADDRESS),
3402 			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
3403 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
3404 	}
3405 
3406 	return 0;
3407 }
3408 
3409 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
3410 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
3411 {
3412 	uint32_t value;
3413 	void *addr;
3414 
3415 	addr = scn->mem + offset;
3416 	value = hif_read32_mb(scn, addr);
3417 
3418 	{
3419 		unsigned long irq_flags;
3420 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3421 
3422 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3423 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3424 		pcie_access_log[idx].is_write = false;
3425 		pcie_access_log[idx].addr = addr;
3426 		pcie_access_log[idx].value = value;
3427 		pcie_access_log_seqnum++;
3428 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3429 	}
3430 
3431 	return value;
3432 }
3433 
3434 void
3435 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
3436 {
3437 	void *addr;
3438 
3439 	addr = scn->mem + (offset);
3440 	hif_write32_mb(scn, addr, value);
3441 
3442 	{
3443 		unsigned long irq_flags;
3444 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3445 
3446 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3447 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3448 		pcie_access_log[idx].is_write = true;
3449 		pcie_access_log[idx].addr = addr;
3450 		pcie_access_log[idx].value = value;
3451 		pcie_access_log_seqnum++;
3452 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3453 	}
3454 }
3455 
3456 /**
3457  * hif_target_dump_access_log() - dump access log
3458  *
3459  * dump access log
3460  *
3461  * Return: n/a
3462  */
3463 void hif_target_dump_access_log(void)
3464 {
3465 	int idx, len, start_idx, cur_idx;
3466 	unsigned long irq_flags;
3467 
3468 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3469 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
3470 		len = PCIE_ACCESS_LOG_NUM;
3471 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3472 	} else {
3473 		len = pcie_access_log_seqnum;
3474 		start_idx = 0;
3475 	}
3476 
3477 	for (idx = 0; idx < len; idx++) {
3478 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
3479 		HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
3480 		       __func__, idx,
3481 		       pcie_access_log[cur_idx].seqnum,
3482 		       pcie_access_log[cur_idx].is_write,
3483 		       pcie_access_log[cur_idx].addr,
3484 		       pcie_access_log[cur_idx].value);
3485 	}
3486 
3487 	pcie_access_log_seqnum = 0;
3488 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3489 }
3490 #endif
3491 
3492 #ifndef HIF_AHB
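/*
 * Stubs for builds without AHB support; these paths should never be
 * reached when only PCI targets are attached.
 */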
3493 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
3494 {
3495 	QDF_BUG(0);
3496 	return -EINVAL;
3497 }
3498 
3499 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
3500 {
3501 	QDF_BUG(0);
3502 	return -EINVAL;
3503 }
3504 #endif
3505 
3506 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
3507 {
3508 	struct ce_tasklet_entry *tasklet_entry = context;
3509 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
3510 }
3511 extern const char *ce_name[];
3512 
3513 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3514 {
3515 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3516 
3517 	return pci_scn->ce_msi_irq_num[ce_id];
3518 }
3519 
3520 /* hif_ce_srng_msi_irq_disable() - disable the irq for msi
3521  * @hif_sc: hif context
3522  * @ce_id: which ce to disable copy complete interrupts for
3523  *
3524  * since MSI interrupts are not level based, the system can function
3525  * without disabling these interrupts.  Interrupt mitigation can be
3526  * added here for better system performance.
3527  */
3528 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3529 {
3530 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3531 }
3532 
3533 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3534 {
3535 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3536 }
3537 
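/*
 * For non-SRNG (legacy) copy engines the per-CE MSI enable/disable hooks
 * are intentionally no-ops; copy-complete interrupt masking is presumably
 * handled through the CE registers themselves.
 */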
3538 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3539 {}
3540 
3541 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3542 {}
3543 
3544 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
3545 {
3546 	int ret;
3547 	int ce_id, irq;
3548 	uint32_t msi_data_start;
3549 	uint32_t msi_data_count;
3550 	uint32_t msi_irq_start;
3551 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3552 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3553 
3554 	/* do wake irq assignment */
3555 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
3556 					  &msi_data_count, &msi_data_start,
3557 					  &msi_irq_start);
3558 	if (ret)
3559 		return ret;
3560 
3561 	scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_irq_start);
3562 	ret = request_irq(scn->wake_irq, hif_wake_interrupt_handler, 0,
3563 			  "wlan_wake_irq", scn);
3564 	if (ret)
3565 		return ret;
3566 
3567 	/* do ce irq assignments */
3568 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3569 					    &msi_data_count, &msi_data_start,
3570 					    &msi_irq_start);
3571 	if (ret)
3572 		goto free_wake_irq;
3573 
3574 	if (ce_srng_based(scn)) {
3575 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
3576 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
3577 	} else {
3578 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
3579 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
3580 	}
3581 
3582 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
3583 
3584 	/* needs to match the ce_id -> irq data mapping
3585 	 * used in the srng parameter configuration
3586 	 */
3587 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3588 		unsigned int msi_data = (ce_id % msi_data_count) +
3589 			msi_irq_start;
3590 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3591 		HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)",
3592 			 __func__, ce_id, msi_data, irq,
3593 			 &ce_sc->tasklets[ce_id]);
3594 
3595 		/* implies the ce is also initialized */
3596 		if (!ce_sc->tasklets[ce_id].inited)
3597 			continue;
3598 
3599 		pci_sc->ce_msi_irq_num[ce_id] = irq;
3600 		ret = request_irq(irq, hif_ce_interrupt_handler,
3601 				  IRQF_SHARED,
3602 				  ce_name[ce_id],
3603 				  &ce_sc->tasklets[ce_id]);
3604 		if (ret)
3605 			goto free_irq;
3606 	}
3607 
3608 	return ret;
3609 
3610 free_irq:
3611 	/* the request_irq for the last ce_id failed so skip it. */
3612 	while (ce_id > 0 && ce_id < scn->ce_count) {
3613 		unsigned int msi_data;
3614 
3615 		ce_id--;
3616 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
3617 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3618 		free_irq(irq, &ce_sc->tasklets[ce_id]);
3619 	}
3620 
3621 free_wake_irq:
3622 	free_irq(scn->wake_irq, scn);
3623 	scn->wake_irq = 0;
3624 
3625 	return ret;
3626 }
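
/*
 * Editor's note -- illustrative sketch only, not part of the driver: the
 * per-CE MSI wiring above reduces to the mapping below.  Whatever programs
 * the srng interrupt parameters is expected to derive the same irq for a
 * given ce_id; msi_data_count and msi_irq_start come from
 * pld_get_user_msi_assignment() exactly as in hif_ce_msi_configure_irq().
 *
 *	unsigned int msi_data = (ce_id % msi_data_count) + msi_irq_start;
 *	int irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
 *
 * irq is the value later returned by hif_ce_msi_map_ce_to_irq(scn, ce_id)
 * via pci_sc->ce_msi_irq_num[ce_id].
 */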
3627 
3628 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3629 {
3630 	int i;
3631 
3632 	for (i = 0; i < hif_ext_group->numirq; i++)
3633 		disable_irq_nosync(hif_ext_group->os_irq[i]);
3634 }
3635 
3636 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3637 {
3638 	int i;
3639 
3640 	for (i = 0; i < hif_ext_group->numirq; i++)
3641 		enable_irq(hif_ext_group->os_irq[i]);
3642 }
3643 
3644 
3645 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3646 			      struct hif_exec_context *hif_ext_group)
3647 {
3648 	int ret = 0;
3649 	int irq = 0;
3650 	int j;
3651 
3652 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3653 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3654 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3655 
3656 	for (j = 0; j < hif_ext_group->numirq; j++) {
3657 		irq = hif_ext_group->irq[j];
3658 
3659 		HIF_DBG("%s: request_irq = %d for grp %d",
3660 			  __func__, irq, hif_ext_group->grp_id);
3661 		ret = request_irq(irq,
3662 				  hif_ext_group_interrupt_handler,
3663 				  IRQF_SHARED, "wlan_EXT_GRP",
3664 				  hif_ext_group);
3665 		if (ret) {
3666 			HIF_ERROR("%s: request_irq failed ret = %d",
3667 				  __func__, ret);
3668 			return -EFAULT;
3669 		}
3670 		hif_ext_group->os_irq[j] = irq;
3671 	}
3672 	hif_ext_group->irq_requested = true;
3673 	return 0;
3674 }
3675 
3676 /**
3677  * hif_configure_irq() - configure interrupt
3678  * @scn: hif context
3679  *
3680  * This function configures the interrupt(s) for the bus: MSI is tried
3681  * first, then a legacy line interrupt appropriate for the target type
3682  * if MSI configuration fails.
3683  *
3684  * Return: 0 - for success
3685  */
3686 int hif_configure_irq(struct hif_softc *scn)
3687 {
3688 	int ret = 0;
3689 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3690 
3691 	HIF_TRACE("%s: E", __func__);
3692 	if (scn->polled_mode_on) {
3693 		scn->request_irq_done = false;
3694 		return 0;
3695 	}
3696 
3697 	hif_init_reschedule_tasklet_work(sc);
3698 
3699 	ret = hif_ce_msi_configure_irq(scn);
3700 	if (ret == 0)
3701 		goto end;
3703 
3704 	if (ENABLE_MSI) {
3705 		ret = hif_configure_msi(sc);
3706 		if (ret == 0)
3707 			goto end;
3708 	}
3709 	/* MSI failed. Try legacy irq */
3710 	switch (scn->target_info.target_type) {
3711 	case TARGET_TYPE_IPQ4019:
3712 		ret = hif_ahb_configure_legacy_irq(sc);
3713 		break;
3714 	case TARGET_TYPE_QCA8074:
3715 		ret = hif_ahb_configure_irq(sc);
3716 		break;
3717 	default:
3718 		ret = hif_pci_configure_legacy_irq(sc);
3719 		break;
3720 	}
3721 	if (ret < 0) {
3722 		HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
3723 			__func__, ret);
3724 		return ret;
3725 	}
3726 end:
3727 	scn->request_irq_done = true;
3728 	return 0;
3729 }
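
/*
 * Editor's note -- illustrative usage sketch only: a bus-enable path would
 * call hif_configure_irq() once the copy engines are set up and treat a
 * non-zero return as fatal for the probe.  The cleanup label below is a
 * hypothetical placeholder.
 *
 *	ret = hif_configure_irq(scn);
 *	if (ret) {
 *		HIF_ERROR("%s: irq configuration failed %d", __func__, ret);
 *		goto err_irq;	(hypothetical cleanup label)
 *	}
 *	(on success scn->request_irq_done is true, or false in polled mode)
 */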
3730 
3731 /**
3732  * hif_target_sync(): ensure the target is ready
3733  * @scn: hif control structure
3734  *
3735  * Informs the firmware that we plan to use legacy interrupts so that
3736  * it can begin booting, and ensures that the firmware finishes booting
3737  * before continuing. Should be called before trying to write to the
3738  * target's other registers for the first time.
3739  *
3740  * Return: none
3741  */
3742 static void hif_target_sync(struct hif_softc *scn)
3743 {
3744 	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3745 				PCIE_INTR_ENABLE_ADDRESS),
3746 				PCIE_INTR_FIRMWARE_MASK);
3747 
3748 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3749 			PCIE_SOC_WAKE_ADDRESS,
3750 			PCIE_SOC_WAKE_V_MASK);
3751 	while (!hif_targ_is_awake(scn, scn->mem))
3752 		;
3753 
3754 	if (HAS_FW_INDICATOR) {
3755 		int wait_limit = 500;
3756 		int fw_ind = 0;
3757 
3758 		HIF_TRACE("%s: Loop checking FW signal", __func__);
3759 		while (1) {
3760 			fw_ind = hif_read32_mb(scn, scn->mem +
3761 					FW_INDICATOR_ADDRESS);
3762 			if (fw_ind & FW_IND_INITIALIZED)
3763 				break;
3764 			if (wait_limit-- < 0)
3765 				break;
3766 			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3767 				PCIE_INTR_ENABLE_ADDRESS),
3768 				PCIE_INTR_FIRMWARE_MASK);
3769 
3770 			qdf_mdelay(10);
3771 		}
3772 		if (wait_limit < 0)
3773 			HIF_TRACE("%s: FW signal timed out",
3774 					__func__);
3775 		else
3776 			HIF_TRACE("%s: Got FW signal, retries = %x",
3777 					__func__, 500 - wait_limit);
3778 	}
3779 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3780 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3781 }
3782 
3783 #ifdef CONFIG_PLD_PCIE_INIT
3784 static void hif_pci_get_soc_info(struct hif_pci_softc *sc, struct device *dev)
3785 {
3786 	struct pld_soc_info info;
3787 
3788 	pld_get_soc_info(dev, &info);
3789 	sc->mem = info.v_addr;
3790 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3791 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3792 }
3793 #else
3794 static void hif_pci_get_soc_info(struct hif_pci_softc *sc, struct device *dev)
3795 {}
3796 #endif
3797 
3798 #ifdef HIF_REG_WINDOW_SUPPORT
3799 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3800 					       u32 target_type)
3801 {
3802 	switch (target_type) {
3803 	case TARGET_TYPE_QCN7605:
3804 		sc->use_register_windowing = true;
3805 		qdf_spinlock_create(&sc->register_access_lock);
3806 		sc->register_window = 0;
3807 		break;
3808 	default:
3809 		sc->use_register_windowing = false;
3810 	}
3811 }
3812 #else
3813 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3814 					       u32 target_type)
3815 {
3816 	sc->use_register_windowing = false;
3817 }
3818 #endif
3819 
3820 /**
3821  * hif_pci_enable_bus(): enable the PCI bus
3822  *
3823  * This function enables the bus
3824  *
3825  * @ol_sc: soft_sc struct
3826  * @dev: device pointer
3827  * @bdev: bus dev pointer
3828  * @bid: bus id pointer
3829  * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
3830  * Return: QDF_STATUS
3831  */
3832 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3833 			  struct device *dev, void *bdev,
3834 			  const struct hif_bus_id *bid,
3835 			  enum hif_enable_type type)
3836 {
3837 	int ret = 0;
3838 	uint32_t hif_type, target_type;
3839 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3840 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3841 	uint16_t revision_id;
3842 	int probe_again = 0;
3843 	struct pci_dev *pdev = bdev;
3844 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3845 	struct hif_target_info *tgt_info;
3846 
3847 	if (!ol_sc) {
3848 		HIF_ERROR("%s: hif_ctx is NULL", __func__);
3849 		return QDF_STATUS_E_NOMEM;
3850 	}
3851 
3852 	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
3853 		  __func__, hif_get_conparam(ol_sc), id->device);
3854 
3855 	sc->pdev = pdev;
3856 	sc->dev = &pdev->dev;
3857 	sc->devid = id->device;
3858 	sc->cacheline_sz = dma_get_cache_alignment();
3859 	tgt_info = hif_get_target_info_handle(hif_hdl);
3860 	hif_pci_get_soc_info(sc, dev);
3861 again:
3862 	ret = hif_enable_pci(sc, pdev, id);
3863 	if (ret < 0) {
3864 		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
3865 		       __func__, ret);
3866 		goto err_enable_pci;
3867 	}
3868 	HIF_TRACE("%s: hif_enable_pci done", __func__);
3869 
3870 	/* Temporary FIX: disable ASPM on peregrine.
3871 	 * Will be removed after the OTP is programmed
3872 	 */
3873 	hif_disable_power_gating(hif_hdl);
3874 
3875 	device_disable_async_suspend(&pdev->dev);
3876 	pci_read_config_word(pdev, 0x08, &revision_id);
3877 
3878 	ret = hif_get_device_type(id->device, revision_id,
3879 						&hif_type, &target_type);
3880 	if (ret < 0) {
3881 		HIF_ERROR("%s: invalid device id/revision_id", __func__);
3882 		goto err_tgtstate;
3883 	}
3884 	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
3885 		  __func__, hif_type, target_type);
3886 
3887 	hif_register_tbl_attach(ol_sc, hif_type);
3888 	hif_target_register_tbl_attach(ol_sc, target_type);
3889 
3890 	hif_pci_init_reg_windowing_support(sc, target_type);
3891 
3892 	tgt_info->target_type = target_type;
3893 
3894 	if (ce_srng_based(ol_sc)) {
3895 		HIF_TRACE("%s: Skip target wakeup for srng devices", __func__);
3896 	} else {
3897 		ret = hif_pci_probe_tgt_wakeup(sc);
3898 		if (ret < 0) {
3899 			HIF_ERROR("%s: ERROR - hif_pci_probe_tgt_wakeup error = %d",
3900 					__func__, ret);
3901 			if (ret == -EAGAIN)
3902 				probe_again++;
3903 			goto err_tgtstate;
3904 		}
3905 		HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
3906 	}
3907 
3908 	if (!ol_sc->mem_pa) {
3909 		HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
3910 		ret = -EIO;
3911 		goto err_tgtstate;
3912 	}
3913 
3914 	if (!ce_srng_based(ol_sc)) {
3915 		hif_target_sync(ol_sc);
3916 
3917 		if (ADRASTEA_BU)
3918 			hif_vote_link_up(hif_hdl);
3919 	}
3920 
3921 	return 0;
3922 
3923 err_tgtstate:
3924 	hif_disable_pci(sc);
3925 	sc->pci_enabled = false;
3926 	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
3927 	return QDF_STATUS_E_ABORTED;
3928 
3929 err_enable_pci:
3930 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3931 		int delay_time;
3932 
3933 		HIF_INFO("%s: pci reprobe", __func__);
3934 		/* back off for at least 100 ms, growing as 10 * probe_again^2 */
3935 		delay_time = max(100, 10 * (probe_again * probe_again));
3936 		qdf_mdelay(delay_time);
3937 		goto again;
3938 	}
3939 	return ret;
3940 }
3941 
3942 /**
3943  * hif_pci_irq_enable() - ce_irq_enable
3944  * @scn: hif_softc
3945  * @ce_id: ce_id
3946  *
3947  * Return: void
3948  */
3949 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3950 {
3951 	uint32_t tmp = 1 << ce_id;
3952 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3953 
3954 	qdf_spin_lock_irqsave(&sc->irq_lock);
3955 	scn->ce_irq_summary &= ~tmp;
3956 	if (scn->ce_irq_summary == 0) {
3957 		/* Enable Legacy PCI line interrupts */
3958 		if (LEGACY_INTERRUPTS(sc) &&
3959 			(scn->target_status != TARGET_STATUS_RESET) &&
3960 			(!qdf_atomic_read(&scn->link_suspended))) {
3961 
3962 			hif_write32_mb(scn, scn->mem +
3963 				(SOC_CORE_BASE_ADDRESS |
3964 				PCIE_INTR_ENABLE_ADDRESS),
3965 				HOST_GROUP0_MASK);
3966 
3967 			hif_read32_mb(scn, scn->mem +
3968 					(SOC_CORE_BASE_ADDRESS |
3969 					PCIE_INTR_ENABLE_ADDRESS));
3970 		}
3971 	}
3972 	if (scn->hif_init_done)
3973 		Q_TARGET_ACCESS_END(scn);
3974 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
3975 
3976 	/* check for missed firmware crash */
3977 	hif_fw_interrupt_handler(0, scn);
3978 }
3979 
3980 /**
3981  * hif_pci_irq_disable() - ce_irq_disable
3982  * @scn: hif_softc
3983  * @ce_id: ce_id
3984  *
3985  * only applicable to legacy copy engine...
3986  *
3987  * Return: void
3988  */
3989 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3990 {
3991 	/* For Rome only need to wake up target */
3992 	/* target access is maintained until interrupts are re-enabled */
3993 	Q_TARGET_ACCESS_BEGIN(scn);
3994 }
3995 
3996 #ifdef FEATURE_RUNTIME_PM
3997 
3998 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
3999 {
4000 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4001 
4002 	if (!sc)
4003 		return;
4004 
4005 	sc->pm_stats.runtime_get++;
4006 	pm_runtime_get_noresume(sc->dev);
4007 }
4008 
4009 /**
4010  * hif_pm_runtime_get() - do a get operation on the device
4011  * @hif_ctx: opaque hif context
4012  *
4013  * A get operation will prevent a runtime suspend until a
4014  * corresponding put is done.  This API should be used when sending data.
4015  *
4016  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
4017  * THIS API WILL ONLY REQUEST THE RESUME AND WILL NOT DO A GET!!!
4018  *
4019  * Return: 0 if the bus is up and a get has been issued,
4020  *   otherwise an error code.
4021  */
4022 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
4023 {
4024 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4025 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4026 	int ret;
4027 	int pm_state;
4028 
4029 	if (!scn) {
4030 		HIF_ERROR("%s: Could not do runtime get, scn is null",
4031 				__func__);
4032 		return -EFAULT;
4033 	}
4034 
4035 	pm_state = qdf_atomic_read(&sc->pm_state);
4036 
4037 	if (pm_state  == HIF_PM_RUNTIME_STATE_ON ||
4038 			pm_state == HIF_PM_RUNTIME_STATE_NONE) {
4039 		sc->pm_stats.runtime_get++;
4040 		ret = __hif_pm_runtime_get(sc->dev);
4041 
4042 		/* Get can return 1 if the device is already active, just return
4043 		 * success in that case
4044 		 */
4045 		if (ret > 0)
4046 			ret = 0;
4047 
4048 		if (ret)
4049 			hif_pm_runtime_put(hif_ctx);
4050 
4051 		if (ret && ret != -EINPROGRESS) {
4052 			sc->pm_stats.runtime_get_err++;
4053 			HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d",
4054 				__func__, qdf_atomic_read(&sc->pm_state), ret);
4055 		}
4056 
4057 		return ret;
4058 	}
4059 
4060 	sc->pm_stats.request_resume++;
4061 	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
4062 	ret = hif_pm_request_resume(sc->dev);
4063 
4064 	return -EAGAIN;
4065 }
4066 
4067 /**
4068  * hif_pm_runtime_put() - do a put operation on the device
4069  * @hif_ctx: opaque hif context
4070  *
4071  * A put operation will allow a runtime suspend after a corresponding
4072  * get was done.  This API should be used when sending data.
4073  *
4074  * This API will fail if runtime pm is stopped, or if the put would
4075  * decrement the usage count below 0.
4076  * Return: 0 if the put is performed, otherwise an error code
4077  */
4078 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
4079 {
4080 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4081 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4082 	int pm_state, usage_count;
4083 	char *error = NULL;
4084 
4085 	if (!scn) {
4086 		HIF_ERROR("%s: Could not do runtime put, scn is null",
4087 				__func__);
4088 		return -EFAULT;
4089 	}
4090 	usage_count = atomic_read(&sc->dev->power.usage_count);
4091 
4092 	if (usage_count == 1) {
4093 		pm_state = qdf_atomic_read(&sc->pm_state);
4094 
4095 		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
4096 			error = "Ignoring unexpected put when runtime pm is disabled";
4097 
4098 	} else if (usage_count == 0) {
4099 		error = "PUT Without a Get Operation";
4100 	}
4101 
4102 	if (error) {
4103 		hif_pci_runtime_pm_warn(sc, error);
4104 		return -EINVAL;
4105 	}
4106 
4107 	sc->pm_stats.runtime_put++;
4108 
4109 	hif_pm_runtime_mark_last_busy(sc->dev);
4110 	hif_pm_runtime_put_auto(sc->dev);
4111 
4112 	return 0;
4113 }
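
/*
 * Editor's note -- illustrative usage sketch only: callers are expected to
 * bracket data transfers with a get/put pair.  send_data() below is a
 * hypothetical transmit helper; only the hif_pm_runtime_get()/put() calls
 * are real APIs from this file.
 *
 *	if (hif_pm_runtime_get(hif_ctx))
 *		return -EAGAIN;		(bus still resuming, retry later)
 *
 *	status = send_data(hif_ctx, nbuf);
 *	hif_pm_runtime_put(hif_ctx);	(allow runtime suspend again)
 */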
4114 
4115 
4116 /**
4117  * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
4118  *                                      reason
4119  * @hif_sc: pci context
4120  * @lock: runtime_pm lock being acquired
4121  *
4122  * Return 0 if successful.
4123  */
4124 static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
4125 		*hif_sc, struct hif_pm_runtime_lock *lock)
4126 {
4127 	int ret = 0;
4128 
4129 	/*
4130 	 * We shouldn't set context->timeout to zero here when the context
4131 	 * is active, since the timeout APIs can be called back to back for
4132 	 * the same context, e.g. echo "1=T:10:T:20" > /d/cnss_runtime_pm
4133 	 * Instead, context->timeout is set to zero in the
4134 	 * hif_pm_runtime_prevent_suspend API so the timeout version is no
4135 	 * longer active and this context's list entry is deleted during
4136 	 * allow suspend.
4137 	 */
4138 	if (lock->active)
4139 		return 0;
4140 
4141 	ret = __hif_pm_runtime_get(hif_sc->dev);
4142 
4143 	/*
4144 	 * The ret can be -EINPROGRESS if the runtime status is RPM_RESUMING
4145 	 * or RPM_SUSPENDING. Any other negative value is an error.
4146 	 * We shouldn't do a runtime_put here; allow suspend is called later
4147 	 * with the same context and decrements the usage count there, so
4148 	 * suspend stays prevented until then.
4149 	 */
4150 
4151 	if (ret < 0 && ret != -EINPROGRESS) {
4152 		hif_sc->pm_stats.runtime_get_err++;
4153 		hif_pci_runtime_pm_warn(hif_sc,
4154 				"Prevent Suspend Runtime PM Error");
4155 	}
4156 
4157 	hif_sc->prevent_suspend_cnt++;
4158 
4159 	lock->active = true;
4160 
4161 	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);
4162 
4163 	hif_sc->pm_stats.prevent_suspend++;
4164 
4165 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4166 		hif_pm_runtime_state_to_string(
4167 			qdf_atomic_read(&hif_sc->pm_state)),
4168 					ret);
4169 
4170 	return ret;
4171 }
4172 
4173 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
4174 		struct hif_pm_runtime_lock *lock)
4175 {
4176 	int ret = 0;
4177 	int usage_count;
4178 
4179 	if (hif_sc->prevent_suspend_cnt == 0)
4180 		return ret;
4181 
4182 	if (!lock->active)
4183 		return ret;
4184 
4185 	usage_count = atomic_read(&hif_sc->dev->power.usage_count);
4186 
4187 	/*
4188 	 * During driver unload, the platform driver increments the usage
4189 	 * count to prevent any runtime suspend from being triggered.
4190 	 * So in the HIF_PM_RUNTIME_STATE_NONE state the usage_count should
4191 	 * be one. Ideally this path shouldn't be hit, since context->active
4192 	 * must be set for allow suspend to happen; handle it here anyway to
4193 	 * prevent any failures.
4194 	 */
4195 	if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
4196 				&& usage_count == 1) || usage_count == 0) {
4197 		hif_pci_runtime_pm_warn(hif_sc,
4198 				"Allow without a prevent suspend");
4199 		return -EINVAL;
4200 	}
4201 
4202 	list_del(&lock->list);
4203 
4204 	hif_sc->prevent_suspend_cnt--;
4205 
4206 	lock->active = false;
4207 	lock->timeout = 0;
4208 
4209 	hif_pm_runtime_mark_last_busy(hif_sc->dev);
4210 	ret = hif_pm_runtime_put_auto(hif_sc->dev);
4211 
4212 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4213 		hif_pm_runtime_state_to_string(
4214 			qdf_atomic_read(&hif_sc->pm_state)),
4215 					ret);
4216 
4217 	hif_sc->pm_stats.allow_suspend++;
4218 	return ret;
4219 }
4220 
4221 /**
4222  * hif_pm_runtime_lock_timeout_fn() - runtime lock timeout callback
4223  * @data: callback data that is the pci context
4224  *
4225  * If runtime locks are acquired with a timeout, this function releases
4226  * the timed-out locks when the timer expires.  Runtime suspend is then
4227  * allowed again once no lock in the prevent_suspend list remains
4228  * active.
4229  */
4230 static void hif_pm_runtime_lock_timeout_fn(unsigned long data)
4231 {
4232 	struct hif_pci_softc *hif_sc = (struct hif_pci_softc *)data;
4233 	unsigned long timer_expires;
4234 	struct hif_pm_runtime_lock *context, *temp;
4235 
4236 	spin_lock_bh(&hif_sc->runtime_lock);
4237 
4238 	timer_expires = hif_sc->runtime_timer_expires;
4239 
4240 	/* Make sure we are not called too early, this should take care of
4241 	 * following case
4242 	 *
4243 	 * CPU0                         CPU1 (timeout function)
4244 	 * ----                         ----------------------
4245 	 * spin_lock_irq
4246 	 *                              timeout function called
4247 	 *
4248 	 * mod_timer()
4249 	 *
4250 	 * spin_unlock_irq
4251 	 *                              spin_lock_irq
4252 	 */
4253 	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
4254 		hif_sc->runtime_timer_expires = 0;
4255 		list_for_each_entry_safe(context, temp,
4256 				&hif_sc->prevent_suspend_list, list) {
4257 			if (context->timeout) {
4258 				__hif_pm_runtime_allow_suspend(hif_sc, context);
4259 				hif_sc->pm_stats.allow_suspend_timeout++;
4260 			}
4261 		}
4262 	}
4263 
4264 	spin_unlock_bh(&hif_sc->runtime_lock);
4265 }
4266 
4267 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
4268 		struct hif_pm_runtime_lock *data)
4269 {
4270 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4271 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4272 	struct hif_pm_runtime_lock *context = data;
4273 
4274 	if (!sc->hif_config.enable_runtime_pm)
4275 		return 0;
4276 
4277 	if (!context)
4278 		return -EINVAL;
4279 
4280 	if (in_irq())
4281 		WARN_ON(1);
4282 
4283 	spin_lock_bh(&hif_sc->runtime_lock);
4284 	context->timeout = 0;
4285 	__hif_pm_runtime_prevent_suspend(hif_sc, context);
4286 	spin_unlock_bh(&hif_sc->runtime_lock);
4287 
4288 	return 0;
4289 }
4290 
4291 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
4292 				struct hif_pm_runtime_lock *data)
4293 {
4294 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4295 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4296 	struct hif_pm_runtime_lock *context = data;
4297 
4298 	if (!sc->hif_config.enable_runtime_pm)
4299 		return 0;
4300 
4301 	if (!context)
4302 		return -EINVAL;
4303 
4304 	if (in_irq())
4305 		WARN_ON(1);
4306 
4307 	spin_lock_bh(&hif_sc->runtime_lock);
4308 
4309 	__hif_pm_runtime_allow_suspend(hif_sc, context);
4310 
4311 	/* The list can be empty as well in cases where
4312 	 * we have one context in the list and the allow
4313 	 * suspend came before the timer expires and we delete
4314 	 * context above from the list.
4315 	 * When list is empty prevent_suspend count will be zero.
4316 	 */
4317 	if (hif_sc->prevent_suspend_cnt == 0 &&
4318 			hif_sc->runtime_timer_expires > 0) {
4319 		del_timer(&hif_sc->runtime_timer);
4320 		hif_sc->runtime_timer_expires = 0;
4321 	}
4322 
4323 	spin_unlock_bh(&hif_sc->runtime_lock);
4324 
4325 	return 0;
4326 }
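
/*
 * Editor's note -- illustrative usage sketch only: a protocol layer that
 * must keep the bus awake across an exchange takes the prevent/allow pair
 * with a lock it initialized earlier.  scan_lock is a hypothetical
 * struct hif_pm_runtime_lock pointer obtained via hif_runtime_lock_init().
 *
 *	hif_pm_runtime_prevent_suspend(hif_ctx, scan_lock);
 *	(issue the request and wait for its completion)
 *	hif_pm_runtime_allow_suspend(hif_ctx, scan_lock);
 */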
4327 
4328 /**
4329  * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
4330  * @ol_sc: HIF context
4331  * @lock: which lock is being acquired
4332  * @delay: Timeout in milliseconds
4333  *
4334  * Prevent runtime suspend with a timeout after which runtime suspend would be
4335  * allowed. This API uses a single timer to allow the suspend and timer is
4336  * modified if the timeout is changed before timer fires.
4337  * If the timeout is less than autosuspend_delay then use mark_last_busy instead
4338  * of starting the timer.
4339  *
4340  * It is wise to try not to use this API and correct the design if possible.
4341  *
4342  * Return: 0 on success and negative error code on failure
4343  */
4344 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
4345 		struct hif_pm_runtime_lock *lock, unsigned int delay)
4346 {
4347 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4348 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);
4349 
4350 	int ret = 0;
4351 	unsigned long expires;
4352 	struct hif_pm_runtime_lock *context = lock;
4353 
4354 	if (hif_is_load_or_unload_in_progress(sc)) {
4355 		HIF_ERROR("%s: Load/unload in progress, ignore!",
4356 				__func__);
4357 		return -EINVAL;
4358 	}
4359 
4360 	if (hif_is_recovery_in_progress(sc)) {
4361 		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
4362 		return -EINVAL;
4363 	}
4364 
4365 	if (!sc->hif_config.enable_runtime_pm)
4366 		return 0;
4367 
4368 	if (!context)
4369 		return -EINVAL;
4370 
4371 	if (in_irq())
4372 		WARN_ON(1);
4373 
4374 	/*
4375 	 * Don't use internal timer if the timeout is less than auto suspend
4376 	 * delay.
4377 	 */
4378 	if (delay <= hif_sc->dev->power.autosuspend_delay) {
4379 		hif_pm_request_resume(hif_sc->dev);
4380 		hif_pm_runtime_mark_last_busy(hif_sc->dev);
4381 		return ret;
4382 	}
4383 
4384 	expires = jiffies + msecs_to_jiffies(delay);
4385 	expires += !expires;
4386 
4387 	spin_lock_bh(&hif_sc->runtime_lock);
4388 
4389 	context->timeout = delay;
4390 	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
4391 	hif_sc->pm_stats.prevent_suspend_timeout++;
4392 
4393 	/* Modify the timer only if new timeout is after already configured
4394 	 * timeout
4395 	 */
4396 	if (time_after(expires, hif_sc->runtime_timer_expires)) {
4397 		mod_timer(&hif_sc->runtime_timer, expires);
4398 		hif_sc->runtime_timer_expires = expires;
4399 	}
4400 
4401 	spin_unlock_bh(&hif_sc->runtime_lock);
4402 
4403 	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
4404 		hif_pm_runtime_state_to_string(
4405 			qdf_atomic_read(&hif_sc->pm_state)),
4406 					delay, ret);
4407 
4408 	return ret;
4409 }
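
/*
 * Editor's note -- illustrative usage sketch only: when a caller cannot
 * guarantee a matching allow_suspend (for example while waiting on a
 * firmware event that may never arrive), the timeout variant bounds the
 * wake time.  The 500 ms value and fw_evt_lock are example assumptions.
 *
 *	hif_pm_runtime_prevent_suspend_timeout(hif_ctx, fw_evt_lock, 500);
 *	(suspend is re-allowed automatically about 500 ms later unless
 *	 hif_pm_runtime_allow_suspend() is called first)
 */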
4410 
4411 /**
4412  * hif_runtime_lock_init() - API to initialize Runtime PM context
4413  * @lock: QDF runtime lock handle to initialize
4414  * @name: Context name
4415  *
4416  * This API allocates the caller's Runtime PM context and stores it in
4417  * the lock handle.
4418  * Return: 0 on success, -ENOMEM on allocation failure
4419  */
4420 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
4421 {
4422 	struct hif_pm_runtime_lock *context;
4423 
4424 	HIF_INFO("Initializing Runtime PM wakelock %s", name);
4425 
4426 	context = qdf_mem_malloc(sizeof(*context));
4427 	if (!context) {
4428 		HIF_ERROR("%s: No memory for Runtime PM wakelock context",
4429 			  __func__);
4430 		return -ENOMEM;
4431 	}
4432 
4433 	context->name = name ? name : "Default";
4434 	lock->lock = context;
4435 
4436 	return 0;
4437 }
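
/*
 * Editor's note -- illustrative lifecycle sketch only: the
 * qdf_runtime_lock_t wrapper holds the hif_pm_runtime_lock allocated here
 * and should be torn down with hif_runtime_lock_deinit() before the caller
 * goes away.  "my_feature" is an example name.
 *
 *	qdf_runtime_lock_t lock;
 *
 *	if (hif_runtime_lock_init(&lock, "my_feature"))
 *		return;
 *	(use lock.lock with the prevent/allow APIs above)
 *	hif_runtime_lock_deinit(hif_ctx, lock.lock);
 */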
4438 
4439 /**
4440  * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
4441  * @data: Runtime PM context
4442  *
4443  * Return: void
4444  */
4445 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
4446 			     struct hif_pm_runtime_lock *data)
4447 {
4448 	struct hif_pm_runtime_lock *context = data;
4449 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4450 
4451 	if (!context) {
4452 		HIF_ERROR("Runtime PM wakelock context is NULL");
4453 		return;
4454 	}
4455 
4456 	HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);
4457 
4458 	/*
4459 	 * Ensure to delete the context list entry and reduce the usage count
4460 	 * before freeing the context if context is active.
4461 	 */
4462 	if (sc) {
4463 		spin_lock_bh(&sc->runtime_lock);
4464 		__hif_pm_runtime_allow_suspend(sc, context);
4465 		spin_unlock_bh(&sc->runtime_lock);
4466 	}
4467 
4468 	qdf_mem_free(context);
4469 }
4470 #endif /* FEATURE_RUNTIME_PM */
4471 
4472 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
4473 {
4474 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4475 
4476 	/* legacy case only has one irq */
4477 	return pci_scn->irq;
4478 }
4479 
4480 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
4481 {
4482 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
4483 	struct hif_target_info *tgt_info;
4484 
4485 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
4486 
4487 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
4488 	    tgt_info->target_type == TARGET_TYPE_QCA8074) {
4489 		/*
4490 		 * Need to consider offset's memtype for QCA6290/QCA8074,
4491 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
4492 		 * well initialized/defined.
4493 		 */
4494 		return 0;
4495 	}
4496 
4497 	if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE)
4498 		 || (offset + sizeof(unsigned int) <= sc->mem_len)) {
4499 		return 0;
4500 	}
4501 
4502 	HIF_TRACE("Refusing to read memory at 0x%x - 0x%lx (max 0x%zx)\n",
4503 		 offset, offset + sizeof(unsigned int), sc->mem_len);
4504 
4505 	return -EINVAL;
4506 }
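
/*
 * Editor's note -- illustrative usage sketch only: a diagnostic accessor is
 * expected to validate the offset before touching the BAR.  The read below
 * mirrors how hif_read32_mb() is used elsewhere in this file.
 *
 *	if (hif_pci_addr_in_boundary(scn, offset) < 0)
 *		return -EINVAL;		(offset outside DRAM/BAR window)
 *
 *	value = hif_read32_mb(scn, scn->mem + offset);
 */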
4507 
4508 /**
4509  * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
4510  * @scn: hif context
4511  *
4512  * Return: true if soc needs driver bmi otherwise false
4513  */
4514 bool hif_pci_needs_bmi(struct hif_softc *scn)
4515 {
4516 	return !ce_srng_based(scn);
4517 }
4518