xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/pcie/if_pci.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <linux/pci.h>
20 #include <linux/slab.h>
21 #include <linux/interrupt.h>
22 #include <linux/if_arp.h>
23 #include <linux/of_pci.h>
24 #include <linux/version.h>
25 #include "hif_io32.h"
26 #include "if_pci.h"
27 #include "hif.h"
28 #include "target_type.h"
29 #include "hif_main.h"
30 #include "ce_main.h"
31 #include "ce_api.h"
32 #include "ce_internal.h"
33 #include "ce_reg.h"
34 #include "ce_bmi.h"
35 #include "regtable.h"
36 #include "hif_hw_version.h"
37 #include <linux/debugfs.h>
38 #include <linux/seq_file.h>
39 #include "qdf_status.h"
40 #include "qdf_atomic.h"
41 #include "qdf_platform.h"
42 #include "pld_common.h"
43 #include "mp_dev.h"
44 #include "hif_debug.h"
45 
46 #if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
47 	defined(QCA_WIFI_WCN7850))
48 #include "hal_api.h"
49 #endif
50 
51 #include "if_pci_internal.h"
52 #include "ce_tasklet.h"
53 #include "targaddrs.h"
54 #include "hif_exec.h"
55 
56 #include "pci_api.h"
57 #include "ahb_api.h"
58 #include "wlan_cfg.h"
59 #include "qdf_hang_event_notifier.h"
60 #include "qdf_platform.h"
61 #include "qal_devnode.h"
62 #include "qdf_irq.h"
63 
64 /* Maximum ms timeout for host to wake up target */
65 #define PCIE_WAKE_TIMEOUT 1000
66 #define RAMDUMP_EVENT_TIMEOUT 2500
67 
68 /* Setting SOC_GLOBAL_RESET during driver unload causes an intermittent
69  * PCIe data bus error.
70  * As a workaround for this issue, the reset sequence is changed to
71  * use a Target CPU warm reset instead of SOC_GLOBAL_RESET.
72  */
73 #define CPU_WARM_RESET_WAR
74 #define WLAN_CFG_MAX_PCIE_GROUPS 2
75 #ifdef QCA_WIFI_QCN9224
76 #define WLAN_CFG_MAX_CE_COUNT 16
77 #else
78 #define WLAN_CFG_MAX_CE_COUNT 12
79 #endif
80 #define DP_IRQ_NAME_LEN 25
81 char dp_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_INT_NUM_CONTEXTS][DP_IRQ_NAME_LEN] = {};
82 char ce_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_MAX_CE_COUNT][DP_IRQ_NAME_LEN] = {};
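
/* These tables back the names passed to request_irq()/pfrm_request_irq()
 * for the DP group and CE interrupts. The kernel keeps the name pointer
 * for the lifetime of the IRQ, so the storage must stay valid after
 * registration - hence file-scope arrays rather than stack buffers.
 */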
83 
84 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
85 static inline int hif_get_pci_slot(struct hif_softc *scn)
86 {
87 	/*
88 	 * If WLAN_MAX_PDEVS is defined as 1, always return pci slot 0
89 	 * since there is only one pci device attached.
90 	 */
91 	return 0;
92 }
93 #else
94 static inline int hif_get_pci_slot(struct hif_softc *scn)
95 {
96 	int pci_slot = pld_get_pci_slot(scn->qdf_dev->dev);
97 
98 	if (pci_slot < 0) {
99 		hif_err("Invalid PCI SLOT %d", pci_slot);
100 		qdf_assert_always(0);
101 		return 0;
102 	} else {
103 		return pci_slot;
104 	}
105 }
106 #endif
107 
108 /*
109  * Top-level interrupt handler for all PCI interrupts from a Target.
110  * When a block of MSI interrupts is allocated, this top-level handler
111  * is not used; instead, we directly call the correct sub-handler.
112  */
113 struct ce_irq_reg_table {
114 	uint32_t irq_enable;
115 	uint32_t irq_status;
116 };
117 
118 #ifndef QCA_WIFI_3_0_ADRASTEA
119 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
120 {
121 }
122 #else
123 static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
124 {
125 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
126 	unsigned int target_enable0, target_enable1;
127 	unsigned int target_cause0, target_cause1;
128 
129 	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
130 	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
131 	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
132 	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);
133 
134 	if ((target_enable0 & target_cause0) ||
135 	    (target_enable1 & target_cause1)) {
136 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
137 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);
138 
139 		if (scn->notice_send)
140 			pld_intr_notify_q6(sc->dev);
141 	}
142 }
143 #endif
144 
145 
146 /**
147  * pci_dispatch_interrupt() - dispatch pending CE interrupts to tasklets
148  * @scn: hif context
149  *
150  * Return: N/A
151  */
152 static void pci_dispatch_interrupt(struct hif_softc *scn)
153 {
154 	uint32_t intr_summary;
155 	int id;
156 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
157 
158 	if (scn->hif_init_done != true)
159 		return;
160 
161 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
162 		return;
163 
164 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
165 
166 	if (intr_summary == 0) {
167 		if ((scn->target_status != TARGET_STATUS_RESET) &&
168 			(!qdf_atomic_read(&scn->link_suspended))) {
169 
170 			hif_write32_mb(scn, scn->mem +
171 				(SOC_CORE_BASE_ADDRESS |
172 				PCIE_INTR_ENABLE_ADDRESS),
173 				HOST_GROUP0_MASK);
174 
175 			hif_read32_mb(scn, scn->mem +
176 					(SOC_CORE_BASE_ADDRESS |
177 					PCIE_INTR_ENABLE_ADDRESS));
178 		}
179 		Q_TARGET_ACCESS_END(scn);
180 		return;
181 	}
182 	Q_TARGET_ACCESS_END(scn);
183 
184 	scn->ce_irq_summary = intr_summary;
185 	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
186 		if (intr_summary & (1 << id)) {
187 			intr_summary &= ~(1 << id);
188 			ce_dispatch_interrupt(id,  &hif_state->tasklets[id]);
189 		}
190 	}
191 }
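
/* The dispatch loop treats intr_summary as a bitmask of pending copy
 * engines: for example, an intr_summary of 0x5 schedules the tasklets
 * for CE 0 and CE 2. Each bit is cleared as it is handled, so the loop
 * exits as soon as all pending CEs have been dispatched.
 */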
192 
193 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
194 {
195 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
196 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
197 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
198 
199 	volatile int tmp;
200 	uint16_t val = 0;
201 	uint32_t bar0 = 0;
202 	uint32_t fw_indicator_address, fw_indicator;
203 	bool ssr_irq = false;
204 	unsigned int host_cause, host_enable;
205 
206 	if (LEGACY_INTERRUPTS(sc)) {
207 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
208 			return IRQ_HANDLED;
209 
210 		if (ADRASTEA_BU) {
211 			host_enable = hif_read32_mb(sc, sc->mem +
212 						    PCIE_INTR_ENABLE_ADDRESS);
213 			host_cause = hif_read32_mb(sc, sc->mem +
214 						   PCIE_INTR_CAUSE_ADDRESS);
215 			if (!(host_enable & host_cause)) {
216 				hif_pci_route_adrastea_interrupt(sc);
217 				return IRQ_HANDLED;
218 			}
219 		}
220 
221 		/* Clear Legacy PCI line interrupts
222 		 * IMPORTANT: the INTR_CLR register has to be set
223 		 * after INTR_ENABLE is set to 0,
224 		 * otherwise the interrupt cannot actually be cleared
225 		 */
226 		hif_write32_mb(sc, sc->mem +
227 			      (SOC_CORE_BASE_ADDRESS |
228 			       PCIE_INTR_ENABLE_ADDRESS), 0);
229 
230 		hif_write32_mb(sc, sc->mem +
231 			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
232 			       ADRASTEA_BU ?
233 			       (host_enable & host_cause) :
234 			      HOST_GROUP0_MASK);
235 
236 		if (ADRASTEA_BU)
237 			hif_write32_mb(sc, sc->mem + 0x2f100c,
238 				       (host_cause >> 1));
239 
240 		/* IMPORTANT: this extra read transaction is required to
241 		 * flush the posted write buffer
242 		 */
243 		if (!ADRASTEA_BU) {
244 			tmp =
245 				hif_read32_mb(sc, sc->mem +
246 					     (SOC_CORE_BASE_ADDRESS |
247 					      PCIE_INTR_ENABLE_ADDRESS));
248 
249 			if (tmp == 0xdeadbeef) {
250 				hif_err("SoC returns 0xdeadbeef!!");
251 
252 				pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
253 				hif_err("PCI Vendor ID = 0x%04x", val);
254 
255 				pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
256 				hif_err("PCI Device ID = 0x%04x", val);
257 
258 				pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
259 				hif_err("PCI Command = 0x%04x", val);
260 
261 				pci_read_config_word(sc->pdev, PCI_STATUS, &val);
262 				hif_err("PCI Status = 0x%04x", val);
263 
264 				pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
265 						      &bar0);
266 				hif_err("PCI BAR0 = 0x%08x", bar0);
267 
268 				hif_err("RTC_STATE_ADDRESS = 0x%08x",
269 					hif_read32_mb(sc, sc->mem +
270 						PCIE_LOCAL_BASE_ADDRESS
271 						+ RTC_STATE_ADDRESS));
272 				hif_err("PCIE_SOC_WAKE_ADDRESS = 0x%08x",
273 					hif_read32_mb(sc, sc->mem +
274 						PCIE_LOCAL_BASE_ADDRESS
275 						+ PCIE_SOC_WAKE_ADDRESS));
276 				hif_err("0x80008 = 0x%08x, 0x8000c = 0x%08x",
277 					hif_read32_mb(sc, sc->mem + 0x80008),
278 					hif_read32_mb(sc, sc->mem + 0x8000c));
279 				hif_err("0x80010 = 0x%08x, 0x80014 = 0x%08x",
280 					hif_read32_mb(sc, sc->mem + 0x80010),
281 					hif_read32_mb(sc, sc->mem + 0x80014));
282 				hif_err("0x80018 = 0x%08x, 0x8001c = 0x%08x",
283 					hif_read32_mb(sc, sc->mem + 0x80018),
284 					hif_read32_mb(sc, sc->mem + 0x8001c));
285 				QDF_BUG(0);
286 			}
287 
288 			PCI_CLR_CAUSE0_REGISTER(sc);
289 		}
290 
291 		if (HAS_FW_INDICATOR) {
292 			fw_indicator_address = hif_state->fw_indicator_address;
293 			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
294 			if ((fw_indicator != ~0) &&
295 			   (fw_indicator & FW_IND_EVENT_PENDING))
296 				ssr_irq = true;
297 		}
298 
299 		if (Q_TARGET_ACCESS_END(scn) < 0)
300 			return IRQ_HANDLED;
301 	}
302 	/* TBDXXX: Add support for WMAC */
303 
304 	if (ssr_irq) {
305 		sc->irq_event = irq;
306 		qdf_atomic_set(&scn->tasklet_from_intr, 1);
307 
308 		qdf_atomic_inc(&scn->active_tasklet_cnt);
309 		tasklet_schedule(&sc->intr_tq);
310 	} else {
311 		pci_dispatch_interrupt(scn);
312 	}
313 
314 	return IRQ_HANDLED;
315 }
316 
317 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
318 {
319 	return true;            /* FIX THIS */
320 }
321 
322 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
323 {
324 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
325 	int i = 0;
326 
327 	if (!irq || !size) {
328 		return -EINVAL;
329 	}
330 
331 	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
332 		irq[0] = sc->irq;
333 		return 1;
334 	}
335 
336 	if (sc->num_msi_intrs > size) {
337 		qdf_print("Not enough space in irq buffer to return irqs");
338 		return -EINVAL;
339 	}
340 
341 	for (i = 0; i < sc->num_msi_intrs; i++) {
342 		irq[i] = sc->irq +  i + MSI_ASSIGN_CE_INITIAL;
343 	}
344 
345 	return sc->num_msi_intrs;
346 }
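
/* Illustrative (hypothetical) caller sketch, not part of the driver:
 *
 *	int irqs[WLAN_CFG_MAX_CE_COUNT];
 *	int n = hif_get_irq_num(hif_hdl, irqs, QDF_ARRAY_SIZE(irqs));
 *
 *	if (n > 0)
 *		... n valid entries in irqs[] ...
 *
 * With MSI disabled (or a single MSI) one legacy/shared irq is returned;
 * otherwise the block of MSI vectors starting at MSI_ASSIGN_CE_INITIAL
 * is returned.
 */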
347 
348 
349 /**
350  * hif_pci_cancel_deferred_target_sleep() - cancel the deferred target sleep
351  * @scn: hif_softc
352  *
353  * Return: void
354  */
355 #if CONFIG_ATH_PCIE_MAX_PERF == 0
356 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
357 {
358 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
359 	A_target_id_t pci_addr = scn->mem;
360 
361 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
362 	/*
363 	 * If the deferred sleep timer is running cancel it
364 	 * and put the soc into sleep.
365 	 */
366 	if (hif_state->fake_sleep == true) {
367 		qdf_timer_stop(&hif_state->sleep_timer);
368 		if (hif_state->verified_awake == false) {
369 			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
370 				      PCIE_SOC_WAKE_ADDRESS,
371 				      PCIE_SOC_WAKE_RESET);
372 		}
373 		hif_state->fake_sleep = false;
374 	}
375 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
376 }
377 #else
378 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
379 {
380 }
381 #endif
382 
383 #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
384 	hif_read32_mb(sc, (char *)(mem) + \
385 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
386 
387 #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
388 	hif_write32_mb(sc, ((char *)(mem) + \
389 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
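
/* These helpers address the PCIe local register window. For example,
 * reading the RTC state and requesting a wakeup looks like:
 *
 *	val = A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS);
 *	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
 *			       PCIE_SOC_WAKE_V_MASK);
 *
 * as done by the reset helpers below.
 */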
390 
391 #ifdef QCA_WIFI_3_0
392 /**
393  * hif_targ_is_awake() - check to see if the target is awake
394  * @hif_ctx: hif context
395  *
396  * emulation never goes to sleep
397  *
398  * Return: true if target is awake
399  */
400 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
401 {
402 	return true;
403 }
404 #else
405 /**
406  * hif_targ_is_awake() - check to see if the target is awake
407  * @scn: hif context
408  *
409  * Return: true if the target's clocks are on
410  */
411 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
412 {
413 	uint32_t val;
414 
415 	if (scn->recovery)
416 		return false;
417 	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
418 		+ RTC_STATE_ADDRESS);
419 	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
420 }
421 #endif
422 
423 #define ATH_PCI_RESET_WAIT_MAX 10       /* ms */
424 static void hif_pci_device_reset(struct hif_pci_softc *sc)
425 {
426 	void __iomem *mem = sc->mem;
427 	int i;
428 	uint32_t val;
429 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
430 
431 	if (!scn->hostdef)
432 		return;
433 
434 	/* NB: Don't check resetok here.  This form of reset
435 	 * is integral to correct operation.
436 	 */
437 
438 	if (!SOC_GLOBAL_RESET_ADDRESS)
439 		return;
440 
441 	if (!mem)
442 		return;
443 
444 	hif_err("Reset Device");
445 
446 	/*
447 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
448 	 * writing WAKE_V, the Target may scribble over Host memory!
449 	 */
450 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
451 			       PCIE_SOC_WAKE_V_MASK);
452 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
453 		if (hif_targ_is_awake(scn, mem))
454 			break;
455 
456 		qdf_mdelay(1);
457 	}
458 
459 	/* Put Target, including PCIe, into RESET. */
460 	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
461 	val |= 1;
462 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
463 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
464 		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
465 		    RTC_STATE_COLD_RESET_MASK)
466 			break;
467 
468 		qdf_mdelay(1);
469 	}
470 
471 	/* Pull Target, including PCIe, out of RESET. */
472 	val &= ~1;
473 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
474 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
475 		if (!
476 		    (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
477 		     RTC_STATE_COLD_RESET_MASK))
478 			break;
479 
480 		qdf_mdelay(1);
481 	}
482 
483 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
484 			       PCIE_SOC_WAKE_RESET);
485 }
486 
487 /* CPU warm reset function
488  * Steps:
489  * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
490  * 2. Clear the FW_INDICATOR_ADDRESS - so Target CPU initializes FW
491  *    correctly on WARM reset
492  * 3. Clear TARGET CPU LF timer interrupt
493  * 4. Reset all CEs to clear any pending CE transactions
494  * 5. Warm reset CPU
495  */
496 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
497 {
498 	void __iomem *mem = sc->mem;
499 	int i;
500 	uint32_t val;
501 	uint32_t fw_indicator;
502 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
503 
504 	/* NB: Don't check resetok here.  This form of reset is
505 	 * integral to correct operation.
506 	 */
507 
508 	if (!mem)
509 		return;
510 
511 	hif_debug("Target Warm Reset");
512 
513 	/*
514 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
515 	 * writing WAKE_V, the Target may scribble over Host memory!
516 	 */
517 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
518 			       PCIE_SOC_WAKE_V_MASK);
519 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
520 		if (hif_targ_is_awake(scn, mem))
521 			break;
522 		qdf_mdelay(1);
523 	}
524 
525 	/*
526 	 * Disable Pending interrupts
527 	 */
528 	val =
529 		hif_read32_mb(sc, mem +
530 			     (SOC_CORE_BASE_ADDRESS |
531 			      PCIE_INTR_CAUSE_ADDRESS));
532 	hif_debug("Host Intr Cause reg 0x%x: value : 0x%x",
533 		  (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
534 	/* Target CPU Intr Cause */
535 	val = hif_read32_mb(sc, mem +
536 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
537 	hif_debug("Target CPU Intr Cause 0x%x", val);
538 
539 	val =
540 		hif_read32_mb(sc, mem +
541 			     (SOC_CORE_BASE_ADDRESS |
542 			      PCIE_INTR_ENABLE_ADDRESS));
543 	hif_write32_mb(sc, (mem +
544 		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
545 	hif_write32_mb(sc, (mem +
546 		       (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
547 		       HOST_GROUP0_MASK);
548 
549 	qdf_mdelay(100);
550 
551 	/* Clear FW_INDICATOR_ADDRESS */
552 	if (HAS_FW_INDICATOR) {
553 		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
554 		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
555 	}
556 
557 	/* Clear Target LF Timer interrupts */
558 	val =
559 		hif_read32_mb(sc, mem +
560 			     (RTC_SOC_BASE_ADDRESS +
561 			      SOC_LF_TIMER_CONTROL0_ADDRESS));
562 	hif_debug("addr 0x%x : 0x%x",
563 	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
564 	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
565 	hif_write32_mb(sc, mem +
566 		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
567 		      val);
568 
569 	/* Reset CE */
570 	val =
571 		hif_read32_mb(sc, mem +
572 			     (RTC_SOC_BASE_ADDRESS |
573 			      SOC_RESET_CONTROL_ADDRESS));
574 	val |= SOC_RESET_CONTROL_CE_RST_MASK;
575 	hif_write32_mb(sc, (mem +
576 		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
577 		      val);
578 	val =
579 		hif_read32_mb(sc, mem +
580 			     (RTC_SOC_BASE_ADDRESS |
581 			      SOC_RESET_CONTROL_ADDRESS));
582 	qdf_mdelay(10);
583 
584 	/* CE unreset */
585 	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
586 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
587 		       SOC_RESET_CONTROL_ADDRESS), val);
588 	val =
589 		hif_read32_mb(sc, mem +
590 			     (RTC_SOC_BASE_ADDRESS |
591 			      SOC_RESET_CONTROL_ADDRESS));
592 	qdf_mdelay(10);
593 
594 	/* Read Target CPU Intr Cause */
595 	val = hif_read32_mb(sc, mem +
596 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
597 	hif_debug("Target CPU Intr Cause after CE reset 0x%x", val);
598 
599 	/* CPU warm RESET */
600 	val =
601 		hif_read32_mb(sc, mem +
602 			     (RTC_SOC_BASE_ADDRESS |
603 			      SOC_RESET_CONTROL_ADDRESS));
604 	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
605 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
606 		       SOC_RESET_CONTROL_ADDRESS), val);
607 	val =
608 		hif_read32_mb(sc, mem +
609 			     (RTC_SOC_BASE_ADDRESS |
610 			      SOC_RESET_CONTROL_ADDRESS));
611 	hif_debug("RESET_CONTROL after cpu warm reset 0x%x", val);
612 
613 	qdf_mdelay(100);
614 	hif_debug("Target Warm reset complete");
615 
616 }
617 
618 #ifndef QCA_WIFI_3_0
619 /* only applicable to legacy ce */
620 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
621 {
622 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
623 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
624 	void __iomem *mem = sc->mem;
625 	uint32_t val;
626 
627 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
628 		return ATH_ISR_NOSCHED;
629 	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
630 	if (Q_TARGET_ACCESS_END(scn) < 0)
631 		return ATH_ISR_SCHED;
632 
633 	hif_debug("FW_INDICATOR register is 0x%x", val);
634 
635 	if (val & FW_IND_HELPER)
636 		return 0;
637 
638 	return 1;
639 }
640 #endif
641 
642 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
643 {
644 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
645 	uint16_t device_id = 0;
646 	uint32_t val;
647 	uint16_t timeout_count = 0;
648 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
649 
650 	/* Check device ID from PCIe configuration space for link status */
651 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
652 	if (device_id != sc->devid) {
653 		hif_err("Device ID does not match (read 0x%x, expect 0x%x)",
654 			device_id, sc->devid);
655 		return -EACCES;
656 	}
657 
658 	/* Check PCIe local register for bar/memory access */
659 	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
660 			   RTC_STATE_ADDRESS);
661 	hif_debug("RTC_STATE_ADDRESS is %08x", val);
662 
663 	/* Try to wake up the target if it sleeps */
664 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
665 		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
666 	hif_debug("PCIE_SOC_WAKE_ADDRESS is %08x",
667 		hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
668 		PCIE_SOC_WAKE_ADDRESS));
669 
670 	/* Check if the target can be woken up */
671 	while (!hif_targ_is_awake(scn, sc->mem)) {
672 		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
673 			hif_err("wake up timeout, %08x, %08x",
674 				hif_read32_mb(sc, sc->mem +
675 				     PCIE_LOCAL_BASE_ADDRESS +
676 				     RTC_STATE_ADDRESS),
677 				hif_read32_mb(sc, sc->mem +
678 				     PCIE_LOCAL_BASE_ADDRESS +
679 				     PCIE_SOC_WAKE_ADDRESS));
680 			return -EACCES;
681 		}
682 
683 		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
684 			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
685 
686 		qdf_mdelay(100);
687 		timeout_count += 100;
688 	}
689 
690 	/* Check Power register for SoC internal bus issues */
691 	val =
692 		hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
693 			     SOC_POWER_REG_OFFSET);
694 	hif_debug("Power register is %08x", val);
695 
696 	return 0;
697 }
698 
699 /**
700  * __hif_pci_dump_registers(): dump other PCI debug registers
701  * @scn: struct hif_softc
702  *
703  * This function dumps PCI debug registers.  The parent function
704  * dumps the copy engine registers before calling this function.
705  *
706  * Return: void
707  */
708 static void __hif_pci_dump_registers(struct hif_softc *scn)
709 {
710 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
711 	void __iomem *mem = sc->mem;
712 	uint32_t val, i, j;
713 	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
714 	uint32_t ce_base;
715 
716 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
717 		return;
718 
719 	/* DEBUG_INPUT_SEL_SRC = 0x6 */
720 	val =
721 		hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
722 			     WLAN_DEBUG_INPUT_SEL_OFFSET);
723 	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
724 	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
725 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
726 		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);
727 
728 	/* DEBUG_CONTROL_ENABLE = 0x1 */
729 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
730 			   WLAN_DEBUG_CONTROL_OFFSET);
731 	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
732 	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
733 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
734 		      WLAN_DEBUG_CONTROL_OFFSET, val);
735 
736 	hif_debug("Debug: inputsel: %x dbgctrl: %x",
737 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
738 			    WLAN_DEBUG_INPUT_SEL_OFFSET),
739 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
740 			    WLAN_DEBUG_CONTROL_OFFSET));
741 
742 	hif_debug("Debug CE");
743 	/* Loop CE debug output */
744 	/* AMBA_DEBUG_BUS_SEL = 0xc */
745 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
746 			    AMBA_DEBUG_BUS_OFFSET);
747 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
748 	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
749 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
750 		       val);
751 
752 	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
753 		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
754 		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
755 				   CE_WRAPPER_DEBUG_OFFSET);
756 		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
757 		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
758 		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
759 			      CE_WRAPPER_DEBUG_OFFSET, val);
760 
761 		hif_debug("ce wrapper: %d amdbg: %x cewdbg: %x",
762 			  wrapper_idx[i],
763 			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
764 				AMBA_DEBUG_BUS_OFFSET),
765 			  hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
766 				CE_WRAPPER_DEBUG_OFFSET));
767 
768 		if (wrapper_idx[i] <= 7) {
769 			for (j = 0; j <= 5; j++) {
770 				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
771 				/* For (j=0~5) write CE_DEBUG_SEL = j */
772 				val =
773 					hif_read32_mb(sc, mem + ce_base +
774 						     CE_DEBUG_OFFSET);
775 				val &= ~CE_DEBUG_SEL_MASK;
776 				val |= CE_DEBUG_SEL_SET(j);
777 				hif_write32_mb(sc, mem + ce_base +
778 					       CE_DEBUG_OFFSET, val);
779 
780 				/* read (@gpio_athr_wlan_reg)
781 				 * WLAN_DEBUG_OUT_DATA
782 				 */
783 				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
784 						    + WLAN_DEBUG_OUT_OFFSET);
785 				val = WLAN_DEBUG_OUT_DATA_GET(val);
786 
787 				hif_debug("module%d: cedbg: %x out: %x",
788 					  j,
789 					  hif_read32_mb(sc, mem + ce_base +
790 						CE_DEBUG_OFFSET), val);
791 			}
792 		} else {
793 			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
794 			val =
795 				hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
796 					     WLAN_DEBUG_OUT_OFFSET);
797 			val = WLAN_DEBUG_OUT_DATA_GET(val);
798 
799 			hif_debug("out: %x", val);
800 		}
801 	}
802 
803 	hif_debug("Debug PCIe:");
804 	/* Loop PCIe debug output */
805 	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
806 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
807 			    AMBA_DEBUG_BUS_OFFSET);
808 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
809 	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
810 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
811 		       AMBA_DEBUG_BUS_OFFSET, val);
812 
813 	for (i = 0; i <= 8; i++) {
814 		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
815 		val =
816 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
817 				     AMBA_DEBUG_BUS_OFFSET);
818 		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
819 		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
820 		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
821 			       AMBA_DEBUG_BUS_OFFSET, val);
822 
823 		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
824 		val =
825 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
826 				     WLAN_DEBUG_OUT_OFFSET);
827 		val = WLAN_DEBUG_OUT_DATA_GET(val);
828 
829 		hif_debug("amdbg: %x out: %x %x",
830 			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
831 				WLAN_DEBUG_OUT_OFFSET), val,
832 			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
833 				WLAN_DEBUG_OUT_OFFSET));
834 	}
835 
836 	Q_TARGET_ACCESS_END(scn);
837 }
838 
839 /**
840  * hif_pci_dump_registers(): dump bus debug registers
841  * @hif_ctx: struct hif_softc
842  *
843  * This function dumps hif bus debug registers
844  *
845  * Return: 0 for success or error code
846  */
847 int hif_pci_dump_registers(struct hif_softc *hif_ctx)
848 {
849 	int status;
850 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
851 
852 	status = hif_dump_ce_registers(scn);
853 
854 	if (status)
855 		hif_err("Dump CE Registers Failed");
856 
857 	/* dump non copy engine pci registers */
858 	__hif_pci_dump_registers(scn);
859 
860 	return 0;
861 }
862 
863 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
864 
865 /* worker thread to schedule wlan_tasklet in SLUB debug build */
866 static void reschedule_tasklet_work_handler(void *arg)
867 {
868 	struct hif_pci_softc *sc = arg;
869 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
870 
871 	if (!scn) {
872 		hif_err("hif_softc is NULL");
873 		return;
874 	}
875 
876 	if (scn->hif_init_done == false) {
877 		hif_err("wlan driver is unloaded");
878 		return;
879 	}
880 
881 	tasklet_schedule(&sc->intr_tq);
882 }
883 
884 /**
885  * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
886  * work
887  * @sc: HIF PCI Context
888  *
889  * Return: void
890  */
891 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
892 {
893 	qdf_create_work(0, &sc->reschedule_tasklet_work,
894 				reschedule_tasklet_work_handler, NULL);
895 }
896 #else
897 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
898 #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
899 
900 void wlan_tasklet(unsigned long data)
901 {
902 	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
903 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
904 
905 	if (scn->hif_init_done == false)
906 		goto end;
907 
908 	if (qdf_atomic_read(&scn->link_suspended))
909 		goto end;
910 
911 	if (!ADRASTEA_BU) {
912 		hif_fw_interrupt_handler(sc->irq_event, scn);
913 		if (scn->target_status == TARGET_STATUS_RESET)
914 			goto end;
915 	}
916 
917 end:
918 	qdf_atomic_set(&scn->tasklet_from_intr, 0);
919 	qdf_atomic_dec(&scn->active_tasklet_cnt);
920 }
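
/* active_tasklet_cnt bookkeeping: the legacy interrupt handler increments
 * the counter before tasklet_schedule() and wlan_tasklet() decrements it
 * on exit, so a value of zero means no CE/firmware tasklet is in flight
 * and teardown paths can wait for the count to drain.
 */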
921 
922 /**
923  * hif_disable_power_gating() - disable HW power gating
924  * @hif_ctx: hif context
925  *
926  * disables pcie L1 power states
927  */
928 static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
929 {
930 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
931 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
932 
933 	if (!scn) {
934 		hif_err("Could not disable ASPM, scn is NULL");
935 		return;
936 	}
937 
938 	/* Disable ASPM when pkt log is enabled */
939 	pfrm_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
940 	pfrm_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
941 }
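
/* Config offset 0x80 is assumed here to hold the PCIe Link Control
 * register for this device; clearing its low byte clears the ASPM
 * control bits (L0s/L1), while sc->lcr_val preserves the original value
 * so hif_enable_power_gating() can restore it later.
 */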
942 
943 /**
944  * hif_enable_power_gating() - enable HW power gating
945  * @sc: hif pci context
946  *
947  * enables pcie L1 power states
948  */
949 static void hif_enable_power_gating(struct hif_pci_softc *sc)
950 {
951 	if (!sc) {
952 		hif_err("Could not re-enable ASPM, sc is NULL");
953 		return;
954 	}
955 
956 	/* Re-enable ASPM after firmware/OTP download is complete */
957 	pfrm_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
958 }
959 
960 /**
961  * hif_pci_enable_power_management() - enable power management
962  * @hif_sc: hif context
963  * @is_packet_log_enabled: true if packet log is enabled
964  *
965  * Enables runtime pm, ASPM (hif_enable_power_gating) and re-enables
966  * soc sleep after driver load (hif_pci_target_sleep_state_adjust).
967  * note: epping mode does not call this function as it does not
968  *       care about saving power.
969  */
970 void hif_pci_enable_power_management(struct hif_softc *hif_sc,
971 				 bool is_packet_log_enabled)
972 {
973 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
974 	uint32_t mode;
975 
976 	if (!pci_ctx) {
977 		hif_err("hif_ctx null");
978 		return;
979 	}
980 
981 	mode = hif_get_conparam(hif_sc);
982 	if (mode == QDF_GLOBAL_FTM_MODE) {
983 		hif_info("Enable power gating for FTM mode");
984 		hif_enable_power_gating(pci_ctx);
985 		return;
986 	}
987 
988 	hif_pm_runtime_start(hif_sc);
989 
990 	if (!is_packet_log_enabled)
991 		hif_enable_power_gating(pci_ctx);
992 
993 	if (!CONFIG_ATH_PCIE_MAX_PERF &&
994 	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
995 	    !ce_srng_based(hif_sc)) {
996 		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
997 		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
998 			hif_err("Failed to set target to sleep");
999 	}
1000 }
1001 
1002 /**
1003  * hif_pci_disable_power_management() - disable power management
1004  * @hif_ctx: hif context
1005  *
1006  * Currently disables runtime pm. Should be updated to behave
1007  * gracefully if runtime pm is not started, and to take care
1008  * of ASPM and soc sleep for driver load.
1009  */
1010 void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
1011 {
1012 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1013 
1014 	if (!pci_ctx) {
1015 		hif_err("hif_ctx null");
1016 		return;
1017 	}
1018 
1019 	hif_pm_runtime_stop(hif_ctx);
1020 }
1021 
1022 void hif_pci_display_stats(struct hif_softc *hif_ctx)
1023 {
1024 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1025 
1026 	if (!pci_ctx) {
1027 		hif_err("hif_ctx null");
1028 		return;
1029 	}
1030 	hif_display_ce_stats(hif_ctx);
1031 
1032 	hif_print_pci_stats(pci_ctx);
1033 }
1034 
1035 void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1036 {
1037 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1038 
1039 	if (!pci_ctx) {
1040 		hif_err("hif_ctx null");
1041 		return;
1042 	}
1043 	hif_clear_ce_stats(&pci_ctx->ce_sc);
1044 }
1045 
1046 #define ATH_PCI_PROBE_RETRY_MAX 3
1047 /**
1048  * hif_pci_open(): open the hif PCI bus instance
1049  * @hif_ctx: hif context
1050  * @bus_type: bus type
1051  *
1052  * Return: QDF_STATUS
1053  */
1054 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
1055 {
1056 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1057 
1058 	hif_ctx->bus_type = bus_type;
1059 	hif_pm_runtime_open(hif_ctx);
1060 
1061 	qdf_spinlock_create(&sc->irq_lock);
1062 
1063 	return hif_ce_open(hif_ctx);
1064 }
1065 
1066 /**
1067  * hif_wake_target_cpu() - wake the target's cpu
1068  * @scn: hif context
1069  *
1070  * Send an interrupt to the device to wake up the Target CPU
1071  * so it has an opportunity to notice any changed state.
1072  */
1073 static void hif_wake_target_cpu(struct hif_softc *scn)
1074 {
1075 	QDF_STATUS rv;
1076 	uint32_t core_ctrl;
1077 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1078 
1079 	rv = hif_diag_read_access(hif_hdl,
1080 				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1081 				  &core_ctrl);
1082 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1083 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1084 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1085 
1086 	rv = hif_diag_write_access(hif_hdl,
1087 				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1088 				   core_ctrl);
1089 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1090 }
1091 
1092 /**
1093  * soc_wake_reset() - allow the target to go to sleep
1094  * @scn: hif_softc
1095  *
1096  * Clear the force wake register.  This is called from
1097  * hif_sleep_entry and when canceling the deferred sleep timer.
1098  */
1099 static void soc_wake_reset(struct hif_softc *scn)
1100 {
1101 	hif_write32_mb(scn, scn->mem +
1102 		PCIE_LOCAL_BASE_ADDRESS +
1103 		PCIE_SOC_WAKE_ADDRESS,
1104 		PCIE_SOC_WAKE_RESET);
1105 }
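
/* Wake handshake used throughout this file: write PCIE_SOC_WAKE_V_MASK
 * to PCIE_SOC_WAKE_ADDRESS to request that the target stay awake, poll
 * RTC_STATE_ADDRESS until hif_targ_is_awake() reports the clocks are on,
 * then write PCIE_SOC_WAKE_RESET (as above) to allow sleep again.
 */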
1106 
1107 /**
1108  * hif_sleep_entry() - gate target sleep
1109  * @arg: hif context
1110  *
1111  * This function is the callback for the sleep timer.
1112  * Check if last force awake critical section was at least
1113  * HIF_MIN_SLEEP_INACTIVITY_TIME_MS ago.  If it was,
1114  * allow the target to go to sleep and cancel the sleep timer;
1115  * otherwise, reschedule the sleep timer.
1116  */
1117 static void hif_sleep_entry(void *arg)
1118 {
1119 	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1120 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1121 	uint32_t idle_ms;
1122 
1123 	if (scn->recovery)
1124 		return;
1125 
1126 	if (hif_is_driver_unloading(scn))
1127 		return;
1128 
1129 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1130 	if (hif_state->fake_sleep) {
1131 		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1132 						    - hif_state->sleep_ticks);
1133 		if (!hif_state->verified_awake &&
1134 		    idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1135 			if (!qdf_atomic_read(&scn->link_suspended)) {
1136 				soc_wake_reset(scn);
1137 				hif_state->fake_sleep = false;
1138 			}
1139 		} else {
1140 			qdf_timer_stop(&hif_state->sleep_timer);
1141 			qdf_timer_start(&hif_state->sleep_timer,
1142 					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1143 		}
1144 	}
1145 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1146 }
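
/* Deferred sleep in brief: rather than letting the target sleep the
 * moment the last access completes, fake_sleep is set and this timer is
 * armed. If no verified wakeup occurs within
 * HIF_MIN_SLEEP_INACTIVITY_TIME_MS, the SOC_WAKE request is cleared;
 * otherwise the timer is pushed out. This amortizes wakeup latency
 * across bursts of register traffic.
 */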
1147 
1148 #define HIF_HIA_MAX_POLL_LOOP    1000000
1149 #define HIF_HIA_POLLING_DELAY_MS 10
1150 
1151 #ifdef QCA_HIF_HIA_EXTND
1152 
1153 static void hif_set_hia_extnd(struct hif_softc *scn)
1154 {
1155 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1156 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1157 	uint32_t target_type = tgt_info->target_type;
1158 
1159 	hif_info("E");
1160 
1161 	if ((target_type == TARGET_TYPE_AR900B) ||
1162 			target_type == TARGET_TYPE_QCA9984 ||
1163 			target_type == TARGET_TYPE_QCA9888) {
1164 		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
1165 		 * in RTC space
1166 		 */
1167 		tgt_info->target_revision
1168 			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
1169 					+ CHIP_ID_ADDRESS));
1170 		qdf_print("chip_id 0x%x chip_revision 0x%x",
1171 			  target_type, tgt_info->target_revision);
1172 	}
1173 
1174 	{
1175 		uint32_t flag2_value = 0;
1176 		uint32_t flag2_targ_addr =
1177 			host_interest_item_address(target_type,
1178 			offsetof(struct host_interest_s, hi_skip_clock_init));
1179 
1180 		if ((ar900b_20_targ_clk != -1) &&
1181 			(frac != -1) && (intval != -1)) {
1182 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1183 				&flag2_value);
1184 			qdf_print("\n Setting clk_override");
1185 			flag2_value |= CLOCK_OVERRIDE;
1186 
1187 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1188 					flag2_value);
1189 			qdf_print("\n CLOCK PLL val set %d", flag2_value);
1190 		} else {
1191 			qdf_print("\n CLOCK PLL skipped");
1192 		}
1193 	}
1194 
1195 	if (target_type == TARGET_TYPE_AR900B
1196 			|| target_type == TARGET_TYPE_QCA9984
1197 			|| target_type == TARGET_TYPE_QCA9888) {
1198 
1199 		/* for AR9980_2.0, a 300 MHz clock is used; right now we assume
1200 		 * this would be supplied through module parameters,
1201 		 * if not supplied assumed default or same behavior as 1.0.
1202 		 * Assume 1.0 clock can't be tuned, reset to defaults
1203 		 */
1204 
1205 		qdf_print(KERN_INFO
1206 			  "%s: setting the target pll frac %x intval %x",
1207 			  __func__, frac, intval);
1208 
1209 		/* do not touch frac, and int val, let them be default -1,
1210 		 * if desired, host can supply these through module params
1211 		 */
1212 		if (frac != -1 || intval != -1) {
1213 			uint32_t flag2_value = 0;
1214 			uint32_t flag2_targ_addr;
1215 
1216 			flag2_targ_addr =
1217 				host_interest_item_address(target_type,
1218 				offsetof(struct host_interest_s,
1219 					hi_clock_info));
1220 			hif_diag_read_access(hif_hdl,
1221 				flag2_targ_addr, &flag2_value);
1222 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1223 				  flag2_value);
1224 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1225 			qdf_print("\n INT Val %x  Address %x",
1226 				  intval, flag2_value + 4);
1227 			hif_diag_write_access(hif_hdl,
1228 					flag2_value + 4, intval);
1229 		} else {
1230 			qdf_print(KERN_INFO
1231 				  "%s: no frac provided, skipping pre-configuring PLL",
1232 				  __func__);
1233 		}
1234 
1235 		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
1236 		if ((target_type == TARGET_TYPE_AR900B)
1237 			&& (tgt_info->target_revision == AR900B_REV_2)
1238 			&& ar900b_20_targ_clk != -1) {
1239 			uint32_t flag2_value = 0;
1240 			uint32_t flag2_targ_addr;
1241 
1242 			flag2_targ_addr
1243 				= host_interest_item_address(target_type,
1244 					offsetof(struct host_interest_s,
1245 					hi_desired_cpu_speed_hz));
1246 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1247 							&flag2_value);
1248 			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
1249 				  flag2_value);
1250 			hif_diag_write_access(hif_hdl, flag2_value,
1251 				ar900b_20_targ_clk/*300000000u*/);
1252 		} else if (target_type == TARGET_TYPE_QCA9888) {
1253 			uint32_t flag2_targ_addr;
1254 
1255 			if (200000000u != qca9888_20_targ_clk) {
1256 				qca9888_20_targ_clk = 300000000u;
1257 				/* Setting the target clock speed to 300 mhz */
1258 			}
1259 
1260 			flag2_targ_addr
1261 				= host_interest_item_address(target_type,
1262 					offsetof(struct host_interest_s,
1263 					hi_desired_cpu_speed_hz));
1264 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1265 				qca9888_20_targ_clk);
1266 		} else {
1267 			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
1268 				  __func__);
1269 		}
1270 	} else {
1271 		if (frac != -1 || intval != -1) {
1272 			uint32_t flag2_value = 0;
1273 			uint32_t flag2_targ_addr =
1274 				host_interest_item_address(target_type,
1275 					offsetof(struct host_interest_s,
1276 							hi_clock_info));
1277 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1278 						&flag2_value);
1279 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1280 				  flag2_value);
1281 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1282 			qdf_print("\n INT Val %x  Address %x", intval,
1283 				  flag2_value + 4);
1284 			hif_diag_write_access(hif_hdl, flag2_value + 4,
1285 					      intval);
1286 		}
1287 	}
1288 }
1289 
1290 #else
1291 
1292 static void hif_set_hia_extnd(struct hif_softc *scn)
1293 {
1294 }
1295 
1296 #endif
1297 
1298 /**
1299  * hif_set_hia() - fill out the host interest area
1300  * @scn: hif context
1301  *
1302  * This is replaced by hif_wlan_enable for integrated targets.
1303  * This fills out the host interest area.  The firmware will
1304  * process these memory addresses when it is first brought out
1305  * of reset.
1306  *
1307  * Return: 0 for success.
1308  */
1309 static int hif_set_hia(struct hif_softc *scn)
1310 {
1311 	QDF_STATUS rv;
1312 	uint32_t interconnect_targ_addr = 0;
1313 	uint32_t pcie_state_targ_addr = 0;
1314 	uint32_t pipe_cfg_targ_addr = 0;
1315 	uint32_t svc_to_pipe_map = 0;
1316 	uint32_t pcie_config_flags = 0;
1317 	uint32_t flag2_value = 0;
1318 	uint32_t flag2_targ_addr = 0;
1319 #ifdef QCA_WIFI_3_0
1320 	uint32_t host_interest_area = 0;
1321 	uint8_t i;
1322 #else
1323 	uint32_t ealloc_value = 0;
1324 	uint32_t ealloc_targ_addr = 0;
1325 	uint8_t banks_switched = 1;
1326 	uint32_t chip_id;
1327 #endif
1328 	uint32_t pipe_cfg_addr;
1329 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1330 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1331 	uint32_t target_type = tgt_info->target_type;
1332 	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
1333 	static struct CE_pipe_config *target_ce_config;
1334 	struct service_to_pipe *target_service_to_ce_map;
1335 
1336 	hif_info("E");
1337 
1338 	hif_get_target_ce_config(scn,
1339 				 &target_ce_config, &target_ce_config_sz,
1340 				 &target_service_to_ce_map,
1341 				 &target_service_to_ce_map_sz,
1342 				 NULL, NULL);
1343 
1344 	if (ADRASTEA_BU)
1345 		return 0;
1346 
1347 #ifdef QCA_WIFI_3_0
1348 	i = 0;
1349 	while (i < HIF_HIA_MAX_POLL_LOOP) {
1350 		host_interest_area = hif_read32_mb(scn, scn->mem +
1351 						A_SOC_CORE_SCRATCH_0_ADDRESS);
1352 		if ((host_interest_area & 0x01) == 0) {
1353 			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1354 			host_interest_area = 0;
1355 			i++;
1356 			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1357 				hif_err("poll timeout: %d", i);
1358 		} else {
1359 			host_interest_area &= (~0x01);
1360 			hif_write32_mb(scn, scn->mem + 0x113014, 0);
1361 			break;
1362 		}
1363 	}
1364 
1365 	if (i >= HIF_HIA_MAX_POLL_LOOP) {
1366 		hif_err("hia polling timeout");
1367 		return -EIO;
1368 	}
1369 
1370 	if (host_interest_area == 0) {
1371 		hif_err("host_interest_area = 0");
1372 		return -EIO;
1373 	}
1374 
1375 	interconnect_targ_addr = host_interest_area +
1376 			offsetof(struct host_interest_area_t,
1377 			hi_interconnect_state);
1378 
1379 	flag2_targ_addr = host_interest_area +
1380 			offsetof(struct host_interest_area_t, hi_option_flag2);
1381 
1382 #else
1383 	interconnect_targ_addr = hif_hia_item_address(target_type,
1384 		offsetof(struct host_interest_s, hi_interconnect_state));
1385 	ealloc_targ_addr = hif_hia_item_address(target_type,
1386 		offsetof(struct host_interest_s, hi_early_alloc));
1387 	flag2_targ_addr = hif_hia_item_address(target_type,
1388 		offsetof(struct host_interest_s, hi_option_flag2));
1389 #endif
1390 	/* Supply Target-side CE configuration */
1391 	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1392 			  &pcie_state_targ_addr);
1393 	if (rv != QDF_STATUS_SUCCESS) {
1394 		hif_err("interconnect_targ_addr = 0x%0x, ret = %d",
1395 			interconnect_targ_addr, rv);
1396 		goto done;
1397 	}
1398 	if (pcie_state_targ_addr == 0) {
1399 		rv = QDF_STATUS_E_FAILURE;
1400 		hif_err("pcie state addr is 0");
1401 		goto done;
1402 	}
1403 	pipe_cfg_addr = pcie_state_targ_addr +
1404 			  offsetof(struct pcie_state_s,
1405 			  pipe_cfg_addr);
1406 	rv = hif_diag_read_access(hif_hdl,
1407 			  pipe_cfg_addr,
1408 			  &pipe_cfg_targ_addr);
1409 	if (rv != QDF_STATUS_SUCCESS) {
1410 		hif_err("pipe_cfg_addr = 0x%0x, ret = %d", pipe_cfg_addr, rv);
1411 		goto done;
1412 	}
1413 	if (pipe_cfg_targ_addr == 0) {
1414 		rv = QDF_STATUS_E_FAILURE;
1415 		hif_err("pipe cfg addr is 0");
1416 		goto done;
1417 	}
1418 
1419 	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1420 			(uint8_t *) target_ce_config,
1421 			target_ce_config_sz);
1422 
1423 	if (rv != QDF_STATUS_SUCCESS) {
1424 		hif_err("write pipe cfg: %d", rv);
1425 		goto done;
1426 	}
1427 
1428 	rv = hif_diag_read_access(hif_hdl,
1429 			  pcie_state_targ_addr +
1430 			  offsetof(struct pcie_state_s,
1431 			   svc_to_pipe_map),
1432 			  &svc_to_pipe_map);
1433 	if (rv != QDF_STATUS_SUCCESS) {
1434 		hif_err("get svc/pipe map: %d", rv);
1435 		goto done;
1436 	}
1437 	if (svc_to_pipe_map == 0) {
1438 		rv = QDF_STATUS_E_FAILURE;
1439 		hif_err("svc_to_pipe map is 0");
1440 		goto done;
1441 	}
1442 
1443 	rv = hif_diag_write_mem(hif_hdl,
1444 			svc_to_pipe_map,
1445 			(uint8_t *) target_service_to_ce_map,
1446 			target_service_to_ce_map_sz);
1447 	if (rv != QDF_STATUS_SUCCESS) {
1448 		hif_err("write svc/pipe map: %d", rv);
1449 		goto done;
1450 	}
1451 
1452 	rv = hif_diag_read_access(hif_hdl,
1453 			pcie_state_targ_addr +
1454 			offsetof(struct pcie_state_s,
1455 			config_flags),
1456 			&pcie_config_flags);
1457 	if (rv != QDF_STATUS_SUCCESS) {
1458 		hif_err("get pcie config_flags: %d", rv);
1459 		goto done;
1460 	}
1461 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1462 	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1463 #else
1464 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1465 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1466 	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1467 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1468 	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1469 #endif
1470 	rv = hif_diag_write_mem(hif_hdl,
1471 			pcie_state_targ_addr +
1472 			offsetof(struct pcie_state_s,
1473 			config_flags),
1474 			(uint8_t *) &pcie_config_flags,
1475 			sizeof(pcie_config_flags));
1476 	if (rv != QDF_STATUS_SUCCESS) {
1477 		hif_err("write pcie config_flags: %d", rv);
1478 		goto done;
1479 	}
1480 
1481 #ifndef QCA_WIFI_3_0
1482 	/* configure early allocation */
1483 	ealloc_targ_addr = hif_hia_item_address(target_type,
1484 						offsetof(
1485 						struct host_interest_s,
1486 						hi_early_alloc));
1487 
1488 	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1489 			&ealloc_value);
1490 	if (rv != QDF_STATUS_SUCCESS) {
1491 		hif_err("get early alloc val: %d", rv);
1492 		goto done;
1493 	}
1494 
1495 	/* 1 bank is switched to IRAM, except ROME 1.0 */
1496 	ealloc_value |=
1497 		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1498 		 HI_EARLY_ALLOC_MAGIC_MASK);
1499 
1500 	rv = hif_diag_read_access(hif_hdl,
1501 			  CHIP_ID_ADDRESS |
1502 			  RTC_SOC_BASE_ADDRESS, &chip_id);
1503 	if (rv != QDF_STATUS_SUCCESS) {
1504 		hif_err("get chip id val: %d", rv);
1505 		goto done;
1506 	}
1507 	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1508 		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1509 		switch (CHIP_ID_REVISION_GET(chip_id)) {
1510 		case 0x2:       /* ROME 1.3 */
1511 			/* 2 banks are switched to IRAM */
1512 			banks_switched = 2;
1513 			break;
1514 		case 0x4:       /* ROME 2.1 */
1515 		case 0x5:       /* ROME 2.2 */
1516 			banks_switched = 6;
1517 			break;
1518 		case 0x8:       /* ROME 3.0 */
1519 		case 0x9:       /* ROME 3.1 */
1520 		case 0xA:       /* ROME 3.2 */
1521 			banks_switched = 9;
1522 			break;
1523 		case 0x0:       /* ROME 1.0 */
1524 		case 0x1:       /* ROME 1.1 */
1525 		default:
1526 			/* 3 banks are switched to IRAM */
1527 			banks_switched = 3;
1528 			break;
1529 		}
1530 	}
1531 
1532 	ealloc_value |=
1533 		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1534 		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1535 
1536 	rv = hif_diag_write_access(hif_hdl,
1537 				ealloc_targ_addr,
1538 				ealloc_value);
1539 	if (rv != QDF_STATUS_SUCCESS) {
1540 		hif_err("set early alloc val: %d", rv);
1541 		goto done;
1542 	}
1543 #endif
1544 	if ((target_type == TARGET_TYPE_AR900B)
1545 			|| (target_type == TARGET_TYPE_QCA9984)
1546 			|| (target_type == TARGET_TYPE_QCA9888)
1547 			|| (target_type == TARGET_TYPE_AR9888)) {
1548 		hif_set_hia_extnd(scn);
1549 	}
1550 
1551 	/* Tell Target to proceed with initialization */
1552 	flag2_targ_addr = hif_hia_item_address(target_type,
1553 						offsetof(
1554 						struct host_interest_s,
1555 						hi_option_flag2));
1556 
1557 	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1558 			  &flag2_value);
1559 	if (rv != QDF_STATUS_SUCCESS) {
1560 		hif_err("get option val: %d", rv);
1561 		goto done;
1562 	}
1563 
1564 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1565 	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1566 			   flag2_value);
1567 	if (rv != QDF_STATUS_SUCCESS) {
1568 		hif_err("set option val: %d", rv);
1569 		goto done;
1570 	}
1571 
1572 	hif_wake_target_cpu(scn);
1573 
1574 done:
1575 
1576 	return qdf_status_to_os_return(rv);
1577 }
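
/* HIA programming sequence in brief: read hi_interconnect_state to find
 * the target's pcie_state_s, write the host's CE pipe configuration and
 * service-to-pipe map into target memory, patch the PCIe config flags,
 * optionally set early-allocation/clock hints, then set
 * HI_OPTION_EARLY_CFG_DONE in hi_option_flag2 and interrupt the target
 * CPU so firmware proceeds with initialization.
 */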
1578 
1579 /**
1580  * hif_bus_configure() - configure the pcie bus
1581  * @hif_sc: pointer to the hif context.
1582  *
1583  * return: 0 for success. nonzero for failure.
1584  */
1585 int hif_pci_bus_configure(struct hif_softc *hif_sc)
1586 {
1587 	int status = 0;
1588 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1589 	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
1590 
1591 	hif_ce_prepare_config(hif_sc);
1592 
1593 	/* initialize sleep state adjust variables */
1595 	hif_state->keep_awake_count = 0;
1596 	hif_state->fake_sleep = false;
1597 	hif_state->sleep_ticks = 0;
1598 
1599 	qdf_timer_init(NULL, &hif_state->sleep_timer,
1600 			       hif_sleep_entry, (void *)hif_state,
1601 			       QDF_TIMER_TYPE_WAKE_APPS);
1602 	hif_state->sleep_timer_init = true;
1603 
1604 	status = hif_wlan_enable(hif_sc);
1605 	if (status) {
1606 		hif_err("hif_wlan_enable error: %d", status);
1607 		goto timer_free;
1608 	}
1609 
1610 	A_TARGET_ACCESS_LIKELY(hif_sc);
1611 
1612 	if ((CONFIG_ATH_PCIE_MAX_PERF ||
1613 	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
1614 	    !ce_srng_based(hif_sc)) {
1615 		/*
1616 		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
1617 		 * prevent sleep when we want to keep firmware always awake
1618 		 * note: when we want to keep firmware always awake,
1619 		 *       hif_target_sleep_state_adjust will point to a dummy
1620 		 *       function, and hif_pci_target_sleep_state_adjust must
1621 		 *       be called instead.
1622 		 * note: bus type check is here because AHB bus is reusing
1623 		 *       hif_pci_bus_configure code.
1624 		 */
1625 		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
1626 			if (hif_pci_target_sleep_state_adjust(hif_sc,
1627 					false, true) < 0) {
1628 				status = -EACCES;
1629 				goto disable_wlan;
1630 			}
1631 		}
1632 	}
1633 
1634 	/* todo: consider replacing this with an srng field */
1635 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
1636 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
1637 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA9574) ||
1638 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
1639 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6122) ||
1640 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
1641 	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
1642 		hif_sc->per_ce_irq = true;
1643 	}
1644 
1645 	status = hif_config_ce(hif_sc);
1646 	if (status)
1647 		goto disable_wlan;
1648 
1649 	if (hif_needs_bmi(hif_osc)) {
1650 		status = hif_set_hia(hif_sc);
1651 		if (status)
1652 			goto unconfig_ce;
1653 
1654 		hif_debug("hif_set_hia done");
1655 
1656 	}
1657 
1658 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
1659 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
1660 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA9574) ||
1661 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
1662 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6122) ||
1663 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
1664 	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI)) {
1665 		hif_debug("Skip irq config for PCI based 8074 target");
1666 	} else {
1667 		status = hif_configure_irq(hif_sc);
1668 		if (status < 0)
1669 			goto unconfig_ce;
1670 	}
1671 
1672 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
1673 
1674 	return status;
1675 
1676 unconfig_ce:
1677 	hif_unconfig_ce(hif_sc);
1678 disable_wlan:
1679 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
1680 	hif_wlan_disable(hif_sc);
1681 
1682 timer_free:
1683 	qdf_timer_stop(&hif_state->sleep_timer);
1684 	qdf_timer_free(&hif_state->sleep_timer);
1685 	hif_state->sleep_timer_init = false;
1686 
1687 	hif_err("Failed, status: %d", status);
1688 	return status;
1689 }
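
/* Note the error unwind above mirrors setup in reverse: CE config is
 * undone before wlan is disabled, and the sleep timer is freed last
 * since it is created first.
 */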
1690 
1691 /**
1692  * hif_pci_close(): close the hif PCI bus instance
1693  * @hif_sc: hif context
1694  * Return: n/a
1695  */
1696 void hif_pci_close(struct hif_softc *hif_sc)
1697 {
1698 	hif_pm_runtime_close(hif_sc);
1699 	hif_ce_close(hif_sc);
1700 }
1701 
1702 #define BAR_NUM 0
1703 
1704 static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
1705 				struct pci_dev *pdev,
1706 				const struct pci_device_id *id)
1707 {
1708 	void __iomem *mem;
1709 	int ret = 0;
1710 	uint16_t device_id = 0;
1711 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1712 
1713 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
1714 	if (device_id != id->device)  {
1715 		hif_err(
1716 		   "dev id mismatch, config id = 0x%x, probing id = 0x%x",
1717 		   device_id, id->device);
1718 		/* pci link is down, so returning with error code */
1719 		return -EIO;
1720 	}
1721 
1722 	/* FIXME: temp. commenting out assign_resource
1723 	 * call for dev_attach to work on 2.6.38 kernel
1724 	 */
1725 #if (!defined(__LINUX_ARM_ARCH__))
1726 	if (pci_assign_resource(pdev, BAR_NUM)) {
1727 		hif_err("pci_assign_resource error");
1728 		return -EIO;
1729 	}
1730 #endif
1731 	if (pci_enable_device(pdev)) {
1732 		hif_err("pci_enable_device error");
1733 		return -EIO;
1734 	}
1735 
1736 	/* Request MMIO resources */
1737 	ret = pci_request_region(pdev, BAR_NUM, "ath");
1738 	if (ret) {
1739 		hif_err("PCI MMIO reservation error");
1740 		ret = -EIO;
1741 		goto err_region;
1742 	}
1743 
1744 #ifdef CONFIG_ARM_LPAE
1745 	/* if CONFIG_ARM_LPAE is enabled, we have to set the 64-bit DMA mask
1746 	 * even for 32-bit devices.
1747 	 */
1748 	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1749 	if (ret) {
1750 		hif_err("Cannot enable 64-bit pci DMA");
1751 		goto err_dma;
1752 	}
1753 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1754 	if (ret) {
1755 		hif_err("Cannot enable 64-bit DMA");
1756 		goto err_dma;
1757 	}
1758 #else
1759 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1760 	if (ret) {
1761 		hif_err("Cannot enable 32-bit pci DMA");
1762 		goto err_dma;
1763 	}
1764 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1765 	if (ret) {
1766 		hif_err("Cannot enable 32-bit consistent DMA!");
1767 		goto err_dma;
1768 	}
1769 #endif
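
	/* Note: pci_set_dma_mask()/pci_set_consistent_dma_mask() are the
	 * legacy DMA API; on recent kernels the equivalent (hypothetical
	 * here, not used by this driver) would be a single call such as:
	 *
	 *	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	 */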
1770 
1771 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
1772 
1773 	/* Set bus master bit in PCI_COMMAND to enable DMA */
1774 	pci_set_master(pdev);
1775 
1776 	/* Arrange for access to Target SoC registers. */
1777 	mem = pci_iomap(pdev, BAR_NUM, 0);
1778 	if (!mem) {
1779 		hif_err("PCI iomap error");
1780 		ret = -EIO;
1781 		goto err_iomap;
1782 	}
1783 
1784 	hif_info("*****BAR is %pK", (void *)mem);
1785 
1786 	sc->mem = mem;
1787 
1788 	/* Hawkeye emulation specific change */
1789 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
1790 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
1791 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
1792 		(device_id == RUMIM2M_DEVICE_ID_NODE3) ||
1793 		(device_id == RUMIM2M_DEVICE_ID_NODE4) ||
1794 		(device_id == RUMIM2M_DEVICE_ID_NODE5)) {
1795 		mem = mem + 0x0c000000;
1796 		sc->mem = mem;
1797 		hif_info("Changing PCI mem base to %pK", sc->mem);
1798 	}
1799 
1800 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
1801 	ol_sc->mem = mem;
1802 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
1803 	sc->pci_enabled = true;
1804 	return ret;
1805 
1806 err_iomap:
1807 	pci_clear_master(pdev);
1808 err_dma:
1809 	pci_release_region(pdev, BAR_NUM);
1810 err_region:
1811 	pci_disable_device(pdev);
1812 	return ret;
1813 }
1814 
1815 static int hif_enable_pci_pld(struct hif_pci_softc *sc,
1816 			      struct pci_dev *pdev,
1817 			      const struct pci_device_id *id)
1818 {
1819 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
1820 	sc->pci_enabled = true;
1821 	return 0;
1822 }
1823 
1824 
1825 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
1826 {
1827 	pci_disable_msi(sc->pdev);
1828 	pci_iounmap(sc->pdev, sc->mem);
1829 	pci_clear_master(sc->pdev);
1830 	pci_release_region(sc->pdev, BAR_NUM);
1831 	pci_disable_device(sc->pdev);
1832 }
1833 
1834 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
1835 
1836 static void hif_disable_pci(struct hif_pci_softc *sc)
1837 {
1838 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1839 
1840 	if (!ol_sc) {
1841 		hif_err("ol_sc = NULL");
1842 		return;
1843 	}
1844 	hif_pci_device_reset(sc);
1845 	sc->hif_pci_deinit(sc);
1846 
1847 	sc->mem = NULL;
1848 	ol_sc->mem = NULL;
1849 }
1850 
1851 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
1852 {
1853 	int ret = 0;
1854 	int targ_awake_limit = 500;
1855 #ifndef QCA_WIFI_3_0
1856 	uint32_t fw_indicator;
1857 #endif
1858 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1859 
1860 	/*
1861 	 * Verify that the Target was started cleanly.
1862 	 * The case where this is most likely is with an AUX-powered
1863 	 * Target and a Host in WoW mode. If the Host crashes,
1864 	 * loses power, or is restarted (without unloading the driver)
1865 	 * then the Target is left (aux) powered and running.  On a
1866 	 * subsequent driver load, the Target is in an unexpected state.
1867 	 * We try to catch that here in order to reset the Target and
1868 	 * retry the probe.
1869 	 */
1870 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
1871 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
1872 	while (!hif_targ_is_awake(scn, sc->mem)) {
1873 		if (0 == targ_awake_limit) {
1874 			hif_err("target awake timeout");
1875 			ret = -EAGAIN;
1876 			goto end;
1877 		}
1878 		qdf_mdelay(1);
1879 		targ_awake_limit--;
1880 	}
1881 
1882 #if PCIE_BAR0_READY_CHECKING
1883 	{
1884 		int wait_limit = 200;
1885 		/* Synchronization point: wait until BAR0 is configured */
1886 		while (wait_limit-- &&
1887 			   !(hif_read32_mb(sc, sc->mem +
1888 					  PCIE_LOCAL_BASE_ADDRESS +
1889 					  PCIE_SOC_RDY_STATUS_ADDRESS)
1890 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
1891 			qdf_mdelay(10);
1892 		}
1893 		if (wait_limit < 0) {
1894 			/* AR6320v1 doesn't support checking of BAR0
1895 			 * configuration, takes two sec to wait BAR0 ready
1896 			 */
1897 			hif_debug("AR6320v1 waits two sec for BAR0");
1898 		}
1899 	}
1900 #endif
1901 
1902 #ifndef QCA_WIFI_3_0
1903 	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
1904 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
1905 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
1906 
1907 	if (fw_indicator & FW_IND_INITIALIZED) {
1908 		hif_err("Target is in an unknown state. EAGAIN");
1909 		ret = -EAGAIN;
1910 		goto end;
1911 	}
1912 #endif
1913 
1914 end:
1915 	return ret;
1916 }
1917 
1918 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
1919 {
1920 	int ret = 0;
1921 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1922 	uint32_t target_type = scn->target_info.target_type;
1923 
1924 	hif_info("E");
1925 
1926 	/* target does not support MSI, or MSI IRQ setup failed */
1927 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
1928 	ret = request_irq(sc->pdev->irq,
1929 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
1930 			  "wlan_pci", sc);
1931 	if (ret) {
1932 		hif_err("request_irq failed, ret: %d", ret);
1933 		goto end;
1934 	}
1935 	scn->wake_irq = sc->pdev->irq;
1936 	/* Use sc->irq instead of sc->pdev->irq;
1937 	 * platform_device pdev doesn't have an irq field
1938 	 */
1939 	sc->irq = sc->pdev->irq;
1940 	/* Use Legacy PCI Interrupts */
1941 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
1942 		  PCIE_INTR_ENABLE_ADDRESS),
1943 		  HOST_GROUP0_MASK);
1944 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
1945 			       PCIE_INTR_ENABLE_ADDRESS));
1946 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
1947 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
1948 
1949 	if ((target_type == TARGET_TYPE_IPQ4019) ||
1950 			(target_type == TARGET_TYPE_AR900B)  ||
1951 			(target_type == TARGET_TYPE_QCA9984) ||
1952 			(target_type == TARGET_TYPE_AR9888) ||
1953 			(target_type == TARGET_TYPE_QCA9888) ||
1954 			(target_type == TARGET_TYPE_AR6320V1) ||
1955 			(target_type == TARGET_TYPE_AR6320V2) ||
1956 			(target_type == TARGET_TYPE_AR6320V3)) {
1957 		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
1958 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
1959 	}
1960 end:
1961 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
1962 			  "%s: X, ret = %d", __func__, ret);
1963 	return ret;
1964 }
1965 
1966 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
1967 {
1968 	int ret;
1969 	int ce_id, irq, irq_id;
1970 	uint32_t msi_data_start;
1971 	uint32_t msi_data_count;
1972 	uint32_t msi_irq_start;
1973 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
1974 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
1975 
1976 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
1977 					    &msi_data_count, &msi_data_start,
1978 					    &msi_irq_start);
1979 	if (ret)
1980 		return ret;
1981 
1982 	/* needs to match the ce_id -> irq data mapping
1983 	 * used in the srng parameter configuration
1984 	 */
1985 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1986 		unsigned int msi_data;
1987 
1988 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
1989 			continue;
1990 
1991 		if (!ce_sc->tasklets[ce_id].inited)
1992 			continue;
1993 
1994 		irq_id = scn->int_assignment->msi_idx[ce_id];
1995 		msi_data = irq_id + msi_irq_start;
1996 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
1997 
1998 		hif_ce_irq_remove_affinity_hint(irq);
1999 
2000 		hif_debug("%s: (ce_id %d, irq_id %d, msi_data %d, irq %d)",
2001 			  __func__, ce_id, irq_id, msi_data, irq);
2002 
2003 		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
2004 	}
2005 
2006 	return ret;
2007 }
2008 
2009 void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2010 {
2011 	int i, j, irq;
2012 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2013 	struct hif_exec_context *hif_ext_group;
2014 
2015 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2016 		hif_ext_group = hif_state->hif_ext_group[i];
2017 		if (hif_ext_group->irq_requested) {
2018 			hif_ext_group->irq_requested = false;
2019 			for (j = 0; j < hif_ext_group->numirq; j++) {
2020 				irq = hif_ext_group->os_irq[j];
2021 				if (scn->irq_unlazy_disable) {
2022 					qdf_dev_clear_irq_status_flags(
2023 							irq,
2024 							QDF_IRQ_DISABLE_UNLAZY);
2025 				}
2026 				pfrm_free_irq(scn->qdf_dev->dev,
2027 					      irq, hif_ext_group);
2028 			}
2029 			hif_ext_group->numirq = 0;
2030 		}
2031 	}
2032 }
2033 
2034 /**
2035  * hif_pci_nointrs(): disable IRQ
2036  *
2037  * This function stops interrupt(s)
2038  *
2039  * @scn: struct hif_softc
2040  *
2041  * Return: none
2042  */
2043 void hif_pci_nointrs(struct hif_softc *scn)
2044 {
2045 	int i, ret;
2046 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2047 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2048 
2049 	scn->free_irq_done = true;
2050 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2051 
2052 	if (scn->request_irq_done == false)
2053 		return;
2054 
2055 	hif_pci_deconfigure_grp_irq(scn);
2056 
2057 	ret = hif_ce_srng_msi_free_irq(scn);
2058 	if (ret != -EINVAL) {
2059 		/* ce irqs freed in hif_ce_srng_msi_free_irq */
2060 
2061 		if (scn->wake_irq)
2062 			pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
2063 		scn->wake_irq = 0;
2064 	} else if (sc->num_msi_intrs > 0) {
2065 		/* MSI interrupt(s) */
2066 		for (i = 0; i < sc->num_msi_intrs; i++)
2067 			free_irq(sc->irq + i, sc);
2068 		sc->num_msi_intrs = 0;
2069 	} else {
2070 		/* Legacy PCI line interrupt
2071 		 * Use sc->irq instead of sc->pdev->irq;
2072 		 * platform_device pdev doesn't have an irq field
2073 		 */
2074 		free_irq(sc->irq, sc);
2075 	}
2076 	scn->request_irq_done = false;
2077 }
2078 
2079 static inline
2080 bool hif_pci_default_link_up(struct hif_target_info *tgt_info)
2081 {
2082 	if (ADRASTEA_BU && (tgt_info->target_type != TARGET_TYPE_QCN7605))
2083 		return true;
2084 	else
2085 		return false;
2086 }
2087 /**
2088  * hif_pci_disable_bus(): disable the bus
2089  *
2090  * This function disables the bus
2091  *
2092  * @scn: hif context
2093  *
2094  * Return: none
2095  */
2096 void hif_pci_disable_bus(struct hif_softc *scn)
2097 {
2098 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2099 	struct pci_dev *pdev;
2100 	void __iomem *mem;
2101 	struct hif_target_info *tgt_info = &scn->target_info;
2102 
2103 	/* Attach did not succeed, all resources have been
2104 	 * freed in error handler
2105 	 */
2106 	if (!sc)
2107 		return;
2108 
2109 	pdev = sc->pdev;
2110 	if (hif_pci_default_link_up(tgt_info)) {
2111 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2112 
2113 		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2114 		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
2115 			       HOST_GROUP0_MASK);
2116 	}
2117 
2118 #if defined(CPU_WARM_RESET_WAR)
2119 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2120 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2121 	 * verified for AR9888_REV1
2122 	 */
2123 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2124 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2125 		hif_pci_device_warm_reset(sc);
2126 	else
2127 		hif_pci_device_reset(sc);
2128 #else
2129 	hif_pci_device_reset(sc);
2130 #endif
2131 	mem = (void __iomem *)sc->mem;
2132 	if (mem) {
2133 		hif_dump_pipe_debug_count(scn);
2134 		if (scn->athdiag_procfs_inited) {
2135 			athdiag_procfs_remove();
2136 			scn->athdiag_procfs_inited = false;
2137 		}
2138 		sc->hif_pci_deinit(sc);
2139 		scn->mem = NULL;
2140 	}
2141 	hif_info("X");
2142 }
2143 
2144 #ifdef FEATURE_RUNTIME_PM
2145 /**
2146  * hif_pci_get_rpm_ctx() - Map corresponding hif_runtime_pm_ctx
2147  * @scn: hif context
2148  *
2149  * This function will map and return the corresponding
2150  * hif_runtime_pm_ctx based on pcie interface.
2151  *
2152  * Return: struct hif_runtime_pm_ctx pointer
2153  */
2154 struct hif_runtime_pm_ctx *hif_pci_get_rpm_ctx(struct hif_softc *scn)
2155 {
2156 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2157 
2158 	return &sc->rpm_ctx;
2159 }
2160 
2161 /**
2162  * hif_pci_get_dev() - Map corresponding device structure
2163  * @scn: hif context
2164  *
2165  * This function will map and return the corresponding
2166  * device structure based on pcie interface.
2167  *
2168  * Return: struct device pointer
2169  */
2170 struct device *hif_pci_get_dev(struct hif_softc *scn)
2171 {
2172 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2173 
2174 	return sc->dev;
2175 }
2176 #endif
2177 
2178 #define OL_ATH_PCI_PM_CONTROL 0x44
2179 
2180 #ifdef CONFIG_PLD_PCIE_CNSS
2181 /**
2182  * hif_pci_prevent_linkdown(): allow or permit linkdown
2183  * @flag: true prevents linkdown, false allows
2184  *
2185  * Calls into the platform driver to vote against taking down the
2186  * pcie link.
2187  *
2188  * Return: n/a
2189  */
2190 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2191 {
2192 	int errno;
2193 
2194 	hif_info("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2195 	hif_runtime_prevent_linkdown(scn, flag);
2196 
2197 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2198 	if (errno)
2199 		hif_err("Failed pld_wlan_pm_control; errno %d", errno);
2200 }
2201 #else
2202 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2203 {
2204 }
2205 #endif
2206 
2207 /**
2208  * hif_pci_bus_suspend(): prepare hif for suspend
2209  *
2210  * Return: Errno
2211  */
2212 int hif_pci_bus_suspend(struct hif_softc *scn)
2213 {
2214 	QDF_STATUS ret;
2215 
2216 	hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
2217 
2218 	ret = hif_try_complete_tasks(scn);
2219 	if (QDF_IS_STATUS_ERROR(ret)) {
2220 		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2221 		return -EBUSY;
2222 	}
2223 
2224 	/* Stop the HIF Sleep Timer */
2225 	hif_cancel_deferred_target_sleep(scn);
2226 
2227 	scn->bus_suspended = true;
2228 
2229 	return 0;
2230 }
2231 
2232 #ifdef PCI_LINK_STATUS_SANITY
2233 /**
2234  * __hif_check_link_status() - check whether the PCIe link is active
2235  * @scn: HIF Context
2236  *
2237  * API reads the PCIe config space to verify if PCIe link training is
2238  * successful or not.
2239  *
2240  * Return: Success/Failure
2241  */
2242 static int __hif_check_link_status(struct hif_softc *scn)
2243 {
2244 	uint16_t dev_id = 0;
2245 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2246 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2247 
2248 	if (!sc) {
2249 		hif_err("HIF Bus Context is Invalid");
2250 		return -EINVAL;
2251 	}
2252 
2253 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2254 
2255 	if (dev_id == sc->devid)
2256 		return 0;
2257 
2258 	hif_err("Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2259 	       dev_id);
2260 
2261 	scn->recovery = true;
2262 
2263 	if (cbk && cbk->set_recovery_in_progress)
2264 		cbk->set_recovery_in_progress(cbk->context, true);
2265 	else
2266 		hif_err("Driver Global Recovery is not set");
2267 
2268 	pld_is_pci_link_down(sc->dev);
2269 	return -EACCES;
2270 }
2271 #else
2272 static inline int __hif_check_link_status(struct hif_softc *scn)
2273 {
2274 	return 0;
2275 }
2276 #endif
2277 
2278 
2279 #ifdef HIF_BUS_LOG_INFO
2280 bool hif_log_pcie_info(struct hif_softc *scn, uint8_t *data,
2281 		       unsigned int *offset)
2282 {
2283 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2284 	struct hang_event_bus_info info = {0};
2285 	size_t size;
2286 
2287 	if (!sc) {
2288 		hif_err("HIF Bus Context is Invalid");
2289 		return false;
2290 	}
2291 
2292 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &info.dev_id);
2293 
2294 	size = sizeof(info);
2295 	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_BUS_INFO,
2296 			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);
2297 
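	/* Drop the record if the bus-info TLV would not fit below the FW
	 * section offset of the hang event buffer
	 */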
2298 	if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
2299 		return false;
2300 
2301 	qdf_mem_copy(data + *offset, &info, size);
2302 	*offset = *offset + size;
2303 
2304 	if (info.dev_id == sc->devid)
2305 		return false;
2306 
2307 	qdf_recovery_reason_update(QCA_HANG_BUS_FAILURE);
2308 	qdf_get_bus_reg_dump(scn->qdf_dev->dev, data,
2309 			     (QDF_WLAN_HANG_FW_OFFSET - size));
2310 	return true;
2311 }
2312 #endif
2313 
2314 /**
2315  * hif_pci_bus_resume(): prepare hif for resume
2316  *
2317  * Return: Errno
2318  */
2319 int hif_pci_bus_resume(struct hif_softc *scn)
2320 {
2321 	int errno;
2322 
2323 	scn->bus_suspended = false;
2324 
2325 	errno = __hif_check_link_status(scn);
2326 	if (errno)
2327 		return errno;
2328 
2329 	hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2330 
2331 	return 0;
2332 }
2333 
2334 /**
2335  * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2336  * @scn: hif context
2337  *
2338  * Ensure that if the wakeup message was received before the irq
2339  * was disabled, it is processed before suspending.
2340  *
2341  * Return: -EBUSY if we fail to flush the tasklets.
2342  */
2343 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2344 {
2345 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2346 		qdf_atomic_set(&scn->link_suspended, 1);
2347 
2348 	hif_apps_wake_irq_enable(GET_HIF_OPAQUE_HDL(scn));
2349 
2350 	return 0;
2351 }
2352 
2353 /**
2354  * hif_pci_bus_resume_noirq() - ensure there are no pending transactions
2355  * @scn: hif context
2356  *
2357  * Ensure that if the wakeup message was received before the irq
2358  * was disabled, it is processed before suspending.
2359  *
2360  * Return: -EBUSY if we fail to flush the tasklets.
2361  */
2362 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2363 {
2364 	hif_apps_wake_irq_disable(GET_HIF_OPAQUE_HDL(scn));
2365 
2366 	/* a vote for link up can come in the middle of the ongoing resume
2367 	 * process. hence, clear the link suspend flag once
2368 	 * hif_bus_resume_noirq() succeeds since PCIe link is already resumed
2369 	 * by this time
2370 	 */
2371 	qdf_atomic_set(&scn->link_suspended, 0);
2372 
2373 	return 0;
2374 }
2375 
2376 #if CONFIG_PCIE_64BIT_MSI
2377 static void hif_free_msi_ctx(struct hif_softc *scn)
2378 {
2379 	struct hif_pci_softc *sc = scn->hif_sc;
2380 	struct hif_msi_info *info = &sc->msi_info;
2381 	struct device *dev = scn->qdf_dev->dev;
2382 
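	/* Free the 4-byte DMA-coherent "magic" buffer used for 64-bit
	 * MSI and clear the cached pointers
	 */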
2383 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
2384 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
2385 	info->magic = NULL;
2386 	info->magic_dma = 0;
2387 }
2388 #else
2389 static void hif_free_msi_ctx(struct hif_softc *scn)
2390 {
2391 }
2392 #endif
2393 
2394 void hif_pci_disable_isr(struct hif_softc *scn)
2395 {
2396 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2397 
2398 	hif_exec_kill(&scn->osc);
2399 	hif_nointrs(scn);
2400 	hif_free_msi_ctx(scn);
2401 	/* Cancel the pending tasklet */
2402 	ce_tasklet_kill(scn);
2403 	tasklet_kill(&sc->intr_tq);
2404 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
2405 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
2406 }
2407 
2408 /* Function to reset SoC */
2409 void hif_pci_reset_soc(struct hif_softc *hif_sc)
2410 {
2411 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
2412 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
2413 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
2414 
2415 #if defined(CPU_WARM_RESET_WAR)
2416 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2417 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2418 	 * verified for AR9888_REV1
2419 	 */
2420 	if (tgt_info->target_version == AR9888_REV2_VERSION)
2421 		hif_pci_device_warm_reset(sc);
2422 	else
2423 		hif_pci_device_reset(sc);
2424 #else
2425 	hif_pci_device_reset(sc);
2426 #endif
2427 }
2428 
2429 /**
2430  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
2431  * @sc: HIF PCIe Context
2432  *
2433  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
2434  *
2435  * Return: Failure to caller
2436  */
2437 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
2438 {
2439 	uint16_t val = 0;
2440 	uint32_t bar = 0;
2441 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
2442 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2443 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
2444 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
2445 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2446 	A_target_id_t pci_addr = scn->mem;
2447 
2448 	hif_info("keep_awake_count = %d", hif_state->keep_awake_count);
2449 
2450 	pfrm_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
2451 
2452 	hif_info("PCI Vendor ID = 0x%04x", val);
2453 
2454 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
2455 
2456 	hif_info("PCI Device ID = 0x%04x", val);
2457 
2458 	pfrm_read_config_word(sc->pdev, PCI_COMMAND, &val);
2459 
2460 	hif_info("PCI Command = 0x%04x", val);
2461 
2462 	pfrm_read_config_word(sc->pdev, PCI_STATUS, &val);
2463 
2464 	hif_info("PCI Status = 0x%04x", val);
2465 
2466 	pfrm_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
2467 
2468 	hif_info("PCI BAR 0 = 0x%08x", bar);
2469 
2470 	hif_info("SOC_WAKE_ADDR 0%08x",
2471 		hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
2472 				PCIE_SOC_WAKE_ADDRESS));
2473 
2474 	hif_info("RTC_STATE_ADDR 0x%08x",
2475 		hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
2476 							RTC_STATE_ADDRESS));
2477 
2478 	hif_info("wakeup target");
2479 
2480 	if (!cfg->enable_self_recovery)
2481 		QDF_BUG(0);
2482 
2483 	scn->recovery = true;
2484 
2485 	if (cbk->set_recovery_in_progress)
2486 		cbk->set_recovery_in_progress(cbk->context, true);
2487 
2488 	pld_is_pci_link_down(sc->dev);
2489 	return -EACCES;
2490 }
2491 
2492 /*
2493  * For now, we use simple on-demand sleep/wake.
2494  * Some possible improvements:
2495  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
2496  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
2497  *   Careful, though, these functions may be used by
2498  *  interrupt handlers ("atomic")
2499  *  -Don't use host_reg_table for this code; instead use values directly
2500  *  -Use a separate timer to track activity and allow Target to sleep only
2501  *   if it hasn't done anything for a while; may even want to delay some
2502  *   processing for a short while in order to "batch" (e.g.) transmit
2503  *   requests with completion processing into "windows of up time".  Costs
2504  *   some performance, but improves power utilization.
2505  *  -On some platforms, it might be possible to eliminate explicit
2506  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
2507  *   recover from the failure by forcing the Target awake.
2508  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
2509  *   overhead in some cases. Perhaps this makes more sense when
2510  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
2511  *   disabled.
2512  *  -It is possible to compile this code out and simply force the Target
2513  *   to remain awake.  That would yield optimal performance at the cost of
2514  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
2515  *
2516  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
2517  */
2518 /**
2519  * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
2520  * @scn: hif_softc pointer.
2521  * @sleep_ok: allow the Target to sleep when true
2522  * @wait_for_it: when waking, wait until the Target is verified awake
2523  *
2524  * Adjust the Target sleep state on demand using a keep-awake count
2525  *
2526  * Return: int
2527  */
2528 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
2529 			      bool sleep_ok, bool wait_for_it)
2530 {
2531 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2532 	A_target_id_t pci_addr = scn->mem;
2533 	static int max_delay;
2534 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2535 	static int debug;
2536 	if (scn->recovery)
2537 		return -EACCES;
2538 
2539 	if (qdf_atomic_read(&scn->link_suspended)) {
2540 		hif_err("Invalid access, PCIe link is down");
2541 		debug = true;
2542 		QDF_ASSERT(0);
2543 		return -EACCES;
2544 	}
2545 
2546 	if (debug) {
2547 		wait_for_it = true;
2548 		hif_err("Invalid access, PCIe link is suspended");
2549 		QDF_ASSERT(0);
2550 	}
2551 
2552 	if (sleep_ok) {
2553 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2554 		hif_state->keep_awake_count--;
2555 		if (hif_state->keep_awake_count == 0) {
2556 			/* Allow sleep */
2557 			hif_state->verified_awake = false;
2558 			hif_state->sleep_ticks = qdf_system_ticks();
2559 		}
2560 		if (hif_state->fake_sleep == false) {
2561 			/* Set the Fake Sleep */
2562 			hif_state->fake_sleep = true;
2563 
2564 			/* Start the Sleep Timer */
2565 			qdf_timer_stop(&hif_state->sleep_timer);
2566 			qdf_timer_start(&hif_state->sleep_timer,
2567 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
2568 		}
2569 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2570 	} else {
2571 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2572 
2573 		if (hif_state->fake_sleep) {
2574 			hif_state->verified_awake = true;
2575 		} else {
2576 			if (hif_state->keep_awake_count == 0) {
2577 				/* Force AWAKE */
2578 				hif_write32_mb(sc, pci_addr +
2579 					      PCIE_LOCAL_BASE_ADDRESS +
2580 					      PCIE_SOC_WAKE_ADDRESS,
2581 					      PCIE_SOC_WAKE_V_MASK);
2582 			}
2583 		}
2584 		hif_state->keep_awake_count++;
2585 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2586 
2587 		if (wait_for_it && !hif_state->verified_awake) {
2588 #define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
2589 			int tot_delay = 0;
2590 			int curr_delay = 5;
2591 
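			/* Poll until the Target reports awake, growing the
			 * per-iteration delay from 5 us toward a 50 us cap
			 * within the 8 ms budget above
			 */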
2592 			for (;;) {
2593 				if (hif_targ_is_awake(scn, pci_addr)) {
2594 					hif_state->verified_awake = true;
2595 					break;
2596 				}
2597 				if (!hif_pci_targ_is_present(scn, pci_addr))
2598 					break;
2599 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
2600 					return hif_log_soc_wakeup_timeout(sc);
2601 
2602 				OS_DELAY(curr_delay);
2603 				tot_delay += curr_delay;
2604 
2605 				if (curr_delay < 50)
2606 					curr_delay += 5;
2607 			}
2608 
2609 			/*
2610 			 * NB: If Target has to come out of Deep Sleep,
2611 			 * this may take a few msecs. Typically, though
2612 			 * this delay should be <30us.
2613 			 */
2614 			if (tot_delay > max_delay)
2615 				max_delay = tot_delay;
2616 		}
2617 	}
2618 
2619 	if (debug && hif_state->verified_awake) {
2620 		debug = 0;
2621 		hif_err("INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
2622 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2623 				PCIE_INTR_ENABLE_ADDRESS),
2624 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2625 				PCIE_INTR_CAUSE_ADDRESS),
2626 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2627 				CPU_INTR_ADDRESS),
2628 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2629 				PCIE_INTR_CLR_ADDRESS),
2630 			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
2631 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
2632 	}
2633 
2634 	return 0;
2635 }
2636 
2637 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2638 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
2639 {
2640 	uint32_t value;
2641 	void *addr;
2642 
2643 	addr = scn->mem + offset;
2644 	value = hif_read32_mb(scn, addr);
2645 
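	/* Record this read in the circular PCIe access log */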
2646 	{
2647 		unsigned long irq_flags;
2648 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2649 
2650 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2651 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2652 		pcie_access_log[idx].is_write = false;
2653 		pcie_access_log[idx].addr = addr;
2654 		pcie_access_log[idx].value = value;
2655 		pcie_access_log_seqnum++;
2656 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2657 	}
2658 
2659 	return value;
2660 }
2661 
2662 void
2663 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
2664 {
2665 	void *addr;
2666 
2667 	addr = scn->mem + (offset);
2668 	hif_write32_mb(scn, addr, value);
2669 
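	/* Record this write in the circular PCIe access log */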
2670 	{
2671 		unsigned long irq_flags;
2672 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2673 
2674 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2675 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2676 		pcie_access_log[idx].is_write = true;
2677 		pcie_access_log[idx].addr = addr;
2678 		pcie_access_log[idx].value = value;
2679 		pcie_access_log_seqnum++;
2680 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2681 	}
2682 }
2683 
2684 /**
2685  * hif_target_dump_access_log() - dump access log
2686  *
2687  * dump access log
2688  *
2689  * Return: n/a
2690  */
2691 void hif_target_dump_access_log(void)
2692 {
2693 	int idx, len, start_idx, cur_idx;
2694 	unsigned long irq_flags;
2695 
2696 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2697 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
2698 		len = PCIE_ACCESS_LOG_NUM;
2699 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2700 	} else {
2701 		len = pcie_access_log_seqnum;
2702 		start_idx = 0;
2703 	}
2704 
2705 	for (idx = 0; idx < len; idx++) {
2706 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
2707 		hif_debug("idx:%d sn:%u wr:%d addr:%pK val:%u",
2708 		       idx,
2709 		       pcie_access_log[cur_idx].seqnum,
2710 		       pcie_access_log[cur_idx].is_write,
2711 		       pcie_access_log[cur_idx].addr,
2712 		       pcie_access_log[cur_idx].value);
2713 	}
2714 
2715 	pcie_access_log_seqnum = 0;
2716 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2717 }
2718 #endif
2719 
2720 #ifndef HIF_AHB
2721 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
2722 {
2723 	QDF_BUG(0);
2724 	return -EINVAL;
2725 }
2726 
2727 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
2728 {
2729 	QDF_BUG(0);
2730 	return -EINVAL;
2731 }
2732 #endif
2733 
2734 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
2735 {
2736 	struct ce_tasklet_entry *tasklet_entry = context;
2737 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
2738 }
2739 extern const char *ce_name[];
2740 
2741 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
2742 {
2743 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
2744 
2745 	return pci_scn->ce_msi_irq_num[ce_id];
2746 }
2747 
2748 /* hif_ce_srng_msi_irq_disable() - disable the irq for msi
2749  * @hif_sc: hif context
2750  * @ce_id: which ce to disable copy complete interrupts for
2751  *
2752  * since MSI interrupts are not level based, the system can function
2753  * without disabling these interrupts.  Interrupt mitigation can be
2754  * added here for better system performance.
2755  */
2756 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
2757 {
2758 	pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
2759 				hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2760 }
2761 
2762 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
2763 {
2764 	if (__hif_check_link_status(hif_sc))
2765 		return;
2766 
2767 	pfrm_enable_irq(hif_sc->qdf_dev->dev,
2768 			hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2769 }
2770 
2771 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
2772 {
2773 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2774 }
2775 
2776 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
2777 {
2778 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2779 }
2780 
2781 int hif_ce_msi_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
2782 {
2783 	int ret = 0;
2784 	int irq;
2785 	uint32_t msi_data_start;
2786 	uint32_t msi_data_count;
2787 	unsigned int msi_data;
2788 	int irq_id;
2789 	uint32_t msi_irq_start;
2790 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2791 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
2792 	int pci_slot;
2793 
2794 	if (ce_id >= CE_COUNT_MAX)
2795 		return -EINVAL;
2796 
2797 	/* do ce irq assignments */
2798 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2799 					  &msi_data_count, &msi_data_start,
2800 					  &msi_irq_start);
2801 
2802 	if (ret) {
2803 		hif_err("Failed to get CE msi config");
2804 		return -EINVAL;
2805 	}
2806 
2807 	irq_id = scn->int_assignment->msi_idx[ce_id];
2808 	/* needs to match the ce_id -> irq data mapping
2809 	 * used in the srng parameter configuration
2810 	 */
2811 	pci_slot = hif_get_pci_slot(scn);
2812 	msi_data = irq_id + msi_irq_start;
2813 	irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2814 	hif_debug("%s: (ce_id %d, irq_id %d, msi_data %d, irq %d tasklet %pK)",
2815 		  __func__, ce_id, irq_id, msi_data, irq,
2816 		  &ce_sc->tasklets[ce_id]);
2817 
2818 	/* implies the ce is also initialized */
2819 	if (!ce_sc->tasklets[ce_id].inited)
2820 		goto skip;
2821 
2822 	pci_sc->ce_msi_irq_num[ce_id] = irq;
2823 
2824 	qdf_scnprintf(ce_irqname[pci_slot][ce_id],
2825 		      DP_IRQ_NAME_LEN, "pci%u_wlan_ce_%u",
2826 		      pci_slot, ce_id);
2827 
2828 	ret = pfrm_request_irq(scn->qdf_dev->dev,
2829 			       irq, hif_ce_interrupt_handler, IRQF_SHARED,
2830 			       ce_irqname[pci_slot][ce_id],
2831 			       &ce_sc->tasklets[ce_id]);
2832 	if (ret)
2833 		return -EINVAL;
2834 
2835 skip:
2836 	return ret;
2837 }
2838 
2839 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
2840 {
2841 	int ret;
2842 	int ce_id, irq;
2843 	uint32_t msi_data_start;
2844 	uint32_t msi_data_count;
2845 	uint32_t msi_irq_start;
2846 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2847 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
2848 
2849 	if (!scn->disable_wake_irq) {
2850 		/* do wake irq assignment */
2851 		ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
2852 						  &msi_data_count,
2853 						  &msi_data_start,
2854 						  &msi_irq_start);
2855 		if (ret)
2856 			return ret;
2857 
2858 		scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev,
2859 						msi_irq_start);
2860 		scn->wake_irq_type = HIF_PM_MSI_WAKE;
2861 
2862 		ret = pfrm_request_irq(scn->qdf_dev->dev, scn->wake_irq,
2863 				       hif_wake_interrupt_handler,
2864 				       IRQF_NO_SUSPEND, "wlan_wake_irq", scn);
2865 
2866 		if (ret)
2867 			return ret;
2868 	}
2869 
2870 	/* do ce irq assignments */
2871 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2872 					  &msi_data_count, &msi_data_start,
2873 					  &msi_irq_start);
2874 	if (ret)
2875 		goto free_wake_irq;
2876 
2877 	if (ce_srng_based(scn)) {
2878 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
2879 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
2880 	} else {
2881 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
2882 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
2883 	}
2884 
2885 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
2886 
2887 	/* needs to match the ce_id -> irq data mapping
2888 	 * used in the srng parameter configuration
2889 	 */
2890 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2891 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
2892 			continue;
2893 
2894 		if (host_ce_conf[ce_id].flags & CE_ATTR_INIT_ON_DEMAND)
2895 			continue;
2896 
2897 		ret = hif_ce_msi_configure_irq_by_ceid(scn, ce_id);
2898 		if (ret)
2899 			goto free_irq;
2900 	}
2901 
2902 	return ret;
2903 
2904 free_irq:
2905 	/* the request_irq for the last ce_id failed so skip it. */
2906 	while (ce_id > 0 && ce_id < scn->ce_count) {
2907 		unsigned int msi_data;
2908 
2909 		ce_id--;
2910 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
2911 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2912 		pfrm_free_irq(scn->qdf_dev->dev,
2913 			      irq, &ce_sc->tasklets[ce_id]);
2914 	}
2915 
2916 free_wake_irq:
2917 	if (!scn->disable_wake_irq) {
2918 		pfrm_free_irq(scn->qdf_dev->dev,
2919 			      scn->wake_irq, scn);
2920 		scn->wake_irq = 0;
2921 		scn->wake_irq_type = HIF_PM_INVALID_WAKE;
2922 	}
2923 
2924 	return ret;
2925 }
2926 
2927 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
2928 {
2929 	int i;
2930 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
2931 
2932 	for (i = 0; i < hif_ext_group->numirq; i++)
2933 		pfrm_disable_irq_nosync(scn->qdf_dev->dev,
2934 					hif_ext_group->os_irq[i]);
2935 }
2936 
2937 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
2938 {
2939 	int i;
2940 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
2941 
2942 	for (i = 0; i < hif_ext_group->numirq; i++)
2943 		pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]);
2944 }
2945 
2946 /**
2947  * hif_pci_get_irq_name() - get irqname
2948  * @irq_no: irq number
2949  *
2950  * This function maps an irq number to
2951  * an irq name.
2952  *
2953  * Return: irq name
2954  */
2955 const char *hif_pci_get_irq_name(int irq_no)
2956 {
2957 	return "pci-dummy";
2958 }
2959 
2960 #ifdef HIF_CPU_PERF_AFFINE_MASK
2961 void hif_pci_irq_set_affinity_hint(
2962 	struct hif_exec_context *hif_ext_group)
2963 {
2964 	int i, ret;
2965 	unsigned int cpus;
2966 	bool mask_set = false;
2967 
2968 	for (i = 0; i < hif_ext_group->numirq; i++)
2969 		qdf_cpumask_clear(&hif_ext_group->new_cpu_mask[i]);
2970 
2971 	for (i = 0; i < hif_ext_group->numirq; i++) {
2972 		qdf_for_each_online_cpu(cpus) {
2973 			if (qdf_topology_physical_package_id(cpus) ==
2974 				CPU_CLUSTER_TYPE_PERF) {
2975 				qdf_cpumask_set_cpu(cpus,
2976 						    &hif_ext_group->
2977 						    new_cpu_mask[i]);
2978 				mask_set = true;
2979 			}
2980 		}
2981 	}
2982 	for (i = 0; i < hif_ext_group->numirq; i++) {
2983 		if (mask_set) {
2984 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
2985 						  IRQ_NO_BALANCING, 0);
2986 			ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i],
2987 						       (struct qdf_cpu_mask *)
2988 						       &hif_ext_group->
2989 						       new_cpu_mask[i]);
2990 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
2991 						  0, IRQ_NO_BALANCING);
2992 			if (ret)
2993 				qdf_err("Set affinity %*pbl fails for IRQ %d ",
2994 					qdf_cpumask_pr_args(&hif_ext_group->
2995 							    new_cpu_mask[i]),
2996 					hif_ext_group->os_irq[i]);
2997 			else
2998 				qdf_debug("Set affinity %*pbl for IRQ: %d",
2999 					  qdf_cpumask_pr_args(&hif_ext_group->
3000 							      new_cpu_mask[i]),
3001 					  hif_ext_group->os_irq[i]);
3002 		} else {
3003 			qdf_err("Offline CPU: Set affinity fails for IRQ: %d",
3004 				hif_ext_group->os_irq[i]);
3005 		}
3006 	}
3007 }
3008 
3009 void hif_pci_ce_irq_set_affinity_hint(
3010 	struct hif_softc *scn)
3011 {
3012 	int ret;
3013 	unsigned int cpus;
3014 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3015 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3016 	struct CE_attr *host_ce_conf;
3017 	int ce_id;
3018 	qdf_cpu_mask ce_cpu_mask;
3019 
3020 	host_ce_conf = ce_sc->host_ce_config;
3021 	qdf_cpumask_clear(&ce_cpu_mask);
3022 
3023 	qdf_for_each_online_cpu(cpus) {
3024 		if (qdf_topology_physical_package_id(cpus) ==
3025 			CPU_CLUSTER_TYPE_PERF) {
3026 			qdf_cpumask_set_cpu(cpus,
3027 					    &ce_cpu_mask);
3028 		} else {
3029 			hif_err_rl("Unable to set cpu mask for offline CPU %d",
3030 				   cpus);
3031 		}
3032 	}
3033 	if (qdf_cpumask_empty(&ce_cpu_mask)) {
3034 		hif_err_rl("Empty cpu_mask, unable to set CE IRQ affinity");
3035 		return;
3036 	}
3037 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3038 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3039 			continue;
3040 		qdf_cpumask_clear(&pci_sc->ce_irq_cpu_mask[ce_id]);
3041 		qdf_cpumask_copy(&pci_sc->ce_irq_cpu_mask[ce_id],
3042 				 &ce_cpu_mask);
3043 		qdf_dev_modify_irq_status(pci_sc->ce_msi_irq_num[ce_id],
3044 					  IRQ_NO_BALANCING, 0);
3045 		ret = qdf_dev_set_irq_affinity(
3046 			pci_sc->ce_msi_irq_num[ce_id],
3047 			(struct qdf_cpu_mask *)&pci_sc->ce_irq_cpu_mask[ce_id]);
3048 		qdf_dev_modify_irq_status(pci_sc->ce_msi_irq_num[ce_id],
3049 					  0, IRQ_NO_BALANCING);
3050 		if (ret)
3051 			hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
3052 				   qdf_cpumask_pr_args(
3053 					&pci_sc->ce_irq_cpu_mask[ce_id]),
3054 				   pci_sc->ce_msi_irq_num[ce_id]);
3055 		else
3056 			hif_debug_rl("Set affinity %*pbl for CE IRQ: %d",
3057 				     qdf_cpumask_pr_args(
3058 					&pci_sc->ce_irq_cpu_mask[ce_id]),
3059 				     pci_sc->ce_msi_irq_num[ce_id]);
3060 	}
3061 }
3062 #endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */
3063 
3064 #ifdef HIF_CPU_CLEAR_AFFINITY
3065 void hif_pci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
3066 					   int intr_ctxt_id, int cpu)
3067 {
3068 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3069 	struct hif_exec_context *hif_ext_group;
3070 	int i, ret;
3071 
3072 	if (intr_ctxt_id < hif_state->hif_num_extgroup) {
3073 		hif_ext_group = hif_state->hif_ext_group[intr_ctxt_id];
3074 
3075 		for (i = 0; i < hif_ext_group->numirq; i++) {
3076 			qdf_cpumask_setall(&hif_ext_group->new_cpu_mask[i]);
3077 			qdf_cpumask_clear_cpu(cpu,
3078 					      &hif_ext_group->new_cpu_mask[i]);
3079 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3080 						  IRQ_NO_BALANCING, 0);
3081 			ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i],
3082 						       (struct qdf_cpu_mask *)
3083 						       &hif_ext_group->
3084 						       new_cpu_mask[i]);
3085 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3086 						  0, IRQ_NO_BALANCING);
3087 			if (ret)
3088 				hif_err("Set affinity %*pbl fails for IRQ %d ",
3089 					qdf_cpumask_pr_args(&hif_ext_group->
3090 							    new_cpu_mask[i]),
3091 					hif_ext_group->os_irq[i]);
3092 			else
3093 				hif_debug("Set affinity %*pbl for IRQ: %d",
3094 					  qdf_cpumask_pr_args(&hif_ext_group->
3095 							      new_cpu_mask[i]),
3096 					  hif_ext_group->os_irq[i]);
3097 		}
3098 	}
3099 }
3100 #endif
3101 
3102 void hif_pci_config_irq_affinity(struct hif_softc *scn)
3103 {
3104 	int i;
3105 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3106 	struct hif_exec_context *hif_ext_group;
3107 
3108 	hif_core_ctl_set_boost(true);
3109 	/* Set IRQ affinity for WLAN DP interrupts */
3110 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3111 		hif_ext_group = hif_state->hif_ext_group[i];
3112 		hif_pci_irq_set_affinity_hint(hif_ext_group);
3113 	}
3114 	/* Set IRQ affinity for CE interrupts */
3115 	hif_pci_ce_irq_set_affinity_hint(scn);
3116 }
3117 
3118 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3119 			      struct hif_exec_context *hif_ext_group)
3120 {
3121 	int ret = 0;
3122 	int irq = 0;
3123 	int j;
3124 	int pci_slot;
3125 
3126 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3127 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3128 	hif_ext_group->irq_name = &hif_pci_get_irq_name;
3129 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3130 
3131 	pci_slot = hif_get_pci_slot(scn);
3132 	for (j = 0; j < hif_ext_group->numirq; j++) {
3133 		irq = hif_ext_group->irq[j];
3134 		if (scn->irq_unlazy_disable)
3135 			qdf_dev_set_irq_status_flags(irq,
3136 						     QDF_IRQ_DISABLE_UNLAZY);
3137 
3138 		hif_debug("request_irq = %d for grp %d",
3139 			  irq, hif_ext_group->grp_id);
3140 
3141 		qdf_scnprintf(dp_irqname[pci_slot][hif_ext_group->grp_id],
3142 			      DP_IRQ_NAME_LEN, "pci%u_wlan_grp_dp_%u",
3143 			      pci_slot, hif_ext_group->grp_id);
3144 		ret = pfrm_request_irq(
3145 				scn->qdf_dev->dev, irq,
3146 				hif_ext_group_interrupt_handler,
3147 				IRQF_SHARED | IRQF_NO_SUSPEND,
3148 				dp_irqname[pci_slot][hif_ext_group->grp_id],
3149 				hif_ext_group);
3150 		if (ret) {
3151 			hif_err("request_irq failed ret = %d", ret);
3152 			return -EFAULT;
3153 		}
3154 		hif_ext_group->os_irq[j] = irq;
3155 	}
3156 	hif_ext_group->irq_requested = true;
3157 	return 0;
3158 }
3159 
3160 #if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
3161 	defined(QCA_WIFI_WCN7850))
3162 uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
3163 			    uint32_t offset)
3164 {
3165 	return hal_read32_mb(hif_sc->hal_soc, offset);
3166 }
3167 
3168 void hif_pci_reg_write32(struct hif_softc *hif_sc,
3169 			 uint32_t offset,
3170 			 uint32_t value)
3171 {
3172 	hal_write32_mb(hif_sc->hal_soc, offset, value);
3173 }
3174 #else
3175 /* TODO: Need to implement other chips carefully */
3176 uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
3177 			    uint32_t offset)
3178 {
3179 	return 0;
3180 }
3181 
3182 void hif_pci_reg_write32(struct hif_softc *hif_sc,
3183 			 uint32_t offset,
3184 			 uint32_t value)
3185 {
3186 }
3187 #endif
3188 
3189 /**
3190  * hif_configure_irq() - configure interrupt
3191  *
3192  * This function configures interrupt(s)
3193  *
3194  * @scn: hif context
3195  *
3197  * Return: 0 - for success
3198  */
3199 int hif_configure_irq(struct hif_softc *scn)
3200 {
3201 	int ret = 0;
3202 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3203 
3204 	hif_info("E");
3205 
3206 	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
3207 		scn->request_irq_done = false;
3208 		return 0;
3209 	}
3210 
3211 	hif_init_reschedule_tasklet_work(sc);
3212 
3213 	ret = hif_ce_msi_configure_irq(scn);
3214 	if (ret == 0)
3215 		goto end;
3217 
3218 	switch (scn->target_info.target_type) {
3219 	case TARGET_TYPE_IPQ4019:
3220 		ret = hif_ahb_configure_legacy_irq(sc);
3221 		break;
3222 	case TARGET_TYPE_QCA8074:
3223 	case TARGET_TYPE_QCA8074V2:
3224 	case TARGET_TYPE_QCA6018:
3225 	case TARGET_TYPE_QCA5018:
3226 	case TARGET_TYPE_QCA9574:
3227 		ret = hif_ahb_configure_irq(sc);
3228 		break;
3229 	default:
3230 		ret = hif_pci_configure_legacy_irq(sc);
3231 		break;
3232 	}
3233 	if (ret < 0) {
3234 		hif_err("hif_pci_configure_legacy_irq error = %d", ret);
3235 		return ret;
3236 	}
3237 end:
3238 	scn->request_irq_done = true;
3239 	return 0;
3240 }
3241 
3242 /**
3243  * hif_trigger_timer_irq() : Triggers interrupt on LF_Timer 0
3244  * @scn: hif control structure
3245  *
3246  * Sets the IRQ bit in the LF Timer Status Address to wake peregrine/swift
3247  * when it is stuck in a polling loop in pcie_address_config in FW
3248  *
3249  * Return: none
3250  */
3251 static void hif_trigger_timer_irq(struct hif_softc *scn)
3252 {
3253 	int tmp;
3254 	/* Trigger IRQ on Peregrine/Swift by setting
3255 	 * IRQ Bit of LF_TIMER 0
3256 	 */
3257 	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3258 						SOC_LF_TIMER_STATUS0_ADDRESS));
3259 	/* Set Raw IRQ Bit */
3260 	tmp |= 1;
3261 	/* SOC_LF_TIMER_STATUS0 */
3262 	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3263 		       SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
3264 }
3265 
3266 /**
3267  * hif_target_sync() : ensure the target is ready
3268  * @scn: hif control structure
3269  *
3270  * Informs fw that we plan to use legacy interrupts so that
3271  * it can begin booting. Ensures that the fw finishes booting
3272  * before continuing. Should be called before trying to write
3273  * to the target's other registers for the first time.
3274  *
3275  * Return: none
3276  */
3277 static void hif_target_sync(struct hif_softc *scn)
3278 {
3279 	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3280 			    PCIE_INTR_ENABLE_ADDRESS),
3281 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3282 	/* read to flush pcie write */
3283 	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3284 			PCIE_INTR_ENABLE_ADDRESS));
3285 
3286 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3287 			PCIE_SOC_WAKE_ADDRESS,
3288 			PCIE_SOC_WAKE_V_MASK);
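	/* Busy-wait (no timeout) until the SoC acknowledges the wake */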
3289 	while (!hif_targ_is_awake(scn, scn->mem))
3290 		;
3291 
3292 	if (HAS_FW_INDICATOR) {
3293 		int wait_limit = 500;
3294 		int fw_ind = 0;
3295 		int retry_count = 0;
3296 		uint32_t target_type = scn->target_info.target_type;
3297 fw_retry:
3298 		hif_info("Loop checking FW signal");
3299 		while (1) {
3300 			fw_ind = hif_read32_mb(scn, scn->mem +
3301 					FW_INDICATOR_ADDRESS);
3302 			if (fw_ind & FW_IND_INITIALIZED)
3303 				break;
3304 			if (wait_limit-- < 0)
3305 				break;
3306 			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3307 			    PCIE_INTR_ENABLE_ADDRESS),
3308 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3309 			    /* read to flush pcie write */
3310 			(void)hif_read32_mb(scn, scn->mem +
3311 			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
3312 
3313 			qdf_mdelay(10);
3314 		}
3315 		if (wait_limit < 0) {
3316 			if (target_type == TARGET_TYPE_AR9888 &&
3317 			    retry_count++ < 2) {
3318 				hif_trigger_timer_irq(scn);
3319 				wait_limit = 500;
3320 				goto fw_retry;
3321 			}
3322 			hif_info("FW signal timed out");
3323 			qdf_assert_always(0);
3324 		} else {
3325 			hif_info("Got FW signal, retries = %x", 500-wait_limit);
3326 		}
3327 	}
3328 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3329 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3330 }
3331 
3332 static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
3333 				     struct device *dev)
3334 {
3335 	struct pld_soc_info info;
3336 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
3337 
3338 	pld_get_soc_info(dev, &info);
3339 	sc->mem = info.v_addr;
3340 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3341 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3342 	sc->device_version.family_number = info.device_version.family_number;
3343 	sc->device_version.device_number = info.device_version.device_number;
3344 	sc->device_version.major_version = info.device_version.major_version;
3345 	sc->device_version.minor_version = info.device_version.minor_version;
3346 
3347 	hif_info("%s: fam num %u dev ver %u maj ver %u min ver %u\n", __func__,
3348 		 sc->device_version.family_number,
3349 		 sc->device_version.device_number,
3350 		 sc->device_version.major_version,
3351 		 sc->device_version.minor_version);
3352 
3353 	/* dev_mem_info[0] is for CMEM */
3354 	scn->cmem_start = info.dev_mem_info[0].start;
3355 	scn->cmem_size = info.dev_mem_info[0].size;
3356 	scn->target_info.target_version = info.soc_id;
3357 	scn->target_info.target_revision = 0;
3358 }
3359 
3360 static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
3361 				       struct device *dev)
3362 {}
3363 
3364 static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
3365 				    int device_id)
3366 {
3367 	if (!pld_have_platform_driver_support(sc->dev))
3368 		return false;
3369 
3370 	switch (device_id) {
3371 	case QCA6290_DEVICE_ID:
3372 	case QCN9000_DEVICE_ID:
3373 	case QCN9224_DEVICE_ID:
3374 	case QCA6290_EMULATION_DEVICE_ID:
3375 	case QCA6390_DEVICE_ID:
3376 	case QCA6490_DEVICE_ID:
3377 	case AR6320_DEVICE_ID:
3378 	case QCN7605_DEVICE_ID:
3379 	case WCN7850_DEVICE_ID:
3380 		return true;
3381 	}
3382 	return false;
3383 }
3384 
3385 static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
3386 					   int device_id)
3387 {
3388 	if (hif_is_pld_based_target(sc, device_id)) {
3389 		sc->hif_enable_pci = hif_enable_pci_pld;
3390 		sc->hif_pci_deinit = hif_pci_deinit_pld;
3391 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
3392 	} else {
3393 		sc->hif_enable_pci = hif_enable_pci_nopld;
3394 		sc->hif_pci_deinit = hif_pci_deinit_nopld;
3395 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
3396 	}
3397 }
3398 
3399 #ifdef HIF_REG_WINDOW_SUPPORT
3400 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3401 					       u32 target_type)
3402 {
3403 	switch (target_type) {
3404 	case TARGET_TYPE_QCN7605:
3405 	case TARGET_TYPE_QCA6490:
3406 	case TARGET_TYPE_QCA6390:
3407 		sc->use_register_windowing = true;
3408 		qdf_spinlock_create(&sc->register_access_lock);
3409 		sc->register_window = 0;
3410 		break;
3411 	default:
3412 		sc->use_register_windowing = false;
3413 	}
3414 }
3415 #else
3416 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3417 					       u32 target_type)
3418 {
3419 	sc->use_register_windowing = false;
3420 }
3421 #endif
3422 
3423 /**
3424  * hif_pci_enable_bus(): enable bus
3425  *
3426  * This function enables the bus
3427  *
3428  * @ol_sc: soft_sc struct
3429  * @dev: device pointer
3430  * @bdev: bus dev pointer
3431  * @bid: bus id pointer
3432  * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
3433  * Return: QDF_STATUS
3434  */
3435 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3436 			  struct device *dev, void *bdev,
3437 			  const struct hif_bus_id *bid,
3438 			  enum hif_enable_type type)
3439 {
3440 	int ret = 0;
3441 	uint32_t hif_type;
3442 	uint32_t target_type = TARGET_TYPE_UNKNOWN;
3443 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3444 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3445 	uint16_t revision_id = 0;
3446 	int probe_again = 0;
3447 	struct pci_dev *pdev = bdev;
3448 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3449 	struct hif_target_info *tgt_info;
3450 
3451 	if (!ol_sc) {
3452 		hif_err("hif_ctx is NULL");
3453 		return QDF_STATUS_E_NOMEM;
3454 	}
3455 	/* Following print is used by various tools to identify
3456 	 * WLAN SOC (e.g. crash dump analysis and reporting tool).
3457 	 */
3458 	hif_info("con_mode = 0x%x, WLAN_SOC_device_id = 0x%x",
3459 		 hif_get_conparam(ol_sc), id->device);
3460 
3461 	sc->pdev = pdev;
3462 	sc->dev = &pdev->dev;
3463 	sc->devid = id->device;
3464 	sc->cacheline_sz = dma_get_cache_alignment();
3465 	tgt_info = hif_get_target_info_handle(hif_hdl);
3466 	hif_pci_init_deinit_ops_attach(sc, id->device);
3467 	sc->hif_pci_get_soc_info(sc, dev);
3468 again:
3469 	ret = sc->hif_enable_pci(sc, pdev, id);
3470 	if (ret < 0) {
3471 		hif_err("hif_enable_pci error = %d", ret);
3472 		goto err_enable_pci;
3473 	}
3474 	hif_info("hif_enable_pci done");
3475 
3476 	/* Temporary FIX: disable ASPM on peregrine.
3477 	 * Will be removed after the OTP is programmed
3478 	 */
3479 	hif_disable_power_gating(hif_hdl);
3480 
3481 	device_disable_async_suspend(&pdev->dev);
3482 	pfrm_read_config_word(pdev, 0x08, &revision_id);
3483 
3484 	ret = hif_get_device_type(id->device, revision_id,
3485 						&hif_type, &target_type);
3486 	if (ret < 0) {
3487 		hif_err("Invalid device id/revision_id");
3488 		goto err_tgtstate;
3489 	}
3490 	hif_info("hif_type = 0x%x, target_type = 0x%x",
3491 		hif_type, target_type);
3492 
3493 	hif_register_tbl_attach(ol_sc, hif_type);
3494 	hif_target_register_tbl_attach(ol_sc, target_type);
3495 
3496 	hif_pci_init_reg_windowing_support(sc, target_type);
3497 
3498 	tgt_info->target_type = target_type;
3499 
3500 	/*
3501 	 * Disable unlazy interrupt registration for QCN9000/QCN9224
3502 	 */
3503 	if (target_type == TARGET_TYPE_QCN9000 ||
3504 	    target_type == TARGET_TYPE_QCN9224)
3505 		ol_sc->irq_unlazy_disable = 1;
3506 
3507 	if (ce_srng_based(ol_sc)) {
3508 		hif_info("Skip tgt_wake up for srng devices");
3509 	} else {
3510 		ret = hif_pci_probe_tgt_wakeup(sc);
3511 		if (ret < 0) {
3512 			hif_err("hif_pci_prob_wakeup error = %d", ret);
3513 			if (ret == -EAGAIN)
3514 				probe_again++;
3515 			goto err_tgtstate;
3516 		}
3517 		hif_info("hif_pci_probe_tgt_wakeup done");
3518 	}
3519 
3520 	if (!ol_sc->mem_pa) {
3521 		hif_err("BAR0 uninitialized");
3522 		ret = -EIO;
3523 		goto err_tgtstate;
3524 	}
3525 
3526 	if (!ce_srng_based(ol_sc)) {
3527 		hif_target_sync(ol_sc);
3528 
3529 		if (hif_pci_default_link_up(tgt_info))
3530 			hif_vote_link_up(hif_hdl);
3531 	}
3532 
3533 	return QDF_STATUS_SUCCESS;
3534 
3535 err_tgtstate:
3536 	hif_disable_pci(sc);
3537 	sc->pci_enabled = false;
3538 	hif_err("hif_disable_pci done");
3539 	return QDF_STATUS_E_ABORTED;
3540 
3541 err_enable_pci:
3542 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3543 		int delay_time;
3544 
3545 		hif_info("pci reprobe");
3546 		/* 100, 100, 100, 160, 250, ... (ms) */
3547 		delay_time = max(100, 10 * (probe_again * probe_again));
3548 		qdf_mdelay(delay_time);
3549 		goto again;
3550 	}
3551 	return qdf_status_from_os_return(ret);
3552 }
3553 
3554 /**
3555  * hif_pci_irq_enable() - ce_irq_enable
3556  * @scn: hif_softc
3557  * @ce_id: ce_id
3558  *
3559  * Return: void
3560  */
3561 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3562 {
3563 	uint32_t tmp = 1 << ce_id;
3564 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3565 
3566 	qdf_spin_lock_irqsave(&sc->irq_lock);
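	/* Clear this CE's bit; re-enable the legacy interrupt line only
	 * once no CE interrupts remain outstanding
	 */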
3567 	scn->ce_irq_summary &= ~tmp;
3568 	if (scn->ce_irq_summary == 0) {
3569 		/* Enable Legacy PCI line interrupts */
3570 		if (LEGACY_INTERRUPTS(sc) &&
3571 			(scn->target_status != TARGET_STATUS_RESET) &&
3572 			(!qdf_atomic_read(&scn->link_suspended))) {
3573 
3574 			hif_write32_mb(scn, scn->mem +
3575 				(SOC_CORE_BASE_ADDRESS |
3576 				PCIE_INTR_ENABLE_ADDRESS),
3577 				HOST_GROUP0_MASK);
3578 
3579 			hif_read32_mb(scn, scn->mem +
3580 					(SOC_CORE_BASE_ADDRESS |
3581 					PCIE_INTR_ENABLE_ADDRESS));
3582 		}
3583 	}
3584 	if (scn->hif_init_done == true)
3585 		Q_TARGET_ACCESS_END(scn);
3586 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
3587 
3588 	/* check for missed firmware crash */
3589 	hif_fw_interrupt_handler(0, scn);
3590 }
3591 
3592 /**
3593  * hif_pci_irq_disable() - ce_irq_disable
3594  * @scn: hif_softc
3595  * @ce_id: ce_id
3596  *
3597  * only applicable to legacy copy engine...
3598  *
3599  * Return: void
3600  */
3601 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3602 {
3603 	/* For Rome only need to wake up target */
3604 	/* target access is maintained until interrupts are re-enabled */
3605 	Q_TARGET_ACCESS_BEGIN(scn);
3606 }
3607 
3608 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3609 {
3610 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3611 
3612 	/* legacy case only has one irq */
3613 	return pci_scn->irq;
3614 }
3615 
3616 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
3617 {
3618 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3619 	struct hif_target_info *tgt_info;
3620 
3621 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
3622 
3623 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
3624 	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
3625 	    tgt_info->target_type == TARGET_TYPE_QCA6490 ||
3626 	    tgt_info->target_type == TARGET_TYPE_QCN7605 ||
3627 	    tgt_info->target_type == TARGET_TYPE_QCA8074 ||
3628 	    tgt_info->target_type == TARGET_TYPE_WCN7850) {
3629 		/*
3630 		 * Need to consider offset's memtype for QCA6290/QCA8074,
3631 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
3632 		 * well initialized/defined.
3633 		 */
3634 		return 0;
3635 	}
3636 
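	/* Accept the offset if it falls within Target DRAM or within the
	 * mapped BAR length
	 */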
	if ((offset >= DRAM_BASE_ADDRESS &&
	     offset <= DRAM_BASE_ADDRESS + DRAM_SIZE) ||
	    (offset + sizeof(unsigned int) <= sc->mem_len)) {
		return 0;
	}
3641 
3642 	hif_info("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)",
3643 		offset, (uint32_t)(offset + sizeof(unsigned int)),
3644 		sc->mem_len);
3645 
3646 	return -EINVAL;
3647 }
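
/*
 * Illustrative sketch only, not a driver API: a diagnostic access is
 * expected to consult hif_pci_addr_in_boundary() before dereferencing
 * the mapped BAR. hif_example_diag_read32() is a hypothetical helper.
 */
static inline int hif_example_diag_read32(struct hif_softc *scn,
					  uint32_t offset, uint32_t *value)
{
	/* reject offsets outside the BAR / DRAM window */
	if (hif_pci_addr_in_boundary(scn, offset) < 0)
		return -EINVAL;

	*value = hif_read32_mb(scn, scn->mem + offset);
	return 0;
}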
3648 
/**
 * hif_pci_needs_bmi() - check whether the SoC needs BMI through the driver
 * @scn: hif context
 *
 * Return: true if the SoC needs driver-based BMI, otherwise false
 */
3655 bool hif_pci_needs_bmi(struct hif_softc *scn)
3656 {
3657 	return !ce_srng_based(scn);
3658 }
3659 
3660 #ifdef FORCE_WAKE
3661 #ifdef DEVICE_FORCE_WAKE_ENABLE
3662 
/**
 * HIF_POLL_UMAC_WAKE - scratch register value indicating that the UMAC
 * is powered up. Update this macro if the FW-defined value changes.
 */
3667 #define HIF_POLL_UMAC_WAKE 0x2
3668 
/**
 * hif_force_wake_request() - run the force wake recipe
 * @hif_handle: HIF handle
 *
 * Bring MHI to the M0 state and force wake the UMAC by asserting the
 * SOC wake register, then poll the scratch register until it reads
 * HIF_POLL_UMAC_WAKE. The polled value may be 0x1 while the UMAC is
 * still powered down.
 *
 * Return: 0 if the handshake succeeds, -EINVAL if the wake request
 * cannot be sent, or -ETIMEDOUT if the scratch register never reaches
 * HIF_POLL_UMAC_WAKE
 */
3680 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
3681 {
3682 	uint32_t timeout, value;
3683 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
3684 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3685 
3686 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
3687 
3688 	if (qdf_in_interrupt())
3689 		timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
3690 	else
3691 		timeout = 0;
3692 
3693 	if (pld_force_wake_request_sync(scn->qdf_dev->dev, timeout)) {
3694 		hif_err("force wake request send failed");
3695 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
3696 		return -EINVAL;
3697 	}
3698 
	/* If the device's M1 state-change event races with this check, it
	 * can be ignored, as the device is expected to move immediately
	 * from M2 to M0 without entering a low power state.
	 */
3703 	if (!pld_is_device_awake(scn->qdf_dev->dev))
3704 		hif_info("state-change event races, ignore");
3705 
3706 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
3707 	hif_write32_mb(scn, scn->mem + PCIE_REG_WAKE_UMAC_OFFSET, 1);
3708 	HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1);
	/*
	 * Reuse 'timeout' as the elapsed-time counter for the scratch
	 * register poll below; the overall budget must satisfy
	 * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME < 50 ms.
	 */
3713 	timeout = 0;
3714 	do {
3715 		value = hif_read32_mb(
3716 				scn, scn->mem +
3717 				PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG);
3718 		hif_info("pcie scratch reg read value = %x", value);
3719 		if (value == HIF_POLL_UMAC_WAKE)
3720 			break;
3721 		qdf_mdelay(FORCE_WAKE_DELAY_MS);
3722 		timeout += FORCE_WAKE_DELAY_MS;
3723 	} while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS);
3724 
3725 	if (value != HIF_POLL_UMAC_WAKE) {
		hif_err("force wake handshake failed");
3727 		HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1);
3728 		return -ETIMEDOUT;
3729 	}
3730 
3731 	HIF_STATS_INC(pci_scn, soc_force_wake_success, 1);
3732 	return 0;
3733 }
3734 
3735 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
3736 {
3737 	int ret;
3738 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
3739 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3740 
3741 	ret = pld_force_wake_release(scn->qdf_dev->dev);
3742 	if (ret) {
3743 		hif_err("force wake release failure");
3744 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
3745 		return ret;
3746 	}
3747 
3748 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
3749 	hif_write32_mb(scn, scn->mem + PCIE_REG_WAKE_UMAC_OFFSET, 0);
3750 	HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1);
3751 	return 0;
3752 }
3753 
3754 #else /* DEVICE_FORCE_WAKE_ENABLE */
/**
 * hif_force_wake_request() - force wake without the UMAC handshake
 * @hif_handle: HIF handle
 *
 * When DEVICE_FORCE_WAKE_ENABLE is not set, skip the PCIe scratch
 * register write/read handshake and only request MHI force wake.
 *
 * Return: 0 on success, -EINVAL if the wake request cannot be sent
 */
3760 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
3761 {
3762 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
3763 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3764 	uint32_t timeout;
3765 
3766 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
3767 
3768 	if (qdf_in_interrupt())
3769 		timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
3770 	else
3771 		timeout = 0;
3772 
3773 	if (pld_force_wake_request_sync(scn->qdf_dev->dev, timeout)) {
3774 		hif_err("force wake request send failed");
3775 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
3776 		return -EINVAL;
3777 	}
3778 
	/* If the device's M1 state-change event races with this check, it
	 * can be ignored, as the device is expected to move immediately
	 * from M2 to M0 without entering a low power state.
	 */
3783 	if (!pld_is_device_awake(scn->qdf_dev->dev))
3784 		hif_info("state-change event races, ignore");
3785 
3786 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
3787 
3788 	return 0;
3789 }
3790 
3791 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
3792 {
3793 	int ret;
3794 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
3795 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3796 
3797 	ret = pld_force_wake_release(scn->qdf_dev->dev);
3798 	if (ret) {
3799 		hif_err("force wake release failure");
3800 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
3801 		return ret;
3802 	}
3803 
3804 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
3805 	return 0;
3806 }
3807 #endif /* DEVICE_FORCE_WAKE_ENABLE */
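
/*
 * Illustrative sketch only, not a driver API: callers are expected to
 * bracket target register accesses with the request/release pair above
 * so the UMAC stays powered for the duration. hif_example_awake_read()
 * is a hypothetical caller.
 */
static inline uint32_t hif_example_awake_read(struct hif_opaque_softc *hif,
					      uint32_t offset)
{
	struct hif_softc *scn = (struct hif_softc *)hif;
	uint32_t val;

	/* wake MHI (and the UMAC when DEVICE_FORCE_WAKE_ENABLE is set) */
	if (hif_force_wake_request(hif))
		return 0;

	val = hif_read32_mb(scn, scn->mem + offset);

	/* drop the wake votes so the link may re-enter low power */
	hif_force_wake_release(hif);

	return val;
}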
3808 
3809 void hif_print_pci_stats(struct hif_pci_softc *pci_handle)
3810 {
3811 	hif_debug("mhi_force_wake_request_vote: %d",
3812 		  pci_handle->stats.mhi_force_wake_request_vote);
3813 	hif_debug("mhi_force_wake_failure: %d",
3814 		  pci_handle->stats.mhi_force_wake_failure);
3815 	hif_debug("mhi_force_wake_success: %d",
3816 		  pci_handle->stats.mhi_force_wake_success);
3817 	hif_debug("soc_force_wake_register_write_success: %d",
3818 		  pci_handle->stats.soc_force_wake_register_write_success);
3819 	hif_debug("soc_force_wake_failure: %d",
3820 		  pci_handle->stats.soc_force_wake_failure);
3821 	hif_debug("soc_force_wake_success: %d",
3822 		  pci_handle->stats.soc_force_wake_success);
3823 	hif_debug("mhi_force_wake_release_failure: %d",
3824 		  pci_handle->stats.mhi_force_wake_release_failure);
3825 	hif_debug("mhi_force_wake_release_success: %d",
3826 		  pci_handle->stats.mhi_force_wake_release_success);
	hif_debug("soc_force_wake_release_success: %d",
3828 		  pci_handle->stats.soc_force_wake_release_success);
3829 }
3830 #endif /* FORCE_WAKE */
3831 
3832 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
3833 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
3834 {
3835 	return pld_prevent_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
3836 }
3837 
3838 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
3839 {
3840 	pld_allow_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
3841 }
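
/*
 * Illustrative sketch only, not a driver API: a delayed register write
 * flush is expected to hold off PCIe L1 with the prevent/allow pair
 * above so the link cannot drop to low power mid-write.
 * hif_example_flush_writes() is a hypothetical caller.
 */
static inline void hif_example_flush_writes(struct hif_opaque_softc *hif)
{
	/* vote against L1 before touching the hardware */
	if (hif_prevent_link_low_power_states(hif))
		return;

	/* ... drain the delayed register write queue here ... */

	/* drop the vote; the link may now re-enter L1 */
	hif_allow_link_low_power_states(hif);
}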
3842 #endif
3843