xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/pcie/if_pci.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <linux/pci.h>
20 #include <linux/slab.h>
21 #include <linux/interrupt.h>
22 #include <linux/if_arp.h>
23 #include <linux/of_pci.h>
24 #ifdef CONFIG_PCI_MSM
25 #include <linux/msm_pcie.h>
26 #endif
27 #include <linux/version.h>
28 #include "hif_io32.h"
29 #include "if_pci.h"
30 #include "hif.h"
31 #include "target_type.h"
32 #include "hif_main.h"
33 #include "ce_main.h"
34 #include "ce_api.h"
35 #include "ce_internal.h"
36 #include "ce_reg.h"
37 #include "ce_bmi.h"
38 #include "regtable.h"
39 #include "hif_hw_version.h"
40 #include <linux/debugfs.h>
41 #include <linux/seq_file.h>
42 #include "qdf_status.h"
43 #include "qdf_atomic.h"
44 #include "qdf_platform.h"
45 #include "pld_common.h"
46 #include "mp_dev.h"
47 #include "hif_debug.h"
48 
49 #if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490))
50 #include "hal_api.h"
51 #endif
52 
53 #include "if_pci_internal.h"
54 #include "ce_tasklet.h"
55 #include "targaddrs.h"
56 #include "hif_exec.h"
57 
58 #include "pci_api.h"
59 #include "ahb_api.h"
60 #include "wlan_cfg.h"
61 #include "qdf_hang_event_notifier.h"
62 #include "qdf_platform.h"
63 
64 /* Maximum ms timeout for host to wake up target */
65 #define PCIE_WAKE_TIMEOUT 1000
66 #define RAMDUMP_EVENT_TIMEOUT 2500
67 
68 /* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
69  * PCIe data bus error
70  * As workaround for this issue - changing the reset sequence to
71  * use TargetCPU warm reset * instead of SOC_GLOBAL_RESET
72  */
73 #define CPU_WARM_RESET_WAR
74 #define WLAN_CFG_MAX_PCIE_GROUPS 2
75 #define WLAN_CFG_MAX_CE_COUNT 12
76 
77 const char *dp_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_INT_NUM_CONTEXTS] = {
78 {
79 "pci0_wlan_grp_dp_0",
80 "pci0_wlan_grp_dp_1",
81 "pci0_wlan_grp_dp_2",
82 "pci0_wlan_grp_dp_3",
83 "pci0_wlan_grp_dp_4",
84 "pci0_wlan_grp_dp_5",
85 "pci0_wlan_grp_dp_6",
86 #if !defined(WLAN_MAX_PDEVS)
87 "pci0_wlan_grp_dp_7",
88 "pci0_wlan_grp_dp_8",
89 "pci0_wlan_grp_dp_9",
90 "pci0_wlan_grp_dp_10",
91 #endif
92 },
93 {
94 "pci1_wlan_grp_dp_0",
95 "pci1_wlan_grp_dp_1",
96 "pci1_wlan_grp_dp_2",
97 "pci1_wlan_grp_dp_3",
98 "pci1_wlan_grp_dp_4",
99 "pci1_wlan_grp_dp_5",
100 "pci1_wlan_grp_dp_6",
101 #if !defined(WLAN_MAX_PDEVS)
102 "pci1_wlan_grp_dp_7",
103 "pci1_wlan_grp_dp_8",
104 "pci1_wlan_grp_dp_9",
105 "pci1_wlan_grp_dp_10",
106 #endif
107 }
108 };
109 
110 const char *ce_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_MAX_CE_COUNT] = {
111 {
112 "pci0_wlan_ce_0",
113 "pci0_wlan_ce_1",
114 "pci0_wlan_ce_2",
115 "pci0_wlan_ce_3",
116 "pci0_wlan_ce_4",
117 "pci0_wlan_ce_5",
118 "pci0_wlan_ce_6",
119 "pci0_wlan_ce_7",
120 "pci0_wlan_ce_8",
121 "pci0_wlan_ce_9",
122 "pci0_wlan_ce_10",
123 "pci0_wlan_ce_11",
124 },
125 {
126 "pci1_wlan_ce_0",
127 "pci1_wlan_ce_1",
128 "pci1_wlan_ce_2",
129 "pci1_wlan_ce_3",
130 "pci1_wlan_ce_4",
131 "pci1_wlan_ce_5",
132 "pci1_wlan_ce_6",
133 "pci1_wlan_ce_7",
134 "pci1_wlan_ce_8",
135 "pci1_wlan_ce_9",
136 "pci1_wlan_ce_10",
137 "pci1_wlan_ce_11",
138 }
139 };
140 
141 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
142 static inline int hif_get_pci_slot(struct hif_softc *scn)
143 {
144 	/*
145 	 * If WLAN_MAX_PDEVS is defined as 1, always return pci slot 0
146 	 * since there is only one pci device attached.
147 	 */
148 	return 0;
149 }
150 #else
151 static inline int hif_get_pci_slot(struct hif_softc *scn)
152 {
153 	uint32_t pci_id;
154 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
155 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
156 	uint32_t target_type = tgt_info->target_type;
157 	struct device_node *mhi_node;
158 	struct device_node *pcierp_node;
159 	struct device_node *pcie_node;
160 
161 	switch (target_type) {
162 	case TARGET_TYPE_QCN9000:
163 		/* of_node stored in qdf_dev points to the mhi node */
164 		mhi_node = scn->qdf_dev->dev->of_node;
165 		/*
166 		 * pcie id is stored in the main pci node which has to be taken
167 		 * from the second parent of mhi_node.
168 		 */
169 		pcierp_node = mhi_node->parent;
170 		pcie_node = pcierp_node->parent;
171 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0))
172 		pci_id = 0;
173 #else
174 		pci_id = of_get_pci_domain_nr(pcie_node);
175 #endif
176 		if (pci_id < 0 || pci_id >= WLAN_CFG_MAX_PCIE_GROUPS) {
177 			hif_err("pci_id: %d is invalid", pci_id);
178 			QDF_ASSERT(0);
179 			return 0;
180 		}
181 		return pci_id;
182 	default:
183 		/* Send pci_id 0 for all other targets */
184 		return 0;
185 	}
186 }
187 #endif
188 
189 /*
190  * Top-level interrupt handler for all PCI interrupts from a Target.
191  * When a block of MSI interrupts is allocated, this top-level handler
192  * is not used; instead, we directly call the correct sub-handler.
193  */
194 struct ce_irq_reg_table {
195 	uint32_t irq_enable;
196 	uint32_t irq_status;
197 };
198 
199 #ifndef QCA_WIFI_3_0_ADRASTEA
200 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
201 {
202 }
203 #else
204 static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
205 {
206 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
207 	unsigned int target_enable0, target_enable1;
208 	unsigned int target_cause0, target_cause1;
209 
210 	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
211 	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
212 	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
213 	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);
214 
215 	if ((target_enable0 & target_cause0) ||
216 	    (target_enable1 & target_cause1)) {
217 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
218 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);
219 
220 		if (scn->notice_send)
221 			pld_intr_notify_q6(sc->dev);
222 	}
223 }
224 #endif
225 
226 
227 /**
228  * pci_dispatch_ce_irq() - pci_dispatch_ce_irq
229  * @scn: scn
230  *
231  * Return: N/A
232  */
233 static void pci_dispatch_interrupt(struct hif_softc *scn)
234 {
235 	uint32_t intr_summary;
236 	int id;
237 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
238 
239 	if (scn->hif_init_done != true)
240 		return;
241 
242 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
243 		return;
244 
245 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
246 
247 	if (intr_summary == 0) {
248 		if ((scn->target_status != TARGET_STATUS_RESET) &&
249 			(!qdf_atomic_read(&scn->link_suspended))) {
250 
251 			hif_write32_mb(scn, scn->mem +
252 				(SOC_CORE_BASE_ADDRESS |
253 				PCIE_INTR_ENABLE_ADDRESS),
254 				HOST_GROUP0_MASK);
255 
256 			hif_read32_mb(scn, scn->mem +
257 					(SOC_CORE_BASE_ADDRESS |
258 					PCIE_INTR_ENABLE_ADDRESS));
259 		}
260 		Q_TARGET_ACCESS_END(scn);
261 		return;
262 	}
263 	Q_TARGET_ACCESS_END(scn);
264 
265 	scn->ce_irq_summary = intr_summary;
266 	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
267 		if (intr_summary & (1 << id)) {
268 			intr_summary &= ~(1 << id);
269 			ce_dispatch_interrupt(id,  &hif_state->tasklets[id]);
270 		}
271 	}
272 }
273 
274 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
275 {
276 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
277 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
278 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
279 
280 	volatile int tmp;
281 	uint16_t val = 0;
282 	uint32_t bar0 = 0;
283 	uint32_t fw_indicator_address, fw_indicator;
284 	bool ssr_irq = false;
285 	unsigned int host_cause, host_enable;
286 
287 	if (LEGACY_INTERRUPTS(sc)) {
288 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
289 			return IRQ_HANDLED;
290 
291 		if (ADRASTEA_BU) {
292 			host_enable = hif_read32_mb(sc, sc->mem +
293 						    PCIE_INTR_ENABLE_ADDRESS);
294 			host_cause = hif_read32_mb(sc, sc->mem +
295 						   PCIE_INTR_CAUSE_ADDRESS);
296 			if (!(host_enable & host_cause)) {
297 				hif_pci_route_adrastea_interrupt(sc);
298 				return IRQ_HANDLED;
299 			}
300 		}
301 
302 		/* Clear Legacy PCI line interrupts
303 		 * IMPORTANT: INTR_CLR regiser has to be set
304 		 * after INTR_ENABLE is set to 0,
305 		 * otherwise interrupt can not be really cleared
306 		 */
307 		hif_write32_mb(sc, sc->mem +
308 			      (SOC_CORE_BASE_ADDRESS |
309 			       PCIE_INTR_ENABLE_ADDRESS), 0);
310 
311 		hif_write32_mb(sc, sc->mem +
312 			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
313 			       ADRASTEA_BU ?
314 			       (host_enable & host_cause) :
315 			      HOST_GROUP0_MASK);
316 
317 		if (ADRASTEA_BU)
318 			hif_write32_mb(sc, sc->mem + 0x2f100c,
319 				       (host_cause >> 1));
320 
321 		/* IMPORTANT: this extra read transaction is required to
322 		 * flush the posted write buffer
323 		 */
324 		if (!ADRASTEA_BU) {
325 		tmp =
326 			hif_read32_mb(sc, sc->mem +
327 				     (SOC_CORE_BASE_ADDRESS |
328 				      PCIE_INTR_ENABLE_ADDRESS));
329 
330 		if (tmp == 0xdeadbeef) {
331 			hif_err("SoC returns 0xdeadbeef!!");
332 
333 			pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
334 			hif_err("PCI Vendor ID = 0x%04x", val);
335 
336 			pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
337 			hif_err("PCI Device ID = 0x%04x", val);
338 
339 			pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
340 			hif_err("PCI Command = 0x%04x", val);
341 
342 			pci_read_config_word(sc->pdev, PCI_STATUS, &val);
343 			hif_err("PCI Status = 0x%04x", val);
344 
345 			pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
346 					      &bar0);
347 			hif_err("PCI BAR0 = 0x%08x", bar0);
348 
349 			hif_err("RTC_STATE_ADDRESS = 0x%08x",
350 				hif_read32_mb(sc, sc->mem +
351 					PCIE_LOCAL_BASE_ADDRESS
352 					+ RTC_STATE_ADDRESS));
353 			hif_err("PCIE_SOC_WAKE_ADDRESS = 0x%08x",
354 				hif_read32_mb(sc, sc->mem +
355 					PCIE_LOCAL_BASE_ADDRESS
356 					+ PCIE_SOC_WAKE_ADDRESS));
357 			hif_err("0x80008 = 0x%08x, 0x8000c = 0x%08x",
358 				hif_read32_mb(sc, sc->mem + 0x80008),
359 				hif_read32_mb(sc, sc->mem + 0x8000c));
360 			hif_err("0x80010 = 0x%08x, 0x80014 = 0x%08x",
361 				hif_read32_mb(sc, sc->mem + 0x80010),
362 				hif_read32_mb(sc, sc->mem + 0x80014));
363 			hif_err("0x80018 = 0x%08x, 0x8001c = 0x%08x",
364 				hif_read32_mb(sc, sc->mem + 0x80018),
365 				hif_read32_mb(sc, sc->mem + 0x8001c));
366 			QDF_BUG(0);
367 		}
368 
369 		PCI_CLR_CAUSE0_REGISTER(sc);
370 		}
371 
372 		if (HAS_FW_INDICATOR) {
373 			fw_indicator_address = hif_state->fw_indicator_address;
374 			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
375 			if ((fw_indicator != ~0) &&
376 			   (fw_indicator & FW_IND_EVENT_PENDING))
377 				ssr_irq = true;
378 		}
379 
380 		if (Q_TARGET_ACCESS_END(scn) < 0)
381 			return IRQ_HANDLED;
382 	}
383 	/* TBDXXX: Add support for WMAC */
384 
385 	if (ssr_irq) {
386 		sc->irq_event = irq;
387 		qdf_atomic_set(&scn->tasklet_from_intr, 1);
388 
389 		qdf_atomic_inc(&scn->active_tasklet_cnt);
390 		tasklet_schedule(&sc->intr_tq);
391 	} else {
392 		pci_dispatch_interrupt(scn);
393 	}
394 
395 	return IRQ_HANDLED;
396 }
397 
398 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
399 {
400 	return 1;               /* FIX THIS */
401 }
402 
403 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
404 {
405 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
406 	int i = 0;
407 
408 	if (!irq || !size) {
409 		return -EINVAL;
410 	}
411 
412 	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
413 		irq[0] = sc->irq;
414 		return 1;
415 	}
416 
417 	if (sc->num_msi_intrs > size) {
418 		qdf_print("Not enough space in irq buffer to return irqs");
419 		return -EINVAL;
420 	}
421 
422 	for (i = 0; i < sc->num_msi_intrs; i++) {
423 		irq[i] = sc->irq +  i + MSI_ASSIGN_CE_INITIAL;
424 	}
425 
426 	return sc->num_msi_intrs;
427 }
428 
429 
430 /**
431  * hif_pci_cancel_deferred_target_sleep() - cancels the defered target sleep
432  * @scn: hif_softc
433  *
434  * Return: void
435  */
436 #if CONFIG_ATH_PCIE_MAX_PERF == 0
437 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
438 {
439 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
440 	A_target_id_t pci_addr = scn->mem;
441 
442 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
443 	/*
444 	 * If the deferred sleep timer is running cancel it
445 	 * and put the soc into sleep.
446 	 */
447 	if (hif_state->fake_sleep == true) {
448 		qdf_timer_stop(&hif_state->sleep_timer);
449 		if (hif_state->verified_awake == false) {
450 			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
451 				      PCIE_SOC_WAKE_ADDRESS,
452 				      PCIE_SOC_WAKE_RESET);
453 		}
454 		hif_state->fake_sleep = false;
455 	}
456 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
457 }
458 #else
459 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
460 {
461 }
462 #endif
463 
464 #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
465 	hif_read32_mb(sc, (char *)(mem) + \
466 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
467 
468 #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
469 	hif_write32_mb(sc, ((char *)(mem) + \
470 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
471 
472 #ifdef QCA_WIFI_3_0
473 /**
474  * hif_targ_is_awake() - check to see if the target is awake
475  * @hif_ctx: hif context
476  *
477  * emulation never goes to sleep
478  *
479  * Return: true if target is awake
480  */
481 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
482 {
483 	return true;
484 }
485 #else
486 /**
487  * hif_targ_is_awake() - check to see if the target is awake
488  * @hif_ctx: hif context
489  *
490  * Return: true if the targets clocks are on
491  */
492 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
493 {
494 	uint32_t val;
495 
496 	if (scn->recovery)
497 		return false;
498 	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
499 		+ RTC_STATE_ADDRESS);
500 	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
501 }
502 #endif
503 
504 #define ATH_PCI_RESET_WAIT_MAX 10       /* Ms */
505 static void hif_pci_device_reset(struct hif_pci_softc *sc)
506 {
507 	void __iomem *mem = sc->mem;
508 	int i;
509 	uint32_t val;
510 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
511 
512 	if (!scn->hostdef)
513 		return;
514 
515 	/* NB: Don't check resetok here.  This form of reset
516 	 * is integral to correct operation.
517 	 */
518 
519 	if (!SOC_GLOBAL_RESET_ADDRESS)
520 		return;
521 
522 	if (!mem)
523 		return;
524 
525 	hif_err("Reset Device");
526 
527 	/*
528 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
529 	 * writing WAKE_V, the Target may scribble over Host memory!
530 	 */
531 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
532 			       PCIE_SOC_WAKE_V_MASK);
533 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
534 		if (hif_targ_is_awake(scn, mem))
535 			break;
536 
537 		qdf_mdelay(1);
538 	}
539 
540 	/* Put Target, including PCIe, into RESET. */
541 	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
542 	val |= 1;
543 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
544 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
545 		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
546 		    RTC_STATE_COLD_RESET_MASK)
547 			break;
548 
549 		qdf_mdelay(1);
550 	}
551 
552 	/* Pull Target, including PCIe, out of RESET. */
553 	val &= ~1;
554 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
555 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
556 		if (!
557 		    (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
558 		     RTC_STATE_COLD_RESET_MASK))
559 			break;
560 
561 		qdf_mdelay(1);
562 	}
563 
564 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
565 			       PCIE_SOC_WAKE_RESET);
566 }
567 
568 /* CPU warm reset function
569  * Steps:
570  * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
571  * 2. Clear the FW_INDICATOR_ADDRESS -so Traget CPU initializes FW
572  *    correctly on WARM reset
573  * 3. Clear TARGET CPU LF timer interrupt
574  * 4. Reset all CEs to clear any pending CE tarnsactions
575  * 5. Warm reset CPU
576  */
577 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
578 {
579 	void __iomem *mem = sc->mem;
580 	int i;
581 	uint32_t val;
582 	uint32_t fw_indicator;
583 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
584 
585 	/* NB: Don't check resetok here.  This form of reset is
586 	 * integral to correct operation.
587 	 */
588 
589 	if (!mem)
590 		return;
591 
592 	hif_debug("Target Warm Reset");
593 
594 	/*
595 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
596 	 * writing WAKE_V, the Target may scribble over Host memory!
597 	 */
598 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
599 			       PCIE_SOC_WAKE_V_MASK);
600 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
601 		if (hif_targ_is_awake(scn, mem))
602 			break;
603 		qdf_mdelay(1);
604 	}
605 
606 	/*
607 	 * Disable Pending interrupts
608 	 */
609 	val =
610 		hif_read32_mb(sc, mem +
611 			     (SOC_CORE_BASE_ADDRESS |
612 			      PCIE_INTR_CAUSE_ADDRESS));
613 	hif_debug("Host Intr Cause reg 0x%x: value : 0x%x",
614 		  (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
615 	/* Target CPU Intr Cause */
616 	val = hif_read32_mb(sc, mem +
617 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
618 	hif_debug("Target CPU Intr Cause 0x%x", val);
619 
620 	val =
621 		hif_read32_mb(sc, mem +
622 			     (SOC_CORE_BASE_ADDRESS |
623 			      PCIE_INTR_ENABLE_ADDRESS));
624 	hif_write32_mb(sc, (mem +
625 		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
626 	hif_write32_mb(sc, (mem +
627 		       (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
628 		       HOST_GROUP0_MASK);
629 
630 	qdf_mdelay(100);
631 
632 	/* Clear FW_INDICATOR_ADDRESS */
633 	if (HAS_FW_INDICATOR) {
634 		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
635 		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
636 	}
637 
638 	/* Clear Target LF Timer interrupts */
639 	val =
640 		hif_read32_mb(sc, mem +
641 			     (RTC_SOC_BASE_ADDRESS +
642 			      SOC_LF_TIMER_CONTROL0_ADDRESS));
643 	hif_debug("addr 0x%x : 0x%x",
644 	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
645 	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
646 	hif_write32_mb(sc, mem +
647 		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
648 		      val);
649 
650 	/* Reset CE */
651 	val =
652 		hif_read32_mb(sc, mem +
653 			     (RTC_SOC_BASE_ADDRESS |
654 			      SOC_RESET_CONTROL_ADDRESS));
655 	val |= SOC_RESET_CONTROL_CE_RST_MASK;
656 	hif_write32_mb(sc, (mem +
657 		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
658 		      val);
659 	val =
660 		hif_read32_mb(sc, mem +
661 			     (RTC_SOC_BASE_ADDRESS |
662 			      SOC_RESET_CONTROL_ADDRESS));
663 	qdf_mdelay(10);
664 
665 	/* CE unreset */
666 	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
667 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
668 		       SOC_RESET_CONTROL_ADDRESS), val);
669 	val =
670 		hif_read32_mb(sc, mem +
671 			     (RTC_SOC_BASE_ADDRESS |
672 			      SOC_RESET_CONTROL_ADDRESS));
673 	qdf_mdelay(10);
674 
675 	/* Read Target CPU Intr Cause */
676 	val = hif_read32_mb(sc, mem +
677 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
678 	hif_debug("Target CPU Intr Cause after CE reset 0x%x", val);
679 
680 	/* CPU warm RESET */
681 	val =
682 		hif_read32_mb(sc, mem +
683 			     (RTC_SOC_BASE_ADDRESS |
684 			      SOC_RESET_CONTROL_ADDRESS));
685 	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
686 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
687 		       SOC_RESET_CONTROL_ADDRESS), val);
688 	val =
689 		hif_read32_mb(sc, mem +
690 			     (RTC_SOC_BASE_ADDRESS |
691 			      SOC_RESET_CONTROL_ADDRESS));
692 	hif_debug("RESET_CONTROL after cpu warm reset 0x%x", val);
693 
694 	qdf_mdelay(100);
695 	hif_debug("Target Warm reset complete");
696 
697 }
698 
699 #ifndef QCA_WIFI_3_0
700 /* only applicable to legacy ce */
701 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
702 {
703 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
704 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
705 	void __iomem *mem = sc->mem;
706 	uint32_t val;
707 
708 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
709 		return ATH_ISR_NOSCHED;
710 	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
711 	if (Q_TARGET_ACCESS_END(scn) < 0)
712 		return ATH_ISR_SCHED;
713 
714 	hif_debug("FW_INDICATOR register is 0x%x", val);
715 
716 	if (val & FW_IND_HELPER)
717 		return 0;
718 
719 	return 1;
720 }
721 #endif
722 
723 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
724 {
725 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
726 	uint16_t device_id = 0;
727 	uint32_t val;
728 	uint16_t timeout_count = 0;
729 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
730 
731 	/* Check device ID from PCIe configuration space for link status */
732 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
733 	if (device_id != sc->devid) {
734 		hif_err("Device ID does match (read 0x%x, expect 0x%x)",
735 			device_id, sc->devid);
736 		return -EACCES;
737 	}
738 
739 	/* Check PCIe local register for bar/memory access */
740 	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
741 			   RTC_STATE_ADDRESS);
742 	hif_debug("RTC_STATE_ADDRESS is %08x", val);
743 
744 	/* Try to wake up taget if it sleeps */
745 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
746 		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
747 	hif_debug("PCIE_SOC_WAKE_ADDRESS is %08x",
748 		hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
749 		PCIE_SOC_WAKE_ADDRESS));
750 
751 	/* Check if taget can be woken up */
752 	while (!hif_targ_is_awake(scn, sc->mem)) {
753 		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
754 			hif_err("wake up timeout, %08x, %08x",
755 				hif_read32_mb(sc, sc->mem +
756 				     PCIE_LOCAL_BASE_ADDRESS +
757 				     RTC_STATE_ADDRESS),
758 				hif_read32_mb(sc, sc->mem +
759 				     PCIE_LOCAL_BASE_ADDRESS +
760 				     PCIE_SOC_WAKE_ADDRESS));
761 			return -EACCES;
762 		}
763 
764 		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
765 			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
766 
767 		qdf_mdelay(100);
768 		timeout_count += 100;
769 	}
770 
771 	/* Check Power register for SoC internal bus issues */
772 	val =
773 		hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
774 			     SOC_POWER_REG_OFFSET);
775 	hif_debug("Power register is %08x", val);
776 
777 	return 0;
778 }
779 
780 /**
781  * __hif_pci_dump_registers(): dump other PCI debug registers
782  * @scn: struct hif_softc
783  *
784  * This function dumps pci debug registers.  The parrent function
785  * dumps the copy engine registers before calling this function.
786  *
787  * Return: void
788  */
789 static void __hif_pci_dump_registers(struct hif_softc *scn)
790 {
791 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
792 	void __iomem *mem = sc->mem;
793 	uint32_t val, i, j;
794 	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
795 	uint32_t ce_base;
796 
797 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
798 		return;
799 
800 	/* DEBUG_INPUT_SEL_SRC = 0x6 */
801 	val =
802 		hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
803 			     WLAN_DEBUG_INPUT_SEL_OFFSET);
804 	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
805 	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
806 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
807 		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);
808 
809 	/* DEBUG_CONTROL_ENABLE = 0x1 */
810 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
811 			   WLAN_DEBUG_CONTROL_OFFSET);
812 	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
813 	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
814 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
815 		      WLAN_DEBUG_CONTROL_OFFSET, val);
816 
817 	hif_debug("Debug: inputsel: %x dbgctrl: %x",
818 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
819 			    WLAN_DEBUG_INPUT_SEL_OFFSET),
820 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
821 			    WLAN_DEBUG_CONTROL_OFFSET));
822 
823 	hif_debug("Debug CE");
824 	/* Loop CE debug output */
825 	/* AMBA_DEBUG_BUS_SEL = 0xc */
826 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
827 			    AMBA_DEBUG_BUS_OFFSET);
828 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
829 	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
830 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
831 		       val);
832 
833 	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
834 		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
835 		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
836 				   CE_WRAPPER_DEBUG_OFFSET);
837 		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
838 		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
839 		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
840 			      CE_WRAPPER_DEBUG_OFFSET, val);
841 
842 		hif_debug("ce wrapper: %d amdbg: %x cewdbg: %x",
843 			  wrapper_idx[i],
844 			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
845 				AMBA_DEBUG_BUS_OFFSET),
846 			  hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
847 				CE_WRAPPER_DEBUG_OFFSET));
848 
849 		if (wrapper_idx[i] <= 7) {
850 			for (j = 0; j <= 5; j++) {
851 				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
852 				/* For (j=0~5) write CE_DEBUG_SEL = j */
853 				val =
854 					hif_read32_mb(sc, mem + ce_base +
855 						     CE_DEBUG_OFFSET);
856 				val &= ~CE_DEBUG_SEL_MASK;
857 				val |= CE_DEBUG_SEL_SET(j);
858 				hif_write32_mb(sc, mem + ce_base +
859 					       CE_DEBUG_OFFSET, val);
860 
861 				/* read (@gpio_athr_wlan_reg)
862 				 * WLAN_DEBUG_OUT_DATA
863 				 */
864 				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
865 						    + WLAN_DEBUG_OUT_OFFSET);
866 				val = WLAN_DEBUG_OUT_DATA_GET(val);
867 
868 				hif_debug("module%d: cedbg: %x out: %x",
869 					  j,
870 					  hif_read32_mb(sc, mem + ce_base +
871 						CE_DEBUG_OFFSET), val);
872 			}
873 		} else {
874 			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
875 			val =
876 				hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
877 					     WLAN_DEBUG_OUT_OFFSET);
878 			val = WLAN_DEBUG_OUT_DATA_GET(val);
879 
880 			hif_debug("out: %x", val);
881 		}
882 	}
883 
884 	hif_debug("Debug PCIe:");
885 	/* Loop PCIe debug output */
886 	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
887 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
888 			    AMBA_DEBUG_BUS_OFFSET);
889 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
890 	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
891 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
892 		       AMBA_DEBUG_BUS_OFFSET, val);
893 
894 	for (i = 0; i <= 8; i++) {
895 		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
896 		val =
897 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
898 				     AMBA_DEBUG_BUS_OFFSET);
899 		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
900 		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
901 		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
902 			       AMBA_DEBUG_BUS_OFFSET, val);
903 
904 		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
905 		val =
906 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
907 				     WLAN_DEBUG_OUT_OFFSET);
908 		val = WLAN_DEBUG_OUT_DATA_GET(val);
909 
910 		hif_debug("amdbg: %x out: %x %x",
911 			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
912 				WLAN_DEBUG_OUT_OFFSET), val,
913 			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
914 				WLAN_DEBUG_OUT_OFFSET));
915 	}
916 
917 	Q_TARGET_ACCESS_END(scn);
918 }
919 
920 /**
921  * hif_dump_registers(): dump bus debug registers
922  * @scn: struct hif_opaque_softc
923  *
924  * This function dumps hif bus debug registers
925  *
926  * Return: 0 for success or error code
927  */
928 int hif_pci_dump_registers(struct hif_softc *hif_ctx)
929 {
930 	int status;
931 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
932 
933 	status = hif_dump_ce_registers(scn);
934 
935 	if (status)
936 		hif_err("Dump CE Registers Failed");
937 
938 	/* dump non copy engine pci registers */
939 	__hif_pci_dump_registers(scn);
940 
941 	return 0;
942 }
943 
944 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
945 
946 /* worker thread to schedule wlan_tasklet in SLUB debug build */
947 static void reschedule_tasklet_work_handler(void *arg)
948 {
949 	struct hif_pci_softc *sc = arg;
950 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
951 
952 	if (!scn) {
953 		hif_err("hif_softc is NULL");
954 		return;
955 	}
956 
957 	if (scn->hif_init_done == false) {
958 		hif_err("wlan driver is unloaded");
959 		return;
960 	}
961 
962 	tasklet_schedule(&sc->intr_tq);
963 }
964 
965 /**
966  * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
967  * work
968  * @sc: HIF PCI Context
969  *
970  * Return: void
971  */
972 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
973 {
974 	qdf_create_work(0, &sc->reschedule_tasklet_work,
975 				reschedule_tasklet_work_handler, NULL);
976 }
977 #else
978 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
979 #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
980 
981 void wlan_tasklet(unsigned long data)
982 {
983 	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
984 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
985 
986 	if (scn->hif_init_done == false)
987 		goto end;
988 
989 	if (qdf_atomic_read(&scn->link_suspended))
990 		goto end;
991 
992 	if (!ADRASTEA_BU) {
993 		hif_fw_interrupt_handler(sc->irq_event, scn);
994 		if (scn->target_status == TARGET_STATUS_RESET)
995 			goto end;
996 	}
997 
998 end:
999 	qdf_atomic_set(&scn->tasklet_from_intr, 0);
1000 	qdf_atomic_dec(&scn->active_tasklet_cnt);
1001 }
1002 
1003 /**
1004  * hif_disable_power_gating() - disable HW power gating
1005  * @hif_ctx: hif context
1006  *
1007  * disables pcie L1 power states
1008  */
1009 static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1010 {
1011 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1012 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1013 
1014 	if (!scn) {
1015 		hif_err("Could not disable ASPM scn is null");
1016 		return;
1017 	}
1018 
1019 	/* Disable ASPM when pkt log is enabled */
1020 	pfrm_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1021 	pfrm_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1022 }
1023 
1024 /**
1025  * hif_enable_power_gating() - enable HW power gating
1026  * @hif_ctx: hif context
1027  *
1028  * enables pcie L1 power states
1029  */
1030 static void hif_enable_power_gating(struct hif_pci_softc *sc)
1031 {
1032 	if (!sc) {
1033 		hif_err("Could not disable ASPM scn is null");
1034 		return;
1035 	}
1036 
1037 	/* Re-enable ASPM after firmware/OTP download is complete */
1038 	pfrm_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1039 }
1040 
1041 /**
1042  * hif_enable_power_management() - enable power management
1043  * @hif_ctx: hif context
1044  *
1045  * Enables runtime pm, aspm(PCI.. hif_enable_power_gating) and re-enabling
1046  * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
1047  *
1048  * note: epping mode does not call this function as it does not
1049  *       care about saving power.
1050  */
1051 void hif_pci_enable_power_management(struct hif_softc *hif_sc,
1052 				 bool is_packet_log_enabled)
1053 {
1054 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
1055 	uint32_t mode;
1056 
1057 	if (!pci_ctx) {
1058 		hif_err("hif_ctx null");
1059 		return;
1060 	}
1061 
1062 	mode = hif_get_conparam(hif_sc);
1063 	if (mode == QDF_GLOBAL_FTM_MODE) {
1064 		hif_info("Enable power gating for FTM mode");
1065 		hif_enable_power_gating(pci_ctx);
1066 		return;
1067 	}
1068 
1069 	hif_pm_runtime_start(hif_sc);
1070 
1071 	if (!is_packet_log_enabled)
1072 		hif_enable_power_gating(pci_ctx);
1073 
1074 	if (!CONFIG_ATH_PCIE_MAX_PERF &&
1075 	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
1076 	    !ce_srng_based(hif_sc)) {
1077 		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
1078 		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
1079 			hif_err("Failed to set target to sleep");
1080 	}
1081 }
1082 
1083 /**
1084  * hif_disable_power_management() - disable power management
1085  * @hif_ctx: hif context
1086  *
1087  * Currently disables runtime pm. Should be updated to behave
1088  * if runtime pm is not started. Should be updated to take care
1089  * of aspm and soc sleep for driver load.
1090  */
1091 void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
1092 {
1093 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1094 
1095 	if (!pci_ctx) {
1096 		hif_err("hif_ctx null");
1097 		return;
1098 	}
1099 
1100 	hif_pm_runtime_stop(hif_ctx);
1101 }
1102 
1103 void hif_pci_display_stats(struct hif_softc *hif_ctx)
1104 {
1105 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1106 
1107 	if (!pci_ctx) {
1108 		hif_err("hif_ctx null");
1109 		return;
1110 	}
1111 	hif_display_ce_stats(hif_ctx);
1112 
1113 	hif_print_pci_stats(pci_ctx);
1114 }
1115 
1116 void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1117 {
1118 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1119 
1120 	if (!pci_ctx) {
1121 		hif_err("hif_ctx null");
1122 		return;
1123 	}
1124 	hif_clear_ce_stats(&pci_ctx->ce_sc);
1125 }
1126 
1127 #define ATH_PCI_PROBE_RETRY_MAX 3
1128 /**
1129  * hif_bus_open(): hif_bus_open
1130  * @scn: scn
1131  * @bus_type: bus type
1132  *
1133  * Return: n/a
1134  */
1135 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
1136 {
1137 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1138 
1139 	hif_ctx->bus_type = bus_type;
1140 	hif_pm_runtime_open(hif_ctx);
1141 
1142 	qdf_spinlock_create(&sc->irq_lock);
1143 
1144 	return hif_ce_open(hif_ctx);
1145 }
1146 
1147 /**
1148  * hif_wake_target_cpu() - wake the target's cpu
1149  * @scn: hif context
1150  *
1151  * Send an interrupt to the device to wake up the Target CPU
1152  * so it has an opportunity to notice any changed state.
1153  */
1154 static void hif_wake_target_cpu(struct hif_softc *scn)
1155 {
1156 	QDF_STATUS rv;
1157 	uint32_t core_ctrl;
1158 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1159 
1160 	rv = hif_diag_read_access(hif_hdl,
1161 				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1162 				  &core_ctrl);
1163 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1164 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1165 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1166 
1167 	rv = hif_diag_write_access(hif_hdl,
1168 				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1169 				   core_ctrl);
1170 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1171 }
1172 
1173 /**
1174  * soc_wake_reset() - allow the target to go to sleep
1175  * @scn: hif_softc
1176  *
1177  * Clear the force wake register.  This is done by
1178  * hif_sleep_entry and cancel defered timer sleep.
1179  */
1180 static void soc_wake_reset(struct hif_softc *scn)
1181 {
1182 	hif_write32_mb(scn, scn->mem +
1183 		PCIE_LOCAL_BASE_ADDRESS +
1184 		PCIE_SOC_WAKE_ADDRESS,
1185 		PCIE_SOC_WAKE_RESET);
1186 }
1187 
1188 /**
1189  * hif_sleep_entry() - gate target sleep
1190  * @arg: hif context
1191  *
1192  * This function is the callback for the sleep timer.
1193  * Check if last force awake critical section was at least
1194  * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago.  if it was,
1195  * allow the target to go to sleep and cancel the sleep timer.
1196  * otherwise reschedule the sleep timer.
1197  */
1198 static void hif_sleep_entry(void *arg)
1199 {
1200 	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1201 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1202 	uint32_t idle_ms;
1203 
1204 	if (scn->recovery)
1205 		return;
1206 
1207 	if (hif_is_driver_unloading(scn))
1208 		return;
1209 
1210 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1211 	if (hif_state->fake_sleep) {
1212 		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1213 						    - hif_state->sleep_ticks);
1214 		if (!hif_state->verified_awake &&
1215 		    idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1216 			if (!qdf_atomic_read(&scn->link_suspended)) {
1217 				soc_wake_reset(scn);
1218 				hif_state->fake_sleep = false;
1219 			}
1220 		} else {
1221 			qdf_timer_stop(&hif_state->sleep_timer);
1222 			qdf_timer_start(&hif_state->sleep_timer,
1223 					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1224 		}
1225 	}
1226 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1227 }
1228 
1229 #define HIF_HIA_MAX_POLL_LOOP    1000000
1230 #define HIF_HIA_POLLING_DELAY_MS 10
1231 
1232 #ifdef QCA_HIF_HIA_EXTND
1233 
1234 static void hif_set_hia_extnd(struct hif_softc *scn)
1235 {
1236 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1237 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1238 	uint32_t target_type = tgt_info->target_type;
1239 
1240 	hif_info("E");
1241 
1242 	if ((target_type == TARGET_TYPE_AR900B) ||
1243 			target_type == TARGET_TYPE_QCA9984 ||
1244 			target_type == TARGET_TYPE_QCA9888) {
1245 		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
1246 		 * in RTC space
1247 		 */
1248 		tgt_info->target_revision
1249 			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
1250 					+ CHIP_ID_ADDRESS));
1251 		qdf_print("chip_id 0x%x chip_revision 0x%x",
1252 			  target_type, tgt_info->target_revision);
1253 	}
1254 
1255 	{
1256 		uint32_t flag2_value = 0;
1257 		uint32_t flag2_targ_addr =
1258 			host_interest_item_address(target_type,
1259 			offsetof(struct host_interest_s, hi_skip_clock_init));
1260 
1261 		if ((ar900b_20_targ_clk != -1) &&
1262 			(frac != -1) && (intval != -1)) {
1263 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1264 				&flag2_value);
1265 			qdf_print("\n Setting clk_override");
1266 			flag2_value |= CLOCK_OVERRIDE;
1267 
1268 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1269 					flag2_value);
1270 			qdf_print("\n CLOCK PLL val set %d", flag2_value);
1271 		} else {
1272 			qdf_print("\n CLOCK PLL skipped");
1273 		}
1274 	}
1275 
1276 	if (target_type == TARGET_TYPE_AR900B
1277 			|| target_type == TARGET_TYPE_QCA9984
1278 			|| target_type == TARGET_TYPE_QCA9888) {
1279 
1280 		/* for AR9980_2.0, 300 mhz clock is used, right now we assume
1281 		 * this would be supplied through module parameters,
1282 		 * if not supplied assumed default or same behavior as 1.0.
1283 		 * Assume 1.0 clock can't be tuned, reset to defaults
1284 		 */
1285 
1286 		qdf_print(KERN_INFO
1287 			  "%s: setting the target pll frac %x intval %x",
1288 			  __func__, frac, intval);
1289 
1290 		/* do not touch frac, and int val, let them be default -1,
1291 		 * if desired, host can supply these through module params
1292 		 */
1293 		if (frac != -1 || intval != -1) {
1294 			uint32_t flag2_value = 0;
1295 			uint32_t flag2_targ_addr;
1296 
1297 			flag2_targ_addr =
1298 				host_interest_item_address(target_type,
1299 				offsetof(struct host_interest_s,
1300 					hi_clock_info));
1301 			hif_diag_read_access(hif_hdl,
1302 				flag2_targ_addr, &flag2_value);
1303 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1304 				  flag2_value);
1305 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1306 			qdf_print("\n INT Val %x  Address %x",
1307 				  intval, flag2_value + 4);
1308 			hif_diag_write_access(hif_hdl,
1309 					flag2_value + 4, intval);
1310 		} else {
1311 			qdf_print(KERN_INFO
1312 				  "%s: no frac provided, skipping pre-configuring PLL",
1313 				  __func__);
1314 		}
1315 
1316 		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
1317 		if ((target_type == TARGET_TYPE_AR900B)
1318 			&& (tgt_info->target_revision == AR900B_REV_2)
1319 			&& ar900b_20_targ_clk != -1) {
1320 			uint32_t flag2_value = 0;
1321 			uint32_t flag2_targ_addr;
1322 
1323 			flag2_targ_addr
1324 				= host_interest_item_address(target_type,
1325 					offsetof(struct host_interest_s,
1326 					hi_desired_cpu_speed_hz));
1327 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1328 							&flag2_value);
1329 			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
1330 				  flag2_value);
1331 			hif_diag_write_access(hif_hdl, flag2_value,
1332 				ar900b_20_targ_clk/*300000000u*/);
1333 		} else if (target_type == TARGET_TYPE_QCA9888) {
1334 			uint32_t flag2_targ_addr;
1335 
1336 			if (200000000u != qca9888_20_targ_clk) {
1337 				qca9888_20_targ_clk = 300000000u;
1338 				/* Setting the target clock speed to 300 mhz */
1339 			}
1340 
1341 			flag2_targ_addr
1342 				= host_interest_item_address(target_type,
1343 					offsetof(struct host_interest_s,
1344 					hi_desired_cpu_speed_hz));
1345 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1346 				qca9888_20_targ_clk);
1347 		} else {
1348 			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
1349 				  __func__);
1350 		}
1351 	} else {
1352 		if (frac != -1 || intval != -1) {
1353 			uint32_t flag2_value = 0;
1354 			uint32_t flag2_targ_addr =
1355 				host_interest_item_address(target_type,
1356 					offsetof(struct host_interest_s,
1357 							hi_clock_info));
1358 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1359 						&flag2_value);
1360 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1361 				  flag2_value);
1362 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1363 			qdf_print("\n INT Val %x  Address %x", intval,
1364 				  flag2_value + 4);
1365 			hif_diag_write_access(hif_hdl, flag2_value + 4,
1366 					      intval);
1367 		}
1368 	}
1369 }
1370 
1371 #else
1372 
1373 static void hif_set_hia_extnd(struct hif_softc *scn)
1374 {
1375 }
1376 
1377 #endif
1378 
1379 /**
1380  * hif_set_hia() - fill out the host interest area
1381  * @scn: hif context
1382  *
1383  * This is replaced by hif_wlan_enable for integrated targets.
1384  * This fills out the host interest area.  The firmware will
1385  * process these memory addresses when it is first brought out
1386  * of reset.
1387  *
1388  * Return: 0 for success.
1389  */
1390 static int hif_set_hia(struct hif_softc *scn)
1391 {
1392 	QDF_STATUS rv;
1393 	uint32_t interconnect_targ_addr = 0;
1394 	uint32_t pcie_state_targ_addr = 0;
1395 	uint32_t pipe_cfg_targ_addr = 0;
1396 	uint32_t svc_to_pipe_map = 0;
1397 	uint32_t pcie_config_flags = 0;
1398 	uint32_t flag2_value = 0;
1399 	uint32_t flag2_targ_addr = 0;
1400 #ifdef QCA_WIFI_3_0
1401 	uint32_t host_interest_area = 0;
1402 	uint8_t i;
1403 #else
1404 	uint32_t ealloc_value = 0;
1405 	uint32_t ealloc_targ_addr = 0;
1406 	uint8_t banks_switched = 1;
1407 	uint32_t chip_id;
1408 #endif
1409 	uint32_t pipe_cfg_addr;
1410 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1411 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1412 	uint32_t target_type = tgt_info->target_type;
1413 	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
1414 	static struct CE_pipe_config *target_ce_config;
1415 	struct service_to_pipe *target_service_to_ce_map;
1416 
1417 	hif_info("E");
1418 
1419 	hif_get_target_ce_config(scn,
1420 				 &target_ce_config, &target_ce_config_sz,
1421 				 &target_service_to_ce_map,
1422 				 &target_service_to_ce_map_sz,
1423 				 NULL, NULL);
1424 
1425 	if (ADRASTEA_BU)
1426 		return 0;
1427 
1428 #ifdef QCA_WIFI_3_0
1429 	i = 0;
1430 	while (i < HIF_HIA_MAX_POLL_LOOP) {
1431 		host_interest_area = hif_read32_mb(scn, scn->mem +
1432 						A_SOC_CORE_SCRATCH_0_ADDRESS);
1433 		if ((host_interest_area & 0x01) == 0) {
1434 			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1435 			host_interest_area = 0;
1436 			i++;
1437 			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1438 				hif_err("poll timeout: %d", i);
1439 		} else {
1440 			host_interest_area &= (~0x01);
1441 			hif_write32_mb(scn, scn->mem + 0x113014, 0);
1442 			break;
1443 		}
1444 	}
1445 
1446 	if (i >= HIF_HIA_MAX_POLL_LOOP) {
1447 		hif_err("hia polling timeout");
1448 		return -EIO;
1449 	}
1450 
1451 	if (host_interest_area == 0) {
1452 		hif_err("host_interest_area = 0");
1453 		return -EIO;
1454 	}
1455 
1456 	interconnect_targ_addr = host_interest_area +
1457 			offsetof(struct host_interest_area_t,
1458 			hi_interconnect_state);
1459 
1460 	flag2_targ_addr = host_interest_area +
1461 			offsetof(struct host_interest_area_t, hi_option_flag2);
1462 
1463 #else
1464 	interconnect_targ_addr = hif_hia_item_address(target_type,
1465 		offsetof(struct host_interest_s, hi_interconnect_state));
1466 	ealloc_targ_addr = hif_hia_item_address(target_type,
1467 		offsetof(struct host_interest_s, hi_early_alloc));
1468 	flag2_targ_addr = hif_hia_item_address(target_type,
1469 		offsetof(struct host_interest_s, hi_option_flag2));
1470 #endif
1471 	/* Supply Target-side CE configuration */
1472 	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1473 			  &pcie_state_targ_addr);
1474 	if (rv != QDF_STATUS_SUCCESS) {
1475 		hif_err("interconnect_targ_addr = 0x%0x, ret = %d",
1476 			interconnect_targ_addr, rv);
1477 		goto done;
1478 	}
1479 	if (pcie_state_targ_addr == 0) {
1480 		rv = QDF_STATUS_E_FAILURE;
1481 		hif_err("pcie state addr is 0");
1482 		goto done;
1483 	}
1484 	pipe_cfg_addr = pcie_state_targ_addr +
1485 			  offsetof(struct pcie_state_s,
1486 			  pipe_cfg_addr);
1487 	rv = hif_diag_read_access(hif_hdl,
1488 			  pipe_cfg_addr,
1489 			  &pipe_cfg_targ_addr);
1490 	if (rv != QDF_STATUS_SUCCESS) {
1491 		hif_err("pipe_cfg_addr = 0x%0x, ret = %d", pipe_cfg_addr, rv);
1492 		goto done;
1493 	}
1494 	if (pipe_cfg_targ_addr == 0) {
1495 		rv = QDF_STATUS_E_FAILURE;
1496 		hif_err("pipe cfg addr is 0");
1497 		goto done;
1498 	}
1499 
1500 	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1501 			(uint8_t *) target_ce_config,
1502 			target_ce_config_sz);
1503 
1504 	if (rv != QDF_STATUS_SUCCESS) {
1505 		hif_err("write pipe cfg: %d", rv);
1506 		goto done;
1507 	}
1508 
1509 	rv = hif_diag_read_access(hif_hdl,
1510 			  pcie_state_targ_addr +
1511 			  offsetof(struct pcie_state_s,
1512 			   svc_to_pipe_map),
1513 			  &svc_to_pipe_map);
1514 	if (rv != QDF_STATUS_SUCCESS) {
1515 		hif_err("get svc/pipe map: %d", rv);
1516 		goto done;
1517 	}
1518 	if (svc_to_pipe_map == 0) {
1519 		rv = QDF_STATUS_E_FAILURE;
1520 		hif_err("svc_to_pipe map is 0");
1521 		goto done;
1522 	}
1523 
1524 	rv = hif_diag_write_mem(hif_hdl,
1525 			svc_to_pipe_map,
1526 			(uint8_t *) target_service_to_ce_map,
1527 			target_service_to_ce_map_sz);
1528 	if (rv != QDF_STATUS_SUCCESS) {
1529 		hif_err("write svc/pipe map: %d", rv);
1530 		goto done;
1531 	}
1532 
1533 	rv = hif_diag_read_access(hif_hdl,
1534 			pcie_state_targ_addr +
1535 			offsetof(struct pcie_state_s,
1536 			config_flags),
1537 			&pcie_config_flags);
1538 	if (rv != QDF_STATUS_SUCCESS) {
1539 		hif_err("get pcie config_flags: %d", rv);
1540 		goto done;
1541 	}
1542 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1543 	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1544 #else
1545 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1546 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1547 	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1548 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1549 	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1550 #endif
1551 	rv = hif_diag_write_mem(hif_hdl,
1552 			pcie_state_targ_addr +
1553 			offsetof(struct pcie_state_s,
1554 			config_flags),
1555 			(uint8_t *) &pcie_config_flags,
1556 			sizeof(pcie_config_flags));
1557 	if (rv != QDF_STATUS_SUCCESS) {
1558 		hif_err("write pcie config_flags: %d", rv);
1559 		goto done;
1560 	}
1561 
1562 #ifndef QCA_WIFI_3_0
1563 	/* configure early allocation */
1564 	ealloc_targ_addr = hif_hia_item_address(target_type,
1565 						offsetof(
1566 						struct host_interest_s,
1567 						hi_early_alloc));
1568 
1569 	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1570 			&ealloc_value);
1571 	if (rv != QDF_STATUS_SUCCESS) {
1572 		hif_err("get early alloc val: %d", rv);
1573 		goto done;
1574 	}
1575 
1576 	/* 1 bank is switched to IRAM, except ROME 1.0 */
1577 	ealloc_value |=
1578 		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1579 		 HI_EARLY_ALLOC_MAGIC_MASK);
1580 
1581 	rv = hif_diag_read_access(hif_hdl,
1582 			  CHIP_ID_ADDRESS |
1583 			  RTC_SOC_BASE_ADDRESS, &chip_id);
1584 	if (rv != QDF_STATUS_SUCCESS) {
1585 		hif_err("get chip id val: %d", rv);
1586 		goto done;
1587 	}
1588 	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1589 		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1590 		switch (CHIP_ID_REVISION_GET(chip_id)) {
1591 		case 0x2:       /* ROME 1.3 */
1592 			/* 2 banks are switched to IRAM */
1593 			banks_switched = 2;
1594 			break;
1595 		case 0x4:       /* ROME 2.1 */
1596 		case 0x5:       /* ROME 2.2 */
1597 			banks_switched = 6;
1598 			break;
1599 		case 0x8:       /* ROME 3.0 */
1600 		case 0x9:       /* ROME 3.1 */
1601 		case 0xA:       /* ROME 3.2 */
1602 			banks_switched = 9;
1603 			break;
1604 		case 0x0:       /* ROME 1.0 */
1605 		case 0x1:       /* ROME 1.1 */
1606 		default:
1607 			/* 3 banks are switched to IRAM */
1608 			banks_switched = 3;
1609 			break;
1610 		}
1611 	}
1612 
1613 	ealloc_value |=
1614 		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1615 		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1616 
1617 	rv = hif_diag_write_access(hif_hdl,
1618 				ealloc_targ_addr,
1619 				ealloc_value);
1620 	if (rv != QDF_STATUS_SUCCESS) {
1621 		hif_err("set early alloc val: %d", rv);
1622 		goto done;
1623 	}
1624 #endif
1625 	if ((target_type == TARGET_TYPE_AR900B)
1626 			|| (target_type == TARGET_TYPE_QCA9984)
1627 			|| (target_type == TARGET_TYPE_QCA9888)
1628 			|| (target_type == TARGET_TYPE_AR9888)) {
1629 		hif_set_hia_extnd(scn);
1630 	}
1631 
1632 	/* Tell Target to proceed with initialization */
1633 	flag2_targ_addr = hif_hia_item_address(target_type,
1634 						offsetof(
1635 						struct host_interest_s,
1636 						hi_option_flag2));
1637 
1638 	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1639 			  &flag2_value);
1640 	if (rv != QDF_STATUS_SUCCESS) {
1641 		hif_err("get option val: %d", rv);
1642 		goto done;
1643 	}
1644 
1645 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1646 	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1647 			   flag2_value);
1648 	if (rv != QDF_STATUS_SUCCESS) {
1649 		hif_err("set option val: %d", rv);
1650 		goto done;
1651 	}
1652 
1653 	hif_wake_target_cpu(scn);
1654 
1655 done:
1656 
1657 	return qdf_status_to_os_return(rv);
1658 }
1659 
1660 /**
1661  * hif_bus_configure() - configure the pcie bus
1662  * @hif_sc: pointer to the hif context.
1663  *
1664  * return: 0 for success. nonzero for failure.
1665  */
1666 int hif_pci_bus_configure(struct hif_softc *hif_sc)
1667 {
1668 	int status = 0;
1669 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1670 	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
1671 
1672 	hif_ce_prepare_config(hif_sc);
1673 
1674 	/* initialize sleep state adjust variables */
1675 	hif_state->sleep_timer_init = true;
1676 	hif_state->keep_awake_count = 0;
1677 	hif_state->fake_sleep = false;
1678 	hif_state->sleep_ticks = 0;
1679 
1680 	qdf_timer_init(NULL, &hif_state->sleep_timer,
1681 			       hif_sleep_entry, (void *)hif_state,
1682 			       QDF_TIMER_TYPE_WAKE_APPS);
1683 	hif_state->sleep_timer_init = true;
1684 
1685 	status = hif_wlan_enable(hif_sc);
1686 	if (status) {
1687 		hif_err("hif_wlan_enable error: %d", status);
1688 		goto timer_free;
1689 	}
1690 
1691 	A_TARGET_ACCESS_LIKELY(hif_sc);
1692 
1693 	if ((CONFIG_ATH_PCIE_MAX_PERF ||
1694 	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
1695 	    !ce_srng_based(hif_sc)) {
1696 		/*
1697 		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
1698 		 * prevent sleep when we want to keep firmware always awake
1699 		 * note: when we want to keep firmware always awake,
1700 		 *       hif_target_sleep_state_adjust will point to a dummy
1701 		 *       function, and hif_pci_target_sleep_state_adjust must
1702 		 *       be called instead.
1703 		 * note: bus type check is here because AHB bus is reusing
1704 		 *       hif_pci_bus_configure code.
1705 		 */
1706 		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
1707 			if (hif_pci_target_sleep_state_adjust(hif_sc,
1708 					false, true) < 0) {
1709 				status = -EACCES;
1710 				goto disable_wlan;
1711 			}
1712 		}
1713 	}
1714 
1715 	/* todo: consider replacing this with an srng field */
1716 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
1717 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
1718 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
1719 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN9100) ||
1720 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
1721 	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
1722 		hif_sc->per_ce_irq = true;
1723 	}
1724 
1725 	status = hif_config_ce(hif_sc);
1726 	if (status)
1727 		goto disable_wlan;
1728 
1729 	if (hif_needs_bmi(hif_osc)) {
1730 		status = hif_set_hia(hif_sc);
1731 		if (status)
1732 			goto unconfig_ce;
1733 
1734 		hif_debug("hif_set_hia done");
1735 
1736 	}
1737 
1738 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
1739 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
1740 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
1741 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN9100) ||
1742 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
1743 	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
1744 		hif_debug("Skip irq config for PCI based 8074 target");
1745 	else {
1746 		status = hif_configure_irq(hif_sc);
1747 		if (status < 0)
1748 			goto unconfig_ce;
1749 	}
1750 
1751 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
1752 
1753 	return status;
1754 
1755 unconfig_ce:
1756 	hif_unconfig_ce(hif_sc);
1757 disable_wlan:
1758 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
1759 	hif_wlan_disable(hif_sc);
1760 
1761 timer_free:
1762 	qdf_timer_stop(&hif_state->sleep_timer);
1763 	qdf_timer_free(&hif_state->sleep_timer);
1764 	hif_state->sleep_timer_init = false;
1765 
1766 	hif_err("Failed, status: %d", status);
1767 	return status;
1768 }
1769 
1770 /**
1771  * hif_bus_close(): hif_bus_close
1772  *
1773  * Return: n/a
1774  */
1775 void hif_pci_close(struct hif_softc *hif_sc)
1776 {
1777 	hif_pm_runtime_close(hif_sc);
1778 	hif_ce_close(hif_sc);
1779 }
1780 
1781 #define BAR_NUM 0
1782 
1783 static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
1784 				struct pci_dev *pdev,
1785 				const struct pci_device_id *id)
1786 {
1787 	void __iomem *mem;
1788 	int ret = 0;
1789 	uint16_t device_id = 0;
1790 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1791 
1792 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
1793 	if (device_id != id->device)  {
1794 		hif_err(
1795 		   "dev id mismatch, config id = 0x%x, probing id = 0x%x",
1796 		   device_id, id->device);
1797 		/* pci link is down, so returing with error code */
1798 		return -EIO;
1799 	}
1800 
1801 	/* FIXME: temp. commenting out assign_resource
1802 	 * call for dev_attach to work on 2.6.38 kernel
1803 	 */
1804 #if (!defined(__LINUX_ARM_ARCH__))
1805 	if (pci_assign_resource(pdev, BAR_NUM)) {
1806 		hif_err("pci_assign_resource error");
1807 		return -EIO;
1808 	}
1809 #endif
1810 	if (pci_enable_device(pdev)) {
1811 		hif_err("pci_enable_device error");
1812 		return -EIO;
1813 	}
1814 
1815 	/* Request MMIO resources */
1816 	ret = pci_request_region(pdev, BAR_NUM, "ath");
1817 	if (ret) {
1818 		hif_err("PCI MMIO reservation error");
1819 		ret = -EIO;
1820 		goto err_region;
1821 	}
1822 
1823 #ifdef CONFIG_ARM_LPAE
1824 	/* if CONFIG_ARM_LPAE is enabled, we have to set 64 bits mask
1825 	 * for 32 bits device also.
1826 	 */
1827 	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1828 	if (ret) {
1829 		hif_err("Cannot enable 64-bit pci DMA");
1830 		goto err_dma;
1831 	}
1832 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1833 	if (ret) {
1834 		hif_err("Cannot enable 64-bit DMA");
1835 		goto err_dma;
1836 	}
1837 #else
1838 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1839 	if (ret) {
1840 		hif_err("Cannot enable 32-bit pci DMA");
1841 		goto err_dma;
1842 	}
1843 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1844 	if (ret) {
1845 		hif_err("Cannot enable 32-bit consistent DMA!");
1846 		goto err_dma;
1847 	}
1848 #endif
1849 
1850 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
1851 
1852 	/* Set bus master bit in PCI_COMMAND to enable DMA */
1853 	pci_set_master(pdev);
1854 
1855 	/* Arrange for access to Target SoC registers. */
1856 	mem = pci_iomap(pdev, BAR_NUM, 0);
1857 	if (!mem) {
1858 		hif_err("PCI iomap error");
1859 		ret = -EIO;
1860 		goto err_iomap;
1861 	}
1862 
1863 	hif_info("*****BAR is %pK", (void *)mem);
1864 
1865 	sc->mem = mem;
1866 
1867 	/* Hawkeye emulation specific change */
1868 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
1869 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
1870 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
1871 		(device_id == RUMIM2M_DEVICE_ID_NODE3) ||
1872 		(device_id == RUMIM2M_DEVICE_ID_NODE4) ||
1873 		(device_id == RUMIM2M_DEVICE_ID_NODE5)) {
1874 		mem = mem + 0x0c000000;
1875 		sc->mem = mem;
1876 		hif_info("Changing PCI mem base to %pK", sc->mem);
1877 	}
1878 
1879 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
1880 	ol_sc->mem = mem;
1881 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
1882 	sc->pci_enabled = true;
1883 	return ret;
1884 
1885 err_iomap:
1886 	pci_clear_master(pdev);
1887 err_dma:
1888 	pci_release_region(pdev, BAR_NUM);
1889 err_region:
1890 	pci_disable_device(pdev);
1891 	return ret;
1892 }
1893 
1894 static int hif_enable_pci_pld(struct hif_pci_softc *sc,
1895 			      struct pci_dev *pdev,
1896 			      const struct pci_device_id *id)
1897 {
1898 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
1899 	sc->pci_enabled = true;
1900 	return 0;
1901 }
1902 
1903 
1904 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
1905 {
1906 	pci_disable_msi(sc->pdev);
1907 	pci_iounmap(sc->pdev, sc->mem);
1908 	pci_clear_master(sc->pdev);
1909 	pci_release_region(sc->pdev, BAR_NUM);
1910 	pci_disable_device(sc->pdev);
1911 }
1912 
1913 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
1914 
1915 static void hif_disable_pci(struct hif_pci_softc *sc)
1916 {
1917 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1918 
1919 	if (!ol_sc) {
1920 		hif_err("ol_sc = NULL");
1921 		return;
1922 	}
1923 	hif_pci_device_reset(sc);
1924 	sc->hif_pci_deinit(sc);
1925 
1926 	sc->mem = NULL;
1927 	ol_sc->mem = NULL;
1928 }
1929 
1930 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
1931 {
1932 	int ret = 0;
1933 	int targ_awake_limit = 500;
1934 #ifndef QCA_WIFI_3_0
1935 	uint32_t fw_indicator;
1936 #endif
1937 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1938 
1939 	/*
	 * Verify that the Target was started cleanly.
1941 	 * The case where this is most likely is with an AUX-powered
1942 	 * Target and a Host in WoW mode. If the Host crashes,
1943 	 * loses power, or is restarted (without unloading the driver)
1944 	 * then the Target is left (aux) powered and running.  On a
1945 	 * subsequent driver load, the Target is in an unexpected state.
1946 	 * We try to catch that here in order to reset the Target and
1947 	 * retry the probe.
1948 	 */
1949 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
1950 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
1951 	while (!hif_targ_is_awake(scn, sc->mem)) {
1952 		if (0 == targ_awake_limit) {
1953 			hif_err("target awake timeout");
1954 			ret = -EAGAIN;
1955 			goto end;
1956 		}
1957 		qdf_mdelay(1);
1958 		targ_awake_limit--;
1959 	}
1960 
1961 #if PCIE_BAR0_READY_CHECKING
1962 	{
1963 		int wait_limit = 200;
1964 		/* Synchronization point: wait the BAR0 is configured */
1965 		while (wait_limit-- &&
			   !(hif_read32_mb(sc, sc->mem +
1967 					  PCIE_LOCAL_BASE_ADDRESS +
1968 					  PCIE_SOC_RDY_STATUS_ADDRESS)
1969 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
1970 			qdf_mdelay(10);
1971 		}
1972 		if (wait_limit < 0) {
			/* AR6320v1 doesn't support checking of BAR0
			 * configuration; it takes two seconds to wait
			 * for BAR0 to be ready
			 */
1976 			hif_debug("AR6320v1 waits two sec for BAR0");
1977 		}
1978 	}
1979 #endif
1980 
1981 #ifndef QCA_WIFI_3_0
1982 	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
1983 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
1984 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
1985 
1986 	if (fw_indicator & FW_IND_INITIALIZED) {
1987 		hif_err("Target is in an unknown state. EAGAIN");
1988 		ret = -EAGAIN;
1989 		goto end;
1990 	}
1991 #endif
1992 
1993 end:
1994 	return ret;
1995 }
1996 
1997 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
1998 {
1999 	int ret = 0;
2000 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2001 	uint32_t target_type = scn->target_info.target_type;
2002 
2003 	hif_info("E");
2004 
	/* MSI is not supported or MSI IRQ setup failed; fall back to
	 * legacy line interrupts
	 */
2006 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2007 	ret = request_irq(sc->pdev->irq,
2008 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2009 			  "wlan_pci", sc);
2010 	if (ret) {
2011 		hif_err("request_irq failed, ret: %d", ret);
2012 		goto end;
2013 	}
2014 	scn->wake_irq = sc->pdev->irq;
	/* Use sc->irq instead of sc->pdev->irq;
	 * platform_device pdev doesn't have an irq field
2017 	 */
2018 	sc->irq = sc->pdev->irq;
2019 	/* Use Legacy PCI Interrupts */
2020 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2021 		  PCIE_INTR_ENABLE_ADDRESS),
2022 		  HOST_GROUP0_MASK);
2023 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2024 			       PCIE_INTR_ENABLE_ADDRESS));
2025 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2026 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2027 
2028 	if ((target_type == TARGET_TYPE_IPQ4019) ||
2029 			(target_type == TARGET_TYPE_AR900B)  ||
2030 			(target_type == TARGET_TYPE_QCA9984) ||
2031 			(target_type == TARGET_TYPE_AR9888) ||
2032 			(target_type == TARGET_TYPE_QCA9888) ||
2033 			(target_type == TARGET_TYPE_AR6320V1) ||
2034 			(target_type == TARGET_TYPE_AR6320V2) ||
2035 			(target_type == TARGET_TYPE_AR6320V3)) {
2036 		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2037 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2038 	}
2039 end:
2040 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2041 			  "%s: X, ret = %d", __func__, ret);
2042 	return ret;
2043 }
2044 
2045 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
2046 {
2047 	int ret;
2048 	int ce_id, irq;
2049 	uint32_t msi_data_start;
2050 	uint32_t msi_data_count;
2051 	uint32_t msi_irq_start;
2052 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2053 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
2054 
2055 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2056 					    &msi_data_count, &msi_data_start,
2057 					    &msi_irq_start);
2058 	if (ret)
2059 		return ret;
2060 
2061 	/* needs to match the ce_id -> irq data mapping
2062 	 * used in the srng parameter configuration
2063 	 */
2064 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2065 		unsigned int msi_data;
2066 
2067 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
2068 			continue;
2069 
2070 		if (!ce_sc->tasklets[ce_id].inited)
2071 			continue;
2072 
2073 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
2074 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2075 
2076 		hif_pci_ce_irq_remove_affinity_hint(irq);
2077 
2078 		hif_debug("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
2079 			  ce_id, msi_data, irq);
2080 
2081 		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
2082 	}
2083 
2084 	return ret;
2085 }
2086 
2087 static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2088 {
2089 	int i, j, irq;
2090 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2091 	struct hif_exec_context *hif_ext_group;
2092 
2093 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2094 		hif_ext_group = hif_state->hif_ext_group[i];
2095 		if (hif_ext_group->irq_requested) {
2096 			hif_ext_group->irq_requested = false;
2097 			for (j = 0; j < hif_ext_group->numirq; j++) {
2098 				irq = hif_ext_group->os_irq[j];
2099 				if (scn->irq_unlazy_disable)
2100 					irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
2101 				pfrm_free_irq(scn->qdf_dev->dev,
2102 					      irq, hif_ext_group);
2103 			}
2104 			hif_ext_group->numirq = 0;
2105 		}
2106 	}
2107 }
2108 
2109 /**
 * hif_pci_nointrs(): disable IRQ
 * @scn: struct hif_softc
 *
 * This function stops interrupt(s)
2115  *
2116  * Return: none
2117  */
2118 void hif_pci_nointrs(struct hif_softc *scn)
2119 {
2120 	int i, ret;
2121 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2122 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2123 
2124 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2125 
2126 	if (scn->request_irq_done == false)
2127 		return;
2128 
2129 	hif_pci_deconfigure_grp_irq(scn);
2130 
2131 	ret = hif_ce_srng_msi_free_irq(scn);
2132 	if (ret != -EINVAL) {
2133 		/* ce irqs freed in hif_ce_srng_msi_free_irq */
2134 
2135 		if (scn->wake_irq)
2136 			pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
2137 		scn->wake_irq = 0;
2138 	} else if (sc->num_msi_intrs > 0) {
2139 		/* MSI interrupt(s) */
2140 		for (i = 0; i < sc->num_msi_intrs; i++)
2141 			free_irq(sc->irq + i, sc);
2142 		sc->num_msi_intrs = 0;
2143 	} else {
2144 		/* Legacy PCI line interrupt
		 * Use sc->irq instead of sc->pdev->irq;
2146 		 * platform_device pdev doesn't have an irq field
2147 		 */
2148 		free_irq(sc->irq, sc);
2149 	}
2150 	scn->request_irq_done = false;
2151 }
2152 
2153 static inline
2154 bool hif_pci_default_link_up(struct hif_target_info *tgt_info)
2155 {
	return ADRASTEA_BU && (tgt_info->target_type != TARGET_TYPE_QCN7605);
}

2161 /**
 * hif_pci_disable_bus(): disable the PCI bus
 * @scn: hif context
 *
 * This function disables the bus
2167  *
2168  * Return: none
2169  */
2170 void hif_pci_disable_bus(struct hif_softc *scn)
2171 {
2172 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2173 	struct pci_dev *pdev;
2174 	void __iomem *mem;
2175 	struct hif_target_info *tgt_info = &scn->target_info;
2176 
2177 	/* Attach did not succeed, all resources have been
2178 	 * freed in error handler
2179 	 */
2180 	if (!sc)
2181 		return;
2182 
2183 	pdev = sc->pdev;
2184 	if (hif_pci_default_link_up(tgt_info)) {
2185 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2186 
2187 		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2188 		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
2189 			       HOST_GROUP0_MASK);
2190 	}
2191 
2192 #if defined(CPU_WARM_RESET_WAR)
2193 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2194 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2195 	 * verified for AR9888_REV1
2196 	 */
2197 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2198 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2199 		hif_pci_device_warm_reset(sc);
2200 	else
2201 		hif_pci_device_reset(sc);
2202 #else
2203 	hif_pci_device_reset(sc);
2204 #endif
2205 	mem = (void __iomem *)sc->mem;
2206 	if (mem) {
2207 		hif_dump_pipe_debug_count(scn);
2208 		if (scn->athdiag_procfs_inited) {
2209 			athdiag_procfs_remove();
2210 			scn->athdiag_procfs_inited = false;
2211 		}
2212 		sc->hif_pci_deinit(sc);
2213 		scn->mem = NULL;
2214 	}
2215 	hif_info("X");
2216 }
2217 
2218 #ifdef FEATURE_RUNTIME_PM
2219 /**
2220  * hif_pci_get_rpm_ctx() - Map corresponding hif_runtime_pm_ctx
2221  * @scn: hif context
2222  *
2223  * This function will map and return the corresponding
2224  * hif_runtime_pm_ctx based on pcie interface.
2225  *
2226  * Return: struct hif_runtime_pm_ctx pointer
2227  */
2228 struct hif_runtime_pm_ctx *hif_pci_get_rpm_ctx(struct hif_softc *scn)
2229 {
2230 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2231 
2232 	return &sc->rpm_ctx;
2233 }
2234 
2235 /**
2236  * hif_pci_get_dev() - Map corresponding device structure
2237  * @scn: hif context
2238  *
2239  * This function will map and return the corresponding
2240  * device structure based on pcie interface.
2241  *
2242  * Return: struct device pointer
2243  */
2244 struct device *hif_pci_get_dev(struct hif_softc *scn)
2245 {
2246 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2247 
2248 	return sc->dev;
2249 }
2250 #endif
2251 
2252 #define OL_ATH_PCI_PM_CONTROL 0x44
2253 
2254 #if defined(CONFIG_PCI_MSM)
2255 /**
 * hif_pci_prevent_linkdown(): prevent or allow PCIe link down
 * @scn: hif context
 * @flag: true prevents linkdown, false allows
2258  *
2259  * Calls into the platform driver to vote against taking down the
2260  * pcie link.
2261  *
2262  * Return: n/a
2263  */
2264 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2265 {
2266 	int errno;
2267 
2268 	hif_info("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2269 	hif_runtime_prevent_linkdown(scn, flag);
2270 
2271 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2272 	if (errno)
2273 		hif_err("Failed pld_wlan_pm_control; errno %d", errno);
2274 }
2275 #else
2276 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2277 {
2278 	hif_info("wlan: %s pcie power collapse", (flag ? "disable" : "enable"));
2279 	hif_runtime_prevent_linkdown(scn, flag);
2280 }
2281 #endif
2282 
2283 /**
 * hif_pci_bus_suspend(): prepare hif for suspend
 * @scn: hif context
 *
2286  * Return: Errno
2287  */
2288 int hif_pci_bus_suspend(struct hif_softc *scn)
2289 {
2290 	QDF_STATUS ret;
2291 
2292 	hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
2293 
2294 	ret = hif_try_complete_tasks(scn);
2295 	if (QDF_IS_STATUS_ERROR(ret)) {
2296 		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2297 		return -EBUSY;
2298 	}
2299 
2300 	/* Stop the HIF Sleep Timer */
2301 	hif_cancel_deferred_target_sleep(scn);
2302 
2303 	scn->bus_suspended = true;
2304 
2305 	return 0;
2306 }
2307 
2308 #ifdef PCI_LINK_STATUS_SANITY
2309 /**
 * __hif_check_link_status() - check whether the PCIe link is active
2311  * @scn: HIF Context
2312  *
2313  * API reads the PCIe config space to verify if PCIe link training is
2314  * successful or not.
2315  *
2316  * Return: Success/Failure
2317  */
2318 static int __hif_check_link_status(struct hif_softc *scn)
2319 {
2320 	uint16_t dev_id = 0;
2321 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2322 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2323 
2324 	if (!sc) {
2325 		hif_err("HIF Bus Context is Invalid");
2326 		return -EINVAL;
2327 	}
2328 
2329 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2330 
2331 	if (dev_id == sc->devid)
2332 		return 0;
2333 
2334 	hif_err("Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2335 	       dev_id);
2336 
2337 	scn->recovery = true;
2338 
2339 	if (cbk && cbk->set_recovery_in_progress)
2340 		cbk->set_recovery_in_progress(cbk->context, true);
2341 	else
2342 		hif_err("Driver Global Recovery is not set");
2343 
2344 	pld_is_pci_link_down(sc->dev);
2345 	return -EACCES;
2346 }
2347 #else
2348 static inline int __hif_check_link_status(struct hif_softc *scn)
2349 {
2350 	return 0;
2351 }
2352 #endif
2353 
2354 
2355 #ifdef HIF_BUS_LOG_INFO
2356 bool hif_log_pcie_info(struct hif_softc *scn, uint8_t *data,
2357 		       unsigned int *offset)
2358 {
2359 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2360 	struct hang_event_bus_info info = {0};
2361 	size_t size;
2362 
2363 	if (!sc) {
2364 		hif_err("HIF Bus Context is Invalid");
2365 		return false;
2366 	}
2367 
2368 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &info.dev_id);
2369 
2370 	size = sizeof(info);
2371 	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_BUS_INFO,
2372 			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);
2373 
2374 	if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
2375 		return false;
2376 
2377 	qdf_mem_copy(data + *offset, &info, size);
2378 	*offset = *offset + size;
2379 
2380 	if (info.dev_id == sc->devid)
2381 		return false;
2382 
2383 	qdf_recovery_reason_update(QCA_HANG_BUS_FAILURE);
2384 	qdf_get_bus_reg_dump(scn->qdf_dev->dev, data,
2385 			     (QDF_WLAN_HANG_FW_OFFSET - size));
2386 	return true;
2387 }
2388 #endif
2389 
2390 /**
 * hif_pci_bus_resume(): prepare hif for resume
 * @scn: hif context
 *
2393  * Return: Errno
2394  */
2395 int hif_pci_bus_resume(struct hif_softc *scn)
2396 {
2397 	int errno;
2398 
2399 	scn->bus_suspended = false;
2400 
2401 	errno = __hif_check_link_status(scn);
2402 	if (errno)
2403 		return errno;
2404 
2405 	hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2406 
2407 	return 0;
2408 }
2409 
2410 /**
2411  * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2412  * @scn: hif context
2413  *
 * Ensure that if we received the wakeup message before the irq
 * was disabled, the message is processed before suspending.
 *
 * Return: 0 on success
2418  */
2419 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2420 {
2421 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2422 		qdf_atomic_set(&scn->link_suspended, 1);
2423 
2424 	hif_apps_wake_irq_enable(GET_HIF_OPAQUE_HDL(scn));
2425 
2426 	return 0;
2427 }
2428 
2429 /**
 * hif_pci_bus_resume_noirq() - complete the noirq phase of resume
 * @scn: hif context
 *
 * Disable the wake irq and clear the link suspend flag, since the
 * PCIe link is already resumed by this time.
 *
 * Return: 0 on success
2437  */
2438 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2439 {
2440 	hif_apps_wake_irq_disable(GET_HIF_OPAQUE_HDL(scn));
2441 
2442 	/* a vote for link up can come in the middle of the ongoing resume
2443 	 * process. hence, clear the link suspend flag once
2444 	 * hif_bus_resume_noirq() succeeds since PCIe link is already resumed
2445 	 * by this time
2446 	 */
2447 	qdf_atomic_set(&scn->link_suspended, 0);
2448 
2449 	return 0;
2450 }
2451 
2452 #if CONFIG_PCIE_64BIT_MSI
2453 static void hif_free_msi_ctx(struct hif_softc *scn)
2454 {
2455 	struct hif_pci_softc *sc = scn->hif_sc;
2456 	struct hif_msi_info *info = &sc->msi_info;
2457 	struct device *dev = scn->qdf_dev->dev;
2458 
2459 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
2460 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
2461 	info->magic = NULL;
2462 	info->magic_dma = 0;
2463 }
2464 #else
2465 static void hif_free_msi_ctx(struct hif_softc *scn)
2466 {
2467 }
2468 #endif
2469 
2470 void hif_pci_disable_isr(struct hif_softc *scn)
2471 {
2472 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2473 
2474 	hif_exec_kill(&scn->osc);
2475 	hif_nointrs(scn);
2476 	hif_free_msi_ctx(scn);
2477 	/* Cancel the pending tasklet */
2478 	ce_tasklet_kill(scn);
2479 	tasklet_kill(&sc->intr_tq);
2480 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
2481 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
2482 }
2483 
2484 /* Function to reset SoC */
2485 void hif_pci_reset_soc(struct hif_softc *hif_sc)
2486 {
2487 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
2488 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
2489 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
2490 
2491 #if defined(CPU_WARM_RESET_WAR)
2492 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2493 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2494 	 * verified for AR9888_REV1
2495 	 */
2496 	if (tgt_info->target_version == AR9888_REV2_VERSION)
2497 		hif_pci_device_warm_reset(sc);
2498 	else
2499 		hif_pci_device_reset(sc);
2500 #else
2501 	hif_pci_device_reset(sc);
2502 #endif
2503 }
2504 
2505 #ifdef CONFIG_PCI_MSM
2506 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
2507 {
2508 	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
2509 	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
2510 }
2511 #else
static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}
2513 #endif
2514 
2515 /**
2516  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
2517  * @sc: HIF PCIe Context
2518  *
2519  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
2520  *
2521  * Return: Failure to caller
2522  */
2523 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
2524 {
2525 	uint16_t val = 0;
2526 	uint32_t bar = 0;
2527 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
2528 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2529 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
2530 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
2531 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2532 	A_target_id_t pci_addr = scn->mem;
2533 
2534 	hif_info("keep_awake_count = %d", hif_state->keep_awake_count);
2535 
2536 	pfrm_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
2537 
2538 	hif_info("PCI Vendor ID = 0x%04x", val);
2539 
2540 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
2541 
2542 	hif_info("PCI Device ID = 0x%04x", val);
2543 
2544 	pfrm_read_config_word(sc->pdev, PCI_COMMAND, &val);
2545 
2546 	hif_info("PCI Command = 0x%04x", val);
2547 
2548 	pfrm_read_config_word(sc->pdev, PCI_STATUS, &val);
2549 
2550 	hif_info("PCI Status = 0x%04x", val);
2551 
2552 	pfrm_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
2553 
2554 	hif_info("PCI BAR 0 = 0x%08x", bar);
2555 
2556 	hif_info("SOC_WAKE_ADDR 0%08x",
2557 		hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
2558 				PCIE_SOC_WAKE_ADDRESS));
2559 
2560 	hif_info("RTC_STATE_ADDR 0x%08x",
2561 		hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
2562 							RTC_STATE_ADDRESS));
2563 
2564 	hif_info("wakeup target");
2565 	hif_msm_pcie_debug_info(sc);
2566 
2567 	if (!cfg->enable_self_recovery)
2568 		QDF_BUG(0);
2569 
2570 	scn->recovery = true;
2571 
2572 	if (cbk->set_recovery_in_progress)
2573 		cbk->set_recovery_in_progress(cbk->context, true);
2574 
2575 	pld_is_pci_link_down(sc->dev);
2576 	return -EACCES;
2577 }
2578 
2579 /*
2580  * For now, we use simple on-demand sleep/wake.
2581  * Some possible improvements:
2582  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
2583  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
2584  *   Careful, though, these functions may be used by
2585  *  interrupt handlers ("atomic")
2586  *  -Don't use host_reg_table for this code; instead use values directly
2587  *  -Use a separate timer to track activity and allow Target to sleep only
2588  *   if it hasn't done anything for a while; may even want to delay some
2589  *   processing for a short while in order to "batch" (e.g.) transmit
2590  *   requests with completion processing into "windows of up time".  Costs
2591  *   some performance, but improves power utilization.
2592  *  -On some platforms, it might be possible to eliminate explicit
2593  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
2594  *   recover from the failure by forcing the Target awake.
2595  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
2596  *   overhead in some cases. Perhaps this makes more sense when
2597  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
2598  *   disabled.
2599  *  -It is possible to compile this code out and simply force the Target
2600  *   to remain awake.  That would yield optimal performance at the cost of
2601  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
2602  *
2603  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
2604  */
2605 /**
 * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
 * @scn: hif_softc pointer.
 * @sleep_ok: if true, allow the Target to sleep; if false, keep it awake
 * @wait_for_it: when waking, wait until the Target is verified awake
 *
 * Reference-counts keep-awake requests and adjusts the Target's sleep
 * state accordingly.
 *
 * Return: 0 on success, negative errno on failure
2614  */
2615 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
2616 			      bool sleep_ok, bool wait_for_it)
2617 {
2618 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2619 	A_target_id_t pci_addr = scn->mem;
2620 	static int max_delay;
2621 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	static int debug;

	if (scn->recovery)
2624 		return -EACCES;
2625 
2626 	if (qdf_atomic_read(&scn->link_suspended)) {
2627 		hif_err("Invalid access, PCIe link is down");
2628 		debug = true;
2629 		QDF_ASSERT(0);
2630 		return -EACCES;
2631 	}
2632 
2633 	if (debug) {
2634 		wait_for_it = true;
2635 		hif_err("Invalid access, PCIe link is suspended");
2636 		QDF_ASSERT(0);
2637 	}
2638 
2639 	if (sleep_ok) {
2640 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2641 		hif_state->keep_awake_count--;
2642 		if (hif_state->keep_awake_count == 0) {
2643 			/* Allow sleep */
2644 			hif_state->verified_awake = false;
2645 			hif_state->sleep_ticks = qdf_system_ticks();
2646 		}
2647 		if (hif_state->fake_sleep == false) {
2648 			/* Set the Fake Sleep */
2649 			hif_state->fake_sleep = true;
2650 
2651 			/* Start the Sleep Timer */
2652 			qdf_timer_stop(&hif_state->sleep_timer);
2653 			qdf_timer_start(&hif_state->sleep_timer,
2654 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
2655 		}
2656 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2657 	} else {
2658 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2659 
2660 		if (hif_state->fake_sleep) {
2661 			hif_state->verified_awake = true;
2662 		} else {
2663 			if (hif_state->keep_awake_count == 0) {
2664 				/* Force AWAKE */
2665 				hif_write32_mb(sc, pci_addr +
2666 					      PCIE_LOCAL_BASE_ADDRESS +
2667 					      PCIE_SOC_WAKE_ADDRESS,
2668 					      PCIE_SOC_WAKE_V_MASK);
2669 			}
2670 		}
2671 		hif_state->keep_awake_count++;
2672 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2673 
2674 		if (wait_for_it && !hif_state->verified_awake) {
#define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
2676 			int tot_delay = 0;
2677 			int curr_delay = 5;
2678 
2679 			for (;; ) {
2680 				if (hif_targ_is_awake(scn, pci_addr)) {
2681 					hif_state->verified_awake = true;
2682 					break;
2683 				}
2684 				if (!hif_pci_targ_is_present(scn, pci_addr))
2685 					break;
2686 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
2687 					return hif_log_soc_wakeup_timeout(sc);
2688 
2689 				OS_DELAY(curr_delay);
2690 				tot_delay += curr_delay;
2691 
2692 				if (curr_delay < 50)
2693 					curr_delay += 5;
2694 			}
2695 
2696 			/*
			 * NB: If Target has to come out of Deep Sleep,
			 * this may take a few msecs. Typically, though,
			 * this delay should be <30us.
2700 			 */
2701 			if (tot_delay > max_delay)
2702 				max_delay = tot_delay;
2703 		}
2704 	}
2705 
2706 	if (debug && hif_state->verified_awake) {
2707 		debug = 0;
2708 		hif_err("INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
2709 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2710 				PCIE_INTR_ENABLE_ADDRESS),
2711 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2712 				PCIE_INTR_CAUSE_ADDRESS),
2713 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2714 				CPU_INTR_ADDRESS),
2715 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2716 				PCIE_INTR_CLR_ADDRESS),
2717 			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
2718 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
2719 	}
2720 
2721 	return 0;
2722 }
2723 
2724 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2725 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
2726 {
2727 	uint32_t value;
2728 	void *addr;
2729 
2730 	addr = scn->mem + offset;
2731 	value = hif_read32_mb(scn, addr);
2732 
2733 	{
2734 		unsigned long irq_flags;
2735 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2736 
2737 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2738 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2739 		pcie_access_log[idx].is_write = false;
2740 		pcie_access_log[idx].addr = addr;
2741 		pcie_access_log[idx].value = value;
2742 		pcie_access_log_seqnum++;
2743 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2744 	}
2745 
2746 	return value;
2747 }
2748 
2749 void
2750 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
2751 {
2752 	void *addr;
2753 
2754 	addr = scn->mem + (offset);
2755 	hif_write32_mb(scn, addr, value);
2756 
2757 	{
2758 		unsigned long irq_flags;
2759 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2760 
2761 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2762 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2763 		pcie_access_log[idx].is_write = true;
2764 		pcie_access_log[idx].addr = addr;
2765 		pcie_access_log[idx].value = value;
2766 		pcie_access_log_seqnum++;
2767 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2768 	}
2769 }
2770 
2771 /**
2772  * hif_target_dump_access_log() - dump access log
2773  *
 * Dump the recorded PCIe register accesses and reset the log.
2775  *
2776  * Return: n/a
2777  */
2778 void hif_target_dump_access_log(void)
2779 {
2780 	int idx, len, start_idx, cur_idx;
2781 	unsigned long irq_flags;
2782 
2783 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2784 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
2785 		len = PCIE_ACCESS_LOG_NUM;
2786 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2787 	} else {
2788 		len = pcie_access_log_seqnum;
2789 		start_idx = 0;
2790 	}
2791 
2792 	for (idx = 0; idx < len; idx++) {
2793 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
2794 		hif_debug("idx:%d sn:%u wr:%d addr:%pK val:%u",
2795 		       idx,
2796 		       pcie_access_log[cur_idx].seqnum,
2797 		       pcie_access_log[cur_idx].is_write,
2798 		       pcie_access_log[cur_idx].addr,
2799 		       pcie_access_log[cur_idx].value);
2800 	}
2801 
2802 	pcie_access_log_seqnum = 0;
2803 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2804 }
2805 #endif
2806 
2807 #ifndef HIF_AHB
2808 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
2809 {
2810 	QDF_BUG(0);
2811 	return -EINVAL;
2812 }
2813 
2814 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
2815 {
2816 	QDF_BUG(0);
2817 	return -EINVAL;
2818 }
2819 #endif
2820 
2821 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
2822 {
	struct ce_tasklet_entry *tasklet_entry = context;

	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}

extern const char *ce_name[];
2827 
2828 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
2829 {
2830 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
2831 
2832 	return pci_scn->ce_msi_irq_num[ce_id];
2833 }
2834 
/**
 * hif_ce_srng_msi_irq_disable() - disable the irq for msi
 * @hif_sc: hif context
 * @ce_id: which ce to disable copy complete interrupts for
 *
 * Since MSI interrupts are not level based, the system can function
2840  * without disabling these interrupts.  Interrupt mitigation can be
2841  * added here for better system performance.
2842  */
2843 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
2844 {
2845 	pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
2846 				hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2847 }
2848 
2849 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
2850 {
2851 	if (__hif_check_link_status(hif_sc))
2852 		return;
2853 
2854 	pfrm_enable_irq(hif_sc->qdf_dev->dev,
2855 			hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2856 }
2857 
2858 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
2859 {
2860 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2861 }
2862 
2863 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
2864 {
2865 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2866 }
2867 
2868 int hif_ce_msi_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
2869 {
2870 	int ret = 0;
2871 	int irq;
2872 	uint32_t msi_data_start;
2873 	uint32_t msi_data_count;
2874 	unsigned int msi_data;
2875 	uint32_t msi_irq_start;
2876 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2877 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
2878 	int pci_slot;
2879 
2880 	if (ce_id >= CE_COUNT_MAX)
2881 		return -EINVAL;
2882 
2883 	/* do ce irq assignments */
2884 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2885 					  &msi_data_count, &msi_data_start,
2886 					  &msi_irq_start);
2887 
2888 	/* needs to match the ce_id -> irq data mapping
2889 	 * used in the srng parameter configuration
2890 	 */
2891 	pci_slot = hif_get_pci_slot(scn);
2892 	msi_data = (ce_id % msi_data_count) + msi_irq_start;
2893 	irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2894 	hif_debug("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)",
2895 		__func__, ce_id, msi_data, irq,
2896 		&ce_sc->tasklets[ce_id]);
2897 
2898 	/* implies the ce is also initialized */
2899 	if (!ce_sc->tasklets[ce_id].inited)
2900 		goto skip;
2901 
2902 	pci_sc->ce_msi_irq_num[ce_id] = irq;
2903 	ret = pfrm_request_irq(scn->qdf_dev->dev,
2904 			       irq, hif_ce_interrupt_handler, IRQF_SHARED,
2905 			       ce_irqname[pci_slot][ce_id],
2906 			       &ce_sc->tasklets[ce_id]);
2907 	if (ret)
2908 		return -EINVAL;
2909 
2910 skip:
2911 	return ret;
2912 }
2913 
2914 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
2915 {
2916 	int ret;
2917 	int ce_id, irq;
2918 	uint32_t msi_data_start;
2919 	uint32_t msi_data_count;
2920 	uint32_t msi_irq_start;
2921 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2922 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
2923 	int pci_slot;
2924 
2925 	if (!scn->disable_wake_irq) {
2926 		/* do wake irq assignment */
2927 		ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
2928 						  &msi_data_count,
2929 						  &msi_data_start,
2930 						  &msi_irq_start);
2931 		if (ret)
2932 			return ret;
2933 
2934 		scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev,
2935 						msi_irq_start);
2936 
2937 		ret = pfrm_request_irq(scn->qdf_dev->dev, scn->wake_irq,
2938 				       hif_wake_interrupt_handler,
2939 				       IRQF_NO_SUSPEND, "wlan_wake_irq", scn);
2940 
2941 		if (ret)
2942 			return ret;
2943 	}
2944 
2945 	/* do ce irq assignments */
2946 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2947 					  &msi_data_count, &msi_data_start,
2948 					  &msi_irq_start);
2949 	if (ret)
2950 		goto free_wake_irq;
2951 
2952 	if (ce_srng_based(scn)) {
2953 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
2954 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
2955 	} else {
2956 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
2957 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
2958 	}
2959 
2960 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
2961 
2962 	/* needs to match the ce_id -> irq data mapping
2963 	 * used in the srng parameter configuration
2964 	 */
2965 	pci_slot = hif_get_pci_slot(scn);
2966 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2967 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
2968 			continue;
2969 
2970 		if (host_ce_conf[ce_id].flags & CE_ATTR_INIT_ON_DEMAND)
2971 			continue;
2972 
2973 		ret = hif_ce_msi_configure_irq_by_ceid(scn, ce_id);
2974 		if (ret)
2975 			goto free_irq;
2976 	}
2977 
2978 	return ret;
2979 
2980 free_irq:
2981 	/* the request_irq for the last ce_id failed so skip it. */
2982 	while (ce_id > 0 && ce_id < scn->ce_count) {
2983 		unsigned int msi_data;
2984 
2985 		ce_id--;
2986 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
2987 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2988 		pfrm_free_irq(scn->qdf_dev->dev,
2989 			      irq, &ce_sc->tasklets[ce_id]);
2990 	}
2991 
2992 free_wake_irq:
2993 	if (!scn->disable_wake_irq) {
		/* must free with the same dev_id used in pfrm_request_irq */
		pfrm_free_irq(scn->qdf_dev->dev,
			      scn->wake_irq, scn);
2996 		scn->wake_irq = 0;
2997 	}
2998 
2999 	return ret;
3000 }
3001 
3002 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3003 {
3004 	int i;
3005 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
3006 
3007 	for (i = 0; i < hif_ext_group->numirq; i++)
3008 		pfrm_disable_irq_nosync(scn->qdf_dev->dev,
3009 					hif_ext_group->os_irq[i]);
3010 }
3011 
3012 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3013 {
3014 	int i;
3015 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
3016 
3017 	for (i = 0; i < hif_ext_group->numirq; i++)
3018 		pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]);
3019 }
3020 
3021 /**
 * hif_pci_get_irq_name() - get irq name
 * @irq_no: irq number
 *
 * This function maps an irq number to an irq name.
3027  *
3028  * Return: irq name
3029  */
3030 const char *hif_pci_get_irq_name(int irq_no)
3031 {
3032 	return "pci-dummy";
3033 }
3034 
3035 #ifdef HIF_CPU_PERF_AFFINE_MASK
3036 void hif_pci_irq_set_affinity_hint(
3037 	struct hif_exec_context *hif_ext_group)
3038 {
3039 	int i, ret;
3040 	unsigned int cpus;
3041 	bool mask_set = false;
3042 
3043 	for (i = 0; i < hif_ext_group->numirq; i++)
3044 		qdf_cpumask_clear(&hif_ext_group->new_cpu_mask[i]);
3045 
3046 	for (i = 0; i < hif_ext_group->numirq; i++) {
3047 		qdf_for_each_online_cpu(cpus) {
3048 			if (qdf_topology_physical_package_id(cpus) ==
3049 				CPU_CLUSTER_TYPE_PERF) {
3050 				qdf_cpumask_set_cpu(cpus,
3051 						    &hif_ext_group->
3052 						    new_cpu_mask[i]);
3053 				mask_set = true;
3054 			}
3055 		}
3056 	}
3057 	for (i = 0; i < hif_ext_group->numirq; i++) {
3058 		if (mask_set) {
3059 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3060 						  IRQ_NO_BALANCING, 0);
3061 			ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i],
3062 						       (struct qdf_cpu_mask *)
3063 						       &hif_ext_group->
3064 						       new_cpu_mask[i]);
3065 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3066 						  0, IRQ_NO_BALANCING);
3067 			if (ret)
3068 				qdf_err("Set affinity %*pbl fails for IRQ %d ",
3069 					qdf_cpumask_pr_args(&hif_ext_group->
3070 							    new_cpu_mask[i]),
3071 					hif_ext_group->os_irq[i]);
3072 			else
3073 				qdf_debug("Set affinity %*pbl for IRQ: %d",
3074 					  qdf_cpumask_pr_args(&hif_ext_group->
3075 							      new_cpu_mask[i]),
3076 					  hif_ext_group->os_irq[i]);
3077 		} else {
3078 			qdf_err("Offline CPU: Set affinity fails for IRQ: %d",
3079 				hif_ext_group->os_irq[i]);
3080 		}
3081 	}
3082 }
3083 
3084 void hif_pci_ce_irq_set_affinity_hint(
3085 	struct hif_softc *scn)
3086 {
3087 	int ret;
3088 	unsigned int cpus;
3089 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3090 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3091 	struct CE_attr *host_ce_conf;
3092 	int ce_id;
3093 	qdf_cpu_mask ce_cpu_mask;
3094 
3095 	host_ce_conf = ce_sc->host_ce_config;
3096 	qdf_cpumask_clear(&ce_cpu_mask);
3097 
3098 	qdf_for_each_online_cpu(cpus) {
3099 		if (qdf_topology_physical_package_id(cpus) ==
3100 			CPU_CLUSTER_TYPE_PERF) {
3101 			qdf_cpumask_set_cpu(cpus,
3102 					    &ce_cpu_mask);
		} else {
			hif_err_rl("Unable to set cpu mask for offline CPU %d",
				   cpus);
3106 		}
3107 	}
3108 	if (qdf_cpumask_empty(&ce_cpu_mask)) {
3109 		hif_err_rl("Empty cpu_mask, unable to set CE IRQ affinity");
3110 		return;
3111 	}
3112 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3113 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3114 			continue;
3115 		qdf_cpumask_clear(&pci_sc->ce_irq_cpu_mask[ce_id]);
3116 		qdf_cpumask_copy(&pci_sc->ce_irq_cpu_mask[ce_id],
3117 				 &ce_cpu_mask);
3118 		qdf_dev_modify_irq_status(pci_sc->ce_msi_irq_num[ce_id],
3119 					  IRQ_NO_BALANCING, 0);
3120 		ret = qdf_dev_set_irq_affinity(
3121 			pci_sc->ce_msi_irq_num[ce_id],
3122 			(struct qdf_cpu_mask *)&pci_sc->ce_irq_cpu_mask[ce_id]);
3123 		qdf_dev_modify_irq_status(pci_sc->ce_msi_irq_num[ce_id],
3124 					  0, IRQ_NO_BALANCING);
3125 		if (ret)
3126 			hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
3127 				   qdf_cpumask_pr_args(
3128 					&pci_sc->ce_irq_cpu_mask[ce_id]),
3129 				   pci_sc->ce_msi_irq_num[ce_id]);
3130 		else
3131 			hif_debug_rl("Set affinity %*pbl for CE IRQ: %d",
3132 				     qdf_cpumask_pr_args(
3133 					&pci_sc->ce_irq_cpu_mask[ce_id]),
3134 				     pci_sc->ce_msi_irq_num[ce_id]);
3135 	}
3136 }
3137 #endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */
3138 
3139 void hif_pci_config_irq_affinity(struct hif_softc *scn)
3140 {
3141 	int i;
3142 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3143 	struct hif_exec_context *hif_ext_group;
3144 
3145 	hif_core_ctl_set_boost(true);
3146 	/* Set IRQ affinity for WLAN DP interrupts*/
3147 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3148 		hif_ext_group = hif_state->hif_ext_group[i];
3149 		hif_pci_irq_set_affinity_hint(hif_ext_group);
3150 	}
3151 	/* Set IRQ affinity for CE interrupts*/
3152 	hif_pci_ce_irq_set_affinity_hint(scn);
3153 }
3154 
3155 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3156 			      struct hif_exec_context *hif_ext_group)
3157 {
3158 	int ret = 0;
3159 	int irq = 0;
3160 	int j;
3161 	int pci_slot;
3162 
3163 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3164 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3165 	hif_ext_group->irq_name = &hif_pci_get_irq_name;
3166 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3167 
3168 	pci_slot = hif_get_pci_slot(scn);
3169 	for (j = 0; j < hif_ext_group->numirq; j++) {
3170 		irq = hif_ext_group->irq[j];
3171 		if (scn->irq_unlazy_disable)
3172 			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
3173 		hif_debug("request_irq = %d for grp %d",
3174 			  irq, hif_ext_group->grp_id);
3175 		ret = pfrm_request_irq(
3176 				scn->qdf_dev->dev, irq,
3177 				hif_ext_group_interrupt_handler,
3178 				IRQF_SHARED | IRQF_NO_SUSPEND,
3179 				dp_irqname[pci_slot][hif_ext_group->grp_id],
3180 				hif_ext_group);
3181 		if (ret) {
3182 			hif_err("request_irq failed ret = %d", ret);
3183 			return -EFAULT;
3184 		}
3185 		hif_ext_group->os_irq[j] = irq;
3186 	}
3187 	hif_ext_group->irq_requested = true;
3188 	return 0;
3189 }
3190 
3191 #if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490))
3192 uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
3193 			    uint32_t offset)
3194 {
3195 	return hal_read32_mb(hif_sc->hal_soc, offset);
3196 }
3197 
3198 void hif_pci_reg_write32(struct hif_softc *hif_sc,
3199 			 uint32_t offset,
3200 			 uint32_t value)
3201 {
3202 	hal_write32_mb(hif_sc->hal_soc, offset, value);
3203 }
3204 #else
3205 /* TODO: Need to implement other chips carefully */
3206 uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
3207 			    uint32_t offset)
3208 {
3209 	return 0;
3210 }
3211 
3212 void hif_pci_reg_write32(struct hif_softc *hif_sc,
3213 			 uint32_t offset,
3214 			 uint32_t value)
3215 {
3216 }
3217 #endif
3218 
3219 /**
 * hif_configure_irq() - configure interrupt
 * @scn: hif context
 *
 * This function configures interrupt(s). MSI configuration is tried
 * first; on failure it falls back to AHB or legacy PCI interrupts
 * depending on the target type.
 *
 * Return: 0 - for success
3228  */
3229 int hif_configure_irq(struct hif_softc *scn)
3230 {
3231 	int ret = 0;
3232 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3233 
3234 	hif_info("E");
3235 
3236 	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
3237 		scn->request_irq_done = false;
3238 		return 0;
3239 	}
3240 
3241 	hif_init_reschedule_tasklet_work(sc);
3242 
3243 	ret = hif_ce_msi_configure_irq(scn);
	if (ret == 0)
		goto end;
3247 
3248 	switch (scn->target_info.target_type) {
3249 	case TARGET_TYPE_IPQ4019:
3250 		ret = hif_ahb_configure_legacy_irq(sc);
3251 		break;
3252 	case TARGET_TYPE_QCA8074:
3253 	case TARGET_TYPE_QCA8074V2:
3254 	case TARGET_TYPE_QCA6018:
3255 	case TARGET_TYPE_QCA5018:
3256 		ret = hif_ahb_configure_irq(sc);
3257 		break;
3258 	default:
3259 		ret = hif_pci_configure_legacy_irq(sc);
3260 		break;
3261 	}
3262 	if (ret < 0) {
3263 		hif_err("hif_pci_configure_legacy_irq error = %d", ret);
3264 		return ret;
3265 	}
3266 end:
3267 	scn->request_irq_done = true;
3268 	return 0;
3269 }
3270 
3271 /**
 * hif_trigger_timer_irq(): trigger an interrupt on LF_TIMER 0
 * @scn: hif control structure
 *
 * Sets the IRQ bit in the LF Timer Status Address to wake a
 * peregrine/swift target stuck in a polling loop in pcie_address_config
 * in FW
3277  *
3278  * Return: none
3279  */
3280 static void hif_trigger_timer_irq(struct hif_softc *scn)
3281 {
3282 	int tmp;
3283 	/* Trigger IRQ on Peregrine/Swift by setting
3284 	 * IRQ Bit of LF_TIMER 0
3285 	 */
3286 	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3287 						SOC_LF_TIMER_STATUS0_ADDRESS));
3288 	/* Set Raw IRQ Bit */
3289 	tmp |= 1;
3290 	/* SOC_LF_TIMER_STATUS0 */
3291 	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3292 		       SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
3293 }
3294 
3295 /**
 * hif_target_sync(): ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interrupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the target's other registers for the first time.
3303  *
3304  * Return: none
3305  */
3306 static void hif_target_sync(struct hif_softc *scn)
3307 {
3308 	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3309 			    PCIE_INTR_ENABLE_ADDRESS),
3310 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3311 	/* read to flush pcie write */
3312 	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3313 			PCIE_INTR_ENABLE_ADDRESS));
3314 
3315 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3316 			PCIE_SOC_WAKE_ADDRESS,
3317 			PCIE_SOC_WAKE_V_MASK);
3318 	while (!hif_targ_is_awake(scn, scn->mem))
3319 		;
3320 
3321 	if (HAS_FW_INDICATOR) {
3322 		int wait_limit = 500;
3323 		int fw_ind = 0;
3324 		int retry_count = 0;
3325 		uint32_t target_type = scn->target_info.target_type;
3326 fw_retry:
3327 		hif_info("Loop checking FW signal");
3328 		while (1) {
3329 			fw_ind = hif_read32_mb(scn, scn->mem +
3330 					FW_INDICATOR_ADDRESS);
3331 			if (fw_ind & FW_IND_INITIALIZED)
3332 				break;
3333 			if (wait_limit-- < 0)
3334 				break;
3335 			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3336 			    PCIE_INTR_ENABLE_ADDRESS),
3337 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3338 			    /* read to flush pcie write */
3339 			(void)hif_read32_mb(scn, scn->mem +
3340 			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
3341 
3342 			qdf_mdelay(10);
3343 		}
3344 		if (wait_limit < 0) {
3345 			if (target_type == TARGET_TYPE_AR9888 &&
3346 			    retry_count++ < 2) {
3347 				hif_trigger_timer_irq(scn);
3348 				wait_limit = 500;
3349 				goto fw_retry;
3350 			}
3351 			hif_info("FW signal timed out");
3352 			qdf_assert_always(0);
3353 		} else {
3354 			hif_info("Got FW signal, retries = %x", 500-wait_limit);
3355 		}
3356 	}
3357 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3358 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3359 }
3360 
3361 static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
3362 				     struct device *dev)
3363 {
3364 	struct pld_soc_info info;
3365 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
3366 
3367 	pld_get_soc_info(dev, &info);
3368 	sc->mem = info.v_addr;
3369 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3370 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3371 	scn->target_info.target_version = info.soc_id;
3372 	scn->target_info.target_revision = 0;
3373 }
3374 
3375 static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
3376 				       struct device *dev)
3377 {}
3378 
3379 static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
3380 				    int device_id)
3381 {
3382 	if (!pld_have_platform_driver_support(sc->dev))
3383 		return false;
3384 
3385 	switch (device_id) {
3386 	case QCA6290_DEVICE_ID:
3387 	case QCN9000_DEVICE_ID:
3388 	case QCA6290_EMULATION_DEVICE_ID:
3389 	case QCA6390_DEVICE_ID:
3390 	case QCA6490_DEVICE_ID:
3391 	case AR6320_DEVICE_ID:
3392 	case QCN7605_DEVICE_ID:
3393 		return true;
3394 	}
3395 	return false;
3396 }
3397 
3398 static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
3399 					   int device_id)
3400 {
3401 	if (hif_is_pld_based_target(sc, device_id)) {
3402 		sc->hif_enable_pci = hif_enable_pci_pld;
3403 		sc->hif_pci_deinit = hif_pci_deinit_pld;
3404 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
3405 	} else {
3406 		sc->hif_enable_pci = hif_enable_pci_nopld;
3407 		sc->hif_pci_deinit = hif_pci_deinit_nopld;
3408 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
3409 	}
3410 }
3411 
3412 #ifdef HIF_REG_WINDOW_SUPPORT
3413 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3414 					       u32 target_type)
3415 {
3416 	switch (target_type) {
3417 	case TARGET_TYPE_QCN7605:
3418 		sc->use_register_windowing = true;
3419 		qdf_spinlock_create(&sc->register_access_lock);
3420 		sc->register_window = 0;
3421 		break;
3422 	default:
3423 		sc->use_register_windowing = false;
3424 	}
3425 }
3426 #else
3427 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3428 					       u32 target_type)
3429 {
3430 	sc->use_register_windowing = false;
3431 }
3432 #endif
3433 
3434 /**
 * hif_pci_enable_bus(): enable the PCI bus
 * @ol_sc: soft_sc struct
 * @dev: device pointer
 * @bdev: bus dev pointer
 * @bid: bus id pointer
 * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
 *
 * This function enables the bus.
 *
3444  * Return: QDF_STATUS
3445  */
3446 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3447 			  struct device *dev, void *bdev,
3448 			  const struct hif_bus_id *bid,
3449 			  enum hif_enable_type type)
3450 {
3451 	int ret = 0;
3452 	uint32_t hif_type;
3453 	uint32_t target_type = TARGET_TYPE_UNKNOWN;
3454 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3455 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3456 	uint16_t revision_id = 0;
3457 	int probe_again = 0;
3458 	struct pci_dev *pdev = bdev;
3459 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3460 	struct hif_target_info *tgt_info;
3461 
3462 	if (!ol_sc) {
3463 		hif_err("hif_ctx is NULL");
3464 		return QDF_STATUS_E_NOMEM;
3465 	}
3466 	/* Following print is used by various tools to identify
3467 	 * WLAN SOC (e.g. crash dump analysis and reporting tool).
3468 	 */
3469 	hif_info("con_mode = 0x%x, WLAN_SOC_device_id = 0x%x",
3470 		 hif_get_conparam(ol_sc), id->device);
3471 
3472 	sc->pdev = pdev;
3473 	sc->dev = &pdev->dev;
3474 	sc->devid = id->device;
3475 	sc->cacheline_sz = dma_get_cache_alignment();
3476 	tgt_info = hif_get_target_info_handle(hif_hdl);
3477 	hif_pci_init_deinit_ops_attach(sc, id->device);
3478 	sc->hif_pci_get_soc_info(sc, dev);
3479 again:
3480 	ret = sc->hif_enable_pci(sc, pdev, id);
3481 	if (ret < 0) {
3482 		hif_err("hif_enable_pci error = %d", ret);
3483 		goto err_enable_pci;
3484 	}
3485 	hif_info("hif_enable_pci done");
3486 
3487 	/* Temporary FIX: disable ASPM on peregrine.
3488 	 * Will be removed after the OTP is programmed
3489 	 */
3490 	hif_disable_power_gating(hif_hdl);
3491 
3492 	device_disable_async_suspend(&pdev->dev);
3493 	pfrm_read_config_word(pdev, 0x08, &revision_id);
3494 
3495 	ret = hif_get_device_type(id->device, revision_id,
3496 						&hif_type, &target_type);
3497 	if (ret < 0) {
3498 		hif_err("Invalid device id/revision_id");
3499 		goto err_tgtstate;
3500 	}
3501 	hif_info("hif_type = 0x%x, target_type = 0x%x",
3502 		hif_type, target_type);
3503 
3504 	hif_register_tbl_attach(ol_sc, hif_type);
3505 	hif_target_register_tbl_attach(ol_sc, target_type);
3506 
3507 	hif_pci_init_reg_windowing_support(sc, target_type);
3508 
3509 	tgt_info->target_type = target_type;
3510 
3511 	/*
	 * Disable unlazy interrupt registration for QCN9000
3513 	 */
3514 	if (target_type == TARGET_TYPE_QCN9000)
3515 		ol_sc->irq_unlazy_disable = 1;
3516 
3517 	if (ce_srng_based(ol_sc)) {
3518 		hif_info("Skip tgt_wake up for srng devices");
3519 	} else {
3520 		ret = hif_pci_probe_tgt_wakeup(sc);
3521 		if (ret < 0) {
3522 			hif_err("hif_pci_prob_wakeup error = %d", ret);
3523 			if (ret == -EAGAIN)
3524 				probe_again++;
3525 			goto err_tgtstate;
3526 		}
3527 		hif_info("hif_pci_probe_tgt_wakeup done");
3528 	}
3529 
3530 	if (!ol_sc->mem_pa) {
3531 		hif_err("BAR0 uninitialized");
3532 		ret = -EIO;
3533 		goto err_tgtstate;
3534 	}
3535 
3536 	if (!ce_srng_based(ol_sc)) {
3537 		hif_target_sync(ol_sc);
3538 
3539 		if (hif_pci_default_link_up(tgt_info))
3540 			hif_vote_link_up(hif_hdl);
3541 	}
3542 
3543 	return QDF_STATUS_SUCCESS;
3544 
3545 err_tgtstate:
3546 	hif_disable_pci(sc);
3547 	sc->pci_enabled = false;
3548 	hif_err("hif_disable_pci done");
3549 	return QDF_STATUS_E_ABORTED;
3550 
3551 err_enable_pci:
3552 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3553 		int delay_time;
3554 
3555 		hif_info("pci reprobe");
3556 		/* 10, 40, 90, 100, 100, ... */
3557 		delay_time = max(100, 10 * (probe_again * probe_again));
3558 		qdf_mdelay(delay_time);
3559 		goto again;
3560 	}
3561 	return qdf_status_from_os_return(ret);
3562 }
3563 
3564 /**
3565  * hif_pci_irq_enable() - ce_irq_enable
3566  * @scn: hif_softc
3567  * @ce_id: ce_id
3568  *
3569  * Return: void
3570  */
3571 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3572 {
3573 	uint32_t tmp = 1 << ce_id;
3574 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3575 
3576 	qdf_spin_lock_irqsave(&sc->irq_lock);
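	/* clear this CE's bit in the pending summary; only when no CE is
	 * pending (summary == 0) is the legacy line re-enabled below
	 */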
3577 	scn->ce_irq_summary &= ~tmp;
3578 	if (scn->ce_irq_summary == 0) {
3579 		/* Enable Legacy PCI line interrupts */
3580 		if (LEGACY_INTERRUPTS(sc) &&
3581 			(scn->target_status != TARGET_STATUS_RESET) &&
3582 			(!qdf_atomic_read(&scn->link_suspended))) {
3583 
3584 			hif_write32_mb(scn, scn->mem +
3585 				(SOC_CORE_BASE_ADDRESS |
3586 				PCIE_INTR_ENABLE_ADDRESS),
3587 				HOST_GROUP0_MASK);
3588 
3589 			hif_read32_mb(scn, scn->mem +
3590 					(SOC_CORE_BASE_ADDRESS |
3591 					PCIE_INTR_ENABLE_ADDRESS));
3592 		}
3593 	}
3594 	if (scn->hif_init_done == true)
3595 		Q_TARGET_ACCESS_END(scn);
3596 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
3597 
3598 	/* check for missed firmware crash */
3599 	hif_fw_interrupt_handler(0, scn);
3600 }
3601 
3602 /**
3603  * hif_pci_irq_disable() - ce_irq_disable
3604  * @scn: hif_softc
3605  * @ce_id: ce_id
3606  *
3607  * only applicable to legacy copy engine...
3608  *
3609  * Return: void
3610  */
3611 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3612 {
3613 	/* For Rome only need to wake up target */
3614 	/* target access is maintained until interrupts are re-enabled */
3615 	Q_TARGET_ACCESS_BEGIN(scn);
3616 }
3617 
3618 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3619 {
3620 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3621 
3622 	/* legacy case only has one irq */
3623 	return pci_scn->irq;
3624 }
3625 
3626 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
3627 {
3628 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3629 	struct hif_target_info *tgt_info;
3630 
3631 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
3632 
3633 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
3634 	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
3635 	    tgt_info->target_type == TARGET_TYPE_QCA6490 ||
3636 	    tgt_info->target_type == TARGET_TYPE_QCN7605 ||
3637 	    tgt_info->target_type == TARGET_TYPE_QCA8074) {
3638 		/*
3639 		 * Need to consider offset's memtype for QCA6290/QCA8074,
3640 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
3641 		 * well initialized/defined.
3642 		 */
3643 		return 0;
3644 	}
3645 
	if ((offset >= DRAM_BASE_ADDRESS &&
	     offset <= DRAM_BASE_ADDRESS + DRAM_SIZE) ||
	    (offset + sizeof(unsigned int) <= sc->mem_len))
		return 0;
3650 
3651 	hif_info("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)",
3652 		offset, (uint32_t)(offset + sizeof(unsigned int)),
3653 		sc->mem_len);
3654 
3655 	return -EINVAL;
3656 }
3657 
3658 /**
3659  * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
3660  * @scn: hif context
3661  *
3662  * Return: true if soc needs driver bmi otherwise false
3663  */
3664 bool hif_pci_needs_bmi(struct hif_softc *scn)
3665 {
3666 	return !ce_srng_based(scn);
3667 }
3668 
3669 #ifdef FORCE_WAKE
3670 #ifdef DEVICE_FORCE_WAKE_ENABLE
3671 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
3672 {
3673 	uint32_t timeout, value;
3674 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
3675 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3676 
3677 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
3678 
3679 	if (qdf_in_interrupt())
3680 		timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
3681 	else
3682 		timeout = 0;
3683 
3684 	if (pld_force_wake_request_sync(scn->qdf_dev->dev, timeout)) {
3685 		hif_err("force wake request send failed");
3686 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
3687 		return -EINVAL;
3688 	}
3689 
3690 	/* If the device's M1 state-change event races here, it can be
3691 	 * ignored, as the device is expected to move immediately from M2
3692 	 * to M0 without entering a low power state.
3693 	 */
3694 	if (!pld_is_device_awake(scn->qdf_dev->dev))
3695 		hif_info("state-change event races, ignore");
3696 
3697 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
3698 	hif_write32_mb(scn,
3699 		       scn->mem +
3700 		       PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG,
3701 		       0);
3702 	hif_write32_mb(scn,
3703 		       scn->mem +
3704 		       PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG,
3705 		       1);
3706 
3707 	HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1);
3708 	/*
3709 	 * Reuse 'timeout' as the elapsed-time counter for the handshake;
3710 	 * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME < 50 ms
3711 	 */
3712 	timeout = 0;
3713 	do {
3714 		value =
3715 		hif_read32_mb(scn,
3716 			      scn->mem +
3717 			      PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG);
3718 		if (value)
3719 			break;
3720 		qdf_mdelay(FORCE_WAKE_DELAY_MS);
3721 		timeout += FORCE_WAKE_DELAY_MS;
3722 	} while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS);
3723 
3724 	if (!value) {
3725 		hif_err("force wake handshake failed");
3726 		HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1);
3727 		return -ETIMEDOUT;
3728 	}
3729 
3730 	HIF_STATS_INC(pci_scn, soc_force_wake_success, 1);
3731 	return 0;
3732 }
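
/*
 * Handshake summary for the function above: the host clears the
 * SCRATCH_0 register, asserts SOC_WAKE, then polls SCRATCH_0 every
 * FORCE_WAKE_DELAY_MS until the target writes a non-zero value or
 * FORCE_WAKE_DELAY_TIMEOUT_MS elapses, at which point -ETIMEDOUT is
 * returned and soc_force_wake_failure is incremented.
 */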
3733 
3734 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
3735 {
3736 	int ret;
3737 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
3738 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3739 
3740 	ret = pld_force_wake_release(scn->qdf_dev->dev);
3741 	if (ret) {
3742 		hif_err("force wake release failure");
3743 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
3744 		return ret;
3745 	}
3746 
3747 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
3748 	hif_write32_mb(scn,
3749 		       scn->mem +
3750 		       PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG,
3751 		       0);
3752 	HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1);
3753 	return 0;
3754 }
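
/*
 * Typical usage (illustrative sketch; 'reg_offset' is a placeholder
 * for any register that requires the target to be awake):
 *
 *	if (hif_force_wake_request(hif_hdl))
 *		return;
 *	val = hif_read32_mb(scn, scn->mem + reg_offset);
 *	hif_force_wake_release(hif_hdl);
 */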
3755 
3756 #else /* DEVICE_FORCE_WAKE_ENABLE */
3757 /**
3758  * hif_force_wake_request() - MHI force wake vote, no scratch handshake
3759  * @hif_handle: HIF opaque context
3760  * Return: 0 on success, negative errno on failure
3761  */
3762 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
3763 {
3764 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
3765 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3766 	uint32_t timeout;
3767 
3768 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
3769 
3770 	if (qdf_in_interrupt())
3771 		timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
3772 	else
3773 		timeout = 0;
3774 
3775 	if (pld_force_wake_request_sync(scn->qdf_dev->dev, timeout)) {
3776 		hif_err("force wake request send failed");
3777 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
3778 		return -EINVAL;
3779 	}
3780 
3781 	/* If the device's M1 state-change event races here, it can be
3782 	 * ignored, as the device is expected to move immediately from M2
3783 	 * to M0 without entering a low power state.
3784 	 */
3785 	if (!pld_is_device_awake(scn->qdf_dev->dev))
3786 		hif_info("state-change event races, ignore");
3787 
3788 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
3789 
3790 	return 0;
3791 }
3792 
3793 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
3794 {
3795 	int ret;
3796 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
3797 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3798 
3799 	ret = pld_force_wake_release(scn->qdf_dev->dev);
3800 	if (ret) {
3801 		hif_err("force wake release failure");
3802 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
3803 		return ret;
3804 	}
3805 
3806 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
3807 	return 0;
3808 }
3809 #endif /* DEVICE_FORCE_WAKE_ENABLE */
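
/*
 * Both build variants share the MHI vote/release accounting, so the
 * counters printed by hif_print_pci_stats() below are meaningful in
 * either configuration; in the MHI-only build the soc_force_wake_*
 * counters simply remain zero.
 */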
3810 
3811 void hif_print_pci_stats(struct hif_pci_softc *pci_handle)
3812 {
3813 	hif_debug("mhi_force_wake_request_vote: %d",
3814 		  pci_handle->stats.mhi_force_wake_request_vote);
3815 	hif_debug("mhi_force_wake_failure: %d",
3816 		  pci_handle->stats.mhi_force_wake_failure);
3817 	hif_debug("mhi_force_wake_success: %d",
3818 		  pci_handle->stats.mhi_force_wake_success);
3819 	hif_debug("soc_force_wake_register_write_success: %d",
3820 		  pci_handle->stats.soc_force_wake_register_write_success);
3821 	hif_debug("soc_force_wake_failure: %d",
3822 		  pci_handle->stats.soc_force_wake_failure);
3823 	hif_debug("soc_force_wake_success: %d",
3824 		  pci_handle->stats.soc_force_wake_success);
3825 	hif_debug("mhi_force_wake_release_failure: %d",
3826 		  pci_handle->stats.mhi_force_wake_release_failure);
3827 	hif_debug("mhi_force_wake_release_success: %d",
3828 		  pci_handle->stats.mhi_force_wake_release_success);
3829 	hif_debug("soc_force_wake_release_success: %d",
3830 		  pci_handle->stats.soc_force_wake_release_success);
3831 }
3832 #endif /* FORCE_WAKE */
3833 
3834 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
3835 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
3836 {
3837 	return pld_prevent_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
3838 }
3839 
3840 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
3841 {
3842 	pld_allow_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
3843 }
3844 #endif /* FEATURE_HAL_DELAYED_REG_WRITE */
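
/*
 * The two helpers above are expected to be called in pairs around
 * delayed register writes: pld_prevent_l1() keeps the PCIe link out
 * of the L1 low power state so the write is not stalled by link
 * wakeup, and pld_allow_l1() drops that vote afterwards.
 */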
3845