xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/pcie/if_pci.c (revision dd5f5c1afa4ab969b68717be955752f19527fb17)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <linux/pci.h>
21 #include <linux/slab.h>
22 #include <linux/interrupt.h>
23 #include <linux/if_arp.h>
24 #include <linux/of_pci.h>
25 #include <linux/version.h>
26 #include "hif_io32.h"
27 #include "if_pci.h"
28 #include "hif.h"
29 #include "target_type.h"
30 #include "hif_main.h"
31 #include "ce_main.h"
32 #include "ce_api.h"
33 #include "ce_internal.h"
34 #include "ce_reg.h"
35 #include "ce_bmi.h"
36 #include "regtable.h"
37 #include "hif_hw_version.h"
38 #include <linux/debugfs.h>
39 #include <linux/seq_file.h>
40 #include "qdf_status.h"
41 #include "qdf_atomic.h"
42 #include "qdf_platform.h"
43 #include "pld_common.h"
44 #include "mp_dev.h"
45 #include "hif_debug.h"
46 
47 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
48 char *legacy_ic_irqname[] = {
49 "ce0",
50 "ce1",
51 "ce2",
52 "ce3",
53 "ce4",
54 "ce5",
55 "ce6",
56 "ce7",
57 "ce8",
58 "ce9",
59 "ce10",
60 "ce11",
61 "ce12",
62 "ce13",
63 "ce14",
64 "ce15",
65 "reo2sw8_intr2",
66 "reo2sw7_intr2",
67 "reo2sw6_intr2",
68 "reo2sw5_intr2",
69 "reo2sw4_intr2",
70 "reo2sw3_intr2",
71 "reo2sw2_intr2",
72 "reo2sw1_intr2",
73 "reo2sw0_intr2",
74 "reo2sw8_intr",
75 "reo2sw7_intr",
76 "reo2sw6_inrr",
77 "reo2sw5_intr",
78 "reo2sw4_intr",
79 "reo2sw3_intr",
80 "reo2sw2_intr",
81 "reo2sw1_intr",
82 "reo2sw0_intr",
83 "reo2status_intr2",
84 "reo_status",
85 "reo2rxdma_out_2",
86 "reo2rxdma_out_1",
87 "reo_cmd",
88 "sw2reo6",
89 "sw2reo5",
90 "sw2reo1",
91 "sw2reo",
92 "rxdma2reo_mlo_0_dst_ring1",
93 "rxdma2reo_mlo_0_dst_ring0",
94 "rxdma2reo_mlo_1_dst_ring1",
95 "rxdma2reo_mlo_1_dst_ring0",
96 "rxdma2reo_dst_ring1",
97 "rxdma2reo_dst_ring0",
98 "rxdma2sw_dst_ring1",
99 "rxdma2sw_dst_ring0",
100 "rxdma2release_dst_ring1",
101 "rxdma2release_dst_ring0",
102 "sw2rxdma_2_src_ring",
103 "sw2rxdma_1_src_ring",
104 "sw2rxdma_0",
105 "wbm2sw6_release2",
106 "wbm2sw5_release2",
107 "wbm2sw4_release2",
108 "wbm2sw3_release2",
109 "wbm2sw2_release2",
110 "wbm2sw1_release2",
111 "wbm2sw0_release2",
112 "wbm2sw6_release",
113 "wbm2sw5_release",
114 "wbm2sw4_release",
115 "wbm2sw3_release",
116 "wbm2sw2_release",
117 "wbm2sw1_release",
118 "wbm2sw0_release",
119 "wbm2sw_link",
120 "wbm_error_release",
121 "sw2txmon_src_ring",
122 "sw2rxmon_src_ring",
123 "txmon2sw_p1_intr1",
124 "txmon2sw_p1_intr0",
125 "txmon2sw_p0_dest1",
126 "txmon2sw_p0_dest0",
127 "rxmon2sw_p1_intr1",
128 "rxmon2sw_p1_intr0",
129 "rxmon2sw_p0_dest1",
130 "rxmon2sw_p0_dest0",
131 "sw_release",
132 "sw2tcl_credit2",
133 "sw2tcl_credit",
134 "sw2tcl4",
135 "sw2tcl5",
136 "sw2tcl3",
137 "sw2tcl2",
138 "sw2tcl1",
139 "sw2wbm1",
140 "misc_8",
141 "misc_7",
142 "misc_6",
143 "misc_5",
144 "misc_4",
145 "misc_3",
146 "misc_2",
147 "misc_1",
148 "misc_0",
149 };
150 #endif
151 
152 #if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
153 	defined(QCA_WIFI_KIWI))
154 #include "hal_api.h"
155 #endif
156 
157 #include "if_pci_internal.h"
158 #include "ce_tasklet.h"
159 #include "targaddrs.h"
160 #include "hif_exec.h"
161 
162 #include "pci_api.h"
163 #include "ahb_api.h"
164 #include "wlan_cfg.h"
165 #include "qdf_hang_event_notifier.h"
166 #include "qdf_platform.h"
167 #include "qal_devnode.h"
168 #include "qdf_irq.h"
169 
170 /* Maximum ms timeout for host to wake up target */
171 #define PCIE_WAKE_TIMEOUT 1000
172 #define RAMDUMP_EVENT_TIMEOUT 2500
173 
174 /* Setting SOC_GLOBAL_RESET during driver unload causes an intermittent
175  * PCIe data bus error.
176  * As a workaround for this issue, the reset sequence is changed to
177  * use a Target CPU warm reset instead of SOC_GLOBAL_RESET.
178  */
179 #define CPU_WARM_RESET_WAR
180 #define WLAN_CFG_MAX_PCIE_GROUPS 5
181 #ifdef QCA_WIFI_QCN9224
182 #define WLAN_CFG_MAX_CE_COUNT 16
183 #else
184 #define WLAN_CFG_MAX_CE_COUNT 12
185 #endif
186 #define DP_IRQ_NAME_LEN 25
187 char dp_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_INT_NUM_CONTEXTS][DP_IRQ_NAME_LEN] = {};
188 char ce_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_MAX_CE_COUNT][DP_IRQ_NAME_LEN] = {};
189 
190 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
191 #define WLAN_CFG_MAX_LEGACY_IRQ_COUNT 160
192 char dp_legacy_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_MAX_LEGACY_IRQ_COUNT][DP_IRQ_NAME_LEN] = {};
193 #endif
194 
195 static inline int hif_get_pci_slot(struct hif_softc *scn)
196 {
197 	int pci_slot = pld_get_pci_slot(scn->qdf_dev->dev);
198 
199 	if (pci_slot < 0) {
200 		hif_err("Invalid PCI SLOT %d", pci_slot);
201 		qdf_assert_always(0);
202 		return 0;
203 	} else {
204 		return pci_slot;
205 	}
206 }
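/*
 * Illustrative sketch (not part of the driver): the slot index returned
 * above is typically used to pick the per-slot IRQ name buffers declared
 * near the top of this file when interrupts are registered.  The format
 * string, the handler name and the pfrm_request_irq() call shown here are
 * assumptions for illustration only:
 *
 *   int slot = hif_get_pci_slot(scn);
 *
 *   qdf_scnprintf(ce_irqname[slot][ce_id], DP_IRQ_NAME_LEN,
 *                 "pci%d_wlan_ce_%u", slot, ce_id);
 *   ret = pfrm_request_irq(scn->qdf_dev->dev, irq, hif_ce_irq_handler,
 *                          IRQF_SHARED, ce_irqname[slot][ce_id], context);
 */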
207 
208 /*
209  * Top-level interrupt handler for all PCI interrupts from a Target.
210  * When a block of MSI interrupts is allocated, this top-level handler
211  * is not used; instead, we directly call the correct sub-handler.
212  */
213 struct ce_irq_reg_table {
214 	uint32_t irq_enable;
215 	uint32_t irq_status;
216 };
217 
218 #ifndef QCA_WIFI_3_0_ADRASTEA
219 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
220 {
221 }
222 #else
223 static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
224 {
225 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
226 	unsigned int target_enable0, target_enable1;
227 	unsigned int target_cause0, target_cause1;
228 
229 	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
230 	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
231 	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
232 	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);
233 
234 	if ((target_enable0 & target_cause0) ||
235 	    (target_enable1 & target_cause1)) {
236 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
237 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);
238 
239 		if (scn->notice_send)
240 			pld_intr_notify_q6(sc->dev);
241 	}
242 }
243 #endif
244 
245 
246 /**
247  * pci_dispatch_interrupt() - PCI interrupt dispatcher
248  * @scn: scn
249  *
250  * Return: N/A
251  */
252 static void pci_dispatch_interrupt(struct hif_softc *scn)
253 {
254 	uint32_t intr_summary;
255 	int id;
256 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
257 
258 	if (scn->hif_init_done != true)
259 		return;
260 
261 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
262 		return;
263 
264 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
265 
266 	if (intr_summary == 0) {
267 		if ((scn->target_status != TARGET_STATUS_RESET) &&
268 			(!qdf_atomic_read(&scn->link_suspended))) {
269 
270 			hif_write32_mb(scn, scn->mem +
271 				(SOC_CORE_BASE_ADDRESS |
272 				PCIE_INTR_ENABLE_ADDRESS),
273 				HOST_GROUP0_MASK);
274 
275 			hif_read32_mb(scn, scn->mem +
276 					(SOC_CORE_BASE_ADDRESS |
277 					PCIE_INTR_ENABLE_ADDRESS));
278 		}
279 		Q_TARGET_ACCESS_END(scn);
280 		return;
281 	}
282 	Q_TARGET_ACCESS_END(scn);
283 
284 	scn->ce_irq_summary = intr_summary;
285 	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
286 		if (intr_summary & (1 << id)) {
287 			intr_summary &= ~(1 << id);
288 			ce_dispatch_interrupt(id,  &hif_state->tasklets[id]);
289 		}
290 	}
291 }
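/*
 * Worked example of the dispatch loop above: with intr_summary == 0x5
 * (bits 0 and 2 set), iteration id == 0 clears bit 0 and dispatches CE 0,
 * iteration id == 2 clears bit 2 and dispatches CE 2, and the loop then
 * terminates early because intr_summary has reached 0, without scanning
 * the remaining CE ids.
 */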
292 
293 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
294 {
295 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
296 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
297 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
298 
299 	volatile int tmp;
300 	uint16_t val = 0;
301 	uint32_t bar0 = 0;
302 	uint32_t fw_indicator_address, fw_indicator;
303 	bool ssr_irq = false;
304 	unsigned int host_cause, host_enable;
305 
306 	if (LEGACY_INTERRUPTS(sc)) {
307 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
308 			return IRQ_HANDLED;
309 
310 		if (ADRASTEA_BU) {
311 			host_enable = hif_read32_mb(sc, sc->mem +
312 						    PCIE_INTR_ENABLE_ADDRESS);
313 			host_cause = hif_read32_mb(sc, sc->mem +
314 						   PCIE_INTR_CAUSE_ADDRESS);
315 			if (!(host_enable & host_cause)) {
316 				hif_pci_route_adrastea_interrupt(sc);
317 				return IRQ_HANDLED;
318 			}
319 		}
320 
321 		/* Clear Legacy PCI line interrupts
322 		 * IMPORTANT: INTR_CLR register has to be set
323 		 * after INTR_ENABLE is set to 0,
324 		 * otherwise the interrupt cannot actually be cleared
325 		 */
326 		hif_write32_mb(sc, sc->mem +
327 			      (SOC_CORE_BASE_ADDRESS |
328 			       PCIE_INTR_ENABLE_ADDRESS), 0);
329 
330 		hif_write32_mb(sc, sc->mem +
331 			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
332 			       ADRASTEA_BU ?
333 			       (host_enable & host_cause) :
334 			      HOST_GROUP0_MASK);
335 
336 		if (ADRASTEA_BU)
337 			hif_write32_mb(sc, sc->mem + 0x2f100c,
338 				       (host_cause >> 1));
339 
340 		/* IMPORTANT: this extra read transaction is required to
341 		 * flush the posted write buffer
342 		 */
343 		if (!ADRASTEA_BU) {
344 			tmp =
345 				hif_read32_mb(sc, sc->mem +
346 					     (SOC_CORE_BASE_ADDRESS |
347 					      PCIE_INTR_ENABLE_ADDRESS));
348 
349 			if (tmp == 0xdeadbeef) {
350 				hif_err("SoC returns 0xdeadbeef!!");
351 
352 				pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
353 				hif_err("PCI Vendor ID = 0x%04x", val);
354 
355 				pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
356 				hif_err("PCI Device ID = 0x%04x", val);
357 
358 				pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
359 				hif_err("PCI Command = 0x%04x", val);
360 
361 				pci_read_config_word(sc->pdev, PCI_STATUS, &val);
362 				hif_err("PCI Status = 0x%04x", val);
363 
364 				pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
365 						      &bar0);
366 				hif_err("PCI BAR0 = 0x%08x", bar0);
367 
368 				hif_err("RTC_STATE_ADDRESS = 0x%08x",
369 					hif_read32_mb(sc, sc->mem +
370 						PCIE_LOCAL_BASE_ADDRESS
371 						+ RTC_STATE_ADDRESS));
372 				hif_err("PCIE_SOC_WAKE_ADDRESS = 0x%08x",
373 					hif_read32_mb(sc, sc->mem +
374 						PCIE_LOCAL_BASE_ADDRESS
375 						+ PCIE_SOC_WAKE_ADDRESS));
376 				hif_err("0x80008 = 0x%08x, 0x8000c = 0x%08x",
377 					hif_read32_mb(sc, sc->mem + 0x80008),
378 					hif_read32_mb(sc, sc->mem + 0x8000c));
379 				hif_err("0x80010 = 0x%08x, 0x80014 = 0x%08x",
380 					hif_read32_mb(sc, sc->mem + 0x80010),
381 					hif_read32_mb(sc, sc->mem + 0x80014));
382 				hif_err("0x80018 = 0x%08x, 0x8001c = 0x%08x",
383 					hif_read32_mb(sc, sc->mem + 0x80018),
384 					hif_read32_mb(sc, sc->mem + 0x8001c));
385 				QDF_BUG(0);
386 			}
387 
388 			PCI_CLR_CAUSE0_REGISTER(sc);
389 		}
390 
391 		if (HAS_FW_INDICATOR) {
392 			fw_indicator_address = hif_state->fw_indicator_address;
393 			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
394 			if ((fw_indicator != ~0) &&
395 			   (fw_indicator & FW_IND_EVENT_PENDING))
396 				ssr_irq = true;
397 		}
398 
399 		if (Q_TARGET_ACCESS_END(scn) < 0)
400 			return IRQ_HANDLED;
401 	}
402 	/* TBDXXX: Add support for WMAC */
403 
404 	if (ssr_irq) {
405 		sc->irq_event = irq;
406 		qdf_atomic_set(&scn->tasklet_from_intr, 1);
407 
408 		qdf_atomic_inc(&scn->active_tasklet_cnt);
409 		tasklet_schedule(&sc->intr_tq);
410 	} else {
411 		pci_dispatch_interrupt(scn);
412 	}
413 
414 	return IRQ_HANDLED;
415 }
416 
417 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
418 {
419 	return true;            /* FIX THIS */
420 }
421 
422 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
423 {
424 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
425 	int i = 0;
426 
427 	if (!irq || !size) {
428 		return -EINVAL;
429 	}
430 
431 	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
432 		irq[0] = sc->irq;
433 		return 1;
434 	}
435 
436 	if (sc->num_msi_intrs > size) {
437 		qdf_print("Not enough space in irq buffer to return irqs");
438 		return -EINVAL;
439 	}
440 
441 	for (i = 0; i < sc->num_msi_intrs; i++) {
442 		irq[i] = sc->irq +  i + MSI_ASSIGN_CE_INITIAL;
443 	}
444 
445 	return sc->num_msi_intrs;
446 }
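/*
 * Usage sketch (illustrative, not part of the driver): a caller sizes the
 * buffer for the worst case and trusts the returned count.  The
 * setup_irq_affinity() consumer below is hypothetical:
 *
 *   int irqs[WLAN_CFG_MAX_CE_COUNT];
 *   int i, nr;
 *
 *   nr = hif_get_irq_num(hif_hdl, irqs, QDF_ARRAY_SIZE(irqs));
 *   if (nr < 0)
 *           return nr;
 *   for (i = 0; i < nr; i++)
 *           setup_irq_affinity(irqs[i]);
 */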
447 
448 
449 /**
450  * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
451  * @scn: hif_softc
452  *
453  * Return: void
454  */
455 #if CONFIG_ATH_PCIE_MAX_PERF == 0
456 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
457 {
458 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
459 	A_target_id_t pci_addr = scn->mem;
460 
461 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
462 	/*
463 	 * If the deferred sleep timer is running cancel it
464 	 * and put the soc into sleep.
465 	 */
466 	if (hif_state->fake_sleep == true) {
467 		qdf_timer_stop(&hif_state->sleep_timer);
468 		if (hif_state->verified_awake == false) {
469 			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
470 				      PCIE_SOC_WAKE_ADDRESS,
471 				      PCIE_SOC_WAKE_RESET);
472 		}
473 		hif_state->fake_sleep = false;
474 	}
475 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
476 }
477 #else
478 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
479 {
480 }
481 #endif
482 
483 #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
484 	hif_read32_mb(sc, (char *)(mem) + \
485 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
486 
487 #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
488 	hif_write32_mb(sc, ((char *)(mem) + \
489 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
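/*
 * Usage sketch for the two helpers above; this mirrors the wake handshake
 * used by the reset routines below.  The unbounded loop is illustrative
 * only - the real code caps it with ATH_PCI_RESET_WAIT_MAX:
 *
 *   A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
 *                          PCIE_SOC_WAKE_V_MASK);
 *   while (!hif_targ_is_awake(scn, mem))
 *           qdf_mdelay(1);
 *
 *   ... target registers may be accessed safely here ...
 *
 *   A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
 *                          PCIE_SOC_WAKE_RESET);
 */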
490 
491 #ifdef QCA_WIFI_3_0
492 /**
493  * hif_targ_is_awake() - check to see if the target is awake
494  * @hif_ctx: hif context
495  * @mem: ioremapped target register base (unused here)
496  *
497  * Emulation never goes to sleep.
498  *
499  * Return: true if target is awake
500  */
501 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
502 {
503 	return true;
504 }
505 #else
506 /**
507  * hif_targ_is_awake() - check to see if the target is awake
508  * @scn: hif context
509  * @mem: ioremapped target register base
510  *
511  * Return: true if the targets clocks are on
512  */
513 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
514 {
515 	uint32_t val;
516 
517 	if (scn->recovery)
518 		return false;
519 	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
520 		+ RTC_STATE_ADDRESS);
521 	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
522 }
523 #endif
524 
525 #define ATH_PCI_RESET_WAIT_MAX 10       /* Ms */
526 static void hif_pci_device_reset(struct hif_pci_softc *sc)
527 {
528 	void __iomem *mem = sc->mem;
529 	int i;
530 	uint32_t val;
531 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
532 
533 	if (!scn->hostdef)
534 		return;
535 
536 	/* NB: Don't check resetok here.  This form of reset
537 	 * is integral to correct operation.
538 	 */
539 
540 	if (!SOC_GLOBAL_RESET_ADDRESS)
541 		return;
542 
543 	if (!mem)
544 		return;
545 
546 	hif_err("Reset Device");
547 
548 	/*
549 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
550 	 * writing WAKE_V, the Target may scribble over Host memory!
551 	 */
552 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
553 			       PCIE_SOC_WAKE_V_MASK);
554 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
555 		if (hif_targ_is_awake(scn, mem))
556 			break;
557 
558 		qdf_mdelay(1);
559 	}
560 
561 	/* Put Target, including PCIe, into RESET. */
562 	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
563 	val |= 1;
564 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
565 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
566 		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
567 		    RTC_STATE_COLD_RESET_MASK)
568 			break;
569 
570 		qdf_mdelay(1);
571 	}
572 
573 	/* Pull Target, including PCIe, out of RESET. */
574 	val &= ~1;
575 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
576 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
577 		if (!
578 		    (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
579 		     RTC_STATE_COLD_RESET_MASK))
580 			break;
581 
582 		qdf_mdelay(1);
583 	}
584 
585 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
586 			       PCIE_SOC_WAKE_RESET);
587 }
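/*
 * Summary of the cold-reset sequence above: (1) vote the SoC awake and
 * wait for RTC_STATE to report ON, (2) set bit 0 of SOC_GLOBAL_RESET and
 * wait for RTC_STATE_COLD_RESET_MASK to assert, (3) clear bit 0 and wait
 * for that mask to deassert, (4) drop the wake vote.  Each wait is capped
 * at ATH_PCI_RESET_WAIT_MAX one-millisecond polls.
 */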
588 
589 /* CPU warm reset function
590  * Steps:
591  * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
592  * 2. Clear the FW_INDICATOR_ADDRESS - so the Target CPU initializes FW
593  *    correctly on WARM reset
594  * 3. Clear TARGET CPU LF timer interrupt
595  * 4. Reset all CEs to clear any pending CE transactions
596  * 5. Warm reset CPU
597  */
598 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
599 {
600 	void __iomem *mem = sc->mem;
601 	int i;
602 	uint32_t val;
603 	uint32_t fw_indicator;
604 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
605 
606 	/* NB: Don't check resetok here.  This form of reset is
607 	 * integral to correct operation.
608 	 */
609 
610 	if (!mem)
611 		return;
612 
613 	hif_debug("Target Warm Reset");
614 
615 	/*
616 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
617 	 * writing WAKE_V, the Target may scribble over Host memory!
618 	 */
619 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
620 			       PCIE_SOC_WAKE_V_MASK);
621 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
622 		if (hif_targ_is_awake(scn, mem))
623 			break;
624 		qdf_mdelay(1);
625 	}
626 
627 	/*
628 	 * Disable Pending interrupts
629 	 */
630 	val =
631 		hif_read32_mb(sc, mem +
632 			     (SOC_CORE_BASE_ADDRESS |
633 			      PCIE_INTR_CAUSE_ADDRESS));
634 	hif_debug("Host Intr Cause reg 0x%x: value : 0x%x",
635 		  (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
636 	/* Target CPU Intr Cause */
637 	val = hif_read32_mb(sc, mem +
638 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
639 	hif_debug("Target CPU Intr Cause 0x%x", val);
640 
641 	val =
642 		hif_read32_mb(sc, mem +
643 			     (SOC_CORE_BASE_ADDRESS |
644 			      PCIE_INTR_ENABLE_ADDRESS));
645 	hif_write32_mb(sc, (mem +
646 		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
647 	hif_write32_mb(sc, (mem +
648 		       (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
649 		       HOST_GROUP0_MASK);
650 
651 	qdf_mdelay(100);
652 
653 	/* Clear FW_INDICATOR_ADDRESS */
654 	if (HAS_FW_INDICATOR) {
655 		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
656 		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
657 	}
658 
659 	/* Clear Target LF Timer interrupts */
660 	val =
661 		hif_read32_mb(sc, mem +
662 			     (RTC_SOC_BASE_ADDRESS +
663 			      SOC_LF_TIMER_CONTROL0_ADDRESS));
664 	hif_debug("addr 0x%x : 0x%x",
665 	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
666 	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
667 	hif_write32_mb(sc, mem +
668 		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
669 		      val);
670 
671 	/* Reset CE */
672 	val =
673 		hif_read32_mb(sc, mem +
674 			     (RTC_SOC_BASE_ADDRESS |
675 			      SOC_RESET_CONTROL_ADDRESS));
676 	val |= SOC_RESET_CONTROL_CE_RST_MASK;
677 	hif_write32_mb(sc, (mem +
678 		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
679 		      val);
680 	val =
681 		hif_read32_mb(sc, mem +
682 			     (RTC_SOC_BASE_ADDRESS |
683 			      SOC_RESET_CONTROL_ADDRESS));
684 	qdf_mdelay(10);
685 
686 	/* CE unreset */
687 	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
688 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
689 		       SOC_RESET_CONTROL_ADDRESS), val);
690 	val =
691 		hif_read32_mb(sc, mem +
692 			     (RTC_SOC_BASE_ADDRESS |
693 			      SOC_RESET_CONTROL_ADDRESS));
694 	qdf_mdelay(10);
695 
696 	/* Read Target CPU Intr Cause */
697 	val = hif_read32_mb(sc, mem +
698 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
699 	hif_debug("Target CPU Intr Cause after CE reset 0x%x", val);
700 
701 	/* CPU warm RESET */
702 	val =
703 		hif_read32_mb(sc, mem +
704 			     (RTC_SOC_BASE_ADDRESS |
705 			      SOC_RESET_CONTROL_ADDRESS));
706 	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
707 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
708 		       SOC_RESET_CONTROL_ADDRESS), val);
709 	val =
710 		hif_read32_mb(sc, mem +
711 			     (RTC_SOC_BASE_ADDRESS |
712 			      SOC_RESET_CONTROL_ADDRESS));
713 	hif_debug("RESET_CONTROL after cpu warm reset 0x%x", val);
714 
715 	qdf_mdelay(100);
716 	hif_debug("Target Warm reset complete");
717 
718 }
719 
720 #ifndef QCA_WIFI_3_0
721 /* only applicable to legacy ce */
722 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
723 {
724 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
725 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
726 	void __iomem *mem = sc->mem;
727 	uint32_t val;
728 
729 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
730 		return ATH_ISR_NOSCHED;
731 	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
732 	if (Q_TARGET_ACCESS_END(scn) < 0)
733 		return ATH_ISR_SCHED;
734 
735 	hif_debug("FW_INDICATOR register is 0x%x", val);
736 
737 	if (val & FW_IND_HELPER)
738 		return 0;
739 
740 	return 1;
741 }
742 #endif
743 
744 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
745 {
746 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
747 	uint16_t device_id = 0;
748 	uint32_t val;
749 	uint16_t timeout_count = 0;
750 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
751 
752 	/* Check device ID from PCIe configuration space for link status */
753 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
754 	if (device_id != sc->devid) {
755 		hif_err("Device ID does not match (read 0x%x, expect 0x%x)",
756 			device_id, sc->devid);
757 		return -EACCES;
758 	}
759 
760 	/* Check PCIe local register for bar/memory access */
761 	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
762 			   RTC_STATE_ADDRESS);
763 	hif_debug("RTC_STATE_ADDRESS is %08x", val);
764 
765 	/* Try to wake up target if it sleeps */
766 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
767 		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
768 	hif_debug("PCIE_SOC_WAKE_ADDRESS is %08x",
769 		hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
770 		PCIE_SOC_WAKE_ADDRESS));
771 
772 	/* Check if target can be woken up */
773 	while (!hif_targ_is_awake(scn, sc->mem)) {
774 		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
775 			hif_err("wake up timeout, %08x, %08x",
776 				hif_read32_mb(sc, sc->mem +
777 				     PCIE_LOCAL_BASE_ADDRESS +
778 				     RTC_STATE_ADDRESS),
779 				hif_read32_mb(sc, sc->mem +
780 				     PCIE_LOCAL_BASE_ADDRESS +
781 				     PCIE_SOC_WAKE_ADDRESS));
782 			return -EACCES;
783 		}
784 
785 		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
786 			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
787 
788 		qdf_mdelay(100);
789 		timeout_count += 100;
790 	}
791 
792 	/* Check Power register for SoC internal bus issues */
793 	val =
794 		hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
795 			     SOC_POWER_REG_OFFSET);
796 	hif_debug("Power register is %08x", val);
797 
798 	return 0;
799 }
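/*
 * Timing note for the wake loop above: the target is re-poked and polled
 * in 100 ms steps, so with PCIE_WAKE_TIMEOUT == 1000 the loop gives up
 * after roughly ten iterations, i.e. about one second of polling.
 */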
800 
801 /**
802  * __hif_pci_dump_registers(): dump other PCI debug registers
803  * @scn: struct hif_softc
804  *
805  * This function dumps pci debug registers.  The parent function
806  * dumps the copy engine registers before calling this function.
807  *
808  * Return: void
809  */
810 static void __hif_pci_dump_registers(struct hif_softc *scn)
811 {
812 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
813 	void __iomem *mem = sc->mem;
814 	uint32_t val, i, j;
815 	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
816 	uint32_t ce_base;
817 
818 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
819 		return;
820 
821 	/* DEBUG_INPUT_SEL_SRC = 0x6 */
822 	val =
823 		hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
824 			     WLAN_DEBUG_INPUT_SEL_OFFSET);
825 	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
826 	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
827 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
828 		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);
829 
830 	/* DEBUG_CONTROL_ENABLE = 0x1 */
831 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
832 			   WLAN_DEBUG_CONTROL_OFFSET);
833 	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
834 	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
835 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
836 		      WLAN_DEBUG_CONTROL_OFFSET, val);
837 
838 	hif_debug("Debug: inputsel: %x dbgctrl: %x",
839 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
840 			    WLAN_DEBUG_INPUT_SEL_OFFSET),
841 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
842 			    WLAN_DEBUG_CONTROL_OFFSET));
843 
844 	hif_debug("Debug CE");
845 	/* Loop CE debug output */
846 	/* AMBA_DEBUG_BUS_SEL = 0xc */
847 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
848 			    AMBA_DEBUG_BUS_OFFSET);
849 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
850 	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
851 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
852 		       val);
853 
854 	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
855 		/* For each wrapper_idx entry (1-9), write CE_WRAPPER_DEBUG_SEL = wrapper_idx[i] */
856 		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
857 				   CE_WRAPPER_DEBUG_OFFSET);
858 		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
859 		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
860 		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
861 			      CE_WRAPPER_DEBUG_OFFSET, val);
862 
863 		hif_debug("ce wrapper: %d amdbg: %x cewdbg: %x",
864 			  wrapper_idx[i],
865 			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
866 				AMBA_DEBUG_BUS_OFFSET),
867 			  hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
868 				CE_WRAPPER_DEBUG_OFFSET));
869 
870 		if (wrapper_idx[i] <= 7) {
871 			for (j = 0; j <= 5; j++) {
872 				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
873 				/* For (j=0~5) write CE_DEBUG_SEL = j */
874 				val =
875 					hif_read32_mb(sc, mem + ce_base +
876 						     CE_DEBUG_OFFSET);
877 				val &= ~CE_DEBUG_SEL_MASK;
878 				val |= CE_DEBUG_SEL_SET(j);
879 				hif_write32_mb(sc, mem + ce_base +
880 					       CE_DEBUG_OFFSET, val);
881 
882 				/* read (@gpio_athr_wlan_reg)
883 				 * WLAN_DEBUG_OUT_DATA
884 				 */
885 				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
886 						    + WLAN_DEBUG_OUT_OFFSET);
887 				val = WLAN_DEBUG_OUT_DATA_GET(val);
888 
889 				hif_debug("module%d: cedbg: %x out: %x",
890 					  j,
891 					  hif_read32_mb(sc, mem + ce_base +
892 						CE_DEBUG_OFFSET), val);
893 			}
894 		} else {
895 			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
896 			val =
897 				hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
898 					     WLAN_DEBUG_OUT_OFFSET);
899 			val = WLAN_DEBUG_OUT_DATA_GET(val);
900 
901 			hif_debug("out: %x", val);
902 		}
903 	}
904 
905 	hif_debug("Debug PCIe:");
906 	/* Loop PCIe debug output */
907 	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
908 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
909 			    AMBA_DEBUG_BUS_OFFSET);
910 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
911 	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
912 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
913 		       AMBA_DEBUG_BUS_OFFSET, val);
914 
915 	for (i = 0; i <= 8; i++) {
916 		/* For (i=0~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
917 		val =
918 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
919 				     AMBA_DEBUG_BUS_OFFSET);
920 		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
921 		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
922 		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
923 			       AMBA_DEBUG_BUS_OFFSET, val);
924 
925 		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
926 		val =
927 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
928 				     WLAN_DEBUG_OUT_OFFSET);
929 		val = WLAN_DEBUG_OUT_DATA_GET(val);
930 
931 		hif_debug("amdbg: %x out: %x %x",
932 			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
933 				WLAN_DEBUG_OUT_OFFSET), val,
934 			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
935 				WLAN_DEBUG_OUT_OFFSET));
936 	}
937 
938 	Q_TARGET_ACCESS_END(scn);
939 }
940 
941 /**
942  * hif_pci_dump_registers(): dump bus debug registers
943  * @hif_ctx: struct hif_opaque_softc
944  *
945  * This function dumps hif bus debug registers
946  *
947  * Return: 0 for success or error code
948  */
949 int hif_pci_dump_registers(struct hif_softc *hif_ctx)
950 {
951 	int status;
952 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
953 
954 	status = hif_dump_ce_registers(scn);
955 
956 	if (status)
957 		hif_err("Dump CE Registers Failed");
958 
959 	/* dump non copy engine pci registers */
960 	__hif_pci_dump_registers(scn);
961 
962 	return 0;
963 }
964 
965 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
966 
967 /* worker thread to schedule wlan_tasklet in SLUB debug build */
968 static void reschedule_tasklet_work_handler(void *arg)
969 {
970 	struct hif_pci_softc *sc = arg;
971 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
972 
973 	if (!scn) {
974 		hif_err("hif_softc is NULL");
975 		return;
976 	}
977 
978 	if (scn->hif_init_done == false) {
979 		hif_err("wlan driver is unloaded");
980 		return;
981 	}
982 
983 	tasklet_schedule(&sc->intr_tq);
984 }
985 
986 /**
987  * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
988  * work
989  * @sc: HIF PCI Context
990  *
991  * Return: void
992  */
993 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
994 {
995 	qdf_create_work(0, &sc->reschedule_tasklet_work,
996 				reschedule_tasklet_work_handler, sc);
997 }
998 #else
999 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
1000 #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
1001 
1002 void wlan_tasklet(unsigned long data)
1003 {
1004 	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
1005 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1006 
1007 	if (scn->hif_init_done == false)
1008 		goto end;
1009 
1010 	if (qdf_atomic_read(&scn->link_suspended))
1011 		goto end;
1012 
1013 	if (!ADRASTEA_BU) {
1014 		hif_fw_interrupt_handler(sc->irq_event, scn);
1015 		if (scn->target_status == TARGET_STATUS_RESET)
1016 			goto end;
1017 	}
1018 
1019 end:
1020 	qdf_atomic_set(&scn->tasklet_from_intr, 0);
1021 	qdf_atomic_dec(&scn->active_tasklet_cnt);
1022 }
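/*
 * Bookkeeping note: active_tasklet_cnt is incremented by the interrupt
 * path just before tasklet_schedule() (see
 * hif_pci_legacy_ce_interrupt_handler above) and decremented here on
 * every exit path, so the counter always reflects tasklets that are
 * scheduled or still running.
 */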
1023 
1024 /**
1025  * hif_disable_power_gating() - disable HW power gating
1026  * @hif_ctx: hif context
1027  *
1028  * disables pcie L1 power states
1029  */
1030 static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1031 {
1032 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1033 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1034 
1035 	if (!scn) {
1036 		hif_err("Could not disable ASPM: scn is null");
1037 		return;
1038 	}
1039 
1040 	/* Disable ASPM when pkt log is enabled */
1041 	pfrm_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1042 	pfrm_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1043 }
1044 
1045 /**
1046  * hif_enable_power_gating() - enable HW power gating
1047  * @sc: hif context
1048  *
1049  * enables pcie L1 power states
1050  */
1051 static void hif_enable_power_gating(struct hif_pci_softc *sc)
1052 {
1053 	if (!sc) {
1054 		hif_err("Could not re-enable ASPM: sc is null");
1055 		return;
1056 	}
1057 
1058 	/* Re-enable ASPM after firmware/OTP download is complete */
1059 	pfrm_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1060 }
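/*
 * Pairing note for the two helpers above: config-space offset 0x80 is
 * assumed to be this device's PCIe Link Control register, whose low bits
 * carry the ASPM L0s/L1 enables.  hif_disable_power_gating() saves the
 * dword in sc->lcr_val and clears the low byte; hif_enable_power_gating()
 * later restores the saved value verbatim, so the calls must stay
 * balanced:
 *
 *   hif_disable_power_gating(hif_ctx);
 *   ... firmware / OTP download with packet log enabled ...
 *   hif_enable_power_gating(sc);
 */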
1061 
1062 /**
1063  * hif_pci_enable_power_management() - enable power management
1064  * @hif_sc: hif context
1065  * @is_packet_log_enabled: whether packet log is enabled
1066  *
1067  * Enables runtime PM, ASPM (PCIe, via hif_enable_power_gating) and
1068  * re-enables SoC sleep after driver load (hif_pci_target_sleep_state_adjust).
1069  *
1070  * note: epping mode does not call this function as it does not
1071  *       care about saving power.
1072  */
1073 void hif_pci_enable_power_management(struct hif_softc *hif_sc,
1074 				 bool is_packet_log_enabled)
1075 {
1076 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
1077 	uint32_t mode;
1078 
1079 	if (!pci_ctx) {
1080 		hif_err("hif_ctx null");
1081 		return;
1082 	}
1083 
1084 	mode = hif_get_conparam(hif_sc);
1085 	if (mode == QDF_GLOBAL_FTM_MODE) {
1086 		hif_info("Enable power gating for FTM mode");
1087 		hif_enable_power_gating(pci_ctx);
1088 		return;
1089 	}
1090 
1091 	hif_rtpm_start(hif_sc);
1092 
1093 	if (!is_packet_log_enabled)
1094 		hif_enable_power_gating(pci_ctx);
1095 
1096 	if (!CONFIG_ATH_PCIE_MAX_PERF &&
1097 	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
1098 	    !ce_srng_based(hif_sc)) {
1099 		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
1100 		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
1101 			hif_err("Failed to set target to sleep");
1102 	}
1103 }
1104 
1105 /**
1106  * hif_pci_disable_power_management() - disable power management
1107  * @hif_ctx: hif context
1108  *
1109  * Currently disables runtime pm. Should be updated to behave
1110  * if runtime pm is not started. Should be updated to take care
1111  * of aspm and soc sleep for driver load.
1112  */
1113 void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
1114 {
1115 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1116 
1117 	if (!pci_ctx) {
1118 		hif_err("hif_ctx null");
1119 		return;
1120 	}
1121 
1122 	hif_rtpm_stop(hif_ctx);
1123 }
1124 
1125 void hif_pci_display_stats(struct hif_softc *hif_ctx)
1126 {
1127 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1128 
1129 	if (!pci_ctx) {
1130 		hif_err("hif_ctx null");
1131 		return;
1132 	}
1133 	hif_display_ce_stats(hif_ctx);
1134 
1135 	hif_print_pci_stats(pci_ctx);
1136 }
1137 
1138 void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1139 {
1140 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1141 
1142 	if (!pci_ctx) {
1143 		hif_err("hif_ctx null");
1144 		return;
1145 	}
1146 	hif_clear_ce_stats(&pci_ctx->ce_sc);
1147 }
1148 
1149 #define ATH_PCI_PROBE_RETRY_MAX 3
1150 /**
1151  * hif_pci_open(): hif_bus_open
1152  * @hif_ctx: scn
1153  * @bus_type: bus type
1154  *
1155  * Return: QDF_STATUS
1156  */
1157 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
1158 {
1159 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1160 
1161 	hif_ctx->bus_type = bus_type;
1162 	hif_rtpm_open(hif_ctx);
1163 
1164 	qdf_spinlock_create(&sc->irq_lock);
1165 	qdf_spinlock_create(&sc->force_wake_lock);
1166 
1167 	return hif_ce_open(hif_ctx);
1168 }
1169 
1170 /**
1171  * hif_wake_target_cpu() - wake the target's cpu
1172  * @scn: hif context
1173  *
1174  * Send an interrupt to the device to wake up the Target CPU
1175  * so it has an opportunity to notice any changed state.
1176  */
1177 static void hif_wake_target_cpu(struct hif_softc *scn)
1178 {
1179 	QDF_STATUS rv;
1180 	uint32_t core_ctrl;
1181 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1182 
1183 	rv = hif_diag_read_access(hif_hdl,
1184 				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1185 				  &core_ctrl);
1186 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1187 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1188 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1189 
1190 	rv = hif_diag_write_access(hif_hdl,
1191 				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1192 				   core_ctrl);
1193 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1194 }
1195 
1196 /**
1197  * soc_wake_reset() - allow the target to go to sleep
1198  * @scn: hif_softc
1199  *
1200  * Clear the force wake register.  This is done by
1201  * hif_sleep_entry and cancel deferred timer sleep.
1202  */
1203 static void soc_wake_reset(struct hif_softc *scn)
1204 {
1205 	hif_write32_mb(scn, scn->mem +
1206 		PCIE_LOCAL_BASE_ADDRESS +
1207 		PCIE_SOC_WAKE_ADDRESS,
1208 		PCIE_SOC_WAKE_RESET);
1209 }
1210 
1211 /**
1212  * hif_sleep_entry() - gate target sleep
1213  * @arg: hif context
1214  *
1215  * This function is the callback for the sleep timer.
1216  * Check if last force awake critical section was at least
1217  * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago.  if it was,
1218  * allow the target to go to sleep and cancel the sleep timer.
1219  * otherwise reschedule the sleep timer.
1220  */
1221 static void hif_sleep_entry(void *arg)
1222 {
1223 	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1224 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1225 	uint32_t idle_ms;
1226 
1227 	if (scn->recovery)
1228 		return;
1229 
1230 	if (hif_is_driver_unloading(scn))
1231 		return;
1232 
1233 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1234 	if (hif_state->fake_sleep) {
1235 		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1236 						    - hif_state->sleep_ticks);
1237 		if (!hif_state->verified_awake &&
1238 		    idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1239 			if (!qdf_atomic_read(&scn->link_suspended)) {
1240 				soc_wake_reset(scn);
1241 				hif_state->fake_sleep = false;
1242 			}
1243 		} else {
1244 			qdf_timer_stop(&hif_state->sleep_timer);
1245 			qdf_timer_start(&hif_state->sleep_timer,
1246 					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1247 		}
1248 	}
1249 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1250 }
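/*
 * Lifecycle sketch of the fake-sleep machinery above: a deferred-sleep
 * request sets fake_sleep, records sleep_ticks and arms sleep_timer.
 * When this callback fires it either (a) drops the wake vote via
 * soc_wake_reset() and clears fake_sleep - when no force-wake was
 * verified, the link has been idle for at least
 * HIF_MIN_SLEEP_INACTIVITY_TIME_MS and is not suspended - or (b) re-arms
 * the timer for another HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS when a wake
 * was verified or the idle window was too short.  A suspended link leaves
 * fake_sleep pending.
 */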
1251 
1252 #define HIF_HIA_MAX_POLL_LOOP    1000000
1253 #define HIF_HIA_POLLING_DELAY_MS 10
1254 
1255 #ifdef QCA_HIF_HIA_EXTND
1256 
1257 static void hif_set_hia_extnd(struct hif_softc *scn)
1258 {
1259 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1260 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1261 	uint32_t target_type = tgt_info->target_type;
1262 
1263 	hif_info("E");
1264 
1265 	if ((target_type == TARGET_TYPE_AR900B) ||
1266 			target_type == TARGET_TYPE_QCA9984 ||
1267 			target_type == TARGET_TYPE_QCA9888) {
1268 		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
1269 		 * in RTC space
1270 		 */
1271 		tgt_info->target_revision
1272 			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
1273 					+ CHIP_ID_ADDRESS));
1274 		qdf_print("chip_id 0x%x chip_revision 0x%x",
1275 		qdf_print("target_type 0x%x chip_revision 0x%x",
1276 	}
1277 
1278 	{
1279 		uint32_t flag2_value = 0;
1280 		uint32_t flag2_targ_addr =
1281 			host_interest_item_address(target_type,
1282 			offsetof(struct host_interest_s, hi_skip_clock_init));
1283 
1284 		if ((ar900b_20_targ_clk != -1) &&
1285 			(frac != -1) && (intval != -1)) {
1286 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1287 				&flag2_value);
1288 			qdf_print("\n Setting clk_override");
1289 			flag2_value |= CLOCK_OVERRIDE;
1290 
1291 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1292 					flag2_value);
1293 			qdf_print("\n CLOCK PLL val set %d", flag2_value);
1294 		} else {
1295 			qdf_print("\n CLOCK PLL skipped");
1296 		}
1297 	}
1298 
1299 	if (target_type == TARGET_TYPE_AR900B
1300 			|| target_type == TARGET_TYPE_QCA9984
1301 			|| target_type == TARGET_TYPE_QCA9888) {
1302 
1303 		/* For AR900B 2.0, a 300 MHz clock is used; for now we assume
1304 		 * it is supplied through module parameters.  If it is not
1305 		 * supplied, assume the default, i.e. the same behavior as 1.0.
1306 		 * The 1.0 clock cannot be tuned, so reset to defaults.
1307 		 */
1308 
1309 		qdf_print(KERN_INFO
1310 			  "%s: setting the target pll frac %x intval %x",
1311 			  __func__, frac, intval);
1312 
1313 		/* do not touch frac, and int val, let them be default -1,
1314 		 * if desired, host can supply these through module params
1315 		 */
1316 		if (frac != -1 || intval != -1) {
1317 			uint32_t flag2_value = 0;
1318 			uint32_t flag2_targ_addr;
1319 
1320 			flag2_targ_addr =
1321 				host_interest_item_address(target_type,
1322 				offsetof(struct host_interest_s,
1323 					hi_clock_info));
1324 			hif_diag_read_access(hif_hdl,
1325 				flag2_targ_addr, &flag2_value);
1326 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1327 				  flag2_value);
1328 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1329 			qdf_print("\n INT Val %x  Address %x",
1330 				  intval, flag2_value + 4);
1331 			hif_diag_write_access(hif_hdl,
1332 					flag2_value + 4, intval);
1333 		} else {
1334 			qdf_print(KERN_INFO
1335 				  "%s: no frac provided, skipping pre-configuring PLL",
1336 				  __func__);
1337 		}
1338 
1339 		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
1340 		if ((target_type == TARGET_TYPE_AR900B)
1341 			&& (tgt_info->target_revision == AR900B_REV_2)
1342 			&& ar900b_20_targ_clk != -1) {
1343 			uint32_t flag2_value = 0;
1344 			uint32_t flag2_targ_addr;
1345 
1346 			flag2_targ_addr
1347 				= host_interest_item_address(target_type,
1348 					offsetof(struct host_interest_s,
1349 					hi_desired_cpu_speed_hz));
1350 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1351 							&flag2_value);
1352 			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
1353 				  flag2_value);
1354 			hif_diag_write_access(hif_hdl, flag2_value,
1355 				ar900b_20_targ_clk/*300000000u*/);
1356 		} else if (target_type == TARGET_TYPE_QCA9888) {
1357 			uint32_t flag2_targ_addr;
1358 
1359 			if (200000000u != qca9888_20_targ_clk) {
1360 				qca9888_20_targ_clk = 300000000u;
1361 				/* Setting the target clock speed to 300 mhz */
1362 			}
1363 
1364 			flag2_targ_addr
1365 				= host_interest_item_address(target_type,
1366 					offsetof(struct host_interest_s,
1367 					hi_desired_cpu_speed_hz));
1368 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1369 				qca9888_20_targ_clk);
1370 		} else {
1371 			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
1372 				  __func__);
1373 		}
1374 	} else {
1375 		if (frac != -1 || intval != -1) {
1376 			uint32_t flag2_value = 0;
1377 			uint32_t flag2_targ_addr =
1378 				host_interest_item_address(target_type,
1379 					offsetof(struct host_interest_s,
1380 							hi_clock_info));
1381 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1382 						&flag2_value);
1383 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1384 				  flag2_value);
1385 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1386 			qdf_print("\n INT Val %x  Address %x", intval,
1387 				  flag2_value + 4);
1388 			hif_diag_write_access(hif_hdl, flag2_value + 4,
1389 					      intval);
1390 		}
1391 	}
1392 }
1393 
1394 #else
1395 
1396 static void hif_set_hia_extnd(struct hif_softc *scn)
1397 {
1398 }
1399 
1400 #endif
1401 
1402 /**
1403  * hif_set_hia() - fill out the host interest area
1404  * @scn: hif context
1405  *
1406  * This is replaced by hif_wlan_enable for integrated targets.
1407  * This fills out the host interest area.  The firmware will
1408  * process these memory addresses when it is first brought out
1409  * of reset.
1410  *
1411  * Return: 0 for success.
1412  */
1413 static int hif_set_hia(struct hif_softc *scn)
1414 {
1415 	QDF_STATUS rv;
1416 	uint32_t interconnect_targ_addr = 0;
1417 	uint32_t pcie_state_targ_addr = 0;
1418 	uint32_t pipe_cfg_targ_addr = 0;
1419 	uint32_t svc_to_pipe_map = 0;
1420 	uint32_t pcie_config_flags = 0;
1421 	uint32_t flag2_value = 0;
1422 	uint32_t flag2_targ_addr = 0;
1423 #ifdef QCA_WIFI_3_0
1424 	uint32_t host_interest_area = 0;
1425 	uint8_t i;
1426 #else
1427 	uint32_t ealloc_value = 0;
1428 	uint32_t ealloc_targ_addr = 0;
1429 	uint8_t banks_switched = 1;
1430 	uint32_t chip_id;
1431 #endif
1432 	uint32_t pipe_cfg_addr;
1433 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1434 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1435 	uint32_t target_type = tgt_info->target_type;
1436 	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
1437 	static struct CE_pipe_config *target_ce_config;
1438 	struct service_to_pipe *target_service_to_ce_map;
1439 
1440 	hif_info("E");
1441 
1442 	hif_get_target_ce_config(scn,
1443 				 &target_ce_config, &target_ce_config_sz,
1444 				 &target_service_to_ce_map,
1445 				 &target_service_to_ce_map_sz,
1446 				 NULL, NULL);
1447 
1448 	if (ADRASTEA_BU)
1449 		return 0;
1450 
1451 #ifdef QCA_WIFI_3_0
1452 	i = 0;
1453 	while (i < HIF_HIA_MAX_POLL_LOOP) {
1454 		host_interest_area = hif_read32_mb(scn, scn->mem +
1455 						A_SOC_CORE_SCRATCH_0_ADDRESS);
1456 		if ((host_interest_area & 0x01) == 0) {
1457 			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1458 			host_interest_area = 0;
1459 			i++;
1460 			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1461 				hif_err("poll timeout: %d", i);
1462 		} else {
1463 			host_interest_area &= (~0x01);
1464 			hif_write32_mb(scn, scn->mem + 0x113014, 0);
1465 			break;
1466 		}
1467 	}
1468 
1469 	if (i >= HIF_HIA_MAX_POLL_LOOP) {
1470 		hif_err("hia polling timeout");
1471 		return -EIO;
1472 	}
1473 
1474 	if (host_interest_area == 0) {
1475 		hif_err("host_interest_area = 0");
1476 		return -EIO;
1477 	}
1478 
1479 	interconnect_targ_addr = host_interest_area +
1480 			offsetof(struct host_interest_area_t,
1481 			hi_interconnect_state);
1482 
1483 	flag2_targ_addr = host_interest_area +
1484 			offsetof(struct host_interest_area_t, hi_option_flag2);
1485 
1486 #else
1487 	interconnect_targ_addr = hif_hia_item_address(target_type,
1488 		offsetof(struct host_interest_s, hi_interconnect_state));
1489 	ealloc_targ_addr = hif_hia_item_address(target_type,
1490 		offsetof(struct host_interest_s, hi_early_alloc));
1491 	flag2_targ_addr = hif_hia_item_address(target_type,
1492 		offsetof(struct host_interest_s, hi_option_flag2));
1493 #endif
1494 	/* Supply Target-side CE configuration */
1495 	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1496 			  &pcie_state_targ_addr);
1497 	if (rv != QDF_STATUS_SUCCESS) {
1498 		hif_err("interconnect_targ_addr = 0x%0x, ret = %d",
1499 			interconnect_targ_addr, rv);
1500 		goto done;
1501 	}
1502 	if (pcie_state_targ_addr == 0) {
1503 		rv = QDF_STATUS_E_FAILURE;
1504 		hif_err("pcie state addr is 0");
1505 		goto done;
1506 	}
1507 	pipe_cfg_addr = pcie_state_targ_addr +
1508 			  offsetof(struct pcie_state_s,
1509 			  pipe_cfg_addr);
1510 	rv = hif_diag_read_access(hif_hdl,
1511 			  pipe_cfg_addr,
1512 			  &pipe_cfg_targ_addr);
1513 	if (rv != QDF_STATUS_SUCCESS) {
1514 		hif_err("pipe_cfg_addr = 0x%0x, ret = %d", pipe_cfg_addr, rv);
1515 		goto done;
1516 	}
1517 	if (pipe_cfg_targ_addr == 0) {
1518 		rv = QDF_STATUS_E_FAILURE;
1519 		hif_err("pipe cfg addr is 0");
1520 		goto done;
1521 	}
1522 
1523 	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1524 			(uint8_t *) target_ce_config,
1525 			target_ce_config_sz);
1526 
1527 	if (rv != QDF_STATUS_SUCCESS) {
1528 		hif_err("write pipe cfg: %d", rv);
1529 		goto done;
1530 	}
1531 
1532 	rv = hif_diag_read_access(hif_hdl,
1533 			  pcie_state_targ_addr +
1534 			  offsetof(struct pcie_state_s,
1535 			   svc_to_pipe_map),
1536 			  &svc_to_pipe_map);
1537 	if (rv != QDF_STATUS_SUCCESS) {
1538 		hif_err("get svc/pipe map: %d", rv);
1539 		goto done;
1540 	}
1541 	if (svc_to_pipe_map == 0) {
1542 		rv = QDF_STATUS_E_FAILURE;
1543 		hif_err("svc_to_pipe map is 0");
1544 		goto done;
1545 	}
1546 
1547 	rv = hif_diag_write_mem(hif_hdl,
1548 			svc_to_pipe_map,
1549 			(uint8_t *) target_service_to_ce_map,
1550 			target_service_to_ce_map_sz);
1551 	if (rv != QDF_STATUS_SUCCESS) {
1552 		hif_err("write svc/pipe map: %d", rv);
1553 		goto done;
1554 	}
1555 
1556 	rv = hif_diag_read_access(hif_hdl,
1557 			pcie_state_targ_addr +
1558 			offsetof(struct pcie_state_s,
1559 			config_flags),
1560 			&pcie_config_flags);
1561 	if (rv != QDF_STATUS_SUCCESS) {
1562 		hif_err("get pcie config_flags: %d", rv);
1563 		goto done;
1564 	}
1565 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1566 	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1567 #else
1568 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1569 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1570 	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1571 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1572 	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1573 #endif
1574 	rv = hif_diag_write_mem(hif_hdl,
1575 			pcie_state_targ_addr +
1576 			offsetof(struct pcie_state_s,
1577 			config_flags),
1578 			(uint8_t *) &pcie_config_flags,
1579 			sizeof(pcie_config_flags));
1580 	if (rv != QDF_STATUS_SUCCESS) {
1581 		hif_err("write pcie config_flags: %d", rv);
1582 		goto done;
1583 	}
1584 
1585 #ifndef QCA_WIFI_3_0
1586 	/* configure early allocation */
1587 	ealloc_targ_addr = hif_hia_item_address(target_type,
1588 						offsetof(
1589 						struct host_interest_s,
1590 						hi_early_alloc));
1591 
1592 	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1593 			&ealloc_value);
1594 	if (rv != QDF_STATUS_SUCCESS) {
1595 		hif_err("get early alloc val: %d", rv);
1596 		goto done;
1597 	}
1598 
1599 	/* 1 bank is switched to IRAM, except ROME 1.0 */
1600 	ealloc_value |=
1601 		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1602 		 HI_EARLY_ALLOC_MAGIC_MASK);
1603 
1604 	rv = hif_diag_read_access(hif_hdl,
1605 			  CHIP_ID_ADDRESS |
1606 			  RTC_SOC_BASE_ADDRESS, &chip_id);
1607 	if (rv != QDF_STATUS_SUCCESS) {
1608 		hif_err("get chip id val: %d", rv);
1609 		goto done;
1610 	}
1611 	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1612 		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1613 		switch (CHIP_ID_REVISION_GET(chip_id)) {
1614 		case 0x2:       /* ROME 1.3 */
1615 			/* 2 banks are switched to IRAM */
1616 			banks_switched = 2;
1617 			break;
1618 		case 0x4:       /* ROME 2.1 */
1619 		case 0x5:       /* ROME 2.2 */
1620 			banks_switched = 6;
1621 			break;
1622 		case 0x8:       /* ROME 3.0 */
1623 		case 0x9:       /* ROME 3.1 */
1624 		case 0xA:       /* ROME 3.2 */
1625 			banks_switched = 9;
1626 			break;
1627 		case 0x0:       /* ROME 1.0 */
1628 		case 0x1:       /* ROME 1.1 */
1629 		default:
1630 			/* 3 banks are switched to IRAM */
1631 			banks_switched = 3;
1632 			break;
1633 		}
1634 	}
1635 
1636 	ealloc_value |=
1637 		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1638 		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1639 
1640 	rv = hif_diag_write_access(hif_hdl,
1641 				ealloc_targ_addr,
1642 				ealloc_value);
1643 	if (rv != QDF_STATUS_SUCCESS) {
1644 		hif_err("set early alloc val: %d", rv);
1645 		goto done;
1646 	}
1647 #endif
1648 	if ((target_type == TARGET_TYPE_AR900B)
1649 			|| (target_type == TARGET_TYPE_QCA9984)
1650 			|| (target_type == TARGET_TYPE_QCA9888)
1651 			|| (target_type == TARGET_TYPE_AR9888)) {
1652 		hif_set_hia_extnd(scn);
1653 	}
1654 
1655 	/* Tell Target to proceed with initialization */
1656 	flag2_targ_addr = hif_hia_item_address(target_type,
1657 						offsetof(
1658 						struct host_interest_s,
1659 						hi_option_flag2));
1660 
1661 	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1662 			  &flag2_value);
1663 	if (rv != QDF_STATUS_SUCCESS) {
1664 		hif_err("get option val: %d", rv);
1665 		goto done;
1666 	}
1667 
1668 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1669 	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1670 			   flag2_value);
1671 	if (rv != QDF_STATUS_SUCCESS) {
1672 		hif_err("set option val: %d", rv);
1673 		goto done;
1674 	}
1675 
1676 	hif_wake_target_cpu(scn);
1677 
1678 done:
1679 
1680 	return qdf_status_to_os_return(rv);
1681 }
1682 
1683 /**
1684  * hif_pci_bus_configure() - configure the pcie bus
1685  * @hif_sc: pointer to the hif context.
1686  *
1687  * Return: 0 for success, nonzero for failure.
1688  */
1689 int hif_pci_bus_configure(struct hif_softc *hif_sc)
1690 {
1691 	int status = 0;
1692 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1693 	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
1694 
1695 	hif_ce_prepare_config(hif_sc);
1696 
1697 	/* initialize sleep state adjust variables */
1698 	hif_state->sleep_timer_init = true;
1699 	hif_state->keep_awake_count = 0;
1700 	hif_state->fake_sleep = false;
1701 	hif_state->sleep_ticks = 0;
1702 
1703 	qdf_timer_init(NULL, &hif_state->sleep_timer,
1704 			       hif_sleep_entry, (void *)hif_state,
1705 			       QDF_TIMER_TYPE_WAKE_APPS);
1706 	hif_state->sleep_timer_init = true;
1707 
1708 	status = hif_wlan_enable(hif_sc);
1709 	if (status) {
1710 		hif_err("hif_wlan_enable error: %d", status);
1711 		goto timer_free;
1712 	}
1713 
1714 	A_TARGET_ACCESS_LIKELY(hif_sc);
1715 
1716 	if ((CONFIG_ATH_PCIE_MAX_PERF ||
1717 	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
1718 	    !ce_srng_based(hif_sc)) {
1719 		/*
1720 		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
1721 		 * prevent sleep when we want to keep firmware always awake
1722 		 * note: when we want to keep firmware always awake,
1723 		 *       hif_target_sleep_state_adjust will point to a dummy
1724 		 *       function, and hif_pci_target_sleep_state_adjust must
1725 		 *       be called instead.
1726 		 * note: bus type check is here because AHB bus is reusing
1727 		 *       hif_pci_bus_configure code.
1728 		 */
1729 		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
1730 			if (hif_pci_target_sleep_state_adjust(hif_sc,
1731 					false, true) < 0) {
1732 				status = -EACCES;
1733 				goto disable_wlan;
1734 			}
1735 		}
1736 	}
1737 
1738 	/* todo: consider replacing this with an srng field */
1739 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
1740 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
1741 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA9574) ||
1742 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5332) ||
1743 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
1744 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6122) ||
1745 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN9160) ||
1746 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018) ||
1747 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6432)) &&
1748 	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
1749 		hif_sc->per_ce_irq = true;
1750 	}
1751 
1752 	status = hif_config_ce(hif_sc);
1753 	if (status)
1754 		goto disable_wlan;
1755 
1756 	if (hif_needs_bmi(hif_osc)) {
1757 		status = hif_set_hia(hif_sc);
1758 		if (status)
1759 			goto unconfig_ce;
1760 
1761 		hif_debug("hif_set_hia done");
1762 
1763 	}
1764 
1765 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
1766 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
1767 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA9574) ||
1768 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5332) ||
1769 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
1770 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6122) ||
1771 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN9160) ||
1772 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018) ||
1773 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6432)) &&
1774 	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
1775 		hif_debug("Skip irq config for PCI based 8074 target");
1776 	else {
1777 		status = hif_configure_irq(hif_sc);
1778 		if (status < 0)
1779 			goto unconfig_ce;
1780 	}
1781 
1782 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
1783 
1784 	return status;
1785 
1786 unconfig_ce:
1787 	hif_unconfig_ce(hif_sc);
1788 disable_wlan:
1789 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
1790 	hif_wlan_disable(hif_sc);
1791 
1792 timer_free:
1793 	qdf_timer_stop(&hif_state->sleep_timer);
1794 	qdf_timer_free(&hif_state->sleep_timer);
1795 	hif_state->sleep_timer_init = false;
1796 
1797 	hif_err("Failed, status: %d", status);
1798 	return status;
1799 }
1800 
1801 /**
1802  * hif_pci_close(): hif_bus_close
1803  * @hif_sc: HIF context
1804  *
1805  * Return: n/a
1806  */
1807 void hif_pci_close(struct hif_softc *hif_sc)
1808 {
1809 	hif_rtpm_close(hif_sc);
1810 	hif_ce_close(hif_sc);
1811 }
1812 
1813 #define BAR_NUM 0
1814 
1815 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
1816 static inline int hif_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
1817 {
1818 	return dma_set_mask(&pci_dev->dev, mask);
1819 }
1820 
1821 static inline int hif_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
1822 						u64 mask)
1823 {
1824 	return dma_set_coherent_mask(&pci_dev->dev, mask);
1825 }
1826 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
1827 static inline int hif_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
1828 {
1829 	return pci_set_dma_mask(pci_dev, mask);
1830 }
1831 
1832 static inline int hif_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
1833 						u64 mask)
1834 {
1835 	return pci_set_consistent_dma_mask(pci_dev, mask);
1836 }
1837 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
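/*
 * The wrappers above exist because the legacy pci_set_dma_mask() /
 * pci_set_consistent_dma_mask() helpers were removed from the kernel
 * (around v5.18) in favor of the generic dma_set_mask() family.  The
 * probe path below calls the wrappers uniformly, e.g.:
 *
 *   ret = hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 *   if (!ret)
 *           ret = hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(32));
 */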
1838 
1839 static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
1840 				struct pci_dev *pdev,
1841 				const struct pci_device_id *id)
1842 {
1843 	void __iomem *mem;
1844 	int ret = 0;
1845 	uint16_t device_id = 0;
1846 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1847 
1848 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
1849 	if (device_id != id->device)  {
1850 		hif_err(
1851 		   "dev id mismatch, config id = 0x%x, probing id = 0x%x",
1852 		   device_id, id->device);
1853 		/* pci link is down, so returning with error code */
1854 		return -EIO;
1855 	}
1856 
1857 	/* FIXME: temp. commenting out assign_resource
1858 	 * call for dev_attach to work on 2.6.38 kernel
1859 	 */
1860 #if (!defined(__LINUX_ARM_ARCH__))
1861 	if (pci_assign_resource(pdev, BAR_NUM)) {
1862 		hif_err("pci_assign_resource error");
1863 		return -EIO;
1864 	}
1865 #endif
1866 	if (pci_enable_device(pdev)) {
1867 		hif_err("pci_enable_device error");
1868 		return -EIO;
1869 	}
1870 
1871 	/* Request MMIO resources */
1872 	ret = pci_request_region(pdev, BAR_NUM, "ath");
1873 	if (ret) {
1874 		hif_err("PCI MMIO reservation error");
1875 		ret = -EIO;
1876 		goto err_region;
1877 	}
1878 
1879 #ifdef CONFIG_ARM_LPAE
1880 	/* If CONFIG_ARM_LPAE is enabled, the 64-bit DMA mask must be
1881 	 * set even for 32-bit devices.
1882 	 */
1883 	ret =  hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1884 	if (ret) {
1885 		hif_err("Cannot enable 64-bit pci DMA");
1886 		goto err_dma;
1887 	}
1888 	ret = hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(64));
1889 	if (ret) {
1890 		hif_err("Cannot enable 64-bit DMA");
1891 		goto err_dma;
1892 	}
1893 #else
1894 	ret = hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1895 	if (ret) {
1896 		hif_err("Cannot enable 32-bit pci DMA");
1897 		goto err_dma;
1898 	}
1899 	ret = hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(32));
1900 	if (ret) {
1901 		hif_err("Cannot enable 32-bit coherent DMA!");
1902 		goto err_dma;
1903 	}
1904 #endif
1905 
1906 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
1907 
1908 	/* Set bus master bit in PCI_COMMAND to enable DMA */
1909 	pci_set_master(pdev);
1910 
1911 	/* Arrange for access to Target SoC registers. */
1912 	mem = pci_iomap(pdev, BAR_NUM, 0);
1913 	if (!mem) {
1914 		hif_err("PCI iomap error");
1915 		ret = -EIO;
1916 		goto err_iomap;
1917 	}
1918 
1919 	hif_info("*****BAR is %pK", (void *)mem);
1920 
1921 	sc->mem = mem;
1922 
1923 	/* Hawkeye emulation specific change */
1924 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
1925 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
1926 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
1927 		(device_id == RUMIM2M_DEVICE_ID_NODE3) ||
1928 		(device_id == RUMIM2M_DEVICE_ID_NODE4) ||
1929 		(device_id == RUMIM2M_DEVICE_ID_NODE5)) {
1930 		mem = mem + 0x0c000000;
1931 		sc->mem = mem;
1932 		hif_info("Changing PCI mem base to %pK", sc->mem);
1933 	}
1934 
1935 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
1936 	ol_sc->mem = mem;
1937 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
1938 	sc->pci_enabled = true;
1939 	return ret;
1940 
1941 err_iomap:
1942 	pci_clear_master(pdev);
1943 err_dma:
1944 	pci_release_region(pdev, BAR_NUM);
1945 err_region:
1946 	pci_disable_device(pdev);
1947 	return ret;
1948 }
1949 
1950 static int hif_enable_pci_pld(struct hif_pci_softc *sc,
1951 			      struct pci_dev *pdev,
1952 			      const struct pci_device_id *id)
1953 {
1954 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
1955 	sc->pci_enabled = true;
1956 	return 0;
1957 }
1958 
1959 
1960 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
1961 {
1962 	pci_disable_msi(sc->pdev);
1963 	pci_iounmap(sc->pdev, sc->mem);
1964 	pci_clear_master(sc->pdev);
1965 	pci_release_region(sc->pdev, BAR_NUM);
1966 	pci_disable_device(sc->pdev);
1967 }
1968 
1969 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
1970 
1971 static void hif_disable_pci(struct hif_pci_softc *sc)
1972 {
1973 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1974 
1975 	if (!ol_sc) {
1976 		hif_err("ol_sc = NULL");
1977 		return;
1978 	}
1979 	hif_pci_device_reset(sc);
1980 	sc->hif_pci_deinit(sc);
1981 
1982 	sc->mem = NULL;
1983 	ol_sc->mem = NULL;
1984 }
1985 
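/**
 * hif_pci_probe_tgt_wakeup(): wake the Target during probe
 * @sc: pci softc
 *
 * Forces the Target awake and verifies that it was started cleanly;
 * an aux-powered Target left running by a crashed or restarted Host
 * is caught here so the caller can reset it and retry the probe.
 *
 * Return: 0 on success, -EAGAIN if the Target is in an unexpected state
 */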
1986 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
1987 {
1988 	int ret = 0;
1989 	int targ_awake_limit = 500;
1990 #ifndef QCA_WIFI_3_0
1991 	uint32_t fw_indicator;
1992 #endif
1993 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1994 
1995 	/*
1996 	 * Verify that the Target was started cleanly.
1997 	 * The case where this is most likely is with an AUX-powered
1998 	 * Target and a Host in WoW mode. If the Host crashes,
1999 	 * loses power, or is restarted (without unloading the driver)
2000 	 * then the Target is left (aux) powered and running.  On a
2001 	 * subsequent driver load, the Target is in an unexpected state.
2002 	 * We try to catch that here in order to reset the Target and
2003 	 * retry the probe.
2004 	 */
2005 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2006 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2007 	while (!hif_targ_is_awake(scn, sc->mem)) {
2008 		if (0 == targ_awake_limit) {
2009 			hif_err("target awake timeout");
2010 			ret = -EAGAIN;
2011 			goto end;
2012 		}
2013 		qdf_mdelay(1);
2014 		targ_awake_limit--;
2015 	}
2016 
2017 #if PCIE_BAR0_READY_CHECKING
2018 	{
2019 		int wait_limit = 200;
2020 		/* Synchronization point: wait until BAR0 is configured */
2021 		while (wait_limit-- &&
2022 			   !(hif_read32_mb(sc, sc->mem +
2023 					  PCIE_LOCAL_BASE_ADDRESS +
2024 					  PCIE_SOC_RDY_STATUS_ADDRESS)
2025 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
2026 			qdf_mdelay(10);
2027 		}
2028 		if (wait_limit < 0) {
2029 			/* AR6320v1 doesn't support checking of BAR0
2030 			 * configuration; wait up to two sec for BAR0 ready
2031 			 */
2032 			hif_debug("AR6320v1 waits two sec for BAR0");
2033 		}
2034 	}
2035 #endif
2036 
2037 #ifndef QCA_WIFI_3_0
2038 	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
2039 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2040 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2041 
2042 	if (fw_indicator & FW_IND_INITIALIZED) {
2043 		hif_err("Target is in an unknown state. EAGAIN");
2044 		ret = -EAGAIN;
2045 		goto end;
2046 	}
2047 #endif
2048 
2049 end:
2050 	return ret;
2051 }
2052 
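/**
 * hif_pci_configure_legacy_irq(): fall back to legacy line interrupts
 * @sc: pci softc
 *
 * Used when MSI is unsupported or MSI setup failed: requests the
 * shared legacy PCI interrupt, unmasks HOST_GROUP0 and wakes the
 * Target where required.
 *
 * Return: 0 on success, errno from request_irq() on failure
 */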
2053 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2054 {
2055 	int ret = 0;
2056 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2057 	uint32_t target_type = scn->target_info.target_type;
2058 
2059 	hif_info("E");
2060 
2061 	/* MSI is not supported, or MSI IRQ setup failed */
2062 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2063 	ret = request_irq(sc->pdev->irq,
2064 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2065 			  "wlan_pci", sc);
2066 	if (ret) {
2067 		hif_err("request_irq failed, ret: %d", ret);
2068 		goto end;
2069 	}
2070 	scn->wake_irq = sc->pdev->irq;
2071 	/* Use sc->irq instead of sc->pdev->irq;
2072 	 * a platform_device pdev doesn't have an irq field
2073 	 */
2074 	sc->irq = sc->pdev->irq;
2075 	/* Use Legacy PCI Interrupts */
2076 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2077 		  PCIE_INTR_ENABLE_ADDRESS),
2078 		  HOST_GROUP0_MASK);
2079 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2080 			       PCIE_INTR_ENABLE_ADDRESS));
2081 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2082 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2083 
2084 	if ((target_type == TARGET_TYPE_AR900B)  ||
2085 			(target_type == TARGET_TYPE_QCA9984) ||
2086 			(target_type == TARGET_TYPE_AR9888) ||
2087 			(target_type == TARGET_TYPE_QCA9888) ||
2088 			(target_type == TARGET_TYPE_AR6320V1) ||
2089 			(target_type == TARGET_TYPE_AR6320V2) ||
2090 			(target_type == TARGET_TYPE_AR6320V3)) {
2091 		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2092 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2093 	}
2094 end:
2095 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2096 			  "%s: X, ret = %d", __func__, ret);
2097 	return ret;
2098 }
2099 
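/**
 * hif_ce_srng_free_irq(): free the per-CE IRQs
 * @scn: hif context
 *
 * Frees the IRQ of every copy engine whose tasklet was initialized,
 * skipping CEs that run with interrupts disabled.
 *
 * Return: 0 on success, errno if the MSI assignment lookup fails
 */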
2100 static int hif_ce_srng_free_irq(struct hif_softc *scn)
2101 {
2102 	int ret = 0;
2103 	int ce_id, irq;
2104 	uint32_t msi_data_start;
2105 	uint32_t msi_data_count;
2106 	uint32_t msi_irq_start;
2107 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2108 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
2109 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2110 
2111 	if (!pld_get_enable_intx(scn->qdf_dev->dev)) {
2112 		ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2113 						  &msi_data_count,
2114 						  &msi_data_start,
2115 						  &msi_irq_start);
2116 		if (ret)
2117 			return ret;
2118 	}
2119 
2120 	/* needs to match the ce_id -> irq data mapping
2121 	 * used in the srng parameter configuration
2122 	 */
2123 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2124 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
2125 			continue;
2126 
2127 		if (!ce_sc->tasklets[ce_id].inited)
2128 			continue;
2129 
2130 		irq = sc->ce_irq_num[ce_id];
2131 
2132 		hif_ce_irq_remove_affinity_hint(irq);
2133 
2134 		hif_debug("%s: (ce_id %d, irq %d)", __func__, ce_id, irq);
2135 
2136 		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
2137 	}
2138 
2139 	return ret;
2140 }
2141 
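/**
 * hif_pci_deconfigure_grp_irq(): free the DP group IRQs
 * @scn: hif context
 *
 * Releases every IRQ requested for the extended (DP) interrupt groups
 * and clears their unlazy-disable status flags.
 *
 * Return: none
 */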
2142 void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2143 {
2144 	int i, j, irq;
2145 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2146 	struct hif_exec_context *hif_ext_group;
2147 
2148 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2149 		hif_ext_group = hif_state->hif_ext_group[i];
2150 		if (hif_ext_group->irq_requested) {
2151 			hif_ext_group->irq_requested = false;
2152 			for (j = 0; j < hif_ext_group->numirq; j++) {
2153 				irq = hif_ext_group->os_irq[j];
2154 				if (scn->irq_unlazy_disable) {
2155 					qdf_dev_clear_irq_status_flags(
2156 							irq,
2157 							QDF_IRQ_DISABLE_UNLAZY);
2158 				}
2159 				pfrm_free_irq(scn->qdf_dev->dev,
2160 					      irq, hif_ext_group);
2161 			}
2162 			hif_ext_group->numirq = 0;
2163 		}
2164 	}
2165 }
2166 
2167 /**
2168  * hif_pci_nointrs(): disable IRQ
2169  * @scn: struct hif_softc
2170  *
2171  * This function stops interrupt(s)
2172  *
2173  * Return: none
2174  */
2175 void hif_pci_nointrs(struct hif_softc *scn)
2176 {
2177 	int i, ret;
2178 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2179 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2180 
2181 	scn->free_irq_done = true;
2182 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2183 
2184 	if (!scn->request_irq_done)
2185 		return;
2186 
2187 	hif_pci_deconfigure_grp_irq(scn);
2188 
2189 	ret = hif_ce_srng_free_irq(scn);
2190 	if (ret != -EINVAL) {
2191 		/* ce irqs freed in hif_ce_srng_free_irq */
2192 
2193 		if (scn->wake_irq)
2194 			pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
2195 		scn->wake_irq = 0;
2196 	} else if (sc->num_msi_intrs > 0) {
2197 		/* MSI interrupt(s) */
2198 		for (i = 0; i < sc->num_msi_intrs; i++)
2199 			free_irq(sc->irq + i, sc);
2200 		sc->num_msi_intrs = 0;
2201 	} else {
2202 		/* Legacy PCI line interrupt
2203 		 * Use sc->irq instead of sc->pdev->irq;
2204 		 * a platform_device pdev doesn't have an irq field
2205 		 */
2206 		free_irq(sc->irq, sc);
2207 	}
2208 	scn->request_irq_done = false;
2209 }
2210 
2211 static inline
2212 bool hif_pci_default_link_up(struct hif_target_info *tgt_info)
2213 {
2214 	return ADRASTEA_BU && (tgt_info->target_type != TARGET_TYPE_QCN7605);
2218 }

2219 /**
2220  * hif_pci_disable_bus(): hif_disable_bus
2221  * @scn: hif context
2222  *
2223  * This function disables the bus
2224  *
2225  * Return: none
2226  */
2227 void hif_pci_disable_bus(struct hif_softc *scn)
2228 {
2229 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2230 	struct pci_dev *pdev;
2231 	void __iomem *mem;
2232 	struct hif_target_info *tgt_info = &scn->target_info;
2233 
2234 	/* Attach did not succeed, all resources have been
2235 	 * freed in error handler
2236 	 */
2237 	if (!sc)
2238 		return;
2239 
2240 	pdev = sc->pdev;
2241 	if (hif_pci_default_link_up(tgt_info)) {
2242 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2243 
2244 		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2245 		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
2246 			       HOST_GROUP0_MASK);
2247 	}
2248 
2249 #if defined(CPU_WARM_RESET_WAR)
2250 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2251 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2252 	 * verified for AR9888_REV1
2253 	 */
2254 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2255 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2256 		hif_pci_device_warm_reset(sc);
2257 	else
2258 		hif_pci_device_reset(sc);
2259 #else
2260 	hif_pci_device_reset(sc);
2261 #endif
2262 	mem = (void __iomem *)sc->mem;
2263 	if (mem) {
2264 		hif_dump_pipe_debug_count(scn);
2265 		if (scn->athdiag_procfs_inited) {
2266 			athdiag_procfs_remove();
2267 			scn->athdiag_procfs_inited = false;
2268 		}
2269 		sc->hif_pci_deinit(sc);
2270 		scn->mem = NULL;
2271 	}
2272 	hif_info("X");
2273 }
2274 
2275 #define OL_ATH_PCI_PM_CONTROL 0x44
2276 
2277 #ifdef CONFIG_PLD_PCIE_CNSS
2278 /**
2279  * hif_pci_prevent_linkdown(): prevent or allow pcie linkdown
2280  * @scn: hif context
2281  * @flag: true prevents linkdown, false allows
2282  *
2283  * Calls into the platform driver to vote against taking down the
2284  * pcie link.
2285  *
2286  * Return: n/a
2287  */
2288 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2289 {
2290 	int errno;
2291 
2292 	hif_info("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2293 	hif_runtime_prevent_linkdown(scn, flag);
2294 
2295 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2296 	if (errno)
2297 		hif_err("Failed pld_wlan_pm_control; errno %d", errno);
2298 }
2299 #else
2300 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2301 {
2302 }
2303 #endif
2304 
2305 #ifdef CONFIG_PCI_LOW_POWER_INT_REG
2306 /**
2307  * hif_pci_config_low_power_int_register() - configure pci low power
2308  *                                           interrupt register.
2309  * @scn: hif context
2310  * @enable: true to set the bits, false to clear them.
2311  *
2312  * Configure the bits INTR_L1SS and INTR_CLKPM of
2313  * PCIE_LOW_POWER_INT_MASK register.
2314  *
2315  * Return: n/a
2316  */
2317 static void hif_pci_config_low_power_int_register(struct hif_softc *scn,
2318 						  bool enable)
2319 {
2320 	void *address;
2321 	uint32_t value;
2322 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2323 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
2324 	uint32_t target_type = tgt_info->target_type;
2325 
2326 	/*
2327 	 * Only configure the bits INTR_L1SS and INTR_CLKPM of
2328 	 * the PCIE_LOW_POWER_INT_MASK register for QCA6174, to address a
2329 	 * high power consumption issue. NFA344A power consumption stays
2330 	 * above 80mA after entering Modern Standby, but drops back to
2331 	 * normal after PERST# de-assert.
2332 	 */
2333 	if ((target_type == TARGET_TYPE_AR6320) ||
2334 	    (target_type == TARGET_TYPE_AR6320V1) ||
2335 	    (target_type == TARGET_TYPE_AR6320V2) ||
2336 	    (target_type == TARGET_TYPE_AR6320V3)) {
2337 		hif_info("Configure PCI low power int mask register");
2338 
2339 		address = scn->mem + PCIE_LOW_POWER_INT_MASK_OFFSET;
2340 
2341 		/* Configure bit3 INTR_L1SS */
2342 		value = hif_read32_mb(scn, address);
2343 		if (enable)
2344 			value |= INTR_L1SS;
2345 		else
2346 			value &= ~INTR_L1SS;
2347 		hif_write32_mb(scn, address, value);
2348 
2349 		/* Configure bit4 INTR_CLKPM */
2350 		value = hif_read32_mb(scn, address);
2351 		if (enable)
2352 			value |= INTR_CLKPM;
2353 		else
2354 			value &= ~INTR_CLKPM;
2355 		hif_write32_mb(scn, address, value);
2356 	}
2357 }
2358 #else
2359 static inline void hif_pci_config_low_power_int_register(struct hif_softc *scn,
2360 							 bool enable)
2361 {
2362 }
2363 #endif
2364 
2365 /**
2366  * hif_pci_bus_suspend(): prepare hif for suspend
2367  * @scn: hif context
2368  *
2369  * Return: Errno
2370  */
2371 int hif_pci_bus_suspend(struct hif_softc *scn)
2372 {
2373 	QDF_STATUS ret;
2374 
2375 	hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
2376 
2377 	ret = hif_try_complete_tasks(scn);
2378 	if (QDF_IS_STATUS_ERROR(ret)) {
2379 		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2380 		return -EBUSY;
2381 	}
2382 
2383 	/*
2384 	 * In the unlikely case that draining would loop forever, an
2385 	 * error is returned and the bus suspend must be aborted.
2386 	 */
2387 	ret = hif_drain_fw_diag_ce(scn);
2388 	if (ret) {
2389 		hif_err("fw_diag_ce drain did not complete; aborting suspend");
2390 		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2391 		return -EBUSY;
2392 	}
2393 
2394 	/* Stop the HIF Sleep Timer */
2395 	hif_cancel_deferred_target_sleep(scn);
2396 
2397 	/*
2398 	 * Only the INTR_L1SS/INTR_CLKPM bits need to be cleared on suspend.
2399 	 * There is no need to set them again on resume, as firmware will
2400 	 * restore the bits itself.
2401 	 */
2402 	hif_pci_config_low_power_int_register(scn, false);
2403 
2404 	scn->bus_suspended = true;
2405 
2406 	return 0;
2407 }
2408 
2409 #ifdef PCI_LINK_STATUS_SANITY
2410 /**
2411  * __hif_check_link_status() - API to check if PCIe link is active/not
2412  * @scn: HIF Context
2413  *
2414  * API reads the PCIe config space to verify if PCIe link training is
2415  * successful or not.
2416  *
2417  * Return: Success/Failure
2418  */
2419 static int __hif_check_link_status(struct hif_softc *scn)
2420 {
2421 	uint16_t dev_id = 0;
2422 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2423 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2424 
2425 	if (!sc) {
2426 		hif_err("HIF Bus Context is Invalid");
2427 		return -EINVAL;
2428 	}
2429 
2430 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2431 
2432 	if (dev_id == sc->devid)
2433 		return 0;
2434 
2435 	hif_err("Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2436 	       dev_id);
2437 
2438 	scn->recovery = true;
2439 
2440 	if (cbk && cbk->set_recovery_in_progress)
2441 		cbk->set_recovery_in_progress(cbk->context, true);
2442 	else
2443 		hif_err("Driver Global Recovery is not set");
2444 
2445 	pld_is_pci_link_down(sc->dev);
2446 	return -EACCES;
2447 }
2448 #else
2449 static inline int __hif_check_link_status(struct hif_softc *scn)
2450 {
2451 	return 0;
2452 }
2453 #endif
2454 
2455 
2456 #ifdef HIF_BUS_LOG_INFO
2457 bool hif_log_pcie_info(struct hif_softc *scn, uint8_t *data,
2458 		       unsigned int *offset)
2459 {
2460 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2461 	struct hang_event_bus_info info = {0};
2462 	size_t size;
2463 
2464 	if (!sc) {
2465 		hif_err("HIF Bus Context is Invalid");
2466 		return false;
2467 	}
2468 
2469 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &info.dev_id);
2470 
2471 	size = sizeof(info);
2472 	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_BUS_INFO,
2473 			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);
2474 
2475 	if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
2476 		return false;
2477 
2478 	qdf_mem_copy(data + *offset, &info, size);
2479 	*offset = *offset + size;
2480 
2481 	if (info.dev_id == sc->devid)
2482 		return false;
2483 
2484 	qdf_recovery_reason_update(QCA_HANG_BUS_FAILURE);
2485 	qdf_get_bus_reg_dump(scn->qdf_dev->dev, data,
2486 			     (QDF_WLAN_HANG_FW_OFFSET - size));
2487 	return true;
2488 }
2489 #endif
2490 
2491 /**
2492  * hif_pci_bus_resume(): prepare hif for resume
2493  * @scn: hif context
2494  *
2495  * Return: Errno
2496  */
2497 int hif_pci_bus_resume(struct hif_softc *scn)
2498 {
2499 	int errno;
2500 
2501 	scn->bus_suspended = false;
2502 
2503 	errno = __hif_check_link_status(scn);
2504 	if (errno)
2505 		return errno;
2506 
2507 	hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2508 
2509 	return 0;
2510 }
2511 
2512 /**
2513  * hif_pci_bus_suspend_noirq() - mark the pcie link as suspended
2514  * @scn: hif context
2515  *
2516  * If the link can be suspended, mark it as suspended so that any
2517  * register access while suspended can be detected.
2518  *
2519  * Return: 0 (always)
2520  */
2521 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2522 {
2523 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2524 		qdf_atomic_set(&scn->link_suspended, 1);
2525 
2526 	return 0;
2527 }
2528 
2529 /**
2530  * hif_pci_bus_resume_noirq() - clear the link suspended flag
2531  * @scn: hif context
2532  *
2533  * The PCIe link is already resumed by the time this runs, so clear
2534  * the link suspended flag that was set during suspend_noirq.
2535  *
2536  * Return: 0 (always)
2537  */
2538 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2539 {
2540 	/* a vote for link up can come in the middle of the ongoing resume
2541 	 * process. hence, clear the link suspend flag once
2542 	 * hif_bus_resume_noirq() succeeds since PCIe link is already resumed
2543 	 * by this time
2544 	 */
2545 	qdf_atomic_set(&scn->link_suspended, 0);
2546 
2547 	return 0;
2548 }
2549 
2550 #if CONFIG_PCIE_64BIT_MSI
2551 static void hif_free_msi_ctx(struct hif_softc *scn)
2552 {
2553 	struct hif_pci_softc *sc = scn->hif_sc;
2554 	struct hif_msi_info *info = &sc->msi_info;
2555 	struct device *dev = scn->qdf_dev->dev;
2556 
2557 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
2558 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
2559 	info->magic = NULL;
2560 	info->magic_dma = 0;
2561 }
2562 #else
2563 static void hif_free_msi_ctx(struct hif_softc *scn)
2564 {
2565 }
2566 #endif
2567 
2568 void hif_pci_disable_isr(struct hif_softc *scn)
2569 {
2570 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2571 
2572 	hif_exec_kill(&scn->osc);
2573 	hif_nointrs(scn);
2574 	hif_free_msi_ctx(scn);
2575 	/* Cancel the pending tasklet */
2576 	ce_tasklet_kill(scn);
2577 	tasklet_kill(&sc->intr_tq);
2578 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
2579 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
2580 }
2581 
2582 /* Function to reset SoC */
2583 void hif_pci_reset_soc(struct hif_softc *hif_sc)
2584 {
2585 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
2586 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
2587 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
2588 
2589 #if defined(CPU_WARM_RESET_WAR)
2590 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2591 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2592 	 * verified for AR9888_REV1
2593 	 */
2594 	if (tgt_info->target_version == AR9888_REV2_VERSION)
2595 		hif_pci_device_warm_reset(sc);
2596 	else
2597 		hif_pci_device_reset(sc);
2598 #else
2599 	hif_pci_device_reset(sc);
2600 #endif
2601 }
2602 
2603 /**
2604  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
2605  * @sc: HIF PCIe Context
2606  *
2607  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
2608  *
2609  * Return: Failure to caller
2610  * Return: -EACCES to indicate failure to the caller
2611 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
2612 {
2613 	uint16_t val = 0;
2614 	uint32_t bar = 0;
2615 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
2616 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2617 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
2618 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
2619 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2620 	A_target_id_t pci_addr = scn->mem;
2621 
2622 	hif_info("keep_awake_count = %d", hif_state->keep_awake_count);
2623 
2624 	pfrm_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
2625 
2626 	hif_info("PCI Vendor ID = 0x%04x", val);
2627 
2628 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
2629 
2630 	hif_info("PCI Device ID = 0x%04x", val);
2631 
2632 	pfrm_read_config_word(sc->pdev, PCI_COMMAND, &val);
2633 
2634 	hif_info("PCI Command = 0x%04x", val);
2635 
2636 	pfrm_read_config_word(sc->pdev, PCI_STATUS, &val);
2637 
2638 	hif_info("PCI Status = 0x%04x", val);
2639 
2640 	pfrm_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
2641 
2642 	hif_info("PCI BAR 0 = 0x%08x", bar);
2643 
2644 	hif_info("SOC_WAKE_ADDR 0x%08x",
2645 		hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
2646 				PCIE_SOC_WAKE_ADDRESS));
2647 
2648 	hif_info("RTC_STATE_ADDR 0x%08x",
2649 		hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
2650 							RTC_STATE_ADDRESS));
2651 
2652 	hif_info("wakeup target");
2653 
2654 	if (!cfg->enable_self_recovery)
2655 		QDF_BUG(0);
2656 
2657 	scn->recovery = true;
2658 
2659 	if (cbk->set_recovery_in_progress)
2660 		cbk->set_recovery_in_progress(cbk->context, true);
2661 
2662 	pld_is_pci_link_down(sc->dev);
2663 	return -EACCES;
2664 }
2665 
2666 /*
2667  * For now, we use simple on-demand sleep/wake.
2668  * Some possible improvements:
2669  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
2670  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
2671  *   Careful, though, these functions may be used by
2672  *  interrupt handlers ("atomic")
2673  *  -Don't use host_reg_table for this code; instead use values directly
2674  *  -Use a separate timer to track activity and allow Target to sleep only
2675  *   if it hasn't done anything for a while; may even want to delay some
2676  *   processing for a short while in order to "batch" (e.g.) transmit
2677  *   requests with completion processing into "windows of up time".  Costs
2678  *   some performance, but improves power utilization.
2679  *  -On some platforms, it might be possible to eliminate explicit
2680  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
2681  *   recover from the failure by forcing the Target awake.
2682  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
2683  *   overhead in some cases. Perhaps this makes more sense when
2684  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
2685  *   disabled.
2686  *  -It is possible to compile this code out and simply force the Target
2687  *   to remain awake.  That would yield optimal performance at the cost of
2688  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
2689  *
2690  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
2691  */
2692 
2693 /**
2694  * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
2695  * @scn: hif_softc pointer.
2696  * @sleep_ok: true to allow the Target to go back to sleep
2697  * @wait_for_it: when waking, wait until the Target is verified awake
2698  *
2699  * Allow the Target to sleep, or force it awake.
2700  *
2701  * Return: 0 on success, negative errno on failure
2702  */
2703 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
2704 			      bool sleep_ok, bool wait_for_it)
2705 {
2706 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2707 	A_target_id_t pci_addr = scn->mem;
2708 	static int max_delay;
2709 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2710 	static int debug;

2711 	if (scn->recovery)
2712 		return -EACCES;
2713 
2714 	if (qdf_atomic_read(&scn->link_suspended)) {
2715 		hif_err("Invalid access, PCIe link is down");
2716 		debug = true;
2717 		QDF_ASSERT(0);
2718 		return -EACCES;
2719 	}
2720 
2721 	if (debug) {
2722 		wait_for_it = true;
2723 		hif_err("Invalid access, PCIe link is suspended");
2724 		QDF_ASSERT(0);
2725 	}
2726 
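	/* keep_awake_count tracks nested wake requests: the Target may
	 * sleep only once the count drops back to zero; the first waker
	 * forces SOC_WAKE and, with wait_for_it, polls until the Target
	 * is verified awake.
	 */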
2727 	if (sleep_ok) {
2728 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2729 		hif_state->keep_awake_count--;
2730 		if (hif_state->keep_awake_count == 0) {
2731 			/* Allow sleep */
2732 			hif_state->verified_awake = false;
2733 			hif_state->sleep_ticks = qdf_system_ticks();
2734 		}
2735 		if (!hif_state->fake_sleep) {
2736 			/* Set the Fake Sleep */
2737 			hif_state->fake_sleep = true;
2738 
2739 			/* Start the Sleep Timer */
2740 			qdf_timer_stop(&hif_state->sleep_timer);
2741 			qdf_timer_start(&hif_state->sleep_timer,
2742 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
2743 		}
2744 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2745 	} else {
2746 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2747 
2748 		if (hif_state->fake_sleep) {
2749 			hif_state->verified_awake = true;
2750 		} else {
2751 			if (hif_state->keep_awake_count == 0) {
2752 				/* Force AWAKE */
2753 				hif_write32_mb(sc, pci_addr +
2754 					      PCIE_LOCAL_BASE_ADDRESS +
2755 					      PCIE_SOC_WAKE_ADDRESS,
2756 					      PCIE_SOC_WAKE_V_MASK);
2757 			}
2758 		}
2759 		hif_state->keep_awake_count++;
2760 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2761 
2762 		if (wait_for_it && !hif_state->verified_awake) {
2763 #define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
2764 			int tot_delay = 0;
2765 			int curr_delay = 5;
2766 
2767 			for (;;) {
2768 				if (hif_targ_is_awake(scn, pci_addr)) {
2769 					hif_state->verified_awake = true;
2770 					break;
2771 				}
2772 				if (!hif_pci_targ_is_present(scn, pci_addr))
2773 					break;
2774 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
2775 					return hif_log_soc_wakeup_timeout(sc);
2776 
2777 				OS_DELAY(curr_delay);
2778 				tot_delay += curr_delay;
2779 
2780 				if (curr_delay < 50)
2781 					curr_delay += 5;
2782 			}
2783 
2784 			/*
2785 			 * NB: If Target has to come out of Deep Sleep,
2786 			 * this may take a few msecs. Typically, though,
2787 			 * this delay should be <30us.
2788 			 */
2789 			if (tot_delay > max_delay)
2790 				max_delay = tot_delay;
2791 		}
2792 	}
2793 
2794 	if (debug && hif_state->verified_awake) {
2795 		debug = 0;
2796 		hif_err("INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
2797 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2798 				PCIE_INTR_ENABLE_ADDRESS),
2799 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2800 				PCIE_INTR_CAUSE_ADDRESS),
2801 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2802 				CPU_INTR_ADDRESS),
2803 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2804 				PCIE_INTR_CLR_ADDRESS),
2805 			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
2806 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
2807 	}
2808 
2809 	return 0;
2810 }
2811 
2812 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2813 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
2814 {
2815 	uint32_t value;
2816 	void *addr;
2817 
2818 	addr = scn->mem + offset;
2819 	value = hif_read32_mb(scn, addr);
2820 
2821 	{
2822 		unsigned long irq_flags;
2823 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2824 
2825 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2826 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2827 		pcie_access_log[idx].is_write = false;
2828 		pcie_access_log[idx].addr = addr;
2829 		pcie_access_log[idx].value = value;
2830 		pcie_access_log_seqnum++;
2831 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2832 	}
2833 
2834 	return value;
2835 }
2836 
2837 void
2838 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
2839 {
2840 	void *addr;
2841 
2842 	addr = scn->mem + (offset);
2843 	hif_write32_mb(scn, addr, value);
2844 
2845 	{
2846 		unsigned long irq_flags;
2847 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2848 
2849 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2850 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2851 		pcie_access_log[idx].is_write = true;
2852 		pcie_access_log[idx].addr = addr;
2853 		pcie_access_log[idx].value = value;
2854 		pcie_access_log_seqnum++;
2855 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2856 	}
2857 }
2858 
2859 /**
2860  * hif_target_dump_access_log() - dump access log
2861  *
2862  * dump access log
2863  *
2864  * Return: n/a
2865  */
2866 void hif_target_dump_access_log(void)
2867 {
2868 	int idx, len, start_idx, cur_idx;
2869 	unsigned long irq_flags;
2870 
2871 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2872 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
2873 		len = PCIE_ACCESS_LOG_NUM;
2874 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2875 	} else {
2876 		len = pcie_access_log_seqnum;
2877 		start_idx = 0;
2878 	}
2879 
2880 	for (idx = 0; idx < len; idx++) {
2881 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
2882 		hif_debug("idx:%d sn:%u wr:%d addr:%pK val:%u",
2883 		       idx,
2884 		       pcie_access_log[cur_idx].seqnum,
2885 		       pcie_access_log[cur_idx].is_write,
2886 		       pcie_access_log[cur_idx].addr,
2887 		       pcie_access_log[cur_idx].value);
2888 	}
2889 
2890 	pcie_access_log_seqnum = 0;
2891 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2892 }
2893 #endif
2894 
2895 #ifndef HIF_AHB
2896 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
2897 {
2898 	QDF_BUG(0);
2899 	return -EINVAL;
2900 }
2901 #endif
2902 
2903 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
2904 {
2905 	struct ce_tasklet_entry *tasklet_entry = context;

2906 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
2907 }

2908 extern const char *ce_name[];
2909 
2910 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
2911 {
2912 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
2913 
2914 	return pci_scn->ce_irq_num[ce_id];
2915 }
2916 
2917 /* hif_ce_srng_msi_irq_disable() - disable the irq for msi
2918  * @hif_sc: hif context
2919  * @ce_id: which ce to disable copy complete interrupts for
2920  *
2921  * Since MSI interrupts are not level based, the system can function
2922  * without disabling these interrupts.  Interrupt mitigation can be
2923  * added here for better system performance.
2924  */
2925 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
2926 {
2927 	pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
2928 				hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2929 }
2930 
2931 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
2932 {
2933 	if (__hif_check_link_status(hif_sc))
2934 		return;
2935 
2936 	pfrm_enable_irq(hif_sc->qdf_dev->dev,
2937 			hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2938 }
2939 
2940 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
2941 {
2942 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2943 }
2944 
2945 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
2946 {
2947 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2948 }
2949 
2950 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
2951 /**
2952  * hif_ce_configure_legacyirq() - Configure CE interrupts
2953  * @scn: hif_softc pointer
2954  *
2955  * Configure CE legacy interrupts
2956  *
2957  * Return: int
2958  */
2959 static int hif_ce_configure_legacyirq(struct hif_softc *scn)
2960 {
2961 	int ret = 0;
2962 	int irq, ce_id;
2963 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2964 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
2965 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
2966 	int pci_slot;
2967 	qdf_device_t qdf_dev = scn->qdf_dev;
2968 
2969 	if (!pld_get_enable_intx(scn->qdf_dev->dev))
2970 		return -EINVAL;
2971 
2972 	scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
2973 	scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
2974 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
2975 
2976 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2977 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
2978 			continue;
2979 
2980 		if (host_ce_conf[ce_id].flags & CE_ATTR_INIT_ON_DEMAND)
2981 			continue;
2982 
2983 		ret = pfrm_get_irq(scn->qdf_dev->dev,
2984 				   (struct qdf_pfm_hndl *)qdf_dev->cnss_pdev,
2985 				   legacy_ic_irqname[ce_id], ce_id, &irq);
2986 		if (ret) {
2987 			dev_err(scn->qdf_dev->dev, "get irq failed\n");
2988 			ret = -EFAULT;
2989 			goto skip;
2990 		}
2991 
2992 		pci_slot = hif_get_pci_slot(scn);
2993 		qdf_scnprintf(ce_irqname[pci_slot][ce_id],
2994 			      DP_IRQ_NAME_LEN, "pci%d_ce_%u", pci_slot, ce_id);
2995 		pci_sc->ce_irq_num[ce_id] = irq;
2996 
2997 		ret = pfrm_request_irq(scn->qdf_dev->dev, irq,
2998 				       hif_ce_interrupt_handler,
2999 				       IRQF_SHARED,
3000 				       ce_irqname[pci_slot][ce_id],
3001 				       &ce_sc->tasklets[ce_id]);
3002 		if (ret) {
3003 			hif_err("error = %d", ret);
3004 			return -EINVAL;
3005 		}
3006 	}
3007 
3008 skip:
3009 	return ret;
3010 }
3011 #else
3012 /**
3013  * hif_ce_configure_legacyirq() - Configure CE interrupts
3014  * @scn: hif_softc pointer
3015  *
3016  * Configure CE legacy interrupts
3017  *
3018  * Return: int
3019  */
3020 static int hif_ce_configure_legacyirq(struct hif_softc *scn)
3021 {
3022 	return 0;
3023 }
3024 #endif
3025 
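/**
 * hif_ce_msi_configure_irq_by_ceid(): request the MSI IRQ for one CE
 * @scn: hif context
 * @ce_id: copy engine id
 *
 * Looks up the MSI vector assigned to @ce_id, derives the OS IRQ
 * number, names it per PCI slot and requests it with the CE tasklet
 * as the interrupt context.
 *
 * Return: 0 on success, -EINVAL on failure
 */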
3026 int hif_ce_msi_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
3027 {
3028 	int ret = 0;
3029 	int irq;
3030 	uint32_t msi_data_start;
3031 	uint32_t msi_data_count;
3032 	unsigned int msi_data;
3033 	int irq_id;
3034 	uint32_t msi_irq_start;
3035 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3036 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3037 	int pci_slot;
3038 	unsigned long irq_flags;
3039 
3040 	if (ce_id >= CE_COUNT_MAX)
3041 		return -EINVAL;
3042 
3043 	/* do ce irq assignments */
3044 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3045 					  &msi_data_count, &msi_data_start,
3046 					  &msi_irq_start);
3047 
3048 	if (ret) {
3049 		hif_err("Failed to get CE msi config");
3050 		return -EINVAL;
3051 	}
3052 
3053 	irq_id = scn->int_assignment->msi_idx[ce_id];
3054 	/* needs to match the ce_id -> irq data mapping
3055 	 * used in the srng parameter configuration
3056 	 */
3057 	pci_slot = hif_get_pci_slot(scn);
3058 	msi_data = irq_id + msi_irq_start;
3059 	irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3060 	if (pld_is_one_msi(scn->qdf_dev->dev))
3061 		irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
3062 	else
3063 		irq_flags = IRQF_SHARED;
3064 	hif_debug("%s: (ce_id %d, irq_id %d, msi_data %d, irq %d flag 0x%lx tasklet %pK)",
3065 		  __func__, ce_id, irq_id, msi_data, irq, irq_flags,
3066 		  &ce_sc->tasklets[ce_id]);
3067 
3068 	/* implies the ce is also initialized */
3069 	if (!ce_sc->tasklets[ce_id].inited)
3070 		goto skip;
3071 
3072 	pci_sc->ce_irq_num[ce_id] = irq;
3073 
3074 	qdf_scnprintf(ce_irqname[pci_slot][ce_id],
3075 		      DP_IRQ_NAME_LEN, "pci%u_wlan_ce_%u",
3076 		      pci_slot, ce_id);
3077 
3078 	ret = pfrm_request_irq(scn->qdf_dev->dev,
3079 			       irq, hif_ce_interrupt_handler, irq_flags,
3080 			       ce_irqname[pci_slot][ce_id],
3081 			       &ce_sc->tasklets[ce_id]);
3082 	if (ret)
3083 		return -EINVAL;
3084 
3085 skip:
3086 	return ret;
3087 }
3088 
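/**
 * hif_ce_msi_configure_irq(): request the wake IRQ and all CE MSI IRQs
 * @scn: hif context
 *
 * Requests the dedicated wake IRQ (unless disabled by ini), selects
 * srng or legacy CE irq ops, then requests one MSI vector per enabled
 * CE. On failure, IRQs already requested are freed before returning.
 *
 * Return: 0 on success, errno on failure
 */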
3089 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
3090 {
3091 	int ret;
3092 	int ce_id, irq;
3093 	uint32_t msi_data_start;
3094 	uint32_t msi_data_count;
3095 	uint32_t msi_irq_start;
3096 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3097 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
3098 
3099 	if (!scn->ini_cfg.disable_wake_irq) {
3100 		/* do wake irq assignment */
3101 		ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
3102 						  &msi_data_count,
3103 						  &msi_data_start,
3104 						  &msi_irq_start);
3105 		if (ret)
3106 			return ret;
3107 
3108 		scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev,
3109 						msi_irq_start);
3110 		scn->wake_irq_type = HIF_PM_MSI_WAKE;
3111 
3112 		ret = pfrm_request_irq(scn->qdf_dev->dev, scn->wake_irq,
3113 				       hif_wake_interrupt_handler,
3114 				       IRQF_NO_SUSPEND, "wlan_wake_irq", scn);
3115 
3116 		if (ret)
3117 			return ret;
3118 	}
3119 
3120 	/* do ce irq assignments */
3121 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3122 					  &msi_data_count, &msi_data_start,
3123 					  &msi_irq_start);
3124 	if (ret)
3125 		goto free_wake_irq;
3126 
3127 	if (ce_srng_based(scn)) {
3128 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
3129 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
3130 	} else {
3131 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
3132 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
3133 	}
3134 
3135 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
3136 
3137 	/* needs to match the ce_id -> irq data mapping
3138 	 * used in the srng parameter configuration
3139 	 */
3140 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3141 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3142 			continue;
3143 
3144 		if (host_ce_conf[ce_id].flags & CE_ATTR_INIT_ON_DEMAND)
3145 			continue;
3146 
3147 		ret = hif_ce_msi_configure_irq_by_ceid(scn, ce_id);
3148 		if (ret)
3149 			goto free_irq;
3150 	}
3151 
3152 	return ret;
3153 
3154 free_irq:
3155 	/* the request_irq for the last ce_id failed so skip it. */
3156 	while (ce_id > 0 && ce_id < scn->ce_count) {
3157 		unsigned int msi_data;
3158 
3159 		ce_id--;
3160 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
3161 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3162 		pfrm_free_irq(scn->qdf_dev->dev,
3163 			      irq, &ce_sc->tasklets[ce_id]);
3164 	}
3165 
3166 free_wake_irq:
3167 	if (!scn->ini_cfg.disable_wake_irq) {
3168 		pfrm_free_irq(scn->qdf_dev->dev,
3169 			      scn->wake_irq, scn);
3170 		scn->wake_irq = 0;
3171 		scn->wake_irq_type = HIF_PM_INVALID_WAKE;
3172 	}
3173 
3174 	return ret;
3175 }
3176 
3177 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3178 {
3179 	int i;
3180 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
3181 
3182 	for (i = 0; i < hif_ext_group->numirq; i++)
3183 		pfrm_disable_irq_nosync(scn->qdf_dev->dev,
3184 					hif_ext_group->os_irq[i]);
3185 }
3186 
3187 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3188 {
3189 	int i;
3190 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
3191 
3192 	for (i = 0; i < hif_ext_group->numirq; i++)
3193 		pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]);
3194 }
3195 
3196 /**
3197  * hif_pci_get_irq_name() - get irq name
3198  * @irq_no: irq number
3199  *
3200  * This function maps an irq number to an irq name.
3201  *
3202  * Return: irq name
3204  */
3205 const char *hif_pci_get_irq_name(int irq_no)
3206 {
3207 	return "pci-dummy";
3208 }
3209 
3210 #if defined(FEATURE_IRQ_AFFINITY) || defined(HIF_CPU_PERF_AFFINE_MASK)
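/**
 * hif_pci_irq_set_affinity_hint(): pin a group's IRQs to a CPU cluster
 * @hif_ext_group: hif extended group pointer
 * @perf: true to target the perf cluster, false for the little cluster
 *
 * Builds a cpumask of online CPUs in the requested cluster and applies
 * it to every IRQ in the group, temporarily clearing IRQ_NO_BALANCING
 * so the affinity update is accepted.
 */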
3211 void hif_pci_irq_set_affinity_hint(struct hif_exec_context *hif_ext_group,
3212 				   bool perf)
3213 {
3214 	int i, ret;
3215 	unsigned int cpus;
3216 	bool mask_set = false;
3217 	int cpu_cluster = perf ? CPU_CLUSTER_TYPE_PERF :
3218 						CPU_CLUSTER_TYPE_LITTLE;
3219 
3220 	for (i = 0; i < hif_ext_group->numirq; i++)
3221 		qdf_cpumask_clear(&hif_ext_group->new_cpu_mask[i]);
3222 
3223 	for (i = 0; i < hif_ext_group->numirq; i++) {
3224 		qdf_for_each_online_cpu(cpus) {
3225 			if (qdf_topology_physical_package_id(cpus) ==
3226 			    cpu_cluster) {
3227 				qdf_cpumask_set_cpu(cpus,
3228 						    &hif_ext_group->
3229 						    new_cpu_mask[i]);
3230 				mask_set = true;
3231 			}
3232 		}
3233 	}
3234 	for (i = 0; i < hif_ext_group->numirq; i++) {
3235 		if (mask_set) {
3236 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3237 						  IRQ_NO_BALANCING, 0);
3238 			ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i],
3239 						       (struct qdf_cpu_mask *)
3240 						       &hif_ext_group->
3241 						       new_cpu_mask[i]);
3242 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3243 						  0, IRQ_NO_BALANCING);
3244 			if (ret)
3245 				qdf_debug("Set affinity %*pbl fails for IRQ %d ",
3246 					  qdf_cpumask_pr_args(&hif_ext_group->
3247 							      new_cpu_mask[i]),
3248 					  hif_ext_group->os_irq[i]);
3249 		} else {
3250 			qdf_debug("Offline CPU: Set affinity fails for IRQ: %d",
3251 				  hif_ext_group->os_irq[i]);
3252 		}
3253 	}
3254 }
3255 #endif
3256 
3257 #ifdef HIF_CPU_PERF_AFFINE_MASK
3258 void hif_pci_ce_irq_set_affinity_hint(
3259 	struct hif_softc *scn)
3260 {
3261 	int ret;
3262 	unsigned int cpus;
3263 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3264 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3265 	struct CE_attr *host_ce_conf;
3266 	int ce_id;
3267 	qdf_cpu_mask ce_cpu_mask;
3268 
3269 	host_ce_conf = ce_sc->host_ce_config;
3270 	qdf_cpumask_clear(&ce_cpu_mask);
3271 
3272 	qdf_for_each_online_cpu(cpus) {
3273 		if (qdf_topology_physical_package_id(cpus) ==
3274 			CPU_CLUSTER_TYPE_PERF) {
3275 			qdf_cpumask_set_cpu(cpus,
3276 					    &ce_cpu_mask);
3277 		} else {
3278 			hif_err_rl("CPU %d not in perf cluster, not set in mask",
3279 				   cpus);
3280 		}
3281 	}
3282 	if (qdf_cpumask_empty(&ce_cpu_mask)) {
3283 		hif_err_rl("Empty cpu_mask, unable to set CE IRQ affinity");
3284 		return;
3285 	}
3286 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3287 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3288 			continue;
3289 		qdf_cpumask_clear(&pci_sc->ce_irq_cpu_mask[ce_id]);
3290 		qdf_cpumask_copy(&pci_sc->ce_irq_cpu_mask[ce_id],
3291 				 &ce_cpu_mask);
3292 		qdf_dev_modify_irq_status(pci_sc->ce_irq_num[ce_id],
3293 					  IRQ_NO_BALANCING, 0);
3294 		ret = qdf_dev_set_irq_affinity(
3295 			pci_sc->ce_irq_num[ce_id],
3296 			(struct qdf_cpu_mask *)&pci_sc->ce_irq_cpu_mask[ce_id]);
3297 		qdf_dev_modify_irq_status(pci_sc->ce_irq_num[ce_id],
3298 					  0, IRQ_NO_BALANCING);
3299 		if (ret)
3300 			hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
3301 				   qdf_cpumask_pr_args(
3302 					&pci_sc->ce_irq_cpu_mask[ce_id]),
3303 				   pci_sc->ce_irq_num[ce_id]);
3304 		else
3305 			hif_debug_rl("Set affinity %*pbl for CE IRQ: %d",
3306 				     qdf_cpumask_pr_args(
3307 					&pci_sc->ce_irq_cpu_mask[ce_id]),
3308 				     pci_sc->ce_irq_num[ce_id]);
3309 	}
3310 }
3311 #endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */
3312 
3313 #ifdef HIF_CPU_CLEAR_AFFINITY
3314 void hif_pci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
3315 					   int intr_ctxt_id, int cpu)
3316 {
3317 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3318 	struct hif_exec_context *hif_ext_group;
3319 	int i, ret;
3320 
3321 	if (intr_ctxt_id < hif_state->hif_num_extgroup) {
3322 		hif_ext_group = hif_state->hif_ext_group[intr_ctxt_id];
3323 
3324 		for (i = 0; i < hif_ext_group->numirq; i++) {
3325 			qdf_cpumask_setall(&hif_ext_group->new_cpu_mask[i]);
3326 			qdf_cpumask_clear_cpu(cpu,
3327 					      &hif_ext_group->new_cpu_mask[i]);
3328 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3329 						  IRQ_NO_BALANCING, 0);
3330 			ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i],
3331 						       (struct qdf_cpu_mask *)
3332 						       &hif_ext_group->
3333 						       new_cpu_mask[i]);
3334 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3335 						  0, IRQ_NO_BALANCING);
3336 			if (ret)
3337 				hif_err("Set affinity %*pbl fails for IRQ %d ",
3338 					qdf_cpumask_pr_args(&hif_ext_group->
3339 							    new_cpu_mask[i]),
3340 					hif_ext_group->os_irq[i]);
3341 			else
3342 				hif_debug("Set affinity %*pbl for IRQ: %d",
3343 					  qdf_cpumask_pr_args(&hif_ext_group->
3344 							      new_cpu_mask[i]),
3345 					  hif_ext_group->os_irq[i]);
3346 		}
3347 	}
3348 }
3349 #endif
3350 
3351 void hif_pci_config_irq_affinity(struct hif_softc *scn)
3352 {
3353 	int i;
3354 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3355 	struct hif_exec_context *hif_ext_group;
3356 
3357 	hif_core_ctl_set_boost(true);
3358 	/* Set IRQ affinity for WLAN DP interrupts */
3359 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3360 		hif_ext_group = hif_state->hif_ext_group[i];
3361 		hif_pci_irq_set_affinity_hint(hif_ext_group, true);
3362 	}
3363 	/* Set IRQ affinity for CE interrupts */
3364 	hif_pci_ce_irq_set_affinity_hint(scn);
3365 }
3366 
3367 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
3368 /**
3369  * hif_grp_configure_legacyirq() - Configure DP interrupts
3370  * @scn: hif_softc pointer
3371  * @hif_ext_group: hif extended group pointer
3372  *
3373  * Configure DP legacy interrupts
3374  *
3375  * Return: int
3376  */
3377 static int hif_grp_configure_legacyirq(struct hif_softc *scn,
3378 				       struct hif_exec_context *hif_ext_group)
3379 {
3380 	int ret = 0;
3381 	int irq = 0;
3382 	int j;
3383 	int pci_slot;
3384 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3385 	struct pci_dev *pdev = sc->pdev;
3386 	qdf_device_t qdf_dev = scn->qdf_dev;
3387 
3388 	for (j = 0; j < hif_ext_group->numirq; j++) {
3389 		ret = pfrm_get_irq(&pdev->dev,
3390 				   (struct qdf_pfm_hndl *)qdf_dev->cnss_pdev,
3391 				   legacy_ic_irqname[hif_ext_group->irq[j]],
3392 				   hif_ext_group->irq[j], &irq);
3393 		if (ret) {
3394 			dev_err(&pdev->dev, "get irq failed\n");
3395 			return -EFAULT;
3396 		}
3397 		hif_ext_group->os_irq[j] = irq;
3398 	}
3399 
3400 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3401 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3402 	hif_ext_group->irq_name = &hif_pci_get_irq_name;
3403 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3404 
3405 	pci_slot = hif_get_pci_slot(scn);
3406 	for (j = 0; j < hif_ext_group->numirq; j++) {
3407 		irq = hif_ext_group->os_irq[j];
3408 		if (scn->irq_unlazy_disable)
3409 			qdf_dev_set_irq_status_flags(irq,
3410 						     QDF_IRQ_DISABLE_UNLAZY);
3411 
3412 		hif_debug("request_irq = %d for grp %d",
3413 			  irq, hif_ext_group->grp_id);
3414 
3415 		qdf_scnprintf(dp_legacy_irqname[pci_slot][hif_ext_group->irq[j]],
3416 			      DP_IRQ_NAME_LEN, "pci%u_%s", pci_slot,
3417 			      legacy_ic_irqname[hif_ext_group->irq[j]]);
3418 
3419 		ret = pfrm_request_irq(scn->qdf_dev->dev, irq,
3420 				       hif_ext_group_interrupt_handler,
3421 				       IRQF_SHARED | IRQF_NO_SUSPEND,
3422 				       dp_legacy_irqname[pci_slot][hif_ext_group->irq[j]],
3423 				       hif_ext_group);
3424 		if (ret) {
3425 			hif_err("request_irq failed ret = %d", ret);
3426 			return -EFAULT;
3427 		}
3428 		hif_ext_group->os_irq[j] = irq;
3429 	}
3430 	hif_ext_group->irq_requested = true;
3431 	return 0;
3432 }
3433 #else
3434 /**
3435  * hif_grp_configure_legacyirq() - Configure DP interrupts
3436  * @scn: hif_softc pointer
3437  * @hif_ext_group: hif extended group pointer
3438  *
3439  * Configure DP legacy interrupts
3440  *
3441  * Return: int
3442  */
3443 static int hif_grp_configure_legacyirq(struct hif_softc *scn,
3444 				       struct hif_exec_context *hif_ext_group)
3445 {
3446 	return 0;
3447 }
3448 #endif
3449 
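/**
 * hif_pci_configure_grp_irq(): request the IRQs for one DP interrupt group
 * @scn: hif context
 * @hif_ext_group: hif extended group pointer
 *
 * Falls back to legacy INTx configuration when the platform enables
 * it; otherwise requests each of the group's MSI vectors.
 *
 * Return: 0 on success, -EFAULT on failure
 */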
3450 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3451 			      struct hif_exec_context *hif_ext_group)
3452 {
3453 	int ret = 0;
3454 	int irq = 0;
3455 	int j;
3456 	int pci_slot;
3457 	unsigned long irq_flags;
3458 
3459 	if (pld_get_enable_intx(scn->qdf_dev->dev))
3460 		return hif_grp_configure_legacyirq(scn, hif_ext_group);
3461 
3462 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3463 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3464 	hif_ext_group->irq_name = &hif_pci_get_irq_name;
3465 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3466 
3467 	pci_slot = hif_get_pci_slot(scn);
3468 	for (j = 0; j < hif_ext_group->numirq; j++) {
3469 		irq = hif_ext_group->irq[j];
3470 		if (scn->irq_unlazy_disable)
3471 			qdf_dev_set_irq_status_flags(irq,
3472 						     QDF_IRQ_DISABLE_UNLAZY);
3473 
3474 		if (pld_is_one_msi(scn->qdf_dev->dev))
3475 			irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
3476 		else
3477 			irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
3478 		hif_debug("request_irq = %d for grp %d irq_flags 0x%lx",
3479 			  irq, hif_ext_group->grp_id, irq_flags);
3480 
3481 		qdf_scnprintf(dp_irqname[pci_slot][hif_ext_group->grp_id],
3482 			      DP_IRQ_NAME_LEN, "pci%u_wlan_grp_dp_%u",
3483 			      pci_slot, hif_ext_group->grp_id);
3484 		ret = pfrm_request_irq(
3485 				scn->qdf_dev->dev, irq,
3486 				hif_ext_group_interrupt_handler,
3487 				irq_flags,
3488 				dp_irqname[pci_slot][hif_ext_group->grp_id],
3489 				hif_ext_group);
3490 		if (ret) {
3491 			hif_err("request_irq failed ret = %d", ret);
3492 			return -EFAULT;
3493 		}
3494 		hif_ext_group->os_irq[j] = irq;
3495 	}
3496 	hif_ext_group->irq_requested = true;
3497 	return 0;
3498 }
3499 
3500 #ifdef FEATURE_IRQ_AFFINITY
3501 void hif_pci_set_grp_intr_affinity(struct hif_softc *scn,
3502 				   uint32_t grp_intr_bitmask, bool perf)
3503 {
3504 	int i;
3505 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3506 	struct hif_exec_context *hif_ext_group;
3507 
3508 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3509 		if (!(grp_intr_bitmask & BIT(i)))
3510 			continue;
3511 
3512 		hif_ext_group = hif_state->hif_ext_group[i];
3513 		hif_pci_irq_set_affinity_hint(hif_ext_group, perf);
3514 		qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
3515 	}
3516 }
3517 #endif
3518 
3519 #if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
3520 	defined(QCA_WIFI_KIWI))
3521 uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
3522 			    uint32_t offset)
3523 {
3524 	return hal_read32_mb(hif_sc->hal_soc, offset);
3525 }
3526 
3527 void hif_pci_reg_write32(struct hif_softc *hif_sc,
3528 			 uint32_t offset,
3529 			 uint32_t value)
3530 {
3531 	hal_write32_mb(hif_sc->hal_soc, offset, value);
3532 }
3533 #else
3534 /* TODO: Need to implement other chips carefully */
3535 uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
3536 			    uint32_t offset)
3537 {
3538 	return 0;
3539 }
3540 
3541 void hif_pci_reg_write32(struct hif_softc *hif_sc,
3542 			 uint32_t offset,
3543 			 uint32_t value)
3544 {
3545 }
3546 #endif
3547 
3548 /**
3549  * hif_configure_irq() - configure interrupt
3550  * @scn: HIF context
3551  *
3552  * This function configures interrupt(s)
3553  *
3554  * Return: 0 - for success
3555  * Return: 0 on success, negative errno on failure
3556 int hif_configure_irq(struct hif_softc *scn)
3557 {
3558 	int ret = 0;
3559 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3560 
3561 	hif_info("E");
3562 
3563 	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
3564 		scn->request_irq_done = false;
3565 		return 0;
3566 	}
3567 
3568 	hif_init_reschedule_tasklet_work(sc);
3569 
3570 	ret = hif_ce_msi_configure_irq(scn);
3571 	if (ret == 0)
3572 		goto end;
3574 
3575 	switch (scn->target_info.target_type) {
3576 	case TARGET_TYPE_QCA8074:
3577 	case TARGET_TYPE_QCA8074V2:
3578 	case TARGET_TYPE_QCA6018:
3579 	case TARGET_TYPE_QCA5018:
3580 	case TARGET_TYPE_QCA5332:
3581 	case TARGET_TYPE_QCA9574:
3582 	case TARGET_TYPE_QCN9160:
3583 		ret = hif_ahb_configure_irq(sc);
3584 		break;
3585 	case TARGET_TYPE_QCN9224:
3586 		ret = hif_ce_configure_legacyirq(scn);
3587 		break;
3588 	default:
3589 		ret = hif_pci_configure_legacy_irq(sc);
3590 		break;
3591 	}
3592 	if (ret < 0) {
3593 		hif_err("error = %d", ret);
3594 		return ret;
3595 	}
3596 end:
3597 	scn->request_irq_done = true;
3598 	return 0;
3599 }
3600 
3601 /**
3602  * hif_trigger_timer_irq() : Triggers interrupt on LF_Timer 0
3603  * @scn: hif control structure
3604  *
3605  * Sets the IRQ bit in the LF Timer Status Address to wake a
3606  * Peregrine/Swift stuck in a polling loop in pcie_address_config in FW
3607  *
3608  * Return: none
3609  */
3610 static void hif_trigger_timer_irq(struct hif_softc *scn)
3611 {
3612 	int tmp;
3613 	/* Trigger IRQ on Peregrine/Swift by setting
3614 	 * IRQ Bit of LF_TIMER 0
3615 	 */
3616 	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3617 						SOC_LF_TIMER_STATUS0_ADDRESS));
3618 	/* Set Raw IRQ Bit */
3619 	tmp |= 1;
3620 	/* SOC_LF_TIMER_STATUS0 */
3621 	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3622 		       SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
3623 }
3624 
3625 /**
3626  * hif_target_sync() : ensure the target is ready
3627  * @scn: hif control structure
3628  *
3629  * Informs fw that we plan to use legacy interrupts so that
3630  * it can begin booting. Ensures that the fw finishes booting
3631  * before continuing. Should be called before trying to write
3632  * to the target's other registers for the first time.
3633  *
3634  * Return: none
3635  */
3636 static void hif_target_sync(struct hif_softc *scn)
3637 {
3638 	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3639 			    PCIE_INTR_ENABLE_ADDRESS),
3640 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3641 	/* read to flush pcie write */
3642 	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3643 			PCIE_INTR_ENABLE_ADDRESS));
3644 
3645 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3646 			PCIE_SOC_WAKE_ADDRESS,
3647 			PCIE_SOC_WAKE_V_MASK);
3648 	while (!hif_targ_is_awake(scn, scn->mem))
3649 		;
3650 
3651 	if (HAS_FW_INDICATOR) {
3652 		int wait_limit = 500;
3653 		int fw_ind = 0;
3654 		int retry_count = 0;
3655 		uint32_t target_type = scn->target_info.target_type;
3656 fw_retry:
3657 		hif_info("Loop checking FW signal");
3658 		while (1) {
3659 			fw_ind = hif_read32_mb(scn, scn->mem +
3660 					FW_INDICATOR_ADDRESS);
3661 			if (fw_ind & FW_IND_INITIALIZED)
3662 				break;
3663 			if (wait_limit-- < 0)
3664 				break;
3665 			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3666 			    PCIE_INTR_ENABLE_ADDRESS),
3667 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
			/* read to flush pcie write */
3669 			(void)hif_read32_mb(scn, scn->mem +
3670 			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
3671 
3672 			qdf_mdelay(10);
3673 		}
3674 		if (wait_limit < 0) {
3675 			if (target_type == TARGET_TYPE_AR9888 &&
3676 			    retry_count++ < 2) {
3677 				hif_trigger_timer_irq(scn);
3678 				wait_limit = 500;
3679 				goto fw_retry;
3680 			}
			hif_err("FW signal timed out");
3682 			qdf_assert_always(0);
3683 		} else {
			hif_info("Got FW signal, retries = %d",
				 500 - wait_limit);
3685 		}
3686 	}
3687 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3688 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3689 }
3690 
3691 static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
3692 				     struct device *dev)
3693 {
3694 	struct pld_soc_info info;
3695 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
3696 
3697 	pld_get_soc_info(dev, &info);
3698 	sc->mem = info.v_addr;
3699 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3700 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3701 	sc->device_version.family_number = info.device_version.family_number;
3702 	sc->device_version.device_number = info.device_version.device_number;
3703 	sc->device_version.major_version = info.device_version.major_version;
3704 	sc->device_version.minor_version = info.device_version.minor_version;
3705 
	hif_info("fam num %u dev num %u maj ver %u min ver %u",
		 sc->device_version.family_number,
		 sc->device_version.device_number,
		 sc->device_version.major_version,
		 sc->device_version.minor_version);
3711 
3712 	/* dev_mem_info[0] is for CMEM */
3713 	scn->cmem_start = info.dev_mem_info[0].start;
3714 	scn->cmem_size = info.dev_mem_info[0].size;
3715 	scn->target_info.target_version = info.soc_id;
3716 	scn->target_info.target_revision = 0;
3717 	scn->target_info.soc_version = info.device_version.major_version;
3718 }
3719 
3720 static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
3721 				       struct device *dev)
3722 {}
3723 
3724 static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
3725 				    int device_id)
3726 {
3727 	if (!pld_have_platform_driver_support(sc->dev))
3728 		return false;
3729 
3730 	switch (device_id) {
3731 	case QCA6290_DEVICE_ID:
3732 	case QCN9000_DEVICE_ID:
3733 	case QCN9224_DEVICE_ID:
3734 	case QCA6290_EMULATION_DEVICE_ID:
3735 	case QCA6390_DEVICE_ID:
3736 	case QCA6490_DEVICE_ID:
3737 	case AR6320_DEVICE_ID:
3738 	case QCN7605_DEVICE_ID:
3739 	case KIWI_DEVICE_ID:
3740 	case MANGO_DEVICE_ID:
3741 	case PEACH_DEVICE_ID:
3742 		return true;
3743 	}
3744 	return false;
3745 }
3746 
3747 static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
3748 					   int device_id)
3749 {
3750 	if (hif_is_pld_based_target(sc, device_id)) {
3751 		sc->hif_enable_pci = hif_enable_pci_pld;
3752 		sc->hif_pci_deinit = hif_pci_deinit_pld;
3753 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
3754 	} else {
3755 		sc->hif_enable_pci = hif_enable_pci_nopld;
3756 		sc->hif_pci_deinit = hif_pci_deinit_nopld;
3757 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
3758 	}
3759 }
3760 
3761 #ifdef HIF_REG_WINDOW_SUPPORT
3762 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3763 					       u32 target_type)
3764 {
3765 	switch (target_type) {
3766 	case TARGET_TYPE_QCN7605:
3767 	case TARGET_TYPE_QCA6490:
3768 	case TARGET_TYPE_QCA6390:
3769 	case TARGET_TYPE_KIWI:
3770 	case TARGET_TYPE_MANGO:
3771 	case TARGET_TYPE_PEACH:
3772 		sc->use_register_windowing = true;
3773 		qdf_spinlock_create(&sc->register_access_lock);
3774 		sc->register_window = 0;
3775 		break;
3776 	default:
3777 		sc->use_register_windowing = false;
3778 	}
3779 }
3780 #else
3781 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3782 					       u32 target_type)
3783 {
3784 	sc->use_register_windowing = false;
3785 }
3786 #endif
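
/*
 * When use_register_windowing is set, register accessors elsewhere in
 * HIF are expected to hold register_access_lock across the window
 * select and the access itself, roughly (illustrative sketch only):
 *
 *	qdf_spin_lock_irqsave(&sc->register_access_lock);
 *	... program sc->register_window for the target offset ...
 *	... read/write through the window aperture ...
 *	qdf_spin_unlock_irqrestore(&sc->register_access_lock);
 */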
3787 
3788 /**
 * hif_pci_enable_bus() - enable the PCI bus
 * @ol_sc: soft_sc struct
 * @dev: device pointer
 * @bdev: bus dev pointer
 * @bid: bus id pointer
 * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
 *
 * Enables the PCI bus, identifies the hif and target types, and, for
 * legacy (non-SRNG) targets, performs the target wakeup and sync
 * handshake
3797  *
3798  * Return: QDF_STATUS
3799  */
3800 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3801 			  struct device *dev, void *bdev,
3802 			  const struct hif_bus_id *bid,
3803 			  enum hif_enable_type type)
3804 {
3805 	int ret = 0;
3806 	uint32_t hif_type;
3807 	uint32_t target_type = TARGET_TYPE_UNKNOWN;
3808 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3809 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3810 	uint16_t revision_id = 0;
3811 	int probe_again = 0;
3812 	struct pci_dev *pdev = bdev;
3813 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3814 	struct hif_target_info *tgt_info;
3815 
3816 	if (!ol_sc) {
3817 		hif_err("hif_ctx is NULL");
3818 		return QDF_STATUS_E_NOMEM;
3819 	}
3820 	/* Following print is used by various tools to identify
3821 	 * WLAN SOC (e.g. crash dump analysis and reporting tool).
3822 	 */
3823 	hif_info("con_mode = 0x%x, WLAN_SOC_device_id = 0x%x",
3824 		 hif_get_conparam(ol_sc), id->device);
3825 
3826 	sc->pdev = pdev;
3827 	sc->dev = &pdev->dev;
3828 	sc->devid = id->device;
3829 	sc->cacheline_sz = dma_get_cache_alignment();
3830 	tgt_info = hif_get_target_info_handle(hif_hdl);
3831 	hif_pci_init_deinit_ops_attach(sc, id->device);
3832 	sc->hif_pci_get_soc_info(sc, dev);
3833 again:
3834 	ret = sc->hif_enable_pci(sc, pdev, id);
3835 	if (ret < 0) {
3836 		hif_err("hif_enable_pci error = %d", ret);
3837 		goto err_enable_pci;
3838 	}
3839 	hif_info("hif_enable_pci done");
3840 
3841 	/* Temporary FIX: disable ASPM on peregrine.
3842 	 * Will be removed after the OTP is programmed
3843 	 */
3844 	hif_disable_power_gating(hif_hdl);
3845 
3846 	device_disable_async_suspend(&pdev->dev);
3847 	pfrm_read_config_word(pdev, 0x08, &revision_id);
3848 
3849 	ret = hif_get_device_type(id->device, revision_id,
3850 						&hif_type, &target_type);
3851 	if (ret < 0) {
3852 		hif_err("Invalid device id/revision_id");
3853 		goto err_tgtstate;
3854 	}
3855 	hif_info("hif_type = 0x%x, target_type = 0x%x",
3856 		hif_type, target_type);
3857 
3858 	hif_register_tbl_attach(ol_sc, hif_type);
3859 	hif_target_register_tbl_attach(ol_sc, target_type);
3860 
3861 	hif_pci_init_reg_windowing_support(sc, target_type);
3862 
3863 	tgt_info->target_type = target_type;
3864 
	/*
	 * Disable unlazy interrupt registration for QCN9000 and QCN9224
	 */
3868 	if (target_type == TARGET_TYPE_QCN9000 ||
3869 	    target_type == TARGET_TYPE_QCN9224)
3870 		ol_sc->irq_unlazy_disable = 1;
3871 
3872 	if (ce_srng_based(ol_sc)) {
3873 		hif_info("Skip tgt_wake up for srng devices");
3874 	} else {
3875 		ret = hif_pci_probe_tgt_wakeup(sc);
3876 		if (ret < 0) {
			hif_err("hif_pci_probe_tgt_wakeup error = %d", ret);
3878 			if (ret == -EAGAIN)
3879 				probe_again++;
3880 			goto err_tgtstate;
3881 		}
3882 		hif_info("hif_pci_probe_tgt_wakeup done");
3883 	}
3884 
3885 	if (!ol_sc->mem_pa) {
3886 		hif_err("BAR0 uninitialized");
3887 		ret = -EIO;
3888 		goto err_tgtstate;
3889 	}
3890 
3891 	if (!ce_srng_based(ol_sc)) {
3892 		hif_target_sync(ol_sc);
3893 
3894 		if (hif_pci_default_link_up(tgt_info))
3895 			hif_vote_link_up(hif_hdl);
3896 	}
3897 
3898 	return QDF_STATUS_SUCCESS;
3899 
3900 err_tgtstate:
3901 	hif_disable_pci(sc);
3902 	sc->pci_enabled = false;
3903 	hif_err("hif_disable_pci done");
3904 	return QDF_STATUS_E_ABORTED;
3905 
3906 err_enable_pci:
3907 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3908 		int delay_time;
3909 
3910 		hif_info("pci reprobe");
		/* delay at least 100 ms: 100, 100, 100, 160, 250, ... */
		delay_time = max(100, 10 * (probe_again * probe_again));
3913 		qdf_mdelay(delay_time);
3914 		goto again;
3915 	}
3916 	return qdf_status_from_os_return(ret);
3917 }
3918 
3919 /**
 * hif_pci_irq_enable() - enable the copy engine interrupt
 * @scn: hif_softc
 * @ce_id: copy engine id
3923  *
3924  * Return: void
3925  */
3926 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3927 {
3928 	uint32_t tmp = 1 << ce_id;
3929 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3930 
3931 	qdf_spin_lock_irqsave(&sc->irq_lock);
3932 	scn->ce_irq_summary &= ~tmp;
3933 	if (scn->ce_irq_summary == 0) {
3934 		/* Enable Legacy PCI line interrupts */
3935 		if (LEGACY_INTERRUPTS(sc) &&
3936 			(scn->target_status != TARGET_STATUS_RESET) &&
3937 			(!qdf_atomic_read(&scn->link_suspended))) {
3938 
3939 			hif_write32_mb(scn, scn->mem +
3940 				(SOC_CORE_BASE_ADDRESS |
3941 				PCIE_INTR_ENABLE_ADDRESS),
3942 				HOST_GROUP0_MASK);
3943 
3944 			hif_read32_mb(scn, scn->mem +
3945 					(SOC_CORE_BASE_ADDRESS |
3946 					PCIE_INTR_ENABLE_ADDRESS));
3947 		}
3948 	}
3949 	if (scn->hif_init_done == true)
3950 		Q_TARGET_ACCESS_END(scn);
3951 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
3952 
3953 	/* check for missed firmware crash */
3954 	hif_fw_interrupt_handler(0, scn);
3955 }
3956 
3957 /**
 * hif_pci_irq_disable() - disable the copy engine interrupt
 * @scn: hif_softc
 * @ce_id: copy engine id
 *
 * Only applicable to the legacy copy engine
3963  *
3964  * Return: void
3965  */
3966 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3967 {
3968 	/* For Rome only need to wake up target */
3969 	/* target access is maintained until interrupts are re-enabled */
3970 	Q_TARGET_ACCESS_BEGIN(scn);
3971 }
3972 
3973 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3974 {
3975 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3976 
3977 	/* legacy case only has one irq */
3978 	return pci_scn->irq;
3979 }
3980 
3981 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
3982 {
3983 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3984 	struct hif_target_info *tgt_info;
3985 
3986 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
3987 
3988 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
3989 	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
3990 	    tgt_info->target_type == TARGET_TYPE_QCA6490 ||
3991 	    tgt_info->target_type == TARGET_TYPE_QCN7605 ||
3992 	    tgt_info->target_type == TARGET_TYPE_QCA8074 ||
3993 	    tgt_info->target_type == TARGET_TYPE_KIWI ||
3994 	    tgt_info->target_type == TARGET_TYPE_MANGO ||
3995 	    tgt_info->target_type == TARGET_TYPE_PEACH) {
3996 		/*
3997 		 * Need to consider offset's memtype for QCA6290/QCA8074,
3998 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
3999 		 * well initialized/defined.
4000 		 */
4001 		return 0;
4002 	}
4003 
4004 	if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE)
4005 		 || (offset + sizeof(unsigned int) <= sc->mem_len)) {
4006 		return 0;
4007 	}
4008 
4009 	hif_info("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)",
4010 		offset, (uint32_t)(offset + sizeof(unsigned int)),
4011 		sc->mem_len);
4012 
4013 	return -EINVAL;
4014 }
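
/*
 * Callers are expected to gate diagnostic BAR reads on the boundary
 * check above, e.g. (illustrative only):
 *
 *	if (hif_pci_addr_in_boundary(scn, offset) == 0)
 *		val = hif_read32_mb(scn, scn->mem + offset);
 */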
4015 
4016 /**
4017  * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
4018  * @scn: hif context
4019  *
4020  * Return: true if soc needs driver bmi otherwise false
4021  */
4022 bool hif_pci_needs_bmi(struct hif_softc *scn)
4023 {
4024 	return !ce_srng_based(scn);
4025 }
4026 
4027 #ifdef FORCE_WAKE
4028 #if defined(DEVICE_FORCE_WAKE_ENABLE) && !defined(CONFIG_PLD_PCIE_FW_SIM)
4029 
4030 /*
 * HIF_POLL_UMAC_WAKE: polled value indicating that the UMAC is powered
 * up. TODO: update this macro with the FW-defined value.
4033  */
4034 #define HIF_POLL_UMAC_WAKE 0x2
4035 
4036 static inline int hif_soc_wake_request(struct hif_opaque_softc *hif_handle)
4037 {
4038 	uint32_t timeout, value;
4039 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4040 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4041 
4042 	qdf_spin_lock_bh(&pci_scn->force_wake_lock);
	if (qdf_atomic_inc_return(&scn->active_wake_req_cnt) > 1) {
4044 		qdf_spin_unlock_bh(&pci_scn->force_wake_lock);
4045 		return 0;
4046 	}
4047 
4048 	hif_write32_mb(scn, scn->mem + PCIE_REG_WAKE_UMAC_OFFSET, 1);
4049 	HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1);
4050 	/*
	 * Do not reset the timeout:
	 * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME must stay < 50 ms
4053 	 */
4054 	timeout = 0;
4055 	do {
4056 		value = hif_read32_mb(
4057 				scn, scn->mem +
4058 				PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG);
4059 		if (value == HIF_POLL_UMAC_WAKE)
4060 			break;
4061 		qdf_mdelay(FORCE_WAKE_DELAY_MS);
4062 		timeout += FORCE_WAKE_DELAY_MS;
4063 	} while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS);
4064 
4065 	if (value != HIF_POLL_UMAC_WAKE) {
4066 		hif_err("force wake handshake failed, reg value = 0x%x",
4067 			value);
4068 		HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1);
4069 		qdf_atomic_dec(&scn->active_wake_req_cnt);
4070 		qdf_spin_unlock_bh(&pci_scn->force_wake_lock);
4071 		return -ETIMEDOUT;
4072 	}
4073 
4074 	HIF_STATS_INC(pci_scn, soc_force_wake_success, 1);
4075 	qdf_spin_unlock_bh(&pci_scn->force_wake_lock);
4076 	return 0;
4077 }
4078 
4079 static inline void hif_soc_wake_release(struct hif_opaque_softc *hif_handle)
4080 {
4081 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4082 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4083 
4084 	qdf_spin_lock_bh(&pci_scn->force_wake_lock);
4085 	if (!qdf_atomic_dec_and_test(&scn->active_wake_req_cnt)) {
4086 		qdf_spin_unlock_bh(&pci_scn->force_wake_lock);
4087 		return;
4088 	}
4089 
4090 	/* Release umac force wake */
4091 	hif_write32_mb(scn, scn->mem + PCIE_REG_WAKE_UMAC_OFFSET, 0);
4092 	qdf_spin_unlock_bh(&pci_scn->force_wake_lock);
4093 }
4094 
4095 /**
 * hif_force_wake_request() - enable the force wake recipe
 * @hif_handle: HIF handle
 *
 * Brings MHI to the M0 state and force wakes the UMAC by asserting the
 * soc wake reg. Polls the scratch reg to check whether it is set to
 * HIF_POLL_UMAC_WAKE; the polled value may read 0x1 if the UMAC is
 * powered down.
 *
 * Return: 0 if the handshake is successful, -EINVAL or -ETIMEDOUT
 * on failure
4105  */
4106 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
4107 {
4108 	uint32_t timeout;
4109 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4110 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4111 	int ret, status = 0;
4112 
4113 	/* Prevent runtime PM or trigger resume firstly */
4114 	if (hif_rtpm_get(HIF_RTPM_GET_SYNC, HIF_RTPM_ID_FORCE_WAKE)) {
4115 		hif_err("runtime pm get failed");
4116 		return -EINVAL;
4117 	}
4118 
4119 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
4120 	if (qdf_in_interrupt())
4121 		timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
4122 	else
4123 		timeout = 0;
4124 
4125 	ret = pld_force_wake_request_sync(scn->qdf_dev->dev, timeout);
4126 	if (ret) {
4127 		hif_err("force wake request(timeout %u) send failed: %d",
4128 			timeout, ret);
4129 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
4130 		status = -EINVAL;
4131 		goto release_rtpm_ref;
4132 	}
4133 
	/* If the device's M1 state-change event races here, it can be
	 * ignored, as the device is expected to move immediately from M2
	 * to M0 without entering a low power state.
	 */
4138 	if (!pld_is_device_awake(scn->qdf_dev->dev))
4139 		hif_info("state-change event races, ignore");
4140 
4141 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
4142 
4143 	ret = hif_soc_wake_request(hif_handle);
4144 	if (ret) {
4145 		hif_err("soc force wake failed: %d", ret);
4146 		status = ret;
4147 		goto release_mhi_wake;
4148 	}
4149 	return 0;
4150 
4151 release_mhi_wake:
4152 	/* Release MHI force wake */
4153 	ret = pld_force_wake_release(scn->qdf_dev->dev);
4154 	if (ret) {
4155 		hif_err("pld force wake release failure");
4156 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
4157 		return ret;
4158 	}
4159 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
4160 release_rtpm_ref:
4161 	/* Release runtime PM force wake */
4162 	ret = hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_FORCE_WAKE);
4163 	if (ret) {
4164 		hif_err("runtime pm put failure: %d", ret);
4165 		return ret;
4166 	}
4167 
4168 	return status;
4169 }
4170 
4171 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
4172 {
4173 	int ret;
4174 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4175 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4176 
4177 	hif_soc_wake_release(hif_handle);
4178 
4179 	/* Release MHI force wake */
4180 	ret = pld_force_wake_release(scn->qdf_dev->dev);
4181 	if (ret) {
4182 		hif_err("pld force wake release failure");
4183 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
4184 		return ret;
4185 	}
4186 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
4187 
4188 	/* Release runtime PM force wake */
4189 	ret = hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_FORCE_WAKE);
4190 	if (ret) {
4191 		hif_err("runtime pm put failure");
4192 		return ret;
4193 	}
4194 
4195 	HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1);
4196 	return 0;
4197 }
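
/*
 * Typical pairing (illustrative only): bracket direct UMAC register
 * access with the force wake recipe; requests may nest and are
 * refcounted via active_wake_req_cnt:
 *
 *	if (hif_force_wake_request(hif_hdl))
 *		return;
 *	... access UMAC registers ...
 *	if (hif_force_wake_release(hif_hdl))
 *		hif_err("force wake release failed");
 */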
4198 
4199 #else /* DEVICE_FORCE_WAKE_ENABLE */
/**
 * hif_force_wake_request() - request MHI force wake only; the PCIe
 * scratch register handshake is skipped in this configuration
 * @hif_handle: HIF handle
 *
 * Return: 0 on success, -EINVAL on failure
 */
4205 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
4206 {
4207 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4208 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4209 	uint32_t timeout;
4210 	int ret;
4211 
4212 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
4213 
4214 	if (qdf_in_interrupt())
4215 		timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
4216 	else
4217 		timeout = 0;
4218 
4219 	ret = pld_force_wake_request_sync(scn->qdf_dev->dev, timeout);
4220 	if (ret) {
4221 		hif_err("force wake request(timeout %u) send failed: %d",
4222 			timeout, ret);
4223 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
4224 		return -EINVAL;
4225 	}
4226 
	/* If the device's M1 state-change event races here, it can be
	 * ignored, as the device is expected to move immediately from M2
	 * to M0 without entering a low power state.
	 */
4231 	if (!pld_is_device_awake(scn->qdf_dev->dev))
4232 		hif_info("state-change event races, ignore");
4233 
4234 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
4235 
4236 	return 0;
4237 }
4238 
4239 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
4240 {
4241 	int ret;
4242 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4243 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4244 
4245 	ret = pld_force_wake_release(scn->qdf_dev->dev);
4246 	if (ret) {
4247 		hif_err("force wake release failure");
4248 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
4249 		return ret;
4250 	}
4251 
4252 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
4253 	return 0;
4254 }
4255 #endif /* DEVICE_FORCE_WAKE_ENABLE */
4256 
4257 void hif_print_pci_stats(struct hif_pci_softc *pci_handle)
4258 {
4259 	hif_debug("mhi_force_wake_request_vote: %d",
4260 		  pci_handle->stats.mhi_force_wake_request_vote);
4261 	hif_debug("mhi_force_wake_failure: %d",
4262 		  pci_handle->stats.mhi_force_wake_failure);
4263 	hif_debug("mhi_force_wake_success: %d",
4264 		  pci_handle->stats.mhi_force_wake_success);
4265 	hif_debug("soc_force_wake_register_write_success: %d",
4266 		  pci_handle->stats.soc_force_wake_register_write_success);
4267 	hif_debug("soc_force_wake_failure: %d",
4268 		  pci_handle->stats.soc_force_wake_failure);
4269 	hif_debug("soc_force_wake_success: %d",
4270 		  pci_handle->stats.soc_force_wake_success);
4271 	hif_debug("mhi_force_wake_release_failure: %d",
4272 		  pci_handle->stats.mhi_force_wake_release_failure);
4273 	hif_debug("mhi_force_wake_release_success: %d",
4274 		  pci_handle->stats.mhi_force_wake_release_success);
	hif_debug("soc_force_wake_release_success: %d",
4276 		  pci_handle->stats.soc_force_wake_release_success);
4277 }
4278 #endif /* FORCE_WAKE */
4279 
4280 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
4281 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
4282 {
4283 	return pld_prevent_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
4284 }
4285 
4286 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
4287 {
4288 	pld_allow_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
4289 }
4290 #endif
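
/*
 * Expected usage (illustrative only): the delayed register write flush
 * path brackets its writes so the PCIe link cannot drop to L1
 * mid-sequence:
 *
 *	if (hif_prevent_link_low_power_states(hif))
 *		return;
 *	... flush pending register writes ...
 *	hif_allow_link_low_power_states(hif);
 */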
4291 
4292 #ifdef IPA_OPT_WIFI_DP
4293 int hif_prevent_l1(struct hif_opaque_softc *hif)
4294 {
4295 	struct hif_softc *hif_softc = (struct hif_softc *)hif;
4296 	int status;
4297 
4298 	status = hif_force_wake_request(hif);
4299 	if (status) {
4300 		hif_err("Force wake request error");
4301 		return status;
4302 	}
4303 
4304 	qdf_atomic_inc(&hif_softc->opt_wifi_dp_rtpm_cnt);
4305 	hif_info("opt_dp: pcie link up count %d",
4306 		 qdf_atomic_read(&hif_softc->opt_wifi_dp_rtpm_cnt));
4307 	return status;
4308 }
4309 
4310 void hif_allow_l1(struct hif_opaque_softc *hif)
4311 {
4312 	struct hif_softc *hif_softc = (struct hif_softc *)hif;
4313 	int status;
4314 
4315 	if (qdf_atomic_read(&hif_softc->opt_wifi_dp_rtpm_cnt) > 0) {
4316 		status = hif_force_wake_release(hif);
4317 		if (status) {
4318 			hif_err("Force wake release error");
4319 			return;
4320 		}
4321 
4322 		qdf_atomic_dec(&hif_softc->opt_wifi_dp_rtpm_cnt);
4323 		hif_info("opt_dp: pcie link down count %d",
4324 			 qdf_atomic_read(&hif_softc->opt_wifi_dp_rtpm_cnt));
4325 	}
4326 }
4327 #endif
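
/*
 * Expected usage (illustrative only): IPA opt-DP callers bracket direct
 * datapath register access; calls may nest and are tracked via
 * opt_wifi_dp_rtpm_cnt:
 *
 *	if (hif_prevent_l1(hif))
 *		return;
 *	... access datapath registers ...
 *	hif_allow_l1(hif);
 */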
4328