xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/pcie/if_pci.c (revision 8cfe6b10058a04cafb17eed051f2ddf11bee8931)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <linux/pci.h>
21 #include <linux/slab.h>
22 #include <linux/interrupt.h>
23 #include <linux/if_arp.h>
24 #include <linux/of_pci.h>
25 #include <linux/version.h>
26 #include "hif_io32.h"
27 #include "if_pci.h"
28 #include "hif.h"
29 #include "target_type.h"
30 #include "hif_main.h"
31 #include "ce_main.h"
32 #include "ce_api.h"
33 #include "ce_internal.h"
34 #include "ce_reg.h"
35 #include "ce_bmi.h"
36 #include "regtable.h"
37 #include "hif_hw_version.h"
38 #include <linux/debugfs.h>
39 #include <linux/seq_file.h>
40 #include "qdf_status.h"
41 #include "qdf_atomic.h"
42 #include "qdf_platform.h"
43 #include "pld_common.h"
44 #include "mp_dev.h"
45 #include "hif_debug.h"
46 
47 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
48 char *legacy_ic_irqname[] = {
49 "ce0",
50 "ce1",
51 "ce2",
52 "ce3",
53 "ce4",
54 "ce5",
55 "ce6",
56 "ce7",
57 "ce8",
58 "ce9",
59 "ce10",
60 "ce11",
61 "ce12",
62 "ce13",
63 "ce14",
64 "ce15",
65 "reo2sw8_intr2",
66 "reo2sw7_intr2",
67 "reo2sw6_intr2",
68 "reo2sw5_intr2",
69 "reo2sw4_intr2",
70 "reo2sw3_intr2",
71 "reo2sw2_intr2",
72 "reo2sw1_intr2",
73 "reo2sw0_intr2",
74 "reo2sw8_intr",
75 "reo2sw7_intr",
76 "reo2sw6_inrr",
77 "reo2sw5_intr",
78 "reo2sw4_intr",
79 "reo2sw3_intr",
80 "reo2sw2_intr",
81 "reo2sw1_intr",
82 "reo2sw0_intr",
83 "reo2status_intr2",
84 "reo_status",
85 "reo2rxdma_out_2",
86 "reo2rxdma_out_1",
87 "reo_cmd",
88 "sw2reo6",
89 "sw2reo5",
90 "sw2reo1",
91 "sw2reo",
92 "rxdma2reo_mlo_0_dst_ring1",
93 "rxdma2reo_mlo_0_dst_ring0",
94 "rxdma2reo_mlo_1_dst_ring1",
95 "rxdma2reo_mlo_1_dst_ring0",
96 "rxdma2reo_dst_ring1",
97 "rxdma2reo_dst_ring0",
98 "rxdma2sw_dst_ring1",
99 "rxdma2sw_dst_ring0",
100 "rxdma2release_dst_ring1",
101 "rxdma2release_dst_ring0",
102 "sw2rxdma_2_src_ring",
103 "sw2rxdma_1_src_ring",
104 "sw2rxdma_0",
105 "wbm2sw6_release2",
106 "wbm2sw5_release2",
107 "wbm2sw4_release2",
108 "wbm2sw3_release2",
109 "wbm2sw2_release2",
110 "wbm2sw1_release2",
111 "wbm2sw0_release2",
112 "wbm2sw6_release",
113 "wbm2sw5_release",
114 "wbm2sw4_release",
115 "wbm2sw3_release",
116 "wbm2sw2_release",
117 "wbm2sw1_release",
118 "wbm2sw0_release",
119 "wbm2sw_link",
120 "wbm_error_release",
121 "sw2txmon_src_ring",
122 "sw2rxmon_src_ring",
123 "txmon2sw_p1_intr1",
124 "txmon2sw_p1_intr0",
125 "txmon2sw_p0_dest1",
126 "txmon2sw_p0_dest0",
127 "rxmon2sw_p1_intr1",
128 "rxmon2sw_p1_intr0",
129 "rxmon2sw_p0_dest1",
130 "rxmon2sw_p0_dest0",
131 "sw_release",
132 "sw2tcl_credit2",
133 "sw2tcl_credit",
134 "sw2tcl4",
135 "sw2tcl5",
136 "sw2tcl3",
137 "sw2tcl2",
138 "sw2tcl1",
139 "sw2wbm1",
140 "misc_8",
141 "misc_7",
142 "misc_6",
143 "misc_5",
144 "misc_4",
145 "misc_3",
146 "misc_2",
147 "misc_1",
148 "misc_0",
149 };
150 #endif
151 
152 #if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
153 	defined(QCA_WIFI_KIWI))
154 #include "hal_api.h"
155 #endif
156 
157 #include "if_pci_internal.h"
158 #include "ce_tasklet.h"
159 #include "targaddrs.h"
160 #include "hif_exec.h"
161 
162 #include "pci_api.h"
163 #include "ahb_api.h"
164 #include "wlan_cfg.h"
165 #include "qdf_hang_event_notifier.h"
166 #include "qdf_platform.h"
167 #include "qal_devnode.h"
168 #include "qdf_irq.h"
169 
170 /* Maximum ms timeout for host to wake up target */
171 #define PCIE_WAKE_TIMEOUT 1000
172 #define RAMDUMP_EVENT_TIMEOUT 2500
173 
174 /* Setting SOC_GLOBAL_RESET during driver unload causes an intermittent
175  * PCIe data bus error.
176  * As a workaround, the reset sequence is changed to use a Target CPU
177  * warm reset instead of SOC_GLOBAL_RESET.
178  */
179 #define CPU_WARM_RESET_WAR
180 #define WLAN_CFG_MAX_PCIE_GROUPS 5
181 #ifdef QCA_WIFI_QCN9224
182 #define WLAN_CFG_MAX_CE_COUNT 16
183 #else
184 #define WLAN_CFG_MAX_CE_COUNT 12
185 #endif
186 #define DP_IRQ_NAME_LEN 25
187 char dp_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_INT_NUM_CONTEXTS][DP_IRQ_NAME_LEN] = {};
188 char ce_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_MAX_CE_COUNT][DP_IRQ_NAME_LEN] = {};
189 
190 static inline int hif_get_pci_slot(struct hif_softc *scn)
191 {
192 	int pci_slot = pld_get_pci_slot(scn->qdf_dev->dev);
193 
194 	if (pci_slot < 0) {
195 		hif_err("Invalid PCI SLOT %d", pci_slot);
196 		qdf_assert_always(0);
197 		return 0;
198 	} else {
199 		return pci_slot;
200 	}
201 }
202 
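/*
 * Example (illustrative sketch, not part of the driver): the per-slot
 * IRQ name buffers above are keyed by the index returned from
 * hif_get_pci_slot(). The actual population happens in the interrupt
 * registration paths later in this file; the name format and CE id
 * below are purely hypothetical.
 *
 *	int slot = hif_get_pci_slot(scn);
 *	int ce_id = 0;
 *
 *	snprintf(ce_irqname[slot][ce_id], DP_IRQ_NAME_LEN,
 *		 "pci%d_wlan_ce_%d", slot, ce_id);
 */
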
203 /*
204  * Top-level interrupt handler for all PCI interrupts from a Target.
205  * When a block of MSI interrupts is allocated, this top-level handler
206  * is not used; instead, we directly call the correct sub-handler.
207  */
208 struct ce_irq_reg_table {
209 	uint32_t irq_enable;
210 	uint32_t irq_status;
211 };
212 
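/*
 * Example (illustrative sketch): with legacy INTx, the shared top-level
 * handler hif_pci_legacy_ce_interrupt_handler() below is registered once
 * for the whole device. The stock Linux API is shown only for
 * illustration; the driver itself registers through its platform
 * wrappers.
 *
 *	int ret = request_irq(sc->irq, hif_pci_legacy_ce_interrupt_handler,
 *			      IRQF_SHARED, "wlan_pci", sc);
 *
 *	if (ret)
 *		hif_err("request_irq failed: %d", ret);
 */
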
213 #ifndef QCA_WIFI_3_0_ADRASTEA
214 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
215 {
216 }
217 #else
218 static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
219 {
220 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
221 	unsigned int target_enable0, target_enable1;
222 	unsigned int target_cause0, target_cause1;
223 
224 	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
225 	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
226 	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
227 	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);
228 
229 	if ((target_enable0 & target_cause0) ||
230 	    (target_enable1 & target_cause1)) {
231 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
232 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);
233 
234 		if (scn->notice_send)
235 			pld_intr_notify_q6(sc->dev);
236 	}
237 }
238 #endif
239 
240 
241 /**
242  * pci_dispatch_interrupt() - PCI interrupt dispatcher
243  * @scn: hif context
244  *
245  * Return: N/A
246  */
247 static void pci_dispatch_interrupt(struct hif_softc *scn)
248 {
249 	uint32_t intr_summary;
250 	int id;
251 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
252 
253 	if (scn->hif_init_done != true)
254 		return;
255 
256 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
257 		return;
258 
259 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
260 
261 	if (intr_summary == 0) {
262 		if ((scn->target_status != TARGET_STATUS_RESET) &&
263 			(!qdf_atomic_read(&scn->link_suspended))) {
264 
265 			hif_write32_mb(scn, scn->mem +
266 				(SOC_CORE_BASE_ADDRESS |
267 				PCIE_INTR_ENABLE_ADDRESS),
268 				HOST_GROUP0_MASK);
269 
270 			hif_read32_mb(scn, scn->mem +
271 					(SOC_CORE_BASE_ADDRESS |
272 					PCIE_INTR_ENABLE_ADDRESS));
273 		}
274 		Q_TARGET_ACCESS_END(scn);
275 		return;
276 	}
277 	Q_TARGET_ACCESS_END(scn);
278 
279 	scn->ce_irq_summary = intr_summary;
280 	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
281 		if (intr_summary & (1 << id)) {
282 			intr_summary &= ~(1 << id);
283 			ce_dispatch_interrupt(id,  &hif_state->tasklets[id]);
284 		}
285 	}
286 }
287 
288 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
289 {
290 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
291 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
292 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
293 
294 	volatile int tmp;
295 	uint16_t val = 0;
296 	uint32_t bar0 = 0;
297 	uint32_t fw_indicator_address, fw_indicator;
298 	bool ssr_irq = false;
299 	unsigned int host_cause, host_enable;
300 
301 	if (LEGACY_INTERRUPTS(sc)) {
302 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
303 			return IRQ_HANDLED;
304 
305 		if (ADRASTEA_BU) {
306 			host_enable = hif_read32_mb(sc, sc->mem +
307 						    PCIE_INTR_ENABLE_ADDRESS);
308 			host_cause = hif_read32_mb(sc, sc->mem +
309 						   PCIE_INTR_CAUSE_ADDRESS);
310 			if (!(host_enable & host_cause)) {
311 				hif_pci_route_adrastea_interrupt(sc);
312 				return IRQ_HANDLED;
313 			}
314 		}
315 
316 		/* Clear Legacy PCI line interrupts
317 		 * IMPORTANT: INTR_CLR register has to be set
318 		 * after INTR_ENABLE is set to 0,
319 		 * otherwise the interrupt cannot actually be cleared
320 		 */
321 		hif_write32_mb(sc, sc->mem +
322 			      (SOC_CORE_BASE_ADDRESS |
323 			       PCIE_INTR_ENABLE_ADDRESS), 0);
324 
325 		hif_write32_mb(sc, sc->mem +
326 			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
327 			       ADRASTEA_BU ?
328 			       (host_enable & host_cause) :
329 			      HOST_GROUP0_MASK);
330 
331 		if (ADRASTEA_BU)
332 			hif_write32_mb(sc, sc->mem + 0x2f100c,
333 				       (host_cause >> 1));
334 
335 		/* IMPORTANT: this extra read transaction is required to
336 		 * flush the posted write buffer
337 		 */
338 		if (!ADRASTEA_BU) {
339 			tmp =
340 				hif_read32_mb(sc, sc->mem +
341 					     (SOC_CORE_BASE_ADDRESS |
342 					      PCIE_INTR_ENABLE_ADDRESS));
343 
344 			if (tmp == 0xdeadbeef) {
345 				hif_err("SoC returns 0xdeadbeef!!");
346 
347 				pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
348 				hif_err("PCI Vendor ID = 0x%04x", val);
349 
350 				pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
351 				hif_err("PCI Device ID = 0x%04x", val);
352 
353 				pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
354 				hif_err("PCI Command = 0x%04x", val);
355 
356 				pci_read_config_word(sc->pdev, PCI_STATUS, &val);
357 				hif_err("PCI Status = 0x%04x", val);
358 
359 				pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
360 						      &bar0);
361 				hif_err("PCI BAR0 = 0x%08x", bar0);
362 
363 				hif_err("RTC_STATE_ADDRESS = 0x%08x",
364 					hif_read32_mb(sc, sc->mem +
365 						PCIE_LOCAL_BASE_ADDRESS
366 						+ RTC_STATE_ADDRESS));
367 				hif_err("PCIE_SOC_WAKE_ADDRESS = 0x%08x",
368 					hif_read32_mb(sc, sc->mem +
369 						PCIE_LOCAL_BASE_ADDRESS
370 						+ PCIE_SOC_WAKE_ADDRESS));
371 				hif_err("0x80008 = 0x%08x, 0x8000c = 0x%08x",
372 					hif_read32_mb(sc, sc->mem + 0x80008),
373 					hif_read32_mb(sc, sc->mem + 0x8000c));
374 				hif_err("0x80010 = 0x%08x, 0x80014 = 0x%08x",
375 					hif_read32_mb(sc, sc->mem + 0x80010),
376 					hif_read32_mb(sc, sc->mem + 0x80014));
377 				hif_err("0x80018 = 0x%08x, 0x8001c = 0x%08x",
378 					hif_read32_mb(sc, sc->mem + 0x80018),
379 					hif_read32_mb(sc, sc->mem + 0x8001c));
380 				QDF_BUG(0);
381 			}
382 
383 			PCI_CLR_CAUSE0_REGISTER(sc);
384 		}
385 
386 		if (HAS_FW_INDICATOR) {
387 			fw_indicator_address = hif_state->fw_indicator_address;
388 			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
389 			if ((fw_indicator != ~0) &&
390 			   (fw_indicator & FW_IND_EVENT_PENDING))
391 				ssr_irq = true;
392 		}
393 
394 		if (Q_TARGET_ACCESS_END(scn) < 0)
395 			return IRQ_HANDLED;
396 	}
397 	/* TBDXXX: Add support for WMAC */
398 
399 	if (ssr_irq) {
400 		sc->irq_event = irq;
401 		qdf_atomic_set(&scn->tasklet_from_intr, 1);
402 
403 		qdf_atomic_inc(&scn->active_tasklet_cnt);
404 		tasklet_schedule(&sc->intr_tq);
405 	} else {
406 		pci_dispatch_interrupt(scn);
407 	}
408 
409 	return IRQ_HANDLED;
410 }
411 
412 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
413 {
414 	return true;            /* FIX THIS */
415 }
416 
417 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
418 {
419 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
420 	int i = 0;
421 
422 	if (!irq || !size) {
423 		return -EINVAL;
424 	}
425 
426 	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
427 		irq[0] = sc->irq;
428 		return 1;
429 	}
430 
431 	if (sc->num_msi_intrs > size) {
432 		qdf_print("Not enough space in irq buffer to return irqs");
433 		return -EINVAL;
434 	}
435 
436 	for (i = 0; i < sc->num_msi_intrs; i++) {
437 		irq[i] = sc->irq +  i + MSI_ASSIGN_CE_INITIAL;
438 	}
439 
440 	return sc->num_msi_intrs;
441 }
442 
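/*
 * Example (illustrative sketch): a hypothetical caller of
 * hif_get_irq_num() above. The destination buffer must have room for
 * every MSI vector, otherwise -EINVAL is returned.
 *
 *	int irqs[WLAN_CFG_MAX_CE_COUNT];
 *	int n = hif_get_irq_num(hif_hdl, irqs, QDF_ARRAY_SIZE(irqs));
 *
 *	if (n < 0)
 *		hif_err("could not fetch irq numbers: %d", n);
 */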
443 
444 /**
445  * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
446  * @scn: hif_softc
447  *
448  * Return: void
449  */
450 #if CONFIG_ATH_PCIE_MAX_PERF == 0
451 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
452 {
453 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
454 	A_target_id_t pci_addr = scn->mem;
455 
456 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
457 	/*
458 	 * If the deferred sleep timer is running cancel it
459 	 * and put the soc into sleep.
460 	 */
461 	if (hif_state->fake_sleep == true) {
462 		qdf_timer_stop(&hif_state->sleep_timer);
463 		if (hif_state->verified_awake == false) {
464 			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
465 				      PCIE_SOC_WAKE_ADDRESS,
466 				      PCIE_SOC_WAKE_RESET);
467 		}
468 		hif_state->fake_sleep = false;
469 	}
470 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
471 }
472 #else
473 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
474 {
475 }
476 #endif
477 
478 #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
479 	hif_read32_mb(sc, (char *)(mem) + \
480 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
481 
482 #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
483 	hif_write32_mb(sc, ((char *)(mem) + \
484 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
485 
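/*
 * Example (illustrative sketch): the two helpers above wrap accesses to
 * the PCIe local register space, e.g. forcing the SoC awake and then
 * sampling RTC_STATE, as hif_pci_device_reset() does below:
 *
 *	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
 *			       PCIE_SOC_WAKE_V_MASK);
 *	val = A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS);
 */
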
486 #ifdef QCA_WIFI_3_0
487 /**
488  * hif_targ_is_awake() - check to see if the target is awake
489  * @hif_ctx: hif context
490  * @mem: mapped io memory (unused)
491  *
492  * emulation never goes to sleep
493  *
494  * Return: true if target is awake
495  */
496 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
497 {
498 	return true;
499 }
500 #else
501 /**
502  * hif_targ_is_awake() - check to see if the target is awake
503  * @scn: hif context
504  * @mem: mapped io memory
505  *
506  * Return: true if the target's clocks are on
507  */
508 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
509 {
510 	uint32_t val;
511 
512 	if (scn->recovery)
513 		return false;
514 	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
515 		+ RTC_STATE_ADDRESS);
516 	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
517 }
518 #endif
519 
520 #define ATH_PCI_RESET_WAIT_MAX 10       /* Ms */
521 static void hif_pci_device_reset(struct hif_pci_softc *sc)
522 {
523 	void __iomem *mem = sc->mem;
524 	int i;
525 	uint32_t val;
526 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
527 
528 	if (!scn->hostdef)
529 		return;
530 
531 	/* NB: Don't check resetok here.  This form of reset
532 	 * is integral to correct operation.
533 	 */
534 
535 	if (!SOC_GLOBAL_RESET_ADDRESS)
536 		return;
537 
538 	if (!mem)
539 		return;
540 
541 	hif_err("Reset Device");
542 
543 	/*
544 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
545 	 * writing WAKE_V, the Target may scribble over Host memory!
546 	 */
547 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
548 			       PCIE_SOC_WAKE_V_MASK);
549 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
550 		if (hif_targ_is_awake(scn, mem))
551 			break;
552 
553 		qdf_mdelay(1);
554 	}
555 
556 	/* Put Target, including PCIe, into RESET. */
557 	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
558 	val |= 1;
559 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
560 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
561 		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
562 		    RTC_STATE_COLD_RESET_MASK)
563 			break;
564 
565 		qdf_mdelay(1);
566 	}
567 
568 	/* Pull Target, including PCIe, out of RESET. */
569 	val &= ~1;
570 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
571 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
572 		if (!
573 		    (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
574 		     RTC_STATE_COLD_RESET_MASK))
575 			break;
576 
577 		qdf_mdelay(1);
578 	}
579 
580 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
581 			       PCIE_SOC_WAKE_RESET);
582 }
583 
584 /* CPU warm reset function
585  * Steps:
586  * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
587  * 2. Clear the FW_INDICATOR_ADDRESS - so Target CPU initializes FW
588  *    correctly on WARM reset
589  * 3. Clear TARGET CPU LF timer interrupt
590  * 4. Reset all CEs to clear any pending CE transactions
591  * 5. Warm reset CPU
592  */
593 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
594 {
595 	void __iomem *mem = sc->mem;
596 	int i;
597 	uint32_t val;
598 	uint32_t fw_indicator;
599 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
600 
601 	/* NB: Don't check resetok here.  This form of reset is
602 	 * integral to correct operation.
603 	 */
604 
605 	if (!mem)
606 		return;
607 
608 	hif_debug("Target Warm Reset");
609 
610 	/*
611 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
612 	 * writing WAKE_V, the Target may scribble over Host memory!
613 	 */
614 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
615 			       PCIE_SOC_WAKE_V_MASK);
616 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
617 		if (hif_targ_is_awake(scn, mem))
618 			break;
619 		qdf_mdelay(1);
620 	}
621 
622 	/*
623 	 * Disable Pending interrupts
624 	 */
625 	val =
626 		hif_read32_mb(sc, mem +
627 			     (SOC_CORE_BASE_ADDRESS |
628 			      PCIE_INTR_CAUSE_ADDRESS));
629 	hif_debug("Host Intr Cause reg 0x%x: value : 0x%x",
630 		  (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
631 	/* Target CPU Intr Cause */
632 	val = hif_read32_mb(sc, mem +
633 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
634 	hif_debug("Target CPU Intr Cause 0x%x", val);
635 
636 	val =
637 		hif_read32_mb(sc, mem +
638 			     (SOC_CORE_BASE_ADDRESS |
639 			      PCIE_INTR_ENABLE_ADDRESS));
640 	hif_write32_mb(sc, (mem +
641 		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
642 	hif_write32_mb(sc, (mem +
643 		       (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
644 		       HOST_GROUP0_MASK);
645 
646 	qdf_mdelay(100);
647 
648 	/* Clear FW_INDICATOR_ADDRESS */
649 	if (HAS_FW_INDICATOR) {
650 		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
651 		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
652 	}
653 
654 	/* Clear Target LF Timer interrupts */
655 	val =
656 		hif_read32_mb(sc, mem +
657 			     (RTC_SOC_BASE_ADDRESS +
658 			      SOC_LF_TIMER_CONTROL0_ADDRESS));
659 	hif_debug("addr 0x%x : 0x%x",
660 	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
661 	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
662 	hif_write32_mb(sc, mem +
663 		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
664 		      val);
665 
666 	/* Reset CE */
667 	val =
668 		hif_read32_mb(sc, mem +
669 			     (RTC_SOC_BASE_ADDRESS |
670 			      SOC_RESET_CONTROL_ADDRESS));
671 	val |= SOC_RESET_CONTROL_CE_RST_MASK;
672 	hif_write32_mb(sc, (mem +
673 		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
674 		      val);
675 	val =
676 		hif_read32_mb(sc, mem +
677 			     (RTC_SOC_BASE_ADDRESS |
678 			      SOC_RESET_CONTROL_ADDRESS));
679 	qdf_mdelay(10);
680 
681 	/* CE unreset */
682 	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
683 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
684 		       SOC_RESET_CONTROL_ADDRESS), val);
685 	val =
686 		hif_read32_mb(sc, mem +
687 			     (RTC_SOC_BASE_ADDRESS |
688 			      SOC_RESET_CONTROL_ADDRESS));
689 	qdf_mdelay(10);
690 
691 	/* Read Target CPU Intr Cause */
692 	val = hif_read32_mb(sc, mem +
693 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
694 	hif_debug("Target CPU Intr Cause after CE reset 0x%x", val);
695 
696 	/* CPU warm RESET */
697 	val =
698 		hif_read32_mb(sc, mem +
699 			     (RTC_SOC_BASE_ADDRESS |
700 			      SOC_RESET_CONTROL_ADDRESS));
701 	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
702 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
703 		       SOC_RESET_CONTROL_ADDRESS), val);
704 	val =
705 		hif_read32_mb(sc, mem +
706 			     (RTC_SOC_BASE_ADDRESS |
707 			      SOC_RESET_CONTROL_ADDRESS));
708 	hif_debug("RESET_CONTROL after cpu warm reset 0x%x", val);
709 
710 	qdf_mdelay(100);
711 	hif_debug("Target Warm reset complete");
712 
713 }
714 
715 #ifndef QCA_WIFI_3_0
716 /* only applicable to legacy ce */
717 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
718 {
719 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
720 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
721 	void __iomem *mem = sc->mem;
722 	uint32_t val;
723 
724 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
725 		return ATH_ISR_NOSCHED;
726 	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
727 	if (Q_TARGET_ACCESS_END(scn) < 0)
728 		return ATH_ISR_SCHED;
729 
730 	hif_debug("FW_INDICATOR register is 0x%x", val);
731 
732 	if (val & FW_IND_HELPER)
733 		return 0;
734 
735 	return 1;
736 }
737 #endif
738 
739 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
740 {
741 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
742 	uint16_t device_id = 0;
743 	uint32_t val;
744 	uint16_t timeout_count = 0;
745 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
746 
747 	/* Check device ID from PCIe configuration space for link status */
748 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
749 	if (device_id != sc->devid) {
750 		hif_err("Device ID does match (read 0x%x, expect 0x%x)",
751 			device_id, sc->devid);
752 		return -EACCES;
753 	}
754 
755 	/* Check PCIe local register for bar/memory access */
756 	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
757 			   RTC_STATE_ADDRESS);
758 	hif_debug("RTC_STATE_ADDRESS is %08x", val);
759 
760 	/* Try to wake up target if it sleeps */
761 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
762 		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
763 	hif_debug("PCIE_SOC_WAKE_ADDRESS is %08x",
764 		hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
765 		PCIE_SOC_WAKE_ADDRESS));
766 
767 	/* Check if target can be woken up */
768 	while (!hif_targ_is_awake(scn, sc->mem)) {
769 		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
770 			hif_err("wake up timeout, %08x, %08x",
771 				hif_read32_mb(sc, sc->mem +
772 				     PCIE_LOCAL_BASE_ADDRESS +
773 				     RTC_STATE_ADDRESS),
774 				hif_read32_mb(sc, sc->mem +
775 				     PCIE_LOCAL_BASE_ADDRESS +
776 				     PCIE_SOC_WAKE_ADDRESS));
777 			return -EACCES;
778 		}
779 
780 		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
781 			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
782 
783 		qdf_mdelay(100);
784 		timeout_count += 100;
785 	}
786 
787 	/* Check Power register for SoC internal bus issues */
788 	val =
789 		hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
790 			     SOC_POWER_REG_OFFSET);
791 	hif_debug("Power register is %08x", val);
792 
793 	return 0;
794 }
795 
796 /**
797  * __hif_pci_dump_registers(): dump other PCI debug registers
798  * @scn: struct hif_softc
799  *
800  * This function dumps pci debug registers.  The parent function
801  * dumps the copy engine registers before calling this function.
802  *
803  * Return: void
804  */
805 static void __hif_pci_dump_registers(struct hif_softc *scn)
806 {
807 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
808 	void __iomem *mem = sc->mem;
809 	uint32_t val, i, j;
810 	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
811 	uint32_t ce_base;
812 
813 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
814 		return;
815 
816 	/* DEBUG_INPUT_SEL_SRC = 0x6 */
817 	val =
818 		hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
819 			     WLAN_DEBUG_INPUT_SEL_OFFSET);
820 	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
821 	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
822 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
823 		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);
824 
825 	/* DEBUG_CONTROL_ENABLE = 0x1 */
826 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
827 			   WLAN_DEBUG_CONTROL_OFFSET);
828 	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
829 	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
830 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
831 		      WLAN_DEBUG_CONTROL_OFFSET, val);
832 
833 	hif_debug("Debug: inputsel: %x dbgctrl: %x",
834 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
835 			    WLAN_DEBUG_INPUT_SEL_OFFSET),
836 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
837 			    WLAN_DEBUG_CONTROL_OFFSET));
838 
839 	hif_debug("Debug CE");
840 	/* Loop CE debug output */
841 	/* AMBA_DEBUG_BUS_SEL = 0xc */
842 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
843 			    AMBA_DEBUG_BUS_OFFSET);
844 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
845 	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
846 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
847 		       val);
848 
849 	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
850 		/* For each wrapper index write CE_WRAPPER_DEBUG_SEL = idx */
851 		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
852 				   CE_WRAPPER_DEBUG_OFFSET);
853 		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
854 		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
855 		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
856 			      CE_WRAPPER_DEBUG_OFFSET, val);
857 
858 		hif_debug("ce wrapper: %d amdbg: %x cewdbg: %x",
859 			  wrapper_idx[i],
860 			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
861 				AMBA_DEBUG_BUS_OFFSET),
862 			  hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
863 				CE_WRAPPER_DEBUG_OFFSET));
864 
865 		if (wrapper_idx[i] <= 7) {
866 			for (j = 0; j <= 5; j++) {
867 				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
868 				/* For (j=0~5) write CE_DEBUG_SEL = j */
869 				val =
870 					hif_read32_mb(sc, mem + ce_base +
871 						     CE_DEBUG_OFFSET);
872 				val &= ~CE_DEBUG_SEL_MASK;
873 				val |= CE_DEBUG_SEL_SET(j);
874 				hif_write32_mb(sc, mem + ce_base +
875 					       CE_DEBUG_OFFSET, val);
876 
877 				/* read (@gpio_athr_wlan_reg)
878 				 * WLAN_DEBUG_OUT_DATA
879 				 */
880 				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
881 						    + WLAN_DEBUG_OUT_OFFSET);
882 				val = WLAN_DEBUG_OUT_DATA_GET(val);
883 
884 				hif_debug("module%d: cedbg: %x out: %x",
885 					  j,
886 					  hif_read32_mb(sc, mem + ce_base +
887 						CE_DEBUG_OFFSET), val);
888 			}
889 		} else {
890 			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
891 			val =
892 				hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
893 					     WLAN_DEBUG_OUT_OFFSET);
894 			val = WLAN_DEBUG_OUT_DATA_GET(val);
895 
896 			hif_debug("out: %x", val);
897 		}
898 	}
899 
900 	hif_debug("Debug PCIe:");
901 	/* Loop PCIe debug output */
902 	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
903 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
904 			    AMBA_DEBUG_BUS_OFFSET);
905 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
906 	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
907 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
908 		       AMBA_DEBUG_BUS_OFFSET, val);
909 
910 	for (i = 0; i <= 8; i++) {
911 		/* For (i=0~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
912 		val =
913 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
914 				     AMBA_DEBUG_BUS_OFFSET);
915 		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
916 		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
917 		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
918 			       AMBA_DEBUG_BUS_OFFSET, val);
919 
920 		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
921 		val =
922 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
923 				     WLAN_DEBUG_OUT_OFFSET);
924 		val = WLAN_DEBUG_OUT_DATA_GET(val);
925 
926 		hif_debug("amdbg: %x out: %x %x",
927 			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
928 				WLAN_DEBUG_OUT_OFFSET), val,
929 			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
930 				WLAN_DEBUG_OUT_OFFSET));
931 	}
932 
933 	Q_TARGET_ACCESS_END(scn);
934 }
935 
936 /**
937  * hif_pci_dump_registers(): dump bus debug registers
938  * @hif_ctx: struct hif_softc
939  *
940  * This function dumps hif bus debug registers
941  *
942  * Return: 0 for success or error code
943  */
944 int hif_pci_dump_registers(struct hif_softc *hif_ctx)
945 {
946 	int status;
947 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
948 
949 	status = hif_dump_ce_registers(scn);
950 
951 	if (status)
952 		hif_err("Dump CE Registers Failed");
953 
954 	/* dump non copy engine pci registers */
955 	__hif_pci_dump_registers(scn);
956 
957 	return 0;
958 }
959 
960 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
961 
962 /* work handler to reschedule wlan_tasklet in SLUB debug build */
963 static void reschedule_tasklet_work_handler(void *arg)
964 {
965 	struct hif_pci_softc *sc = arg;
966 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
967 
968 	if (!scn) {
969 		hif_err("hif_softc is NULL");
970 		return;
971 	}
972 
973 	if (scn->hif_init_done == false) {
974 		hif_err("wlan driver is unloaded");
975 		return;
976 	}
977 
978 	tasklet_schedule(&sc->intr_tq);
979 }
980 
981 /**
982  * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
983  * work
984  * @sc: HIF PCI Context
985  *
986  * Return: void
987  */
988 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
989 {
990 	qdf_create_work(0, &sc->reschedule_tasklet_work,
991 				reschedule_tasklet_work_handler, NULL);
992 }
993 #else
994 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
995 #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
996 
997 void wlan_tasklet(unsigned long data)
998 {
999 	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
1000 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1001 
1002 	if (scn->hif_init_done == false)
1003 		goto end;
1004 
1005 	if (qdf_atomic_read(&scn->link_suspended))
1006 		goto end;
1007 
1008 	if (!ADRASTEA_BU) {
1009 		hif_fw_interrupt_handler(sc->irq_event, scn);
1010 		if (scn->target_status == TARGET_STATUS_RESET)
1011 			goto end;
1012 	}
1013 
1014 end:
1015 	qdf_atomic_set(&scn->tasklet_from_intr, 0);
1016 	qdf_atomic_dec(&scn->active_tasklet_cnt);
1017 }
1018 
1019 /**
1020  * hif_disable_power_gating() - disable HW power gating
1021  * @hif_ctx: hif context
1022  *
1023  * disables pcie L1 power states
1024  */
1025 static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1026 {
1027 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1028 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1029 
1030 	if (!scn) {
1031 		hif_err("Could not disable ASPM scn is null");
1032 		return;
1033 	}
1034 
1035 	/* Disable ASPM when pkt log is enabled */
1036 	pfrm_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1037 	pfrm_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1038 }
1039 
1040 /**
1041  * hif_enable_power_gating() - enable HW power gating
1042  * @sc: hif context
1043  *
1044  * enables pcie L1 power states
1045  */
1046 static void hif_enable_power_gating(struct hif_pci_softc *sc)
1047 {
1048 	if (!sc) {
1049 		hif_err("Could not disable ASPM scn is null");
1050 		return;
1051 	}
1052 
1053 	/* Re-enable ASPM after firmware/OTP download is complete */
1054 	pfrm_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1055 }
1056 
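/*
 * Example (illustrative sketch): the two helpers above are intended to
 * be used as a pair around a window where ASPM must stay disabled, for
 * instance while packet log is active or firmware/OTP download is in
 * progress. A hypothetical call site:
 *
 *	hif_disable_power_gating(hif_ctx);
 *	... download firmware/OTP with ASPM disabled ...
 *	hif_enable_power_gating(sc);
 */
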
1057 /**
1058  * hif_pci_enable_power_management() - enable power management
1059  * @hif_sc: hif context
1060  * @is_packet_log_enabled: true if packet log is enabled
1061  *
1062  * Enables runtime PM, ASPM (see hif_enable_power_gating) and re-enables
1063  * soc sleep after driver load (see hif_pci_target_sleep_state_adjust).
1064  *
1065  * note: epping mode does not call this function as it does not
1066  *       care about saving power.
1067  */
1068 void hif_pci_enable_power_management(struct hif_softc *hif_sc,
1069 				 bool is_packet_log_enabled)
1070 {
1071 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
1072 	uint32_t mode;
1073 
1074 	if (!pci_ctx) {
1075 		hif_err("hif_ctx null");
1076 		return;
1077 	}
1078 
1079 	mode = hif_get_conparam(hif_sc);
1080 	if (mode == QDF_GLOBAL_FTM_MODE) {
1081 		hif_info("Enable power gating for FTM mode");
1082 		hif_enable_power_gating(pci_ctx);
1083 		return;
1084 	}
1085 
1086 	hif_rtpm_start(hif_sc);
1087 
1088 	if (!is_packet_log_enabled)
1089 		hif_enable_power_gating(pci_ctx);
1090 
1091 	if (!CONFIG_ATH_PCIE_MAX_PERF &&
1092 	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
1093 	    !ce_srng_based(hif_sc)) {
1094 		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
1095 		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
1096 			hif_err("Failed to set target to sleep");
1097 	}
1098 }
1099 
1100 /**
1101  * hif_pci_disable_power_management() - disable power management
1102  * @hif_ctx: hif context
1103  *
1104  * Currently disables runtime PM. Should be updated to behave
1105  * gracefully if runtime PM is not started, and to take care
1106  * of ASPM and soc sleep for driver load.
1107  */
1108 void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
1109 {
1110 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1111 
1112 	if (!pci_ctx) {
1113 		hif_err("hif_ctx null");
1114 		return;
1115 	}
1116 
1117 	hif_rtpm_stop(hif_ctx);
1118 }
1119 
1120 void hif_pci_display_stats(struct hif_softc *hif_ctx)
1121 {
1122 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1123 
1124 	if (!pci_ctx) {
1125 		hif_err("hif_ctx null");
1126 		return;
1127 	}
1128 	hif_display_ce_stats(hif_ctx);
1129 
1130 	hif_print_pci_stats(pci_ctx);
1131 }
1132 
1133 void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1134 {
1135 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1136 
1137 	if (!pci_ctx) {
1138 		hif_err("hif_ctx null");
1139 		return;
1140 	}
1141 	hif_clear_ce_stats(&pci_ctx->ce_sc);
1142 }
1143 
1144 #define ATH_PCI_PROBE_RETRY_MAX 3
1145 /**
1146  * hif_pci_open(): hif_bus_open
1147  * @hif_ctx: hif context
1148  * @bus_type: bus type
1149  *
1150  * Return: QDF_STATUS
1151  */
1152 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
1153 {
1154 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1155 
1156 	hif_ctx->bus_type = bus_type;
1157 	hif_rtpm_open(hif_ctx);
1158 
1159 	qdf_spinlock_create(&sc->irq_lock);
1160 
1161 	return hif_ce_open(hif_ctx);
1162 }
1163 
1164 /**
1165  * hif_wake_target_cpu() - wake the target's cpu
1166  * @scn: hif context
1167  *
1168  * Send an interrupt to the device to wake up the Target CPU
1169  * so it has an opportunity to notice any changed state.
1170  */
1171 static void hif_wake_target_cpu(struct hif_softc *scn)
1172 {
1173 	QDF_STATUS rv;
1174 	uint32_t core_ctrl;
1175 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1176 
1177 	rv = hif_diag_read_access(hif_hdl,
1178 				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1179 				  &core_ctrl);
1180 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1181 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1182 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1183 
1184 	rv = hif_diag_write_access(hif_hdl,
1185 				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1186 				   core_ctrl);
1187 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1188 }
1189 
1190 /**
1191  * soc_wake_reset() - allow the target to go to sleep
1192  * @scn: hif_softc
1193  *
1194  * Clear the force wake register.  This is called from
1195  * hif_sleep_entry and when canceling the deferred sleep timer.
1196  */
1197 static void soc_wake_reset(struct hif_softc *scn)
1198 {
1199 	hif_write32_mb(scn, scn->mem +
1200 		PCIE_LOCAL_BASE_ADDRESS +
1201 		PCIE_SOC_WAKE_ADDRESS,
1202 		PCIE_SOC_WAKE_RESET);
1203 }
1204 
1205 /**
1206  * hif_sleep_entry() - gate target sleep
1207  * @arg: hif context
1208  *
1209  * This function is the callback for the sleep timer.
1210  * Check if the last force awake critical section ended at least
1211  * HIF_MIN_SLEEP_INACTIVITY_TIME_MS ago.  If it did, allow the
1212  * target to go to sleep and cancel the sleep timer;
1213  * otherwise reschedule the sleep timer.
1214  */
1215 static void hif_sleep_entry(void *arg)
1216 {
1217 	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1218 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1219 	uint32_t idle_ms;
1220 
1221 	if (scn->recovery)
1222 		return;
1223 
1224 	if (hif_is_driver_unloading(scn))
1225 		return;
1226 
1227 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1228 	if (hif_state->fake_sleep) {
1229 		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1230 						    - hif_state->sleep_ticks);
1231 		if (!hif_state->verified_awake &&
1232 		    idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1233 			if (!qdf_atomic_read(&scn->link_suspended)) {
1234 				soc_wake_reset(scn);
1235 				hif_state->fake_sleep = false;
1236 			}
1237 		} else {
1238 			qdf_timer_stop(&hif_state->sleep_timer);
1239 			qdf_timer_start(&hif_state->sleep_timer,
1240 					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1241 		}
1242 	}
1243 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1244 }
1245 
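/*
 * Example (illustrative sketch): the lifecycle of the sleep timer that
 * fires hif_sleep_entry(), mirroring what hif_pci_bus_configure() does
 * later in this file:
 *
 *	qdf_timer_init(NULL, &hif_state->sleep_timer, hif_sleep_entry,
 *		       (void *)hif_state, QDF_TIMER_TYPE_WAKE_APPS);
 *	qdf_timer_start(&hif_state->sleep_timer,
 *			HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
 *	...
 *	qdf_timer_stop(&hif_state->sleep_timer);
 *	qdf_timer_free(&hif_state->sleep_timer);
 */
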
1246 #define HIF_HIA_MAX_POLL_LOOP    1000000
1247 #define HIF_HIA_POLLING_DELAY_MS 10
1248 
1249 #ifdef QCA_HIF_HIA_EXTND
1250 
1251 static void hif_set_hia_extnd(struct hif_softc *scn)
1252 {
1253 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1254 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1255 	uint32_t target_type = tgt_info->target_type;
1256 
1257 	hif_info("E");
1258 
1259 	if ((target_type == TARGET_TYPE_AR900B) ||
1260 			target_type == TARGET_TYPE_QCA9984 ||
1261 			target_type == TARGET_TYPE_QCA9888) {
1262 		/* CHIP revision is bits 8-11 of the CHIP_ID register (0xec)
1263 		 * in RTC space
1264 		 */
1265 		tgt_info->target_revision
1266 			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
1267 					+ CHIP_ID_ADDRESS));
1268 		qdf_print("chip_id 0x%x chip_revision 0x%x",
1269 			  target_type, tgt_info->target_revision);
1270 	}
1271 
1272 	{
1273 		uint32_t flag2_value = 0;
1274 		uint32_t flag2_targ_addr =
1275 			host_interest_item_address(target_type,
1276 			offsetof(struct host_interest_s, hi_skip_clock_init));
1277 
1278 		if ((ar900b_20_targ_clk != -1) &&
1279 			(frac != -1) && (intval != -1)) {
1280 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1281 				&flag2_value);
1282 			qdf_print("\n Setting clk_override");
1283 			flag2_value |= CLOCK_OVERRIDE;
1284 
1285 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1286 					flag2_value);
1287 			qdf_print("\n CLOCK PLL val set %d", flag2_value);
1288 		} else {
1289 			qdf_print("\n CLOCK PLL skipped");
1290 		}
1291 	}
1292 
1293 	if (target_type == TARGET_TYPE_AR900B
1294 			|| target_type == TARGET_TYPE_QCA9984
1295 			|| target_type == TARGET_TYPE_QCA9888) {
1296 
1297 		/* For AR900B 2.0 a 300 MHz clock is used; right now we assume
1298 		 * it will be supplied through module parameters. If it is not
1299 		 * supplied, the default is assumed (same behavior as 1.0).
1300 		 * The 1.0 clock cannot be tuned, so reset to defaults.
1301 		 */
1302 
1303 		qdf_print(KERN_INFO
1304 			  "%s: setting the target pll frac %x intval %x",
1305 			  __func__, frac, intval);
1306 
1307 		/* do not touch frac, and int val, let them be default -1,
1308 		 * if desired, host can supply these through module params
1309 		 */
1310 		if (frac != -1 || intval != -1) {
1311 			uint32_t flag2_value = 0;
1312 			uint32_t flag2_targ_addr;
1313 
1314 			flag2_targ_addr =
1315 				host_interest_item_address(target_type,
1316 				offsetof(struct host_interest_s,
1317 					hi_clock_info));
1318 			hif_diag_read_access(hif_hdl,
1319 				flag2_targ_addr, &flag2_value);
1320 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1321 				  flag2_value);
1322 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1323 			qdf_print("\n INT Val %x  Address %x",
1324 				  intval, flag2_value + 4);
1325 			hif_diag_write_access(hif_hdl,
1326 					flag2_value + 4, intval);
1327 		} else {
1328 			qdf_print(KERN_INFO
1329 				  "%s: no frac provided, skipping pre-configuring PLL",
1330 				  __func__);
1331 		}
1332 
1333 		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
1334 		if ((target_type == TARGET_TYPE_AR900B)
1335 			&& (tgt_info->target_revision == AR900B_REV_2)
1336 			&& ar900b_20_targ_clk != -1) {
1337 			uint32_t flag2_value = 0;
1338 			uint32_t flag2_targ_addr;
1339 
1340 			flag2_targ_addr
1341 				= host_interest_item_address(target_type,
1342 					offsetof(struct host_interest_s,
1343 					hi_desired_cpu_speed_hz));
1344 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1345 							&flag2_value);
1346 			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
1347 				  flag2_value);
1348 			hif_diag_write_access(hif_hdl, flag2_value,
1349 				ar900b_20_targ_clk/*300000000u*/);
1350 		} else if (target_type == TARGET_TYPE_QCA9888) {
1351 			uint32_t flag2_targ_addr;
1352 
1353 			if (200000000u != qca9888_20_targ_clk) {
1354 				qca9888_20_targ_clk = 300000000u;
1355 				/* Setting the target clock speed to 300 mhz */
1356 			}
1357 
1358 			flag2_targ_addr
1359 				= host_interest_item_address(target_type,
1360 					offsetof(struct host_interest_s,
1361 					hi_desired_cpu_speed_hz));
1362 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1363 				qca9888_20_targ_clk);
1364 		} else {
1365 			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
1366 				  __func__);
1367 		}
1368 	} else {
1369 		if (frac != -1 || intval != -1) {
1370 			uint32_t flag2_value = 0;
1371 			uint32_t flag2_targ_addr =
1372 				host_interest_item_address(target_type,
1373 					offsetof(struct host_interest_s,
1374 							hi_clock_info));
1375 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1376 						&flag2_value);
1377 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1378 				  flag2_value);
1379 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1380 			qdf_print("\n INT Val %x  Address %x", intval,
1381 				  flag2_value + 4);
1382 			hif_diag_write_access(hif_hdl, flag2_value + 4,
1383 					      intval);
1384 		}
1385 	}
1386 }
1387 
1388 #else
1389 
1390 static void hif_set_hia_extnd(struct hif_softc *scn)
1391 {
1392 }
1393 
1394 #endif
1395 
1396 /**
1397  * hif_set_hia() - fill out the host interest area
1398  * @scn: hif context
1399  *
1400  * This is replaced by hif_wlan_enable for integrated targets.
1401  * This fills out the host interest area.  The firmware will
1402  * process these memory addresses when it is first brought out
1403  * of reset.
1404  *
1405  * Return: 0 for success.
1406  */
1407 static int hif_set_hia(struct hif_softc *scn)
1408 {
1409 	QDF_STATUS rv;
1410 	uint32_t interconnect_targ_addr = 0;
1411 	uint32_t pcie_state_targ_addr = 0;
1412 	uint32_t pipe_cfg_targ_addr = 0;
1413 	uint32_t svc_to_pipe_map = 0;
1414 	uint32_t pcie_config_flags = 0;
1415 	uint32_t flag2_value = 0;
1416 	uint32_t flag2_targ_addr = 0;
1417 #ifdef QCA_WIFI_3_0
1418 	uint32_t host_interest_area = 0;
1419 	uint8_t i;
1420 #else
1421 	uint32_t ealloc_value = 0;
1422 	uint32_t ealloc_targ_addr = 0;
1423 	uint8_t banks_switched = 1;
1424 	uint32_t chip_id;
1425 #endif
1426 	uint32_t pipe_cfg_addr;
1427 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1428 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1429 	uint32_t target_type = tgt_info->target_type;
1430 	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
1431 	static struct CE_pipe_config *target_ce_config;
1432 	struct service_to_pipe *target_service_to_ce_map;
1433 
1434 	hif_info("E");
1435 
1436 	hif_get_target_ce_config(scn,
1437 				 &target_ce_config, &target_ce_config_sz,
1438 				 &target_service_to_ce_map,
1439 				 &target_service_to_ce_map_sz,
1440 				 NULL, NULL);
1441 
1442 	if (ADRASTEA_BU)
1443 		return 0;
1444 
1445 #ifdef QCA_WIFI_3_0
1446 	i = 0;
1447 	while (i < HIF_HIA_MAX_POLL_LOOP) {
1448 		host_interest_area = hif_read32_mb(scn, scn->mem +
1449 						A_SOC_CORE_SCRATCH_0_ADDRESS);
1450 		if ((host_interest_area & 0x01) == 0) {
1451 			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1452 			host_interest_area = 0;
1453 			i++;
1454 			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1455 				hif_err("poll timeout: %d", i);
1456 		} else {
1457 			host_interest_area &= (~0x01);
1458 			hif_write32_mb(scn, scn->mem + 0x113014, 0);
1459 			break;
1460 		}
1461 	}
1462 
1463 	if (i >= HIF_HIA_MAX_POLL_LOOP) {
1464 		hif_err("hia polling timeout");
1465 		return -EIO;
1466 	}
1467 
1468 	if (host_interest_area == 0) {
1469 		hif_err("host_interest_area = 0");
1470 		return -EIO;
1471 	}
1472 
1473 	interconnect_targ_addr = host_interest_area +
1474 			offsetof(struct host_interest_area_t,
1475 			hi_interconnect_state);
1476 
1477 	flag2_targ_addr = host_interest_area +
1478 			offsetof(struct host_interest_area_t, hi_option_flag2);
1479 
1480 #else
1481 	interconnect_targ_addr = hif_hia_item_address(target_type,
1482 		offsetof(struct host_interest_s, hi_interconnect_state));
1483 	ealloc_targ_addr = hif_hia_item_address(target_type,
1484 		offsetof(struct host_interest_s, hi_early_alloc));
1485 	flag2_targ_addr = hif_hia_item_address(target_type,
1486 		offsetof(struct host_interest_s, hi_option_flag2));
1487 #endif
1488 	/* Supply Target-side CE configuration */
1489 	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1490 			  &pcie_state_targ_addr);
1491 	if (rv != QDF_STATUS_SUCCESS) {
1492 		hif_err("interconnect_targ_addr = 0x%0x, ret = %d",
1493 			interconnect_targ_addr, rv);
1494 		goto done;
1495 	}
1496 	if (pcie_state_targ_addr == 0) {
1497 		rv = QDF_STATUS_E_FAILURE;
1498 		hif_err("pcie state addr is 0");
1499 		goto done;
1500 	}
1501 	pipe_cfg_addr = pcie_state_targ_addr +
1502 			  offsetof(struct pcie_state_s,
1503 			  pipe_cfg_addr);
1504 	rv = hif_diag_read_access(hif_hdl,
1505 			  pipe_cfg_addr,
1506 			  &pipe_cfg_targ_addr);
1507 	if (rv != QDF_STATUS_SUCCESS) {
1508 		hif_err("pipe_cfg_addr = 0x%0x, ret = %d", pipe_cfg_addr, rv);
1509 		goto done;
1510 	}
1511 	if (pipe_cfg_targ_addr == 0) {
1512 		rv = QDF_STATUS_E_FAILURE;
1513 		hif_err("pipe cfg addr is 0");
1514 		goto done;
1515 	}
1516 
1517 	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1518 			(uint8_t *) target_ce_config,
1519 			target_ce_config_sz);
1520 
1521 	if (rv != QDF_STATUS_SUCCESS) {
1522 		hif_err("write pipe cfg: %d", rv);
1523 		goto done;
1524 	}
1525 
1526 	rv = hif_diag_read_access(hif_hdl,
1527 			  pcie_state_targ_addr +
1528 			  offsetof(struct pcie_state_s,
1529 			   svc_to_pipe_map),
1530 			  &svc_to_pipe_map);
1531 	if (rv != QDF_STATUS_SUCCESS) {
1532 		hif_err("get svc/pipe map: %d", rv);
1533 		goto done;
1534 	}
1535 	if (svc_to_pipe_map == 0) {
1536 		rv = QDF_STATUS_E_FAILURE;
1537 		hif_err("svc_to_pipe map is 0");
1538 		goto done;
1539 	}
1540 
1541 	rv = hif_diag_write_mem(hif_hdl,
1542 			svc_to_pipe_map,
1543 			(uint8_t *) target_service_to_ce_map,
1544 			target_service_to_ce_map_sz);
1545 	if (rv != QDF_STATUS_SUCCESS) {
1546 		hif_err("write svc/pipe map: %d", rv);
1547 		goto done;
1548 	}
1549 
1550 	rv = hif_diag_read_access(hif_hdl,
1551 			pcie_state_targ_addr +
1552 			offsetof(struct pcie_state_s,
1553 			config_flags),
1554 			&pcie_config_flags);
1555 	if (rv != QDF_STATUS_SUCCESS) {
1556 		hif_err("get pcie config_flags: %d", rv);
1557 		goto done;
1558 	}
1559 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1560 	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1561 #else
1562 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1563 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1564 	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1565 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1566 	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1567 #endif
1568 	rv = hif_diag_write_mem(hif_hdl,
1569 			pcie_state_targ_addr +
1570 			offsetof(struct pcie_state_s,
1571 			config_flags),
1572 			(uint8_t *) &pcie_config_flags,
1573 			sizeof(pcie_config_flags));
1574 	if (rv != QDF_STATUS_SUCCESS) {
1575 		hif_err("write pcie config_flags: %d", rv);
1576 		goto done;
1577 	}
1578 
1579 #ifndef QCA_WIFI_3_0
1580 	/* configure early allocation */
1581 	ealloc_targ_addr = hif_hia_item_address(target_type,
1582 						offsetof(
1583 						struct host_interest_s,
1584 						hi_early_alloc));
1585 
1586 	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1587 			&ealloc_value);
1588 	if (rv != QDF_STATUS_SUCCESS) {
1589 		hif_err("get early alloc val: %d", rv);
1590 		goto done;
1591 	}
1592 
1593 	/* 1 bank is switched to IRAM, except ROME 1.0 */
1594 	ealloc_value |=
1595 		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1596 		 HI_EARLY_ALLOC_MAGIC_MASK);
1597 
1598 	rv = hif_diag_read_access(hif_hdl,
1599 			  CHIP_ID_ADDRESS |
1600 			  RTC_SOC_BASE_ADDRESS, &chip_id);
1601 	if (rv != QDF_STATUS_SUCCESS) {
1602 		hif_err("get chip id val: %d", rv);
1603 		goto done;
1604 	}
1605 	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1606 		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1607 		switch (CHIP_ID_REVISION_GET(chip_id)) {
1608 		case 0x2:       /* ROME 1.3 */
1609 			/* 2 banks are switched to IRAM */
1610 			banks_switched = 2;
1611 			break;
1612 		case 0x4:       /* ROME 2.1 */
1613 		case 0x5:       /* ROME 2.2 */
1614 			banks_switched = 6;
1615 			break;
1616 		case 0x8:       /* ROME 3.0 */
1617 		case 0x9:       /* ROME 3.1 */
1618 		case 0xA:       /* ROME 3.2 */
1619 			banks_switched = 9;
1620 			break;
1621 		case 0x0:       /* ROME 1.0 */
1622 		case 0x1:       /* ROME 1.1 */
1623 		default:
1624 			/* 3 banks are switched to IRAM */
1625 			banks_switched = 3;
1626 			break;
1627 		}
1628 	}
1629 
1630 	ealloc_value |=
1631 		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1632 		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1633 
1634 	rv = hif_diag_write_access(hif_hdl,
1635 				ealloc_targ_addr,
1636 				ealloc_value);
1637 	if (rv != QDF_STATUS_SUCCESS) {
1638 		hif_err("set early alloc val: %d", rv);
1639 		goto done;
1640 	}
1641 #endif
1642 	if ((target_type == TARGET_TYPE_AR900B)
1643 			|| (target_type == TARGET_TYPE_QCA9984)
1644 			|| (target_type == TARGET_TYPE_QCA9888)
1645 			|| (target_type == TARGET_TYPE_AR9888)) {
1646 		hif_set_hia_extnd(scn);
1647 	}
1648 
1649 	/* Tell Target to proceed with initialization */
1650 	flag2_targ_addr = hif_hia_item_address(target_type,
1651 						offsetof(
1652 						struct host_interest_s,
1653 						hi_option_flag2));
1654 
1655 	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1656 			  &flag2_value);
1657 	if (rv != QDF_STATUS_SUCCESS) {
1658 		hif_err("get option val: %d", rv);
1659 		goto done;
1660 	}
1661 
1662 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1663 	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1664 			   flag2_value);
1665 	if (rv != QDF_STATUS_SUCCESS) {
1666 		hif_err("set option val: %d", rv);
1667 		goto done;
1668 	}
1669 
1670 	hif_wake_target_cpu(scn);
1671 
1672 done:
1673 
1674 	return qdf_status_to_os_return(rv);
1675 }
1676 
1677 /**
1678  * hif_pci_bus_configure() - configure the pcie bus
1679  * @hif_sc: pointer to the hif context.
1680  *
1681  * Return: 0 for success, nonzero for failure.
1682  */
1683 int hif_pci_bus_configure(struct hif_softc *hif_sc)
1684 {
1685 	int status = 0;
1686 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1687 	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
1688 
1689 	hif_ce_prepare_config(hif_sc);
1690 
1691 	/* initialize sleep state adjust variables */
1693 	hif_state->keep_awake_count = 0;
1694 	hif_state->fake_sleep = false;
1695 	hif_state->sleep_ticks = 0;
1696 
1697 	qdf_timer_init(NULL, &hif_state->sleep_timer,
1698 			       hif_sleep_entry, (void *)hif_state,
1699 			       QDF_TIMER_TYPE_WAKE_APPS);
1700 	hif_state->sleep_timer_init = true;
1701 
1702 	status = hif_wlan_enable(hif_sc);
1703 	if (status) {
1704 		hif_err("hif_wlan_enable error: %d", status);
1705 		goto timer_free;
1706 	}
1707 
1708 	A_TARGET_ACCESS_LIKELY(hif_sc);
1709 
1710 	if ((CONFIG_ATH_PCIE_MAX_PERF ||
1711 	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
1712 	    !ce_srng_based(hif_sc)) {
1713 		/*
1714 		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
1715 		 * prevent sleep when we want to keep firmware always awake
1716 		 * note: when we want to keep firmware always awake,
1717 		 *       hif_target_sleep_state_adjust will point to a dummy
1718 		 *       function, and hif_pci_target_sleep_state_adjust must
1719 		 *       be called instead.
1720 		 * note: bus type check is here because AHB bus is reusing
1721 		 *       hif_pci_bus_configure code.
1722 		 */
1723 		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
1724 			if (hif_pci_target_sleep_state_adjust(hif_sc,
1725 					false, true) < 0) {
1726 				status = -EACCES;
1727 				goto disable_wlan;
1728 			}
1729 		}
1730 	}
1731 
1732 	/* todo: consider replacing this with an srng field */
1733 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
1734 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
1735 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA9574) ||
1736 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5332) ||
1737 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
1738 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6122) ||
1739 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN9160) ||
1740 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
1741 	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
1742 		hif_sc->per_ce_irq = true;
1743 	}
1744 
1745 	status = hif_config_ce(hif_sc);
1746 	if (status)
1747 		goto disable_wlan;
1748 
1749 	if (hif_needs_bmi(hif_osc)) {
1750 		status = hif_set_hia(hif_sc);
1751 		if (status)
1752 			goto unconfig_ce;
1753 
1754 		hif_debug("hif_set_hia done");
1755 
1756 	}
1757 
1758 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
1759 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
1760 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA9574) ||
1761 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5332) ||
1762 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
1763 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6122) ||
1764 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN9160) ||
1765 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
1766 	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
1767 		hif_debug("Skip irq config for PCI based 8074 target");
1768 	else {
1769 		status = hif_configure_irq(hif_sc);
1770 		if (status < 0)
1771 			goto unconfig_ce;
1772 	}
1773 
1774 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
1775 
1776 	return status;
1777 
1778 unconfig_ce:
1779 	hif_unconfig_ce(hif_sc);
1780 disable_wlan:
1781 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
1782 	hif_wlan_disable(hif_sc);
1783 
1784 timer_free:
1785 	qdf_timer_stop(&hif_state->sleep_timer);
1786 	qdf_timer_free(&hif_state->sleep_timer);
1787 	hif_state->sleep_timer_init = false;
1788 
1789 	hif_err("Failed, status: %d", status);
1790 	return status;
1791 }
1792 
1793 /**
1794  * hif_pci_close(): hif_bus_close
1795  * @hif_sc: HIF context
1796  *
1797  * Return: n/a
1798  */
1799 void hif_pci_close(struct hif_softc *hif_sc)
1800 {
1801 	hif_rtpm_close(hif_sc);
1802 	hif_ce_close(hif_sc);
1803 }
1804 
1805 #define BAR_NUM 0
1806 
1807 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
1808 static inline int hif_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
1809 {
1810 	return dma_set_mask(&pci_dev->dev, mask);
1811 }
1812 
1813 static inline int hif_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
1814 						u64 mask)
1815 {
1816 	return dma_set_coherent_mask(&pci_dev->dev, mask);
1817 }
1818 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
1819 static inline int hif_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
1820 {
1821 	return pci_set_dma_mask(pci_dev, mask);
1822 }
1823 
1824 static inline int hif_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
1825 						u64 mask)
1826 {
1827 	return pci_set_consistent_dma_mask(pci_dev, mask);
1828 }
1829 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
1830 
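/*
 * Example (illustrative sketch): a hypothetical probe path using the
 * version-independent wrappers above, preferring a 64-bit DMA mask and
 * falling back to 32 bits:
 *
 *	if (hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) ||
 *	    hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(64))) {
 *		if (hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
 *		    hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(32)))
 *			return -EIO;
 *	}
 */
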
1831 static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
1832 				struct pci_dev *pdev,
1833 				const struct pci_device_id *id)
1834 {
1835 	void __iomem *mem;
1836 	int ret = 0;
1837 	uint16_t device_id = 0;
1838 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1839 
1840 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
1841 	if (device_id != id->device)  {
1842 		hif_err(
1843 		   "dev id mismatch, config id = 0x%x, probing id = 0x%x",
1844 		   device_id, id->device);
1845 		/* pci link is down, so returning with error code */
1846 		return -EIO;
1847 	}
1848 
1849 	/* FIXME: pci_assign_resource() is skipped on ARM so that
1850 	 * dev_attach works on the 2.6.38 kernel.
1851 	 */
1852 #if (!defined(__LINUX_ARM_ARCH__))
1853 	if (pci_assign_resource(pdev, BAR_NUM)) {
1854 		hif_err("pci_assign_resource error");
1855 		return -EIO;
1856 	}
1857 #endif
1858 	if (pci_enable_device(pdev)) {
1859 		hif_err("pci_enable_device error");
1860 		return -EIO;
1861 	}
1862 
1863 	/* Request MMIO resources */
1864 	ret = pci_request_region(pdev, BAR_NUM, "ath");
1865 	if (ret) {
1866 		hif_err("PCI MMIO reservation error");
1867 		ret = -EIO;
1868 		goto err_region;
1869 	}
1870 
1871 #ifdef CONFIG_ARM_LPAE
1872 	/* When CONFIG_ARM_LPAE is enabled, the 64-bit DMA mask must be
1873 	 * set even for 32-bit devices.
1874 	 */
1875 	ret = hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1876 	if (ret) {
1877 		hif_err("Cannot enable 64-bit pci DMA");
1878 		goto err_dma;
1879 	}
1880 	ret = hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(64));
1881 	if (ret) {
1882 		hif_err("Cannot enable 64-bit DMA");
1883 		goto err_dma;
1884 	}
1885 #else
1886 	ret = hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1887 	if (ret) {
1888 		hif_err("Cannot enable 32-bit pci DMA");
1889 		goto err_dma;
1890 	}
1891 	ret = hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(32));
1892 	if (ret) {
1893 		hif_err("Cannot enable 32-bit coherent DMA!");
1894 		goto err_dma;
1895 	}
1896 #endif
1897 
1898 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
1899 
1900 	/* Set bus master bit in PCI_COMMAND to enable DMA */
1901 	pci_set_master(pdev);
1902 
1903 	/* Arrange for access to Target SoC registers. */
1904 	mem = pci_iomap(pdev, BAR_NUM, 0);
1905 	if (!mem) {
1906 		hif_err("PCI iomap error");
1907 		ret = -EIO;
1908 		goto err_iomap;
1909 	}
1910 
1911 	hif_info("*****BAR is %pK", (void *)mem);
1912 
1913 	sc->mem = mem;
1914 
1915 	/* Hawkeye emulation specific change */
1916 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
1917 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
1918 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
1919 		(device_id == RUMIM2M_DEVICE_ID_NODE3) ||
1920 		(device_id == RUMIM2M_DEVICE_ID_NODE4) ||
1921 		(device_id == RUMIM2M_DEVICE_ID_NODE5)) {
1922 		mem = mem + 0x0c000000;
1923 		sc->mem = mem;
1924 		hif_info("Changing PCI mem base to %pK", sc->mem);
1925 	}
1926 
1927 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
1928 	ol_sc->mem = mem;
1929 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
1930 	sc->pci_enabled = true;
1931 	return ret;
1932 
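	/*
	 * Error unwind: each label below releases exactly what was
	 * acquired before the corresponding failure point, in reverse
	 * acquisition order (bus mastering, then the MMIO region, then
	 * the device enable).
	 */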
1933 err_iomap:
1934 	pci_clear_master(pdev);
1935 err_dma:
1936 	pci_release_region(pdev, BAR_NUM);
1937 err_region:
1938 	pci_disable_device(pdev);
1939 	return ret;
1940 }
1941 
1942 static int hif_enable_pci_pld(struct hif_pci_softc *sc,
1943 			      struct pci_dev *pdev,
1944 			      const struct pci_device_id *id)
1945 {
1946 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
1947 	sc->pci_enabled = true;
1948 	return 0;
1949 }
1950 
1951 
1952 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
1953 {
1954 	pci_disable_msi(sc->pdev);
1955 	pci_iounmap(sc->pdev, sc->mem);
1956 	pci_clear_master(sc->pdev);
1957 	pci_release_region(sc->pdev, BAR_NUM);
1958 	pci_disable_device(sc->pdev);
1959 }
1960 
1961 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
1962 
1963 static void hif_disable_pci(struct hif_pci_softc *sc)
1964 {
1965 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1966 
1967 	if (!ol_sc) {
1968 		hif_err("ol_sc = NULL");
1969 		return;
1970 	}
1971 	hif_pci_device_reset(sc);
1972 	sc->hif_pci_deinit(sc);
1973 
1974 	sc->mem = NULL;
1975 	ol_sc->mem = NULL;
1976 }
1977 
1978 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
1979 {
1980 	int ret = 0;
1981 	int targ_awake_limit = 500;
1982 #ifndef QCA_WIFI_3_0
1983 	uint32_t fw_indicator;
1984 #endif
1985 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1986 
1987 	/*
1988 	 * Verify that the Target was started cleanly.
1989 	 * The case where this is most likely is with an AUX-powered
1990 	 * Target and a Host in WoW mode. If the Host crashes,
1991 	 * loses power, or is restarted (without unloading the driver)
1992 	 * then the Target is left (aux) powered and running.  On a
1993 	 * subsequent driver load, the Target is in an unexpected state.
1994 	 * We try to catch that here in order to reset the Target and
1995 	 * retry the probe.
1996 	 */
1997 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
1998 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
1999 	while (!hif_targ_is_awake(scn, sc->mem)) {
2000 		if (targ_awake_limit == 0) {
2001 			hif_err("target awake timeout");
2002 			ret = -EAGAIN;
2003 			goto end;
2004 		}
2005 		qdf_mdelay(1);
2006 		targ_awake_limit--;
2007 	}
2008 
2009 #if PCIE_BAR0_READY_CHECKING
2010 	{
2011 		int wait_limit = 200;
2012 		/* Synchronization point: wait until BAR0 is configured */
2013 		while (wait_limit-- &&
2014 			   !(hif_read32_mb(sc, sc->mem +
2015 					  PCIE_LOCAL_BASE_ADDRESS +
2016 					  PCIE_SOC_RDY_STATUS_ADDRESS)
2017 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
2018 			qdf_mdelay(10);
2019 		}
2020 		if (wait_limit < 0) {
2021 			/* AR6320v1 doesn't support checking of BAR0
2022 			 * configuration; the loop above waits two seconds
2023 			 */
2024 			hif_debug("AR6320v1 waits two sec for BAR0");
2025 		}
2026 	}
2027 #endif
2028 
2029 #ifndef QCA_WIFI_3_0
2030 	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
2031 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2032 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2033 
2034 	if (fw_indicator & FW_IND_INITIALIZED) {
2035 		hif_err("Target is in an unknown state. EAGAIN");
2036 		ret = -EAGAIN;
2037 		goto end;
2038 	}
2039 #endif
2040 
2041 end:
2042 	return ret;
2043 }
2044 
2045 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2046 {
2047 	int ret = 0;
2048 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2049 	uint32_t target_type = scn->target_info.target_type;
2050 
2051 	hif_info("E");
2052 
2053 	/* Target does not support MSI, or MSI IRQ setup failed */
2054 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2055 	ret = request_irq(sc->pdev->irq,
2056 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2057 			  "wlan_pci", sc);
2058 	if (ret) {
2059 		hif_err("request_irq failed, ret: %d", ret);
2060 		goto end;
2061 	}
2062 	scn->wake_irq = sc->pdev->irq;
2063 	/* Use sc->irq instead of sc->pdev->irq;
2064 	 * platform_device pdev doesn't have an irq field
2065 	 */
2066 	sc->irq = sc->pdev->irq;
2067 	/* Use Legacy PCI Interrupts */
2068 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2069 		  PCIE_INTR_ENABLE_ADDRESS),
2070 		  HOST_GROUP0_MASK);
2071 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2072 			       PCIE_INTR_ENABLE_ADDRESS));
2073 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2074 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2075 
2076 	if ((target_type == TARGET_TYPE_AR900B)  ||
2077 			(target_type == TARGET_TYPE_QCA9984) ||
2078 			(target_type == TARGET_TYPE_AR9888) ||
2079 			(target_type == TARGET_TYPE_QCA9888) ||
2080 			(target_type == TARGET_TYPE_AR6320V1) ||
2081 			(target_type == TARGET_TYPE_AR6320V2) ||
2082 			(target_type == TARGET_TYPE_AR6320V3)) {
2083 		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2084 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2085 	}
2086 end:
2087 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2088 			  "%s: X, ret = %d", __func__, ret);
2089 	return ret;
2090 }
2091 
2092 static int hif_ce_srng_free_irq(struct hif_softc *scn)
2093 {
2094 	int ret = 0;
2095 	int ce_id, irq;
2096 	uint32_t msi_data_start;
2097 	uint32_t msi_data_count;
2098 	uint32_t msi_irq_start;
2099 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2100 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
2101 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2102 
2103 	if (!pld_get_enable_intx(scn->qdf_dev->dev)) {
2104 		ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2105 						  &msi_data_count,
2106 						  &msi_data_start,
2107 						  &msi_irq_start);
2108 		if (ret)
2109 			return ret;
2110 	}
2111 
2112 	/* needs to match the ce_id -> irq data mapping
2113 	 * used in the srng parameter configuration
2114 	 */
2115 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2116 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
2117 			continue;
2118 
2119 		if (!ce_sc->tasklets[ce_id].inited)
2120 			continue;
2121 
2122 		irq = sc->ce_irq_num[ce_id];
2123 
2124 		hif_ce_irq_remove_affinity_hint(irq);
2125 
2126 		hif_debug("%s: (ce_id %d, irq %d)", __func__, ce_id, irq);
2127 
2128 		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
2129 	}
2130 
2131 	return ret;
2132 }
2133 
2134 void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2135 {
2136 	int i, j, irq;
2137 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2138 	struct hif_exec_context *hif_ext_group;
2139 
2140 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2141 		hif_ext_group = hif_state->hif_ext_group[i];
2142 		if (hif_ext_group->irq_requested) {
2143 			hif_ext_group->irq_requested = false;
2144 			for (j = 0; j < hif_ext_group->numirq; j++) {
2145 				irq = hif_ext_group->os_irq[j];
2146 				if (scn->irq_unlazy_disable) {
2147 					qdf_dev_clear_irq_status_flags(
2148 							irq,
2149 							QDF_IRQ_DISABLE_UNLAZY);
2150 				}
2151 				pfrm_free_irq(scn->qdf_dev->dev,
2152 					      irq, hif_ext_group);
2153 			}
2154 			hif_ext_group->numirq = 0;
2155 		}
2156 	}
2157 }
2158 
2159 /**
2160  * hif_pci_nointrs(): disable IRQ
2161  * @scn: struct hif_softc
2162  *
2163  * This function stops interrupt(s)
2164  *
2165  * Return: none
2166  */
2167 void hif_pci_nointrs(struct hif_softc *scn)
2168 {
2169 	int i, ret;
2170 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2171 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2172 
2173 	scn->free_irq_done = true;
2174 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2175 
2176 	if (scn->request_irq_done == false)
2177 		return;
2178 
2179 	hif_pci_deconfigure_grp_irq(scn);
2180 
2181 	ret = hif_ce_srng_free_irq(scn);
2182 	if (ret != -EINVAL) {
2183 		/* ce irqs freed in hif_ce_srng_free_irq */
2184 
2185 		if (scn->wake_irq)
2186 			pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
2187 		scn->wake_irq = 0;
2188 	} else if (sc->num_msi_intrs > 0) {
2189 		/* MSI interrupt(s) */
2190 		for (i = 0; i < sc->num_msi_intrs; i++)
2191 			free_irq(sc->irq + i, sc);
2192 		sc->num_msi_intrs = 0;
2193 	} else {
2194 		/* Legacy PCI line interrupt
2195 		 * Use sc->irq instead of sc->pdev->irq;
2196 		 * platform_device pdev doesn't have an irq field
2197 		 */
2198 		free_irq(sc->irq, sc);
2199 	}
2200 	scn->request_irq_done = false;
2201 }
2202 
2203 static inline
2204 bool hif_pci_default_link_up(struct hif_target_info *tgt_info)
2205 {
2206 	return ADRASTEA_BU && (tgt_info->target_type != TARGET_TYPE_QCN7605);
2207 }
2208 
2211 /**
2212  * hif_pci_disable_bus(): hif_disable_bus
2213  * @scn: hif context
2214  *
2215  * This function disables the bus
2216  *
2217  * Return: none
2218  */
2219 void hif_pci_disable_bus(struct hif_softc *scn)
2220 {
2221 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2222 	struct pci_dev *pdev;
2223 	void __iomem *mem;
2224 	struct hif_target_info *tgt_info = &scn->target_info;
2225 
2226 	/* Attach did not succeed, all resources have been
2227 	 * freed in error handler
2228 	 */
2229 	if (!sc)
2230 		return;
2231 
2232 	pdev = sc->pdev;
2233 	if (hif_pci_default_link_up(tgt_info)) {
2234 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2235 
2236 		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2237 		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
2238 			       HOST_GROUP0_MASK);
2239 	}
2240 
2241 #if defined(CPU_WARM_RESET_WAR)
2242 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2243 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2244 	 * verified for AR9888_REV1
2245 	 */
2246 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2247 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2248 		hif_pci_device_warm_reset(sc);
2249 	else
2250 		hif_pci_device_reset(sc);
2251 #else
2252 	hif_pci_device_reset(sc);
2253 #endif
2254 	mem = (void __iomem *)sc->mem;
2255 	if (mem) {
2256 		hif_dump_pipe_debug_count(scn);
2257 		if (scn->athdiag_procfs_inited) {
2258 			athdiag_procfs_remove();
2259 			scn->athdiag_procfs_inited = false;
2260 		}
2261 		sc->hif_pci_deinit(sc);
2262 		scn->mem = NULL;
2263 	}
2264 	hif_info("X");
2265 }
2266 
2267 #define OL_ATH_PCI_PM_CONTROL 0x44
2268 
2269 #ifdef CONFIG_PLD_PCIE_CNSS
2270 /**
2271  * hif_pci_prevent_linkdown(): prevent or allow linkdown
2272  * @scn: hif context
2273  * @flag: true prevents linkdown, false allows
2274  *
2275  * Calls into the platform driver to vote against taking down the
2276  * pcie link.
2277  *
2278  * Return: n/a
2279  */
2280 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2281 {
2282 	int errno;
2283 
2284 	hif_info("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2285 	hif_runtime_prevent_linkdown(scn, flag);
2286 
2287 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2288 	if (errno)
2289 		hif_err("Failed pld_wlan_pm_control; errno %d", errno);
2290 }
2291 #else
2292 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2293 {
2294 }
2295 #endif
2296 
2297 #ifdef CONFIG_PCI_LOW_POWER_INT_REG
2298 /**
2299  * hif_pci_config_low_power_int_register() - configure pci low power
2300  *                                           interrupt register.
2301  * @scn: hif context
2302  * @enable: true to enable the bits, false to clear them.
2303  *
2304  * Configure the bits INTR_L1SS and INTR_CLKPM of
2305  * PCIE_LOW_POWER_INT_MASK register.
2306  *
2307  * Return: n/a
2308  */
2309 static void hif_pci_config_low_power_int_register(struct hif_softc *scn,
2310 						  bool enable)
2311 {
2312 	void *address;
2313 	uint32_t value;
2314 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2315 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
2316 	uint32_t target_type = tgt_info->target_type;
2317 
2318 	/*
2319 	 * Only configure the INTR_L1SS and INTR_CLKPM bits of the
2320 	 * PCIE_LOW_POWER_INT_MASK register for QCA6174, to address a high
2321 	 * power consumption issue: NFA344A draws more than 80 mA after
2322 	 * entering Modern Standby, and the power only drops back to normal
2323 	 * after PERST# de-assert.
2324 	 */
2325 	if ((target_type == TARGET_TYPE_AR6320) ||
2326 	    (target_type == TARGET_TYPE_AR6320V1) ||
2327 	    (target_type == TARGET_TYPE_AR6320V2) ||
2328 	    (target_type == TARGET_TYPE_AR6320V3)) {
2329 		hif_info("Configure PCI low power int mask register");
2330 
2331 		address = scn->mem + PCIE_LOW_POWER_INT_MASK_OFFSET;
2332 
2333 		/* Configure bit3 INTR_L1SS */
2334 		value = hif_read32_mb(scn, address);
2335 		if (enable)
2336 			value |= INTR_L1SS;
2337 		else
2338 			value &= ~INTR_L1SS;
2339 		hif_write32_mb(scn, address, value);
2340 
2341 		/* Configure bit4 INTR_CLKPM */
2342 		value = hif_read32_mb(scn, address);
2343 		if (enable)
2344 			value |= INTR_CLKPM;
2345 		else
2346 			value &= ~INTR_CLKPM;
2347 		hif_write32_mb(scn, address, value);
2348 	}
2349 }
2350 #else
2351 static inline void hif_pci_config_low_power_int_register(struct hif_softc *scn,
2352 							 bool enable)
2353 {
2354 }
2355 #endif
2356 
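/*
 * A minimal sketch of the read-modify-write pattern that
 * hif_pci_config_low_power_int_register() applies twice above.
 * hif_pci_rmw32() is a hypothetical helper added for illustration; it
 * is not used elsewhere in this driver.
 */
static inline void hif_pci_rmw32(struct hif_softc *scn, void *address,
				 uint32_t mask, bool set)
{
	uint32_t value = hif_read32_mb(scn, address);

	if (set)
		value |= mask;	/* e.g. INTR_L1SS or INTR_CLKPM */
	else
		value &= ~mask;
	hif_write32_mb(scn, address, value);
}
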
2357 /**
2358  * hif_pci_bus_suspend(): prepare hif for suspend
2359  * @scn: hif context
2360  *
2361  * Return: Errno
2362  */
2363 int hif_pci_bus_suspend(struct hif_softc *scn)
2364 {
2365 	QDF_STATUS ret;
2366 
2367 	hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
2368 
2369 	ret = hif_try_complete_tasks(scn);
2370 	if (QDF_IS_STATUS_ERROR(ret)) {
2371 		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2372 		return -EBUSY;
2373 	}
2374 
2375 	/*
2376 	 * In the unlikely case that draining never completes, an error is
2377 	 * returned and the bus suspend is aborted.
2378 	 */
2379 	ret = hif_drain_fw_diag_ce(scn);
2380 	if (ret) {
2381 		hif_err("draining fw_diag_ce goes infinite, so abort suspend");
2382 		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2383 		return -EBUSY;
2384 	}
2385 
2386 	/* Stop the HIF Sleep Timer */
2387 	hif_cancel_deferred_target_sleep(scn);
2388 
2389 	/*
2390 	 * Only the INTR_L1SS/INTR_CLKPM bits need to be cleared for
2391 	 * suspend; there is no need to re-enable them on resume, as
2392 	 * firmware restores them itself.
2393 	 */
2394 	hif_pci_config_low_power_int_register(scn, false);
2395 
2396 	scn->bus_suspended = true;
2397 
2398 	return 0;
2399 }
2400 
2401 #ifdef PCI_LINK_STATUS_SANITY
2402 /**
2403  * __hif_check_link_status() - check whether the PCIe link is active
2404  * @scn: HIF Context
2405  *
2406  * API reads the PCIe config space to verify if PCIe link training is
2407  * successful or not.
2408  *
2409  * Return: Success/Failure
2410  */
2411 static int __hif_check_link_status(struct hif_softc *scn)
2412 {
2413 	uint16_t dev_id = 0;
2414 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2415 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2416 
2417 	if (!sc) {
2418 		hif_err("HIF Bus Context is Invalid");
2419 		return -EINVAL;
2420 	}
2421 
2422 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2423 
2424 	if (dev_id == sc->devid)
2425 		return 0;
2426 
2427 	hif_err("Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2428 	       dev_id);
2429 
2430 	scn->recovery = true;
2431 
2432 	if (cbk && cbk->set_recovery_in_progress)
2433 		cbk->set_recovery_in_progress(cbk->context, true);
2434 	else
2435 		hif_err("Driver Global Recovery is not set");
2436 
2437 	pld_is_pci_link_down(sc->dev);
2438 	return -EACCES;
2439 }
2440 #else
2441 static inline int __hif_check_link_status(struct hif_softc *scn)
2442 {
2443 	return 0;
2444 }
2445 #endif
2446 
2447 
2448 #ifdef HIF_BUS_LOG_INFO
2449 bool hif_log_pcie_info(struct hif_softc *scn, uint8_t *data,
2450 		       unsigned int *offset)
2451 {
2452 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2453 	struct hang_event_bus_info info = {0};
2454 	size_t size;
2455 
2456 	if (!sc) {
2457 		hif_err("HIF Bus Context is Invalid");
2458 		return false;
2459 	}
2460 
2461 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &info.dev_id);
2462 
2463 	size = sizeof(info);
2464 	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_BUS_INFO,
2465 			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);
2466 
2467 	if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
2468 		return false;
2469 
2470 	qdf_mem_copy(data + *offset, &info, size);
2471 	*offset = *offset + size;
2472 
2473 	if (info.dev_id == sc->devid)
2474 		return false;
2475 
2476 	qdf_recovery_reason_update(QCA_HANG_BUS_FAILURE);
2477 	qdf_get_bus_reg_dump(scn->qdf_dev->dev, data,
2478 			     (QDF_WLAN_HANG_FW_OFFSET - size));
2479 	return true;
2480 }
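
/*
 * Layout produced above (descriptive note): a hang-event TLV header
 * followed by the hang_event_bus_info payload is copied to data at
 * *offset, and *offset advances by sizeof(info) so that further TLVs
 * can be packed behind it, provided everything stays below
 * QDF_WLAN_HANG_FW_OFFSET.
 */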
2481 #endif
2482 
2483 /**
2484  * hif_pci_bus_resume(): prepare hif for resume
2485  * @scn: hif context
2486  *
2487  * Return: Errno
2488  */
2489 int hif_pci_bus_resume(struct hif_softc *scn)
2490 {
2491 	int errno;
2492 
2493 	scn->bus_suspended = false;
2494 
2495 	errno = __hif_check_link_status(scn);
2496 	if (errno)
2497 		return errno;
2498 
2499 	hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2500 
2501 	return 0;
2502 }
2503 
2504 /**
2505  * hif_pci_bus_suspend_noirq() - mark the PCIe link suspended
2506  * @scn: hif context
2507  *
2508  * If the link can be suspended, set the link-suspended flag so that
2509  * subsequent register accesses know the PCIe link is down.
2510  *
2511  * Return: 0
2512  */
2513 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2514 {
2515 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2516 		qdf_atomic_set(&scn->link_suspended, 1);
2517 
2518 	return 0;
2519 }
2520 
2521 /**
2522  * hif_pci_bus_resume_noirq() - clear link-suspended state after resume
2523  * @scn: hif context
2524  *
2525  * Clear the link suspend flag; the PCIe link is fully resumed by the
2526  * time this runs, so register access is safe again.
2527  *
2528  * Return: 0
2529  */
2530 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2531 {
2532 	/* a vote for link up can come in the middle of the ongoing resume
2533 	 * process. hence, clear the link suspend flag once
2534 	 * hif_bus_resume_noirq() succeeds since PCIe link is already resumed
2535 	 * by this time
2536 	 */
2537 	qdf_atomic_set(&scn->link_suspended, 0);
2538 
2539 	return 0;
2540 }
2541 
2542 #if CONFIG_PCIE_64BIT_MSI
2543 static void hif_free_msi_ctx(struct hif_softc *scn)
2544 {
2545 	struct hif_pci_softc *sc = scn->hif_sc;
2546 	struct hif_msi_info *info = &sc->msi_info;
2547 	struct device *dev = scn->qdf_dev->dev;
2548 
2549 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
2550 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
2551 	info->magic = NULL;
2552 	info->magic_dma = 0;
2553 }
2554 #else
2555 static void hif_free_msi_ctx(struct hif_softc *scn)
2556 {
2557 }
2558 #endif
2559 
2560 void hif_pci_disable_isr(struct hif_softc *scn)
2561 {
2562 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2563 
2564 	hif_exec_kill(&scn->osc);
2565 	hif_nointrs(scn);
2566 	hif_free_msi_ctx(scn);
2567 	/* Cancel the pending tasklet */
2568 	ce_tasklet_kill(scn);
2569 	tasklet_kill(&sc->intr_tq);
2570 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
2571 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
2572 }
2573 
2574 /* Function to reset SoC */
2575 void hif_pci_reset_soc(struct hif_softc *hif_sc)
2576 {
2577 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
2578 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
2579 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
2580 
2581 #if defined(CPU_WARM_RESET_WAR)
2582 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2583 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2584 	 * verified for AR9888_REV1
2585 	 */
2586 	if (tgt_info->target_version == AR9888_REV2_VERSION)
2587 		hif_pci_device_warm_reset(sc);
2588 	else
2589 		hif_pci_device_reset(sc);
2590 #else
2591 	hif_pci_device_reset(sc);
2592 #endif
2593 }
2594 
2595 /**
2596  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
2597  * @sc: HIF PCIe Context
2598  *
2599  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
2600  *
2601  * Return: -EACCES to indicate failure to the caller
2602  */
2603 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
2604 {
2605 	uint16_t val = 0;
2606 	uint32_t bar = 0;
2607 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
2608 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2609 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
2610 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
2611 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2612 	A_target_id_t pci_addr = scn->mem;
2613 
2614 	hif_info("keep_awake_count = %d", hif_state->keep_awake_count);
2615 
2616 	pfrm_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
2617 
2618 	hif_info("PCI Vendor ID = 0x%04x", val);
2619 
2620 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
2621 
2622 	hif_info("PCI Device ID = 0x%04x", val);
2623 
2624 	pfrm_read_config_word(sc->pdev, PCI_COMMAND, &val);
2625 
2626 	hif_info("PCI Command = 0x%04x", val);
2627 
2628 	pfrm_read_config_word(sc->pdev, PCI_STATUS, &val);
2629 
2630 	hif_info("PCI Status = 0x%04x", val);
2631 
2632 	pfrm_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
2633 
2634 	hif_info("PCI BAR 0 = 0x%08x", bar);
2635 
2636 	hif_info("SOC_WAKE_ADDR 0x%08x",
2637 		hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
2638 				PCIE_SOC_WAKE_ADDRESS));
2639 
2640 	hif_info("RTC_STATE_ADDR 0x%08x",
2641 		hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
2642 							RTC_STATE_ADDRESS));
2643 
2644 	hif_info("wakeup target");
2645 
2646 	if (!cfg->enable_self_recovery)
2647 		QDF_BUG(0);
2648 
2649 	scn->recovery = true;
2650 
2651 	if (cbk->set_recovery_in_progress)
2652 		cbk->set_recovery_in_progress(cbk->context, true);
2653 
2654 	pld_is_pci_link_down(sc->dev);
2655 	return -EACCES;
2656 }
2657 
2658 /*
2659  * For now, we use simple on-demand sleep/wake.
2660  * Some possible improvements:
2661  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
2662  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
2663  *   Careful, though, these functions may be used by
2664  *  interrupt handlers ("atomic")
2665  *  -Don't use host_reg_table for this code; instead use values directly
2666  *  -Use a separate timer to track activity and allow Target to sleep only
2667  *   if it hasn't done anything for a while; may even want to delay some
2668  *   processing for a short while in order to "batch" (e.g.) transmit
2669  *   requests with completion processing into "windows of up time".  Costs
2670  *   some performance, but improves power utilization.
2671  *  -On some platforms, it might be possible to eliminate explicit
2672  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
2673  *   recover from the failure by forcing the Target awake.
2674  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
2675  *   overhead in some cases. Perhaps this makes more sense when
2676  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
2677  *   disabled.
2678  *  -It is possible to compile this code out and simply force the Target
2679  *   to remain awake.  That would yield optimal performance at the cost of
2680  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
2681  *
2682  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
2683  */
2684 
2685 /**
2686  * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
2687  * @scn: hif_softc pointer.
2688  * @sleep_ok: bool
2689  * @sleep_ok: true to drop a keep-awake vote and allow the Target to sleep
2690  * @wait_for_it: when waking, wait until the Target is verified awake
2691  *
2692  * Adjust the Target's on-demand sleep state via keep-awake votes.
2693  *
2694  * Return: 0 on success, -EACCES if the Target cannot be accessed
2695 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
2696 			      bool sleep_ok, bool wait_for_it)
2697 {
2698 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2699 	A_target_id_t pci_addr = scn->mem;
2700 	static int max_delay;
2701 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2702 	static int debug;

2703 	if (scn->recovery)
2704 		return -EACCES;
2705 
2706 	if (qdf_atomic_read(&scn->link_suspended)) {
2707 		hif_err("Invalid access, PCIe link is down");
2708 		debug = true;
2709 		QDF_ASSERT(0);
2710 		return -EACCES;
2711 	}
2712 
2713 	if (debug) {
2714 		wait_for_it = true;
2715 		hif_err("Invalid access, PCIe link is suspended");
2716 		QDF_ASSERT(0);
2717 	}
2718 
2719 	if (sleep_ok) {
2720 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2721 		hif_state->keep_awake_count--;
2722 		if (hif_state->keep_awake_count == 0) {
2723 			/* Allow sleep */
2724 			hif_state->verified_awake = false;
2725 			hif_state->sleep_ticks = qdf_system_ticks();
2726 		}
2727 		if (hif_state->fake_sleep == false) {
2728 			/* Set the Fake Sleep */
2729 			hif_state->fake_sleep = true;
2730 
2731 			/* Start the Sleep Timer */
2732 			qdf_timer_stop(&hif_state->sleep_timer);
2733 			qdf_timer_start(&hif_state->sleep_timer,
2734 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
2735 		}
2736 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2737 	} else {
2738 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2739 
2740 		if (hif_state->fake_sleep) {
2741 			hif_state->verified_awake = true;
2742 		} else {
2743 			if (hif_state->keep_awake_count == 0) {
2744 				/* Force AWAKE */
2745 				hif_write32_mb(sc, pci_addr +
2746 					      PCIE_LOCAL_BASE_ADDRESS +
2747 					      PCIE_SOC_WAKE_ADDRESS,
2748 					      PCIE_SOC_WAKE_V_MASK);
2749 			}
2750 		}
2751 		hif_state->keep_awake_count++;
2752 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2753 
2754 		if (wait_for_it && !hif_state->verified_awake) {
2755 #define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
2756 			int tot_delay = 0;
2757 			int curr_delay = 5;
2758 
2759 			for (;;) {
2760 				if (hif_targ_is_awake(scn, pci_addr)) {
2761 					hif_state->verified_awake = true;
2762 					break;
2763 				}
2764 				if (!hif_pci_targ_is_present(scn, pci_addr))
2765 					break;
2766 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
2767 					return hif_log_soc_wakeup_timeout(sc);
2768 
2769 				OS_DELAY(curr_delay);
2770 				tot_delay += curr_delay;
2771 
2772 				if (curr_delay < 50)
2773 					curr_delay += 5;
2774 			}
2775 
2776 			/*
2777 			 * NB: If Target has to come out of Deep Sleep,
2778 			 * this may take a few msecs. Typically, though
2779 			 * this delay should be <30us.
2780 			 */
2781 			if (tot_delay > max_delay)
2782 				max_delay = tot_delay;
2783 		}
2784 	}
2785 
2786 	if (debug && hif_state->verified_awake) {
2787 		debug = 0;
2788 		hif_err("INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
2789 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2790 				PCIE_INTR_ENABLE_ADDRESS),
2791 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2792 				PCIE_INTR_CAUSE_ADDRESS),
2793 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2794 				CPU_INTR_ADDRESS),
2795 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2796 				PCIE_INTR_CLR_ADDRESS),
2797 			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
2798 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
2799 	}
2800 
2801 	return 0;
2802 }
2803 
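/*
 * Typical usage of the vote counting above (an illustrative sketch; in
 * this driver the calls are normally made through the target-access
 * wrapper macros rather than directly):
 *
 *	if (hif_pci_target_sleep_state_adjust(scn, false, true))
 *		return -EACCES;	wake vote; wait until verified awake
 *	...access Target registers via hif_read32_mb()/hif_write32_mb()...
 *	hif_pci_target_sleep_state_adjust(scn, true, false);
 *				sleep vote; may arm the fake-sleep timer
 *
 * The Target may actually sleep only once keep_awake_count drops back
 * to zero.
 */
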
2804 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2805 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
2806 {
2807 	uint32_t value;
2808 	void *addr;
2809 
2810 	addr = scn->mem + offset;
2811 	value = hif_read32_mb(scn, addr);
2812 
2813 	{
2814 		unsigned long irq_flags;
2815 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2816 
2817 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2818 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2819 		pcie_access_log[idx].is_write = false;
2820 		pcie_access_log[idx].addr = addr;
2821 		pcie_access_log[idx].value = value;
2822 		pcie_access_log_seqnum++;
2823 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2824 	}
2825 
2826 	return value;
2827 }
2828 
2829 void
2830 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
2831 {
2832 	void *addr;
2833 
2834 	addr = scn->mem + (offset);
2835 	hif_write32_mb(scn, addr, value);
2836 
2837 	{
2838 		unsigned long irq_flags;
2839 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2840 
2841 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2842 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2843 		pcie_access_log[idx].is_write = true;
2844 		pcie_access_log[idx].addr = addr;
2845 		pcie_access_log[idx].value = value;
2846 		pcie_access_log_seqnum++;
2847 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2848 	}
2849 }
2850 
2851 /**
2852  * hif_target_dump_access_log() - dump access log
2853  *
2854  * dump access log
2855  *
2856  * Return: n/a
2857  */
2858 void hif_target_dump_access_log(void)
2859 {
2860 	int idx, len, start_idx, cur_idx;
2861 	unsigned long irq_flags;
2862 
2863 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2864 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
2865 		len = PCIE_ACCESS_LOG_NUM;
2866 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2867 	} else {
2868 		len = pcie_access_log_seqnum;
2869 		start_idx = 0;
2870 	}
2871 
2872 	for (idx = 0; idx < len; idx++) {
2873 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
2874 		hif_debug("idx:%d sn:%u wr:%d addr:%pK val:%u",
2875 		       idx,
2876 		       pcie_access_log[cur_idx].seqnum,
2877 		       pcie_access_log[cur_idx].is_write,
2878 		       pcie_access_log[cur_idx].addr,
2879 		       pcie_access_log[cur_idx].value);
2880 	}
2881 
2882 	pcie_access_log_seqnum = 0;
2883 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2884 }
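
/*
 * Ring-buffer math used above (worked example with illustrative
 * values): if PCIE_ACCESS_LOG_NUM were 500 and pcie_access_log_seqnum
 * had reached 512, the oldest surviving entry would sit at index
 * 512 % 500 = 12, and the dump would walk 500 entries from there,
 * wrapping modulo the buffer size.
 */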
2885 #endif
2886 
2887 #ifndef HIF_AHB
2888 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
2889 {
2890 	QDF_BUG(0);
2891 	return -EINVAL;
2892 }
2893 #endif
2894 
2895 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
2896 {
2897 	struct ce_tasklet_entry *tasklet_entry = context;

2898 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
2899 }

2900 extern const char *ce_name[];
2901 
2902 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
2903 {
2904 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
2905 
2906 	return pci_scn->ce_irq_num[ce_id];
2907 }
2908 
2909 /**
2910  * hif_ce_srng_msi_irq_disable() - disable the irq for msi
2911  * @hif_sc: hif context
2912  * @ce_id: which ce to disable copy complete interrupts for
2913  *
2914  * Since MSI interrupts are not level based, the system can function
2915  * without disabling them; interrupt mitigation can be added here.
2916  */
2917 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
2918 {
2919 	pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
2920 				hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2921 }
2922 
2923 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
2924 {
2925 	if (__hif_check_link_status(hif_sc))
2926 		return;
2927 
2928 	pfrm_enable_irq(hif_sc->qdf_dev->dev,
2929 			hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2930 }
2931 
2932 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
2933 {
2934 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2935 }
2936 
2937 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
2938 {
2939 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2940 }
2941 
2942 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
2943 /**
2944  * hif_ce_configure_legacyirq() - Configure CE interrupts
2945  * @scn: hif_softc pointer
2946  *
2947  * Configure CE legacy interrupts
2948  *
2949  * Return: int
2950  */
2951 static int hif_ce_configure_legacyirq(struct hif_softc *scn)
2952 {
2953 	int ret = 0;
2954 	int irq, ce_id;
2955 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2956 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
2957 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
2958 	int pci_slot;
2959 	qdf_device_t qdf_dev = scn->qdf_dev;
2960 
2961 	if (!pld_get_enable_intx(scn->qdf_dev->dev))
2962 		return -EINVAL;
2963 
2964 	scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
2965 	scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
2966 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
2967 
2968 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2969 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
2970 			continue;
2971 
2972 		if (host_ce_conf[ce_id].flags & CE_ATTR_INIT_ON_DEMAND)
2973 			continue;
2974 
2975 		ret = pfrm_get_irq(scn->qdf_dev->dev,
2976 				   (struct qdf_pfm_hndl *)qdf_dev->cnss_pdev,
2977 				   legacy_ic_irqname[ce_id], ce_id, &irq);
2978 		if (ret) {
2979 			dev_err(scn->qdf_dev->dev, "get irq failed\n");
2980 			ret = -EFAULT;
2981 			goto skip;
2982 		}
2983 
2984 		pci_slot = hif_get_pci_slot(scn);
2985 		qdf_scnprintf(ce_irqname[pci_slot][ce_id],
2986 			      DP_IRQ_NAME_LEN, "pci%d_ce_%u", pci_slot, ce_id);
2987 		pci_sc->ce_irq_num[ce_id] = irq;
2988 
2989 		ret = pfrm_request_irq(scn->qdf_dev->dev, irq,
2990 				       hif_ce_interrupt_handler,
2991 				       IRQF_SHARED,
2992 				       ce_irqname[pci_slot][ce_id],
2993 				       &ce_sc->tasklets[ce_id]);
2994 		if (ret) {
2995 			hif_err("error = %d", ret);
2996 			return -EINVAL;
2997 		}
2998 	}
2999 
3000 skip:
3001 	return ret;
3002 }
3003 #else
3004 /**
3005  * hif_ce_configure_legacyirq() - Configure CE interrupts
3006  * @scn: hif_softc pointer
3007  *
3008  * Configure CE legacy interrupts
3009  *
3010  * Return: int
3011  */
3012 static int hif_ce_configure_legacyirq(struct hif_softc *scn)
3013 {
3014 	return 0;
3015 }
3016 #endif
3017 
3018 int hif_ce_msi_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
3019 {
3020 	int ret = 0;
3021 	int irq;
3022 	uint32_t msi_data_start;
3023 	uint32_t msi_data_count;
3024 	unsigned int msi_data;
3025 	int irq_id;
3026 	uint32_t msi_irq_start;
3027 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3028 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3029 	int pci_slot;
3030 	unsigned long irq_flags;
3031 
3032 	if (ce_id >= CE_COUNT_MAX)
3033 		return -EINVAL;
3034 
3035 	/* do ce irq assignments */
3036 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3037 					  &msi_data_count, &msi_data_start,
3038 					  &msi_irq_start);
3039 
3040 	if (ret) {
3041 		hif_err("Failed to get CE msi config");
3042 		return -EINVAL;
3043 	}
3044 
3045 	irq_id = scn->int_assignment->msi_idx[ce_id];
3046 	/* needs to match the ce_id -> irq data mapping
3047 	 * used in the srng parameter configuration
3048 	 */
3049 	pci_slot = hif_get_pci_slot(scn);
3050 	msi_data = irq_id + msi_irq_start;
3051 	irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3052 	if (pld_is_one_msi(scn->qdf_dev->dev))
3053 		irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
3054 	else
3055 		irq_flags = IRQF_SHARED;
3056 	hif_debug("%s: (ce_id %d, irq_id %d, msi_data %d, irq %d flag 0x%lx tasklet %pK)",
3057 		  __func__, ce_id, irq_id, msi_data, irq, irq_flags,
3058 		  &ce_sc->tasklets[ce_id]);
3059 
3060 	/* implies the ce is also initialized */
3061 	if (!ce_sc->tasklets[ce_id].inited)
3062 		goto skip;
3063 
3064 	pci_sc->ce_irq_num[ce_id] = irq;
3065 
3066 	qdf_scnprintf(ce_irqname[pci_slot][ce_id],
3067 		      DP_IRQ_NAME_LEN, "pci%u_wlan_ce_%u",
3068 		      pci_slot, ce_id);
3069 
3070 	ret = pfrm_request_irq(scn->qdf_dev->dev,
3071 			       irq, hif_ce_interrupt_handler, irq_flags,
3072 			       ce_irqname[pci_slot][ce_id],
3073 			       &ce_sc->tasklets[ce_id]);
3074 	if (ret)
3075 		return -EINVAL;
3076 
3077 skip:
3078 	return ret;
3079 }
3080 
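/*
 * Worked example of the CE-to-IRQ mapping above (values illustrative
 * only): with msi_irq_start = 1 reported by
 * pld_get_user_msi_assignment() and int_assignment->msi_idx[3] = 2,
 * CE 3 uses MSI vector msi_data = 2 + 1 = 3, and its Linux IRQ number
 * is whatever pld_get_msi_irq(dev, 3) maps that vector to.
 */
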
3081 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
3082 {
3083 	int ret;
3084 	int ce_id, irq;
3085 	uint32_t msi_data_start;
3086 	uint32_t msi_data_count;
3087 	uint32_t msi_irq_start;
3088 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3089 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
3090 
3091 	if (!scn->ini_cfg.disable_wake_irq) {
3092 		/* do wake irq assignment */
3093 		ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
3094 						  &msi_data_count,
3095 						  &msi_data_start,
3096 						  &msi_irq_start);
3097 		if (ret)
3098 			return ret;
3099 
3100 		scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev,
3101 						msi_irq_start);
3102 		scn->wake_irq_type = HIF_PM_MSI_WAKE;
3103 
3104 		ret = pfrm_request_irq(scn->qdf_dev->dev, scn->wake_irq,
3105 				       hif_wake_interrupt_handler,
3106 				       IRQF_NO_SUSPEND, "wlan_wake_irq", scn);
3107 
3108 		if (ret)
3109 			return ret;
3110 	}
3111 
3112 	/* do ce irq assignments */
3113 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3114 					  &msi_data_count, &msi_data_start,
3115 					  &msi_irq_start);
3116 	if (ret)
3117 		goto free_wake_irq;
3118 
3119 	if (ce_srng_based(scn)) {
3120 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
3121 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
3122 	} else {
3123 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
3124 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
3125 	}
3126 
3127 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
3128 
3129 	/* needs to match the ce_id -> irq data mapping
3130 	 * used in the srng parameter configuration
3131 	 */
3132 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3133 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3134 			continue;
3135 
3136 		if (host_ce_conf[ce_id].flags & CE_ATTR_INIT_ON_DEMAND)
3137 			continue;
3138 
3139 		ret = hif_ce_msi_configure_irq_by_ceid(scn, ce_id);
3140 		if (ret)
3141 			goto free_irq;
3142 	}
3143 
3144 	return ret;
3145 
3146 free_irq:
3147 	/* the request_irq for the last ce_id failed so skip it. */
3148 	while (ce_id > 0 && ce_id < scn->ce_count) {
3149 		unsigned int msi_data;
3150 
3151 		ce_id--;
3152 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
3153 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3154 		pfrm_free_irq(scn->qdf_dev->dev,
3155 			      irq, &ce_sc->tasklets[ce_id]);
3156 	}
3157 
3158 free_wake_irq:
3159 	if (!scn->ini_cfg.disable_wake_irq) {
3160 		/* dev_id must match the context passed at request time (scn) */
3161 		pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
3162 		scn->wake_irq = 0;
3163 		scn->wake_irq_type = HIF_PM_INVALID_WAKE;
3164 	}
3165 
3166 	return ret;
3167 }
3168 
3169 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3170 {
3171 	int i;
3172 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
3173 
3174 	for (i = 0; i < hif_ext_group->numirq; i++)
3175 		pfrm_disable_irq_nosync(scn->qdf_dev->dev,
3176 					hif_ext_group->os_irq[i]);
3177 }
3178 
3179 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3180 {
3181 	int i;
3182 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
3183 
3184 	for (i = 0; i < hif_ext_group->numirq; i++)
3185 		pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]);
3186 }
3187 
3188 /**
3189  * hif_pci_get_irq_name() - get irqname
3190  * @irq_no: irq number
3191  *
3192  * Maps an irq number to an irq name.
3193  *
3194  * Return: irq name
3196  */
3197 const char *hif_pci_get_irq_name(int irq_no)
3198 {
3199 	return "pci-dummy";
3200 }
3201 
3202 #if defined(FEATURE_IRQ_AFFINITY) || defined(HIF_CPU_PERF_AFFINE_MASK)
3203 void hif_pci_irq_set_affinity_hint(struct hif_exec_context *hif_ext_group,
3204 				   bool perf)
3205 {
3206 	int i, ret;
3207 	unsigned int cpus;
3208 	bool mask_set = false;
3209 	int cpu_cluster = perf ? CPU_CLUSTER_TYPE_PERF :
3210 						CPU_CLUSTER_TYPE_LITTLE;
3211 
3212 	for (i = 0; i < hif_ext_group->numirq; i++)
3213 		qdf_cpumask_clear(&hif_ext_group->new_cpu_mask[i]);
3214 
3215 	for (i = 0; i < hif_ext_group->numirq; i++) {
3216 		qdf_for_each_online_cpu(cpus) {
3217 			if (qdf_topology_physical_package_id(cpus) ==
3218 			    cpu_cluster) {
3219 				qdf_cpumask_set_cpu(cpus,
3220 						    &hif_ext_group->
3221 						    new_cpu_mask[i]);
3222 				mask_set = true;
3223 			}
3224 		}
3225 	}
3226 	for (i = 0; i < hif_ext_group->numirq; i++) {
3227 		if (mask_set) {
3228 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3229 						  IRQ_NO_BALANCING, 0);
3230 			ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i],
3231 						       (struct qdf_cpu_mask *)
3232 						       &hif_ext_group->
3233 						       new_cpu_mask[i]);
3234 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3235 						  0, IRQ_NO_BALANCING);
3236 			if (ret)
3237 				qdf_debug("Set affinity %*pbl fails for IRQ %d ",
3238 					  qdf_cpumask_pr_args(&hif_ext_group->
3239 							      new_cpu_mask[i]),
3240 					  hif_ext_group->os_irq[i]);
3241 		} else {
3242 			qdf_debug("Offline CPU: Set affinity fails for IRQ: %d",
3243 				  hif_ext_group->os_irq[i]);
3244 		}
3245 	}
3246 }
3247 #endif
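
/*
 * Note on the qdf_dev_modify_irq_status() bracketing above (and in the
 * CE variant below): IRQ_NO_BALANCING is dropped only for the
 * qdf_dev_set_irq_affinity() call and restored immediately afterwards,
 * so that once an IRQ is pinned to the chosen cluster the IRQ balancer
 * cannot migrate it away again.
 */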
3248 
3249 #ifdef HIF_CPU_PERF_AFFINE_MASK
3250 void hif_pci_ce_irq_set_affinity_hint(
3251 	struct hif_softc *scn)
3252 {
3253 	int ret;
3254 	unsigned int cpus;
3255 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3256 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3257 	struct CE_attr *host_ce_conf;
3258 	int ce_id;
3259 	qdf_cpu_mask ce_cpu_mask;
3260 
3261 	host_ce_conf = ce_sc->host_ce_config;
3262 	qdf_cpumask_clear(&ce_cpu_mask);
3263 
3264 	qdf_for_each_online_cpu(cpus) {
3265 		if (qdf_topology_physical_package_id(cpus) ==
3266 			CPU_CLUSTER_TYPE_PERF) {
3267 			qdf_cpumask_set_cpu(cpus,
3268 					    &ce_cpu_mask);
3269 		} else {
3270 			hif_err_rl("CPU %d is not in the perf cluster",
3271 				   cpus);
3272 		}
3273 	}
3274 	if (qdf_cpumask_empty(&ce_cpu_mask)) {
3275 		hif_err_rl("Empty cpu_mask, unable to set CE IRQ affinity");
3276 		return;
3277 	}
3278 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3279 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3280 			continue;
3281 		qdf_cpumask_clear(&pci_sc->ce_irq_cpu_mask[ce_id]);
3282 		qdf_cpumask_copy(&pci_sc->ce_irq_cpu_mask[ce_id],
3283 				 &ce_cpu_mask);
3284 		qdf_dev_modify_irq_status(pci_sc->ce_irq_num[ce_id],
3285 					  IRQ_NO_BALANCING, 0);
3286 		ret = qdf_dev_set_irq_affinity(
3287 			pci_sc->ce_irq_num[ce_id],
3288 			(struct qdf_cpu_mask *)&pci_sc->ce_irq_cpu_mask[ce_id]);
3289 		qdf_dev_modify_irq_status(pci_sc->ce_irq_num[ce_id],
3290 					  0, IRQ_NO_BALANCING);
3291 		if (ret)
3292 			hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
3293 				   qdf_cpumask_pr_args(
3294 					&pci_sc->ce_irq_cpu_mask[ce_id]),
3295 				   pci_sc->ce_irq_num[ce_id]);
3296 		else
3297 			hif_debug_rl("Set affinity %*pbl for CE IRQ: %d",
3298 				     qdf_cpumask_pr_args(
3299 					&pci_sc->ce_irq_cpu_mask[ce_id]),
3300 				     pci_sc->ce_irq_num[ce_id]);
3301 	}
3302 }
3303 #endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */
3304 
3305 #ifdef HIF_CPU_CLEAR_AFFINITY
3306 void hif_pci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
3307 					   int intr_ctxt_id, int cpu)
3308 {
3309 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3310 	struct hif_exec_context *hif_ext_group;
3311 	int i, ret;
3312 
3313 	if (intr_ctxt_id < hif_state->hif_num_extgroup) {
3314 		hif_ext_group = hif_state->hif_ext_group[intr_ctxt_id];
3315 
3316 		for (i = 0; i < hif_ext_group->numirq; i++) {
3317 			qdf_cpumask_setall(&hif_ext_group->new_cpu_mask[i]);
3318 			qdf_cpumask_clear_cpu(cpu,
3319 					      &hif_ext_group->new_cpu_mask[i]);
3320 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3321 						  IRQ_NO_BALANCING, 0);
3322 			ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i],
3323 						       (struct qdf_cpu_mask *)
3324 						       &hif_ext_group->
3325 						       new_cpu_mask[i]);
3326 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3327 						  0, IRQ_NO_BALANCING);
3328 			if (ret)
3329 				hif_err("Set affinity %*pbl fails for IRQ %d ",
3330 					qdf_cpumask_pr_args(&hif_ext_group->
3331 							    new_cpu_mask[i]),
3332 					hif_ext_group->os_irq[i]);
3333 			else
3334 				hif_debug("Set affinity %*pbl for IRQ: %d",
3335 					  qdf_cpumask_pr_args(&hif_ext_group->
3336 							      new_cpu_mask[i]),
3337 					  hif_ext_group->os_irq[i]);
3338 		}
3339 	}
3340 }
3341 #endif
3342 
3343 void hif_pci_config_irq_affinity(struct hif_softc *scn)
3344 {
3345 	int i;
3346 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3347 	struct hif_exec_context *hif_ext_group;
3348 
3349 	hif_core_ctl_set_boost(true);
3350 	/* Set IRQ affinity for WLAN DP interrupts */
3351 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3352 		hif_ext_group = hif_state->hif_ext_group[i];
3353 		hif_pci_irq_set_affinity_hint(hif_ext_group, true);
3354 	}
3355 	/* Set IRQ affinity for CE interrupts */
3356 	hif_pci_ce_irq_set_affinity_hint(scn);
3357 }
3358 
3359 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
3360 /**
3361  * hif_grp_configure_legacyirq() - Configure DP interrupts
3362  * @scn: hif_softc pointer
3363  * @hif_ext_group: hif extended group pointer
3364  *
3365  * Configure DP legacy interrupts
3366  *
3367  * Return: int
3368  */
3369 static int hif_grp_configure_legacyirq(struct hif_softc *scn,
3370 				       struct hif_exec_context *hif_ext_group)
3371 {
3372 	int ret = 0;
3373 	int irq = 0;
3374 	int j;
3375 	int pci_slot;
3376 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3377 	struct pci_dev *pdev = sc->pdev;
3378 	qdf_device_t qdf_dev = scn->qdf_dev;
3379 
3380 	for (j = 0; j < hif_ext_group->numirq; j++) {
3381 		ret = pfrm_get_irq(&pdev->dev,
3382 				   (struct qdf_pfm_hndl *)qdf_dev->cnss_pdev,
3383 				   legacy_ic_irqname[hif_ext_group->irq[j]],
3384 				   hif_ext_group->irq[j], &irq);
3385 		if (ret) {
3386 			dev_err(&pdev->dev, "get irq failed\n");
3387 			return -EFAULT;
3388 		}
3389 		hif_ext_group->os_irq[j] = irq;
3390 	}
3391 
3392 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3393 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3394 	hif_ext_group->irq_name = &hif_pci_get_irq_name;
3395 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3396 
3397 	pci_slot = hif_get_pci_slot(scn);
3398 	for (j = 0; j < hif_ext_group->numirq; j++) {
3399 		irq = hif_ext_group->os_irq[j];
3400 		if (scn->irq_unlazy_disable)
3401 			qdf_dev_set_irq_status_flags(irq,
3402 						     QDF_IRQ_DISABLE_UNLAZY);
3403 
3404 		hif_debug("request_irq = %d for grp %d",
3405 			  irq, hif_ext_group->grp_id);
3406 
3407 		ret = pfrm_request_irq(scn->qdf_dev->dev, irq,
3408 				       hif_ext_group_interrupt_handler,
3409 				       IRQF_SHARED | IRQF_NO_SUSPEND,
3410 				       legacy_ic_irqname[hif_ext_group->irq[j]],
3411 				       hif_ext_group);
3412 		if (ret) {
3413 			hif_err("request_irq failed ret = %d", ret);
3414 			return -EFAULT;
3415 		}
3416 		hif_ext_group->os_irq[j] = irq;
3417 	}
3418 	hif_ext_group->irq_requested = true;
3419 	return 0;
3420 }
3421 #else
3422 /**
3423  * hif_grp_configure_legacyirq() - Configure DP interrupts
3424  * @scn: hif_softc pointer
3425  * @hif_ext_group: hif extended group pointer
3426  *
3427  * Configure DP legacy interrupts
3428  *
3429  * Return: int
3430  */
3431 static int hif_grp_configure_legacyirq(struct hif_softc *scn,
3432 				       struct hif_exec_context *hif_ext_group)
3433 {
3434 	return 0;
3435 }
3436 #endif
3437 
3438 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3439 			      struct hif_exec_context *hif_ext_group)
3440 {
3441 	int ret = 0;
3442 	int irq = 0;
3443 	int j;
3444 	int pci_slot;
3445 	unsigned long irq_flags;
3446 
3447 	if (pld_get_enable_intx(scn->qdf_dev->dev))
3448 		return hif_grp_configure_legacyirq(scn, hif_ext_group);
3449 
3450 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3451 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3452 	hif_ext_group->irq_name = &hif_pci_get_irq_name;
3453 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3454 
3455 	pci_slot = hif_get_pci_slot(scn);
3456 	for (j = 0; j < hif_ext_group->numirq; j++) {
3457 		irq = hif_ext_group->irq[j];
3458 		if (scn->irq_unlazy_disable)
3459 			qdf_dev_set_irq_status_flags(irq,
3460 						     QDF_IRQ_DISABLE_UNLAZY);
3461 
3462 		if (pld_is_one_msi(scn->qdf_dev->dev))
3463 			irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
3464 		else
3465 			irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
3466 		hif_debug("request_irq = %d for grp %d irq_flags 0x%lx",
3467 			  irq, hif_ext_group->grp_id, irq_flags);
3468 
3469 		qdf_scnprintf(dp_irqname[pci_slot][hif_ext_group->grp_id],
3470 			      DP_IRQ_NAME_LEN, "pci%u_wlan_grp_dp_%u",
3471 			      pci_slot, hif_ext_group->grp_id);
3472 		ret = pfrm_request_irq(
3473 				scn->qdf_dev->dev, irq,
3474 				hif_ext_group_interrupt_handler,
3475 				irq_flags,
3476 				dp_irqname[pci_slot][hif_ext_group->grp_id],
3477 				hif_ext_group);
3478 		if (ret) {
3479 			hif_err("request_irq failed ret = %d", ret);
3480 			return -EFAULT;
3481 		}
3482 		hif_ext_group->os_irq[j] = irq;
3483 	}
3484 	hif_ext_group->irq_requested = true;
3485 	return 0;
3486 }
3487 
3488 #ifdef FEATURE_IRQ_AFFINITY
3489 void hif_pci_set_grp_intr_affinity(struct hif_softc *scn,
3490 				   uint32_t grp_intr_bitmask, bool perf)
3491 {
3492 	int i;
3493 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3494 	struct hif_exec_context *hif_ext_group;
3495 
3496 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3497 		if (!(grp_intr_bitmask & BIT(i)))
3498 			continue;
3499 
3500 		hif_ext_group = hif_state->hif_ext_group[i];
3501 		hif_pci_irq_set_affinity_hint(hif_ext_group, perf);
3502 		qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
3503 	}
3504 }
3505 #endif
3506 
3507 #if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
3508 	defined(QCA_WIFI_KIWI))
3509 uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
3510 			    uint32_t offset)
3511 {
3512 	return hal_read32_mb(hif_sc->hal_soc, offset);
3513 }
3514 
3515 void hif_pci_reg_write32(struct hif_softc *hif_sc,
3516 			 uint32_t offset,
3517 			 uint32_t value)
3518 {
3519 	hal_write32_mb(hif_sc->hal_soc, offset, value);
3520 }
3521 #else
3522 /* TODO: Need to implement other chips carefully */
3523 uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
3524 			    uint32_t offset)
3525 {
3526 	return 0;
3527 }
3528 
3529 void hif_pci_reg_write32(struct hif_softc *hif_sc,
3530 			 uint32_t offset,
3531 			 uint32_t value)
3532 {
3533 }
3534 #endif
3535 
3536 /**
3537  * hif_configure_irq() - configure interrupt
3538  * @scn: HIF context
3539  *
3540  * This function configures interrupt(s)
3541  *
3542  * Return: 0 - for success
3543  */
3544 int hif_configure_irq(struct hif_softc *scn)
3545 {
3546 	int ret = 0;
3547 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3548 
3549 	hif_info("E");
3550 
3551 	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
3552 		scn->request_irq_done = false;
3553 		return 0;
3554 	}
3555 
3556 	hif_init_reschedule_tasklet_work(sc);
3557 
3558 	ret = hif_ce_msi_configure_irq(scn);
3559 	if (ret == 0) {
3560 	if (ret == 0)
3561 		goto end;
3563 	switch (scn->target_info.target_type) {
3564 	case TARGET_TYPE_QCA8074:
3565 	case TARGET_TYPE_QCA8074V2:
3566 	case TARGET_TYPE_QCA6018:
3567 	case TARGET_TYPE_QCA5018:
3568 	case TARGET_TYPE_QCA5332:
3569 	case TARGET_TYPE_QCA9574:
3570 	case TARGET_TYPE_QCN9160:
3571 		ret = hif_ahb_configure_irq(sc);
3572 		break;
3573 	case TARGET_TYPE_QCN9224:
3574 		ret = hif_ce_configure_legacyirq(scn);
3575 		break;
3576 	default:
3577 		ret = hif_pci_configure_legacy_irq(sc);
3578 		break;
3579 	}
3580 	if (ret < 0) {
3581 		hif_err("error = %d", ret);
3582 		return ret;
3583 	}
3584 end:
3585 	scn->request_irq_done = true;
3586 	return 0;
3587 }
3588 
3589 /**
3590  * hif_trigger_timer_irq() : Triggers interrupt on LF_Timer 0
3591  * @scn: hif control structure
3592  *
3593  * Sets the IRQ bit in the LF Timer Status Address to wake a Peregrine/
3594  * Swift target stuck in a polling loop in pcie_address_config in FW
3595  *
3596  * Return: none
3597  */
3598 static void hif_trigger_timer_irq(struct hif_softc *scn)
3599 {
3600 	int tmp;
3601 	/* Trigger IRQ on Peregrine/Swift by setting
3602 	 * IRQ Bit of LF_TIMER 0
3603 	 */
3604 	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3605 						SOC_LF_TIMER_STATUS0_ADDRESS));
3606 	/* Set Raw IRQ Bit */
3607 	tmp |= 1;
3608 	/* SOC_LF_TIMER_STATUS0 */
3609 	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3610 		       SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
3611 }
3612 
3613 /**
3614  * hif_target_sync() : ensure the target is ready
3615  * @scn: hif control structure
3616  *
3617  * Informs fw that we plan to use legacy interrupts so that
3618  * it can begin booting. Ensures that the fw finishes booting
3619  * before continuing. Should be called before trying to write
3620  * to the targets other registers for the first time.
3621  * to the target's other registers for the first time.
3622  * Return: none
3623  */
3624 static void hif_target_sync(struct hif_softc *scn)
3625 {
3626 	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3627 			    PCIE_INTR_ENABLE_ADDRESS),
3628 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3629 	/* read to flush pcie write */
3630 	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3631 			PCIE_INTR_ENABLE_ADDRESS));
3632 
3633 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3634 			PCIE_SOC_WAKE_ADDRESS,
3635 			PCIE_SOC_WAKE_V_MASK);
3636 	while (!hif_targ_is_awake(scn, scn->mem))
3637 		;
3638 
3639 	if (HAS_FW_INDICATOR) {
3640 		int wait_limit = 500;
3641 		int fw_ind = 0;
3642 		int retry_count = 0;
3643 		uint32_t target_type = scn->target_info.target_type;
3644 fw_retry:
3645 		hif_info("Loop checking FW signal");
3646 		while (1) {
3647 			fw_ind = hif_read32_mb(scn, scn->mem +
3648 					FW_INDICATOR_ADDRESS);
3649 			if (fw_ind & FW_IND_INITIALIZED)
3650 				break;
3651 			if (wait_limit-- < 0)
3652 				break;
3653 			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3654 			    PCIE_INTR_ENABLE_ADDRESS),
3655 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3656 			    /* read to flush pcie write */
3657 			(void)hif_read32_mb(scn, scn->mem +
3658 			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
3659 
3660 			qdf_mdelay(10);
3661 		}
3662 		if (wait_limit < 0) {
3663 			if (target_type == TARGET_TYPE_AR9888 &&
3664 			    retry_count++ < 2) {
3665 				hif_trigger_timer_irq(scn);
3666 				wait_limit = 500;
3667 				goto fw_retry;
3668 			}
3669 			hif_info("FW signal timed out");
3670 			qdf_assert_always(0);
3671 		} else {
3672 			hif_info("Got FW signal, retries = %d", 500 - wait_limit);
3673 		}
3674 	}
3675 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3676 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3677 }
3678 
3679 static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
3680 				     struct device *dev)
3681 {
3682 	struct pld_soc_info info;
3683 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
3684 
3685 	pld_get_soc_info(dev, &info);
3686 	sc->mem = info.v_addr;
3687 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3688 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3689 	sc->device_version.family_number = info.device_version.family_number;
3690 	sc->device_version.device_number = info.device_version.device_number;
3691 	sc->device_version.major_version = info.device_version.major_version;
3692 	sc->device_version.minor_version = info.device_version.minor_version;
3693 
3694 	hif_info("%s: fam num %u dev num %u maj ver %u min ver %u\n", __func__,
3695 		 sc->device_version.family_number,
3696 		 sc->device_version.device_number,
3697 		 sc->device_version.major_version,
3698 		 sc->device_version.minor_version);
3699 
3700 	/* dev_mem_info[0] is for CMEM */
3701 	scn->cmem_start = info.dev_mem_info[0].start;
3702 	scn->cmem_size = info.dev_mem_info[0].size;
3703 	scn->target_info.target_version = info.soc_id;
3704 	scn->target_info.target_revision = 0;
3705 	scn->target_info.soc_version = info.device_version.major_version;
3706 }
3707 
3708 static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
3709 				       struct device *dev)
3710 {}
3711 
3712 static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
3713 				    int device_id)
3714 {
3715 	if (!pld_have_platform_driver_support(sc->dev))
3716 		return false;
3717 
3718 	switch (device_id) {
3719 	case QCA6290_DEVICE_ID:
3720 	case QCN9000_DEVICE_ID:
3721 	case QCN9224_DEVICE_ID:
3722 	case QCA6290_EMULATION_DEVICE_ID:
3723 	case QCA6390_DEVICE_ID:
3724 	case QCA6490_DEVICE_ID:
3725 	case AR6320_DEVICE_ID:
3726 	case QCN7605_DEVICE_ID:
3727 	case KIWI_DEVICE_ID:
3728 	case MANGO_DEVICE_ID:
3729 	case PEACH_DEVICE_ID:
3730 		return true;
3731 	}
3732 	return false;
3733 }
3734 
3735 static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
3736 					   int device_id)
3737 {
3738 	if (hif_is_pld_based_target(sc, device_id)) {
3739 		sc->hif_enable_pci = hif_enable_pci_pld;
3740 		sc->hif_pci_deinit = hif_pci_deinit_pld;
3741 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
3742 	} else {
3743 		sc->hif_enable_pci = hif_enable_pci_nopld;
3744 		sc->hif_pci_deinit = hif_pci_deinit_nopld;
3745 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
3746 	}
3747 }
3748 
3749 #ifdef HIF_REG_WINDOW_SUPPORT
3750 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3751 					       u32 target_type)
3752 {
3753 	switch (target_type) {
3754 	case TARGET_TYPE_QCN7605:
3755 	case TARGET_TYPE_QCA6490:
3756 	case TARGET_TYPE_QCA6390:
3757 	case TARGET_TYPE_KIWI:
3758 	case TARGET_TYPE_MANGO:
3759 	case TARGET_TYPE_PEACH:
3760 		sc->use_register_windowing = true;
3761 		qdf_spinlock_create(&sc->register_access_lock);
3762 		sc->register_window = 0;
3763 		break;
3764 	default:
3765 		sc->use_register_windowing = false;
3766 	}
3767 }
3768 #else
3769 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3770 					       u32 target_type)
3771 {
3772 	sc->use_register_windowing = false;
3773 }
3774 #endif
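
/*
 * Illustrative sketch of a windowed register read using the state
 * initialized above. This is editorial, not driver code: the WINDOW_*
 * names are placeholders for the driver's actual window macros, and
 * the real implementation lives in the hif register access layer.
 *
 *	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
 *	uint32_t window, val;
 *
 *	qdf_spin_lock_irqsave(&sc->register_access_lock);
 *	window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
 *	if (window != sc->register_window) {
 *		hif_write32_mb(scn, scn->mem + WINDOW_REG_ADDRESS,
 *			       WINDOW_ENABLE_BIT | window);
 *		sc->register_window = window;
 *	}
 *	val = hif_read32_mb(scn, scn->mem + WINDOW_START +
 *			    (offset & WINDOW_RANGE_MASK));
 *	qdf_spin_unlock_irqrestore(&sc->register_access_lock);
 */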
3775 
3776 /**
3777  * hif_pci_enable_bus(): enable bus
3778  * @ol_sc: hif softc struct
3779  * @dev: device pointer
3780  * @bdev: bus dev pointer
3781  * @bid: bus id pointer
3782  * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
3783  *
3784  * This function enables the bus
3785  *
3786  * Return: QDF_STATUS
3787  */
3788 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3789 			  struct device *dev, void *bdev,
3790 			  const struct hif_bus_id *bid,
3791 			  enum hif_enable_type type)
3792 {
3793 	int ret = 0;
3794 	uint32_t hif_type;
3795 	uint32_t target_type = TARGET_TYPE_UNKNOWN;
3796 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3797 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3798 	uint16_t revision_id = 0;
3799 	int probe_again = 0;
3800 	struct pci_dev *pdev = bdev;
3801 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3802 	struct hif_target_info *tgt_info;
3803 
3804 	if (!ol_sc) {
3805 		hif_err("hif_ctx is NULL");
3806 		return QDF_STATUS_E_NOMEM;
3807 	}
3808 	/* The following print is used by various tools to identify the
3809 	 * WLAN SOC (e.g. crash dump analysis and reporting tools).
3810 	 */
3811 	hif_info("con_mode = 0x%x, WLAN_SOC_device_id = 0x%x",
3812 		 hif_get_conparam(ol_sc), id->device);
3813 
3814 	sc->pdev = pdev;
3815 	sc->dev = &pdev->dev;
3816 	sc->devid = id->device;
3817 	sc->cacheline_sz = dma_get_cache_alignment();
3818 	tgt_info = hif_get_target_info_handle(hif_hdl);
3819 	hif_pci_init_deinit_ops_attach(sc, id->device);
3820 	sc->hif_pci_get_soc_info(sc, dev);
3821 again:
3822 	ret = sc->hif_enable_pci(sc, pdev, id);
3823 	if (ret < 0) {
3824 		hif_err("hif_enable_pci error = %d", ret);
3825 		goto err_enable_pci;
3826 	}
3827 	hif_info("hif_enable_pci done");
3828 
3829 	/* Temporary FIX: disable ASPM on peregrine.
3830 	 * Will be removed after the OTP is programmed
3831 	 */
3832 	hif_disable_power_gating(hif_hdl);
3833 
3834 	device_disable_async_suspend(&pdev->dev);
3835 	pfrm_read_config_word(pdev, 0x08, &revision_id);
3836 
3837 	ret = hif_get_device_type(id->device, revision_id,
3838 						&hif_type, &target_type);
3839 	if (ret < 0) {
3840 		hif_err("Invalid device id/revision_id");
3841 		goto err_tgtstate;
3842 	}
3843 	hif_info("hif_type = 0x%x, target_type = 0x%x",
3844 		hif_type, target_type);
3845 
3846 	hif_register_tbl_attach(ol_sc, hif_type);
3847 	hif_target_register_tbl_attach(ol_sc, target_type);
3848 
3849 	hif_pci_init_reg_windowing_support(sc, target_type);
3850 
3851 	tgt_info->target_type = target_type;
3852 
3853 	/*
3854 	 * Disable unlazy interrupt registration for QCN9000 and QCN9224
3855 	 */
3856 	if (target_type == TARGET_TYPE_QCN9000 ||
3857 	    target_type == TARGET_TYPE_QCN9224)
3858 		ol_sc->irq_unlazy_disable = 1;
3859 
3860 	if (ce_srng_based(ol_sc)) {
3861 		hif_info("Skip tgt_wake up for srng devices");
3862 	} else {
3863 		ret = hif_pci_probe_tgt_wakeup(sc);
3864 		if (ret < 0) {
3865 			hif_err("hif_pci_probe_tgt_wakeup error = %d", ret);
3866 			if (ret == -EAGAIN)
3867 				probe_again++;
3868 			goto err_tgtstate;
3869 		}
3870 		hif_info("hif_pci_probe_tgt_wakeup done");
3871 	}
3872 
3873 	if (!ol_sc->mem_pa) {
3874 		hif_err("BAR0 uninitialized");
3875 		ret = -EIO;
3876 		goto err_tgtstate;
3877 	}
3878 
3879 	if (!ce_srng_based(ol_sc)) {
3880 		hif_target_sync(ol_sc);
3881 
3882 		if (hif_pci_default_link_up(tgt_info))
3883 			hif_vote_link_up(hif_hdl);
3884 	}
3885 
3886 	return QDF_STATUS_SUCCESS;
3887 
3888 err_tgtstate:
3889 	hif_disable_pci(sc);
3890 	sc->pci_enabled = false;
3891 	hif_err("hif_disable_pci done");
3892 	return QDF_STATUS_E_ABORTED;
3893 
3894 err_enable_pci:
3895 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3896 		int delay_time;
3897 
3898 		hif_info("pci reprobe");
3899 		/* back off 10, 40, 90, then cap at 100 ms */
3900 		delay_time = min(100, 10 * (probe_again * probe_again));
3901 		qdf_mdelay(delay_time);
3902 		goto again;
3903 	}
3904 	return qdf_status_from_os_return(ret);
3905 }
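
/*
 * Illustrative call site for hif_pci_enable_bus() from a PCI probe
 * callback (a sketch only; the real wiring goes through the hif bus
 * abstraction, and wlan_pci_probe()/ol_sc here are hypothetical):
 *
 *	static int wlan_pci_probe(struct pci_dev *pdev,
 *				  const struct pci_device_id *id)
 *	{
 *		QDF_STATUS status;
 *
 *		status = hif_pci_enable_bus(ol_sc, &pdev->dev, pdev,
 *					    (const struct hif_bus_id *)id,
 *					    HIF_ENABLE_TYPE_PROBE);
 *		return qdf_status_to_os_return(status);
 *	}
 */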
3906 
3907 /**
3908  * hif_pci_irq_enable() - enable the copy engine interrupt
3909  * @scn: hif_softc
3910  * @ce_id: ce_id
3911  *
3912  * Return: void
3913  */
3914 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3915 {
3916 	uint32_t tmp = 1 << ce_id;
3917 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3918 
3919 	qdf_spin_lock_irqsave(&sc->irq_lock);
3920 	scn->ce_irq_summary &= ~tmp;
3921 	if (scn->ce_irq_summary == 0) {
3922 		/* Enable Legacy PCI line interrupts */
3923 		if (LEGACY_INTERRUPTS(sc) &&
3924 			(scn->target_status != TARGET_STATUS_RESET) &&
3925 			(!qdf_atomic_read(&scn->link_suspended))) {
3926 
3927 			hif_write32_mb(scn, scn->mem +
3928 				(SOC_CORE_BASE_ADDRESS |
3929 				PCIE_INTR_ENABLE_ADDRESS),
3930 				HOST_GROUP0_MASK);
3931 
3932 			hif_read32_mb(scn, scn->mem +
3933 					(SOC_CORE_BASE_ADDRESS |
3934 					PCIE_INTR_ENABLE_ADDRESS));
3935 		}
3936 	}
3937 	if (scn->hif_init_done)
3938 		Q_TARGET_ACCESS_END(scn);
3939 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
3940 
3941 	/* check for missed firmware crash */
3942 	hif_fw_interrupt_handler(0, scn);
3943 }
3944 
3945 /**
3946  * hif_pci_irq_disable() - disable the copy engine interrupt
3947  * @scn: hif_softc
3948  * @ce_id: ce_id
3949  *
3950  * only applicable to legacy copy engine...
3951  *
3952  * Return: void
3953  */
3954 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3955 {
3956 	/* For Rome only need to wake up target */
3957 	/* target access is maintained until interrupts are re-enabled */
3958 	Q_TARGET_ACCESS_BEGIN(scn);
3959 }
3960 
3961 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3962 {
3963 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3964 
3965 	/* legacy case only has one irq */
3966 	return pci_scn->irq;
3967 }
3968 
3969 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
3970 {
3971 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3972 	struct hif_target_info *tgt_info;
3973 
3974 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
3975 
3976 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
3977 	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
3978 	    tgt_info->target_type == TARGET_TYPE_QCA6490 ||
3979 	    tgt_info->target_type == TARGET_TYPE_QCN7605 ||
3980 	    tgt_info->target_type == TARGET_TYPE_QCA8074 ||
3981 	    tgt_info->target_type == TARGET_TYPE_KIWI ||
3982 	    tgt_info->target_type == TARGET_TYPE_MANGO ||
3983 	    tgt_info->target_type == TARGET_TYPE_PEACH) {
3984 		/*
3985 		 * The offset's memtype would need to be considered for
3986 		 * QCA6290/QCA8074; mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE
3987 		 * would also need to be well initialized/defined.
3988 		 */
3989 		return 0;
3990 	}
3991 
3992 	if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE) ||
3993 	    (offset + sizeof(unsigned int) <= sc->mem_len)) {
3994 		return 0;
3995 	}
3996 
3997 	hif_info("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)",
3998 		offset, (uint32_t)(offset + sizeof(unsigned int)),
3999 		sc->mem_len);
4000 
4001 	return -EINVAL;
4002 }
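
/*
 * Illustrative use of the boundary check above by a diagnostic read
 * path (a sketch only; 'offset' and the caller's error handling are
 * hypothetical):
 *
 *	if (hif_pci_addr_in_boundary(scn, offset) < 0)
 *		return QDF_STATUS_E_INVAL;
 *	value = hif_read32_mb(scn, scn->mem + offset);
 */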
4003 
4004 /**
4005  * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
4006  * @scn: hif context
4007  *
4008  * Return: true if soc needs driver bmi otherwise false
4009  */
4010 bool hif_pci_needs_bmi(struct hif_softc *scn)
4011 {
4012 	return !ce_srng_based(scn);
4013 }
4014 
4015 #ifdef FORCE_WAKE
4016 #if defined(DEVICE_FORCE_WAKE_ENABLE) && !defined(CONFIG_PLD_PCIE_FW_SIM)
4017 
4018 /*
4019  * HIF_POLL_UMAC_WAKE: poll value indicating that the UMAC is powered up.
4020  * Update the macro below if the FW-defined value changes.
4021  */
4022 #define HIF_POLL_UMAC_WAKE 0x2
4023 
4024 /**
4025  * hif_force_wake_request(): Enable the force wake recipe
4026  * @hif_handle: HIF handle
4027  *
4028  * Bring MHI to M0 state and force wake the UMAC by asserting the
4029  * soc wake reg. Poll the scratch reg to check if it is set to
4030  * HIF_POLL_UMAC_WAKE. The polled value may read 0x1 while the UMAC
4031  * is still powered down.
4032  *
4033  * Return: 0 if the handshake succeeds, -EINVAL or -ETIMEDOUT on failure
4034  */
4035 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
4036 {
4037 	uint32_t timeout, value;
4038 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4039 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4040 	int ret, status = 0;
4041 
4042 	/* Hold a runtime PM reference, resuming the bus first if needed */
4043 	if (hif_rtpm_get(HIF_RTPM_GET_SYNC, HIF_RTPM_ID_FORCE_WAKE)) {
4044 		hif_err("runtime pm get failed");
4045 		return -EINVAL;
4046 	}
4047 
4048 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
4049 	if (qdf_in_interrupt())
4050 		timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
4051 	else
4052 		timeout = 0;
4053 
4054 	ret = pld_force_wake_request_sync(scn->qdf_dev->dev, timeout);
4055 	if (ret) {
4056 		hif_err("force wake request(timeout %u) send failed: %d",
4057 			timeout, ret);
4058 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
4059 		status = -EINVAL;
4060 		goto release_rtpm_ref;
4061 	}
4062 
4063 	/* If device's M1 state-change event races here, it can be ignored,
4064 	 * as the device is expected to immediately move from M2 to M0
4065 	 * without entering low power state.
4066 	 */
4067 	if (!pld_is_device_awake(scn->qdf_dev->dev))
4068 		hif_info("state-change event races, ignore");
4069 
4070 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
4071 	hif_write32_mb(scn, scn->mem + PCIE_REG_WAKE_UMAC_OFFSET, 1);
4072 	HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1);
4073 	/*
4074 	 * Reuse 'timeout' to accumulate the polling time so that
4075 	 * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME stays < 50 ms
4076 	 */
4077 	timeout = 0;
4078 	do {
4079 		value = hif_read32_mb(
4080 				scn, scn->mem +
4081 				PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG);
4082 		if (value == HIF_POLL_UMAC_WAKE)
4083 			break;
4084 		qdf_mdelay(FORCE_WAKE_DELAY_MS);
4085 		timeout += FORCE_WAKE_DELAY_MS;
4086 	} while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS);
4087 
4088 	if (value != HIF_POLL_UMAC_WAKE) {
4089 		hif_err("force wake handshake failed, reg value = 0x%x",
4090 			value);
4091 		HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1);
4092 		status = -ETIMEDOUT;
4093 		goto release_rtpm_ref;
4094 	}
4095 
4096 	HIF_STATS_INC(pci_scn, soc_force_wake_success, 1);
4097 	return 0;
4098 
4099 release_rtpm_ref:
4100 	/* Release runtime PM force wake */
4101 	ret = hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_FORCE_WAKE);
4102 	if (ret) {
4103 		hif_err("runtime pm put failure: %d", ret);
4104 		return ret;
4105 	}
4106 
4107 	return status;
4108 }
4109 
4110 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
4111 {
4112 	int ret;
4113 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4114 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4115 
4116 	/* Release umac force wake */
4117 	hif_write32_mb(scn, scn->mem + PCIE_REG_WAKE_UMAC_OFFSET, 0);
4118 
4119 	/* Release MHI force wake */
4120 	ret = pld_force_wake_release(scn->qdf_dev->dev);
4121 	if (ret) {
4122 		hif_err("pld force wake release failure");
4123 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
4124 		return ret;
4125 	}
4126 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
4127 
4128 	/* Release runtime PM force wake */
4129 	ret = hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_FORCE_WAKE);
4130 	if (ret) {
4131 		hif_err("runtime pm put failure");
4132 		return ret;
4133 	}
4134 
4135 	HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1);
4136 	return 0;
4137 }
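
/*
 * Illustrative pairing of the force wake APIs above (a sketch only;
 * real callers and their error handling live outside this file):
 *
 *	if (hif_force_wake_request(hif_hdl)) {
 *		hif_err("force wake failed, skip UMAC register access");
 *		return -EIO;
 *	}
 *	... access UMAC registers here ...
 *	if (hif_force_wake_release(hif_hdl))
 *		hif_err("force wake release failed");
 */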
4138 
4139 #else /* DEVICE_FORCE_WAKE_ENABLE */
4140 /**
4141  * hif_force_wake_request() - Force wake without the PCIE scratch
4142  * register write/read handshake
4143  * Return: 0 on success, -EINVAL on failure
4144  */
4145 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
4146 {
4147 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4148 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4149 	uint32_t timeout;
4150 	int ret;
4151 
4152 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
4153 
4154 	if (qdf_in_interrupt())
4155 		timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
4156 	else
4157 		timeout = 0;
4158 
4159 	ret = pld_force_wake_request_sync(scn->qdf_dev->dev, timeout);
4160 	if (ret) {
4161 		hif_err("force wake request(timeout %u) send failed: %d",
4162 			timeout, ret);
4163 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
4164 		return -EINVAL;
4165 	}
4166 
4167 	/* If device's M1 state-change event races here, it can be ignored,
4168 	 * as the device is expected to immediately move from M2 to M0
4169 	 * without entering low power state.
4170 	 */
4171 	if (!pld_is_device_awake(scn->qdf_dev->dev))
4172 		hif_info("state-change event races, ignore");
4173 
4174 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
4175 
4176 	return 0;
4177 }
4178 
4179 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
4180 {
4181 	int ret;
4182 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4183 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4184 
4185 	ret = pld_force_wake_release(scn->qdf_dev->dev);
4186 	if (ret) {
4187 		hif_err("force wake release failure");
4188 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
4189 		return ret;
4190 	}
4191 
4192 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
4193 	return 0;
4194 }
4195 #endif /* DEVICE_FORCE_WAKE_ENABLE */
4196 
4197 void hif_print_pci_stats(struct hif_pci_softc *pci_handle)
4198 {
4199 	hif_debug("mhi_force_wake_request_vote: %d",
4200 		  pci_handle->stats.mhi_force_wake_request_vote);
4201 	hif_debug("mhi_force_wake_failure: %d",
4202 		  pci_handle->stats.mhi_force_wake_failure);
4203 	hif_debug("mhi_force_wake_success: %d",
4204 		  pci_handle->stats.mhi_force_wake_success);
4205 	hif_debug("soc_force_wake_register_write_success: %d",
4206 		  pci_handle->stats.soc_force_wake_register_write_success);
4207 	hif_debug("soc_force_wake_failure: %d",
4208 		  pci_handle->stats.soc_force_wake_failure);
4209 	hif_debug("soc_force_wake_success: %d",
4210 		  pci_handle->stats.soc_force_wake_success);
4211 	hif_debug("mhi_force_wake_release_failure: %d",
4212 		  pci_handle->stats.mhi_force_wake_release_failure);
4213 	hif_debug("mhi_force_wake_release_success: %d",
4214 		  pci_handle->stats.mhi_force_wake_release_success);
4215 	hif_debug("oc_force_wake_release_success: %d",
4216 		  pci_handle->stats.soc_force_wake_release_success);
4217 }
4218 #endif /* FORCE_WAKE */
4219 
4220 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
4221 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
4222 {
4223 	return pld_prevent_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
4224 }
4225 
4226 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
4227 {
4228 	pld_allow_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
4229 }
4230 #endif
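
/*
 * Illustrative pairing of the L1 hooks above (a sketch only;
 * presumably the HAL delayed register write path is the caller,
 * given the feature flag, and hal_reg_write() is a hypothetical
 * stand-in):
 *
 *	if (hif_prevent_link_low_power_states(hif_hdl))
 *		return;
 *	hal_reg_write(...);
 *	hif_allow_link_low_power_states(hif_hdl);
 */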
4231