xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/pcie/if_pci.c (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include <linux/of_pci.h>
#include <linux/version.h>
#include "hif_io32.h"
#include "if_pci.h"
#include "hif.h"
#include "target_type.h"
#include "hif_main.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_bmi.h"
#include "regtable.h"
#include "hif_hw_version.h"
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "qdf_status.h"
#include "qdf_atomic.h"
#include "qdf_platform.h"
#include "pld_common.h"
#include "mp_dev.h"
#include "hif_debug.h"

#ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
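/*
 * Human-readable names for the legacy interrupt lines, indexed by the
 * hardware interrupt number. These strings are assumed to be used when
 * the legacy IRQs are requested, so /proc/interrupts shows meaningful
 * per-line names.
 */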
char *legacy_ic_irqname[] = {
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"ce12",
	"ce13",
	"ce14",
	"ce15",
	"reo2sw8_intr2",
	"reo2sw7_intr2",
	"reo2sw6_intr2",
	"reo2sw5_intr2",
	"reo2sw4_intr2",
	"reo2sw3_intr2",
	"reo2sw2_intr2",
	"reo2sw1_intr2",
	"reo2sw0_intr2",
	"reo2sw8_intr",
	"reo2sw7_intr",
	"reo2sw6_intr",
	"reo2sw5_intr",
	"reo2sw4_intr",
	"reo2sw3_intr",
	"reo2sw2_intr",
	"reo2sw1_intr",
	"reo2sw0_intr",
	"reo2status_intr2",
	"reo_status",
	"reo2rxdma_out_2",
	"reo2rxdma_out_1",
	"reo_cmd",
	"sw2reo6",
	"sw2reo5",
	"sw2reo1",
	"sw2reo",
	"rxdma2reo_mlo_0_dst_ring1",
	"rxdma2reo_mlo_0_dst_ring0",
	"rxdma2reo_mlo_1_dst_ring1",
	"rxdma2reo_mlo_1_dst_ring0",
	"rxdma2reo_dst_ring1",
	"rxdma2reo_dst_ring0",
	"rxdma2sw_dst_ring1",
	"rxdma2sw_dst_ring0",
	"rxdma2release_dst_ring1",
	"rxdma2release_dst_ring0",
	"sw2rxdma_2_src_ring",
	"sw2rxdma_1_src_ring",
	"sw2rxdma_0",
	"wbm2sw6_release2",
	"wbm2sw5_release2",
	"wbm2sw4_release2",
	"wbm2sw3_release2",
	"wbm2sw2_release2",
	"wbm2sw1_release2",
	"wbm2sw0_release2",
	"wbm2sw6_release",
	"wbm2sw5_release",
	"wbm2sw4_release",
	"wbm2sw3_release",
	"wbm2sw2_release",
	"wbm2sw1_release",
	"wbm2sw0_release",
	"wbm2sw_link",
	"wbm_error_release",
	"sw2txmon_src_ring",
	"sw2rxmon_src_ring",
	"txmon2sw_p1_intr1",
	"txmon2sw_p1_intr0",
	"txmon2sw_p0_dest1",
	"txmon2sw_p0_dest0",
	"rxmon2sw_p1_intr1",
	"rxmon2sw_p1_intr0",
	"rxmon2sw_p0_dest1",
	"rxmon2sw_p0_dest0",
	"sw_release",
	"sw2tcl_credit2",
	"sw2tcl_credit",
	"sw2tcl4",
	"sw2tcl5",
	"sw2tcl3",
	"sw2tcl2",
	"sw2tcl1",
	"sw2wbm1",
	"misc_8",
	"misc_7",
	"misc_6",
	"misc_5",
	"misc_4",
	"misc_3",
	"misc_2",
	"misc_1",
	"misc_0",
};
#endif

#if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_KIWI))
#include "hal_api.h"
#endif

#include "if_pci_internal.h"
#include "ce_tasklet.h"
#include "targaddrs.h"
#include "hif_exec.h"

#include "pci_api.h"
#include "ahb_api.h"
#include "wlan_cfg.h"
#include "qdf_hang_event_notifier.h"
#include "qdf_platform.h"
#include "qal_devnode.h"
#include "qdf_irq.h"

/* Maximum ms timeout for host to wake up target */
#define PCIE_WAKE_TIMEOUT 1000
#define RAMDUMP_EVENT_TIMEOUT 2500

/* Setting SOC_GLOBAL_RESET during driver unload causes an intermittent
 * PCIe data bus error.
 * As a workaround, the reset sequence uses a Target CPU warm reset
 * instead of SOC_GLOBAL_RESET.
 */
#define CPU_WARM_RESET_WAR
#define WLAN_CFG_MAX_PCIE_GROUPS 4
#ifdef QCA_WIFI_QCN9224
#define WLAN_CFG_MAX_CE_COUNT 16
#else
#define WLAN_CFG_MAX_CE_COUNT 12
#endif
#define DP_IRQ_NAME_LEN 25
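/*
 * Storage for formatted IRQ names, one slot per PCIe group and
 * context/CE. request_irq() keeps a pointer to the name it is given,
 * so these buffers must stay valid for the lifetime of the driver.
 */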
char dp_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_INT_NUM_CONTEXTS][DP_IRQ_NAME_LEN] = {};
char ce_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_MAX_CE_COUNT][DP_IRQ_NAME_LEN] = {};

static inline int hif_get_pci_slot(struct hif_softc *scn)
{
	int pci_slot = pld_get_pci_slot(scn->qdf_dev->dev);

	if (pci_slot < 0) {
		hif_err("Invalid PCI SLOT %d", pci_slot);
		qdf_assert_always(0);
		return 0;
	} else {
		return pci_slot;
	}
}

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
struct ce_irq_reg_table {
	uint32_t irq_enable;
	uint32_t irq_status;
};

#ifndef QCA_WIFI_3_0_ADRASTEA
static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
}
#else
static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	unsigned int target_enable0, target_enable1;
	unsigned int target_cause0, target_cause1;

	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);

	if ((target_enable0 & target_cause0) ||
	    (target_enable1 & target_cause1)) {
		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);

		if (scn->notice_send)
			pld_intr_notify_q6(sc->dev);
	}
}
#endif

/**
 * pci_dispatch_interrupt() - dispatch CE interrupts to their tasklets
 * @scn: hif context
 *
 * Return: N/A
 */
static void pci_dispatch_interrupt(struct hif_softc *scn)
{
	uint32_t intr_summary;
	int id;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done != true)
		return;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(scn);

	if (intr_summary == 0) {
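		/* No CE interrupt is pending: re-arm the HOST_GROUP0
		 * interrupt enables and read them back to flush the
		 * posted write before returning.
		 */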
		if ((scn->target_status != TARGET_STATUS_RESET) &&
			(!qdf_atomic_read(&scn->link_suspended))) {

			hif_write32_mb(scn, scn->mem +
				(SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				HOST_GROUP0_MASK);

			hif_read32_mb(scn, scn->mem +
					(SOC_CORE_BASE_ADDRESS |
					PCIE_INTR_ENABLE_ADDRESS));
		}
		Q_TARGET_ACCESS_END(scn);
		return;
	}
	Q_TARGET_ACCESS_END(scn);

	scn->ce_irq_summary = intr_summary;
	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
		if (intr_summary & (1 << id)) {
			intr_summary &= ~(1 << id);
			ce_dispatch_interrupt(id, &hif_state->tasklets[id]);
		}
	}
}

irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
{
	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);

	volatile int tmp;
	uint16_t val = 0;
	uint32_t bar0 = 0;
	uint32_t fw_indicator_address, fw_indicator;
	bool ssr_irq = false;
	unsigned int host_cause, host_enable;

	if (LEGACY_INTERRUPTS(sc)) {
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return IRQ_HANDLED;

		if (ADRASTEA_BU) {
			host_enable = hif_read32_mb(sc, sc->mem +
						    PCIE_INTR_ENABLE_ADDRESS);
			host_cause = hif_read32_mb(sc, sc->mem +
						   PCIE_INTR_CAUSE_ADDRESS);
			if (!(host_enable & host_cause)) {
				hif_pci_route_adrastea_interrupt(sc);
				return IRQ_HANDLED;
			}
		}

		/* Clear Legacy PCI line interrupts
		 * IMPORTANT: INTR_CLR register has to be set
		 * after INTR_ENABLE is set to 0,
		 * otherwise interrupt can not be really cleared
		 */
		hif_write32_mb(sc, sc->mem +
			      (SOC_CORE_BASE_ADDRESS |
			       PCIE_INTR_ENABLE_ADDRESS), 0);

		hif_write32_mb(sc, sc->mem +
			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
			       ADRASTEA_BU ?
			       (host_enable & host_cause) :
			      HOST_GROUP0_MASK);

		if (ADRASTEA_BU)
			hif_write32_mb(sc, sc->mem + 0x2f100c,
				       (host_cause >> 1));

		/* IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer
		 */
		if (!ADRASTEA_BU) {
			tmp = hif_read32_mb(sc, sc->mem +
					    (SOC_CORE_BASE_ADDRESS |
					     PCIE_INTR_ENABLE_ADDRESS));

			if (tmp == 0xdeadbeef) {
				hif_err("SoC returns 0xdeadbeef!!");

				pci_read_config_word(sc->pdev, PCI_VENDOR_ID,
						     &val);
				hif_err("PCI Vendor ID = 0x%04x", val);

				pci_read_config_word(sc->pdev, PCI_DEVICE_ID,
						     &val);
				hif_err("PCI Device ID = 0x%04x", val);

				pci_read_config_word(sc->pdev, PCI_COMMAND,
						     &val);
				hif_err("PCI Command = 0x%04x", val);

				pci_read_config_word(sc->pdev, PCI_STATUS,
						     &val);
				hif_err("PCI Status = 0x%04x", val);

				pci_read_config_dword(sc->pdev,
						      PCI_BASE_ADDRESS_0,
						      &bar0);
				hif_err("PCI BAR0 = 0x%08x", bar0);

				hif_err("RTC_STATE_ADDRESS = 0x%08x",
					hif_read32_mb(sc, sc->mem +
						PCIE_LOCAL_BASE_ADDRESS
						+ RTC_STATE_ADDRESS));
				hif_err("PCIE_SOC_WAKE_ADDRESS = 0x%08x",
					hif_read32_mb(sc, sc->mem +
						PCIE_LOCAL_BASE_ADDRESS
						+ PCIE_SOC_WAKE_ADDRESS));
				hif_err("0x80008 = 0x%08x, 0x8000c = 0x%08x",
					hif_read32_mb(sc, sc->mem + 0x80008),
					hif_read32_mb(sc, sc->mem + 0x8000c));
				hif_err("0x80010 = 0x%08x, 0x80014 = 0x%08x",
					hif_read32_mb(sc, sc->mem + 0x80010),
					hif_read32_mb(sc, sc->mem + 0x80014));
				hif_err("0x80018 = 0x%08x, 0x8001c = 0x%08x",
					hif_read32_mb(sc, sc->mem + 0x80018),
					hif_read32_mb(sc, sc->mem + 0x8001c));
				QDF_BUG(0);
			}

			PCI_CLR_CAUSE0_REGISTER(sc);
		}

		if (HAS_FW_INDICATOR) {
			fw_indicator_address = hif_state->fw_indicator_address;
			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
			if ((fw_indicator != ~0) &&
			   (fw_indicator & FW_IND_EVENT_PENDING))
				ssr_irq = true;
		}

		if (Q_TARGET_ACCESS_END(scn) < 0)
			return IRQ_HANDLED;
	}
	/* TBDXXX: Add support for WMAC */

	if (ssr_irq) {
		sc->irq_event = irq;
		qdf_atomic_set(&scn->tasklet_from_intr, 1);

		qdf_atomic_inc(&scn->active_tasklet_cnt);
		tasklet_schedule(&sc->intr_tq);
	} else {
		pci_dispatch_interrupt(scn);
	}

	return IRQ_HANDLED;
}

bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
{
	return 1;               /* FIX THIS */
}

int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	int i = 0;

	if (!irq || !size)
		return -EINVAL;

	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
		irq[0] = sc->irq;
		return 1;
	}

	if (sc->num_msi_intrs > size) {
		qdf_print("Not enough space in irq buffer to return irqs");
		return -EINVAL;
	}

	for (i = 0; i < sc->num_msi_intrs; i++)
		irq[i] = sc->irq + i + MSI_ASSIGN_CE_INITIAL;

	return sc->num_msi_intrs;
}
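
/*
 * Example (hypothetical caller): collect the CE IRQ numbers for this
 * bus instance.
 *
 *	int irqs[WLAN_CFG_MAX_CE_COUNT];
 *	int n = hif_get_irq_num(hif_hdl, irqs, QDF_ARRAY_SIZE(irqs));
 */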

/**
 * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
 * @scn: hif_softc
 *
 * Return: void
 */
#if CONFIG_ATH_PCIE_MAX_PERF == 0
void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	A_target_id_t pci_addr = scn->mem;

	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	/*
	 * If the deferred sleep timer is running, cancel it
	 * and put the soc into sleep.
	 */
	if (hif_state->fake_sleep == true) {
		qdf_timer_stop(&hif_state->sleep_timer);
		if (hif_state->verified_awake == false) {
			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
				      PCIE_SOC_WAKE_ADDRESS,
				      PCIE_SOC_WAKE_RESET);
		}
		hif_state->fake_sleep = false;
	}
	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
#else
inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
}
#endif

#define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
	hif_read32_mb(sc, (char *)(mem) + \
	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))

#define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
	hif_write32_mb(sc, ((char *)(mem) + \
	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
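
/*
 * Example (taken from the reset paths below): force the target awake
 * before touching SOC_GLOBAL_RESET:
 *
 *	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
 *			       PCIE_SOC_WAKE_V_MASK);
 */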

#ifdef QCA_WIFI_3_0
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @hif_ctx: hif context
 * @mem: mapped target register space
 *
 * emulation never goes to sleep
 *
 * Return: true if target is awake
 */
static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
{
	return true;
}
#else
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @scn: hif context
 * @mem: mapped target register space
 *
 * Return: true if the target's clocks are on
 */
static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
{
	uint32_t val;

	if (scn->recovery)
		return false;
	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
		+ RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
}
#endif

#define ATH_PCI_RESET_WAIT_MAX 10       /* ms */
static void hif_pci_device_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (!scn->hostdef)
		return;

	/* NB: Don't check resetok here.  This form of reset
	 * is integral to correct operation.
	 */

	if (!SOC_GLOBAL_RESET_ADDRESS)
		return;

	if (!mem)
		return;

	hif_err("Reset Device");

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;

		qdf_mdelay(1);
	}

	/* Put Target, including PCIe, into RESET. */
	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;

		qdf_mdelay(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;

		qdf_mdelay(1);
	}

	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_RESET);
}

/* CPU warm reset function
 * Steps:
 * 1. Disable all pending interrupts - so there are no pending interrupts
 *    on WARM reset
 * 2. Clear the FW_INDICATOR_ADDRESS - so the Target CPU initializes FW
 *    correctly on WARM reset
 * 3. Clear TARGET CPU LF timer interrupt
 * 4. Reset all CEs to clear any pending CE transactions
 * 5. Warm reset CPU
 */
static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	uint32_t fw_indicator;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	/* NB: Don't check resetok here.  This form of reset is
	 * integral to correct operation.
	 */

	if (!mem)
		return;

	hif_debug("Target Warm Reset");

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;
		qdf_mdelay(1);
	}

	/*
	 * Disable Pending interrupts
	 */
	val = hif_read32_mb(sc, mem +
			    (SOC_CORE_BASE_ADDRESS |
			     PCIE_INTR_CAUSE_ADDRESS));
	hif_debug("Host Intr Cause reg 0x%x: value : 0x%x",
		  (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
	/* Target CPU Intr Cause */
	val = hif_read32_mb(sc, mem +
			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	hif_debug("Target CPU Intr Cause 0x%x", val);

	val = hif_read32_mb(sc, mem +
			    (SOC_CORE_BASE_ADDRESS |
			     PCIE_INTR_ENABLE_ADDRESS));
	hif_write32_mb(sc, (mem +
		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
	hif_write32_mb(sc, (mem +
		       (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
		       HOST_GROUP0_MASK);

	qdf_mdelay(100);

	/* Clear FW_INDICATOR_ADDRESS */
	if (HAS_FW_INDICATOR) {
		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
	}

	/* Clear Target LF Timer interrupts */
	val = hif_read32_mb(sc, mem +
			    (RTC_SOC_BASE_ADDRESS +
			     SOC_LF_TIMER_CONTROL0_ADDRESS));
	hif_debug("addr 0x%x : 0x%x",
		  (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
	hif_write32_mb(sc, mem +
		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
		      val);

	/* Reset CE */
	val = hif_read32_mb(sc, mem +
			    (RTC_SOC_BASE_ADDRESS |
			     SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb(sc, (mem +
		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
		      val);
	val = hif_read32_mb(sc, mem +
			    (RTC_SOC_BASE_ADDRESS |
			     SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* CE unreset */
	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
		       SOC_RESET_CONTROL_ADDRESS), val);
	val = hif_read32_mb(sc, mem +
			    (RTC_SOC_BASE_ADDRESS |
			     SOC_RESET_CONTROL_ADDRESS));
	qdf_mdelay(10);

	/* Read Target CPU Intr Cause */
	val = hif_read32_mb(sc, mem +
			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
	hif_debug("Target CPU Intr Cause after CE reset 0x%x", val);

	/* CPU warm RESET */
	val = hif_read32_mb(sc, mem +
			    (RTC_SOC_BASE_ADDRESS |
			     SOC_RESET_CONTROL_ADDRESS));
	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
		       SOC_RESET_CONTROL_ADDRESS), val);
	val = hif_read32_mb(sc, mem +
			    (RTC_SOC_BASE_ADDRESS |
			     SOC_RESET_CONTROL_ADDRESS));
	hif_debug("RESET_CONTROL after cpu warm reset 0x%x", val);

	qdf_mdelay(100);
	hif_debug("Target Warm reset complete");
}

#ifndef QCA_WIFI_3_0
/* only applicable to legacy ce */
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem = sc->mem;
	uint32_t val;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return ATH_ISR_NOSCHED;
	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
	if (Q_TARGET_ACCESS_END(scn) < 0)
		return ATH_ISR_SCHED;

	hif_debug("FW_INDICATOR register is 0x%x", val);

	if (val & FW_IND_HELPER)
		return 0;

	return 1;
}
#endif

int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint16_t device_id = 0;
	uint32_t val;
	uint16_t timeout_count = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	/* Check device ID from PCIe configuration space for link status */
	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != sc->devid) {
		hif_err("Device ID does not match (read 0x%x, expect 0x%x)",
			device_id, sc->devid);
		return -EACCES;
	}

	/* Check PCIe local register for bar/memory access */
	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
			   RTC_STATE_ADDRESS);
	hif_debug("RTC_STATE_ADDRESS is %08x", val);

	/* Try to wake up target if it sleeps */
	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
	hif_debug("PCIE_SOC_WAKE_ADDRESS is %08x",
		hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		PCIE_SOC_WAKE_ADDRESS));

	/* Check if target can be woken up */
	while (!hif_targ_is_awake(scn, sc->mem)) {
		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
			hif_err("wake up timeout, %08x, %08x",
				hif_read32_mb(sc, sc->mem +
				     PCIE_LOCAL_BASE_ADDRESS +
				     RTC_STATE_ADDRESS),
				hif_read32_mb(sc, sc->mem +
				     PCIE_LOCAL_BASE_ADDRESS +
				     PCIE_SOC_WAKE_ADDRESS));
			return -EACCES;
		}

		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);

		qdf_mdelay(100);
		timeout_count += 100;
	}

	/* Check Power register for SoC internal bus issues */
	val = hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
			    SOC_POWER_REG_OFFSET);
	hif_debug("Power register is %08x", val);

	return 0;
}

/**
 * __hif_pci_dump_registers(): dump other PCI debug registers
 * @scn: struct hif_softc
 *
 * This function dumps pci debug registers.  The parent function
 * dumps the copy engine registers before calling this function.
 *
 * Return: void
 */
static void __hif_pci_dump_registers(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem = sc->mem;
	uint32_t val, i, j;
	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
	uint32_t ce_base;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	/* DEBUG_INPUT_SEL_SRC = 0x6 */
	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    WLAN_DEBUG_INPUT_SEL_OFFSET);
	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);

	/* DEBUG_CONTROL_ENABLE = 0x1 */
	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			   WLAN_DEBUG_CONTROL_OFFSET);
	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
		      WLAN_DEBUG_CONTROL_OFFSET, val);

	hif_debug("Debug: inputsel: %x dbgctrl: %x",
	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    WLAN_DEBUG_INPUT_SEL_OFFSET),
	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    WLAN_DEBUG_CONTROL_OFFSET));

	hif_debug("Debug CE");
	/* Loop CE debug output */
	/* AMBA_DEBUG_BUS_SEL = 0xc */
	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    AMBA_DEBUG_BUS_OFFSET);
	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
		       val);

	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
				   CE_WRAPPER_DEBUG_OFFSET);
		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
			      CE_WRAPPER_DEBUG_OFFSET, val);

		hif_debug("ce wrapper: %d amdbg: %x cewdbg: %x",
			  wrapper_idx[i],
			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				AMBA_DEBUG_BUS_OFFSET),
			  hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
				CE_WRAPPER_DEBUG_OFFSET));

		if (wrapper_idx[i] <= 7) {
			for (j = 0; j <= 5; j++) {
				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
				/* For (j=0~5) write CE_DEBUG_SEL = j */
				val = hif_read32_mb(sc, mem + ce_base +
						    CE_DEBUG_OFFSET);
				val &= ~CE_DEBUG_SEL_MASK;
				val |= CE_DEBUG_SEL_SET(j);
				hif_write32_mb(sc, mem + ce_base +
					       CE_DEBUG_OFFSET, val);

				/* read (@gpio_athr_wlan_reg)
				 * WLAN_DEBUG_OUT_DATA
				 */
				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
						    + WLAN_DEBUG_OUT_OFFSET);
				val = WLAN_DEBUG_OUT_DATA_GET(val);

				hif_debug("module%d: cedbg: %x out: %x",
					  j,
					  hif_read32_mb(sc, mem + ce_base +
						CE_DEBUG_OFFSET), val);
			}
		} else {
			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
			val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
					    WLAN_DEBUG_OUT_OFFSET);
			val = WLAN_DEBUG_OUT_DATA_GET(val);

			hif_debug("out: %x", val);
		}
	}

	hif_debug("Debug PCIe:");
	/* Loop PCIe debug output */
	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
			    AMBA_DEBUG_BUS_OFFSET);
	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
		       AMBA_DEBUG_BUS_OFFSET, val);

	for (i = 0; i <= 8; i++) {
		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
		val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				    AMBA_DEBUG_BUS_OFFSET);
		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
			       AMBA_DEBUG_BUS_OFFSET, val);

		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
		val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				    WLAN_DEBUG_OUT_OFFSET);
		val = WLAN_DEBUG_OUT_DATA_GET(val);

		hif_debug("amdbg: %x out: %x %x",
			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				WLAN_DEBUG_OUT_OFFSET), val,
			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
				WLAN_DEBUG_OUT_OFFSET));
	}

	Q_TARGET_ACCESS_END(scn);
}

/**
 * hif_pci_dump_registers(): dump bus debug registers
 * @hif_ctx: struct hif_softc
 *
 * This function dumps hif bus debug registers
 *
 * Return: 0 for success or error code
 */
int hif_pci_dump_registers(struct hif_softc *hif_ctx)
{
	int status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	status = hif_dump_ce_registers(scn);

	if (status)
		hif_err("Dump CE Registers Failed");

	/* dump non copy engine pci registers */
	__hif_pci_dump_registers(scn);

	return 0;
}

#ifdef HIF_CONFIG_SLUB_DEBUG_ON

/* worker thread to schedule wlan_tasklet in SLUB debug build */
static void reschedule_tasklet_work_handler(void *arg)
{
	struct hif_pci_softc *sc = arg;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (!scn) {
		hif_err("hif_softc is NULL");
		return;
	}

	if (scn->hif_init_done == false) {
		hif_err("wlan driver is unloaded");
		return;
	}

	tasklet_schedule(&sc->intr_tq);
}

/**
 * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
 * work
 * @sc: HIF PCI Context
 *
 * Return: void
 */
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
{
	qdf_create_work(0, &sc->reschedule_tasklet_work,
				reschedule_tasklet_work_handler, NULL);
}
#else
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
#endif /* HIF_CONFIG_SLUB_DEBUG_ON */

void wlan_tasklet(unsigned long data)
{
	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (scn->hif_init_done == false)
		goto end;

	if (qdf_atomic_read(&scn->link_suspended))
		goto end;

	if (!ADRASTEA_BU) {
		hif_fw_interrupt_handler(sc->irq_event, scn);
		if (scn->target_status == TARGET_STATUS_RESET)
			goto end;
	}

end:
	qdf_atomic_set(&scn->tasklet_from_intr, 0);
	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

/**
 * hif_disable_power_gating() - disable HW power gating
 * @hif_ctx: hif context
 *
 * disables pcie L1 power states
 */
static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("Cannot disable ASPM: scn is NULL");
		return;
	}

	/* Disable ASPM when pkt log is enabled */
	pfrm_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
	pfrm_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
}

/**
 * hif_enable_power_gating() - enable HW power gating
 * @sc: hif pci context
 *
 * enables pcie L1 power states
 */
static void hif_enable_power_gating(struct hif_pci_softc *sc)
{
	if (!sc) {
		hif_err("Cannot enable ASPM: sc is NULL");
		return;
	}

	/* Re-enable ASPM after firmware/OTP download is complete */
	pfrm_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
}

/**
 * hif_pci_enable_power_management() - enable power management
 * @hif_sc: hif context
 * @is_packet_log_enabled: true if packet log is enabled
 *
 * Enables runtime pm, aspm(PCI.. hif_enable_power_gating) and re-enabling
 * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
 *
 * note: epping mode does not call this function as it does not
 *       care about saving power.
 */
void hif_pci_enable_power_management(struct hif_softc *hif_sc,
				 bool is_packet_log_enabled)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
	uint32_t mode;

	if (!pci_ctx) {
		hif_err("hif_ctx null");
		return;
	}

	mode = hif_get_conparam(hif_sc);
	if (mode == QDF_GLOBAL_FTM_MODE) {
		hif_info("Enable power gating for FTM mode");
		hif_enable_power_gating(pci_ctx);
		return;
	}

	hif_rtpm_start(hif_sc);

	if (!is_packet_log_enabled)
		hif_enable_power_gating(pci_ctx);

	if (!CONFIG_ATH_PCIE_MAX_PERF &&
	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
	    !ce_srng_based(hif_sc)) {
		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
			hif_err("Failed to set target to sleep");
	}
}

/**
 * hif_pci_disable_power_management() - disable power management
 * @hif_ctx: hif context
 *
 * Currently disables runtime pm. Should be updated to behave
 * if runtime pm is not started. Should be updated to take care
 * of aspm and soc sleep for driver load.
 */
void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!pci_ctx) {
		hif_err("hif_ctx null");
		return;
	}

	hif_rtpm_stop(hif_ctx);
}

void hif_pci_display_stats(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!pci_ctx) {
		hif_err("hif_ctx null");
		return;
	}
	hif_display_ce_stats(hif_ctx);

	hif_print_pci_stats(pci_ctx);
}

void hif_pci_clear_stats(struct hif_softc *hif_ctx)
{
	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

	if (!pci_ctx) {
		hif_err("hif_ctx null");
		return;
	}
	hif_clear_ce_stats(&pci_ctx->ce_sc);
}

#define ATH_PCI_PROBE_RETRY_MAX 3
/**
 * hif_pci_open(): hif bus open
 * @hif_ctx: hif context
 * @bus_type: bus type
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	hif_ctx->bus_type = bus_type;
	hif_rtpm_open(hif_ctx);

	qdf_spinlock_create(&sc->irq_lock);

	return hif_ce_open(hif_ctx);
}

/**
 * hif_wake_target_cpu() - wake the target's cpu
 * @scn: hif context
 *
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static void hif_wake_target_cpu(struct hif_softc *scn)
{
	QDF_STATUS rv;
	uint32_t core_ctrl;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	rv = hif_diag_read_access(hif_hdl,
				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				  &core_ctrl);
	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	rv = hif_diag_write_access(hif_hdl,
				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
				   core_ctrl);
	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
}

/**
 * soc_wake_reset() - allow the target to go to sleep
 * @scn: hif_softc
 *
 * Clear the force wake register.  This is done by
 * hif_sleep_entry and by the deferred-sleep cancel path.
 */
static void soc_wake_reset(struct hif_softc *scn)
{
	hif_write32_mb(scn, scn->mem +
		PCIE_LOCAL_BASE_ADDRESS +
		PCIE_SOC_WAKE_ADDRESS,
		PCIE_SOC_WAKE_RESET);
}

/**
 * hif_sleep_entry() - gate target sleep
 * @arg: hif context
 *
 * This function is the callback for the sleep timer.
 * Check if the last force-awake critical section was at least
 * HIF_MIN_SLEEP_INACTIVITY_TIME_MS ago.  If it was, allow the
 * target to go to sleep and cancel the sleep timer; otherwise
 * reschedule the sleep timer.
 */
static void hif_sleep_entry(void *arg)
{
	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
	uint32_t idle_ms;

	if (scn->recovery)
		return;

	if (hif_is_driver_unloading(scn))
		return;

	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	if (hif_state->fake_sleep) {
		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
						    - hif_state->sleep_ticks);
		if (!hif_state->verified_awake &&
		    idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
			if (!qdf_atomic_read(&scn->link_suspended)) {
				soc_wake_reset(scn);
				hif_state->fake_sleep = false;
			}
		} else {
			qdf_timer_stop(&hif_state->sleep_timer);
			qdf_timer_start(&hif_state->sleep_timer,
					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
	}
	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}

#define HIF_HIA_MAX_POLL_LOOP    1000000
#define HIF_HIA_POLLING_DELAY_MS 10
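/*
 * Poll budget for the host-interest-area handshake in hif_set_hia()
 * below; each unsuccessful poll of the scratch register delays
 * HIF_HIA_POLLING_DELAY_MS before retrying.
 */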

#ifdef QCA_HIF_HIA_EXTND

static void hif_set_hia_extnd(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	uint32_t target_type = tgt_info->target_type;

	hif_info("E");

	if ((target_type == TARGET_TYPE_AR900B) ||
			target_type == TARGET_TYPE_QCA9984 ||
			target_type == TARGET_TYPE_QCA9888) {
		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
		 * in RTC space
		 */
		tgt_info->target_revision
			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
					+ CHIP_ID_ADDRESS));
		qdf_print("chip_id 0x%x chip_revision 0x%x",
			  target_type, tgt_info->target_revision);
	}

	{
		uint32_t flag2_value = 0;
		uint32_t flag2_targ_addr =
			host_interest_item_address(target_type,
			offsetof(struct host_interest_s, hi_skip_clock_init));

		if ((ar900b_20_targ_clk != -1) &&
			(frac != -1) && (intval != -1)) {
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
				&flag2_value);
			qdf_print("\n Setting clk_override");
			flag2_value |= CLOCK_OVERRIDE;

			hif_diag_write_access(hif_hdl, flag2_targ_addr,
					flag2_value);
			qdf_print("\n CLOCK PLL val set %d", flag2_value);
		} else {
			qdf_print("\n CLOCK PLL skipped");
		}
	}

	if (target_type == TARGET_TYPE_AR900B
			|| target_type == TARGET_TYPE_QCA9984
			|| target_type == TARGET_TYPE_QCA9888) {

		/* for AR900B 2.0, a 300 MHz clock is used; right now we
		 * assume this is supplied through module parameters.
		 * If not supplied, assume the default or the same behavior
		 * as 1.0. Assume the 1.0 clock can't be tuned; reset to
		 * defaults.
		 */

		qdf_print(KERN_INFO
			  "%s: setting the target pll frac %x intval %x",
			  __func__, frac, intval);

		/* do not touch frac, and int val, let them be default -1,
		 * if desired, host can supply these through module params
		 */
		if (frac != -1 || intval != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr;

			flag2_targ_addr =
				host_interest_item_address(target_type,
				offsetof(struct host_interest_s,
					hi_clock_info));
			hif_diag_read_access(hif_hdl,
				flag2_targ_addr, &flag2_value);
			qdf_print("\n ====> FRAC Val %x Address %x", frac,
				  flag2_value);
			hif_diag_write_access(hif_hdl, flag2_value, frac);
			qdf_print("\n INT Val %x  Address %x",
				  intval, flag2_value + 4);
			hif_diag_write_access(hif_hdl,
					flag2_value + 4, intval);
		} else {
			qdf_print(KERN_INFO
				  "%s: no frac provided, skipping pre-configuring PLL",
				  __func__);
		}

		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
		if ((target_type == TARGET_TYPE_AR900B)
			&& (tgt_info->target_revision == AR900B_REV_2)
			&& ar900b_20_targ_clk != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr;

			flag2_targ_addr
				= host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
					hi_desired_cpu_speed_hz));
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
							&flag2_value);
			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
				  flag2_value);
			hif_diag_write_access(hif_hdl, flag2_value,
				ar900b_20_targ_clk/*300000000u*/);
		} else if (target_type == TARGET_TYPE_QCA9888) {
			uint32_t flag2_targ_addr;

			if (200000000u != qca9888_20_targ_clk) {
				qca9888_20_targ_clk = 300000000u;
				/* Setting the target clock speed to 300 mhz */
			}

			flag2_targ_addr
				= host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
					hi_desired_cpu_speed_hz));
			hif_diag_write_access(hif_hdl, flag2_targ_addr,
				qca9888_20_targ_clk);
		} else {
			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
				  __func__);
		}
	} else {
		if (frac != -1 || intval != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr =
				host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
							hi_clock_info));
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
						&flag2_value);
			qdf_print("\n ====> FRAC Val %x Address %x", frac,
				  flag2_value);
			hif_diag_write_access(hif_hdl, flag2_value, frac);
			qdf_print("\n INT Val %x  Address %x", intval,
				  flag2_value + 4);
			hif_diag_write_access(hif_hdl, flag2_value + 4,
					      intval);
		}
	}
}

#else

static void hif_set_hia_extnd(struct hif_softc *scn)
{
}

#endif

/**
 * hif_set_hia() - fill out the host interest area
 * @scn: hif context
 *
 * This is replaced by hif_wlan_enable for integrated targets.
 * This fills out the host interest area.  The firmware will
 * process these memory addresses when it is first brought out
 * of reset.
 *
 * Return: 0 for success.
 */
static int hif_set_hia(struct hif_softc *scn)
{
	QDF_STATUS rv;
	uint32_t interconnect_targ_addr = 0;
	uint32_t pcie_state_targ_addr = 0;
	uint32_t pipe_cfg_targ_addr = 0;
	uint32_t svc_to_pipe_map = 0;
	uint32_t pcie_config_flags = 0;
	uint32_t flag2_value = 0;
	uint32_t flag2_targ_addr = 0;
#ifdef QCA_WIFI_3_0
	uint32_t host_interest_area = 0;
	uint8_t i;
#else
	uint32_t ealloc_value = 0;
	uint32_t ealloc_targ_addr = 0;
	uint8_t banks_switched = 1;
	uint32_t chip_id;
#endif
	uint32_t pipe_cfg_addr;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	uint32_t target_type = tgt_info->target_type;
	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
	static struct CE_pipe_config *target_ce_config;
	struct service_to_pipe *target_service_to_ce_map;

	hif_info("E");

	hif_get_target_ce_config(scn,
				 &target_ce_config, &target_ce_config_sz,
				 &target_service_to_ce_map,
				 &target_service_to_ce_map_sz,
				 NULL, NULL);

	if (ADRASTEA_BU)
		return 0;

#ifdef QCA_WIFI_3_0
	i = 0;
	while (i < HIF_HIA_MAX_POLL_LOOP) {
		host_interest_area = hif_read32_mb(scn, scn->mem +
						A_SOC_CORE_SCRATCH_0_ADDRESS);
		if ((host_interest_area & 0x01) == 0) {
			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
			host_interest_area = 0;
			i++;
			if (i && (i % 1000 == 0))
				hif_err("hia polling in progress: %d", i);
		} else {
			host_interest_area &= (~0x01);
			hif_write32_mb(scn, scn->mem + 0x113014, 0);
			break;
		}
	}

	if (i >= HIF_HIA_MAX_POLL_LOOP) {
		hif_err("hia polling timeout");
		return -EIO;
	}

	if (host_interest_area == 0) {
		hif_err("host_interest_area = 0");
		return -EIO;
	}

	interconnect_targ_addr = host_interest_area +
			offsetof(struct host_interest_area_t,
			hi_interconnect_state);

	flag2_targ_addr = host_interest_area +
			offsetof(struct host_interest_area_t, hi_option_flag2);

#else
	interconnect_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_interconnect_state));
	ealloc_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_early_alloc));
	flag2_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_option_flag2));
#endif
	/* Supply Target-side CE configuration */
	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
			  &pcie_state_targ_addr);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("interconnect_targ_addr = 0x%0x, ret = %d",
			interconnect_targ_addr, rv);
		goto done;
	}
	if (pcie_state_targ_addr == 0) {
		rv = QDF_STATUS_E_FAILURE;
		hif_err("pcie state addr is 0");
		goto done;
	}
	pipe_cfg_addr = pcie_state_targ_addr +
			  offsetof(struct pcie_state_s,
			  pipe_cfg_addr);
	rv = hif_diag_read_access(hif_hdl,
			  pipe_cfg_addr,
			  &pipe_cfg_targ_addr);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("pipe_cfg_addr = 0x%0x, ret = %d", pipe_cfg_addr, rv);
		goto done;
	}
	if (pipe_cfg_targ_addr == 0) {
		rv = QDF_STATUS_E_FAILURE;
		hif_err("pipe cfg addr is 0");
		goto done;
	}

	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
			(uint8_t *) target_ce_config,
			target_ce_config_sz);

	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("write pipe cfg: %d", rv);
		goto done;
	}

	rv = hif_diag_read_access(hif_hdl,
			  pcie_state_targ_addr +
			  offsetof(struct pcie_state_s,
			   svc_to_pipe_map),
			  &svc_to_pipe_map);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("get svc/pipe map: %d", rv);
		goto done;
	}
	if (svc_to_pipe_map == 0) {
		rv = QDF_STATUS_E_FAILURE;
		hif_err("svc_to_pipe map is 0");
		goto done;
	}

	rv = hif_diag_write_mem(hif_hdl,
			svc_to_pipe_map,
			(uint8_t *) target_service_to_ce_map,
			target_service_to_ce_map_sz);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("write svc/pipe map: %d", rv);
		goto done;
	}

	rv = hif_diag_read_access(hif_hdl,
			pcie_state_targ_addr +
			offsetof(struct pcie_state_s,
			config_flags),
			&pcie_config_flags);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("get pcie config_flags: %d", rv);
		goto done;
	}
#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
#else
	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
#endif
	rv = hif_diag_write_mem(hif_hdl,
			pcie_state_targ_addr +
			offsetof(struct pcie_state_s,
			config_flags),
			(uint8_t *) &pcie_config_flags,
			sizeof(pcie_config_flags));
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("write pcie config_flags: %d", rv);
		goto done;
	}

#ifndef QCA_WIFI_3_0
	/* configure early allocation */
	ealloc_targ_addr = hif_hia_item_address(target_type,
						offsetof(
						struct host_interest_s,
						hi_early_alloc));

	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
			&ealloc_value);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("get early alloc val: %d", rv);
		goto done;
	}

	/* 1 bank is switched to IRAM, except ROME 1.0 */
	ealloc_value |=
		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
		 HI_EARLY_ALLOC_MAGIC_MASK);

	rv = hif_diag_read_access(hif_hdl,
			  CHIP_ID_ADDRESS |
			  RTC_SOC_BASE_ADDRESS, &chip_id);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("get chip id val: %d", rv);
		goto done;
	}
	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
		switch (CHIP_ID_REVISION_GET(chip_id)) {
		case 0x2:       /* ROME 1.3 */
			/* 2 banks are switched to IRAM */
			banks_switched = 2;
			break;
		case 0x4:       /* ROME 2.1 */
		case 0x5:       /* ROME 2.2 */
			banks_switched = 6;
			break;
		case 0x8:       /* ROME 3.0 */
		case 0x9:       /* ROME 3.1 */
		case 0xA:       /* ROME 3.2 */
			banks_switched = 9;
			break;
		case 0x0:       /* ROME 1.0 */
		case 0x1:       /* ROME 1.1 */
		default:
			/* 3 banks are switched to IRAM */
			banks_switched = 3;
			break;
		}
	}

	ealloc_value |=
		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	rv = hif_diag_write_access(hif_hdl,
				ealloc_targ_addr,
				ealloc_value);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("set early alloc val: %d", rv);
		goto done;
	}
#endif
	if ((target_type == TARGET_TYPE_AR900B)
			|| (target_type == TARGET_TYPE_QCA9984)
			|| (target_type == TARGET_TYPE_QCA9888)
			|| (target_type == TARGET_TYPE_AR9888)) {
		hif_set_hia_extnd(scn);
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = hif_hia_item_address(target_type,
						offsetof(
						struct host_interest_s,
						hi_option_flag2));

	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
			  &flag2_value);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("get option val: %d", rv);
		goto done;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
			   flag2_value);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("set option val: %d", rv);
		goto done;
	}

	hif_wake_target_cpu(scn);

done:

	return qdf_status_to_os_return(rv);
}

/**
 * hif_pci_bus_configure() - configure the pcie bus
 * @hif_sc: pointer to the hif context.
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_pci_bus_configure(struct hif_softc *hif_sc)
{
	int status = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);

	hif_ce_prepare_config(hif_sc);

	/* initialize sleep state adjust variables */
	hif_state->keep_awake_count = 0;
	hif_state->fake_sleep = false;
	hif_state->sleep_ticks = 0;

	qdf_timer_init(NULL, &hif_state->sleep_timer,
			       hif_sleep_entry, (void *)hif_state,
			       QDF_TIMER_TYPE_WAKE_APPS);
	hif_state->sleep_timer_init = true;

	status = hif_wlan_enable(hif_sc);
	if (status) {
		hif_err("hif_wlan_enable error: %d", status);
		goto timer_free;
	}

	A_TARGET_ACCESS_LIKELY(hif_sc);

	if ((CONFIG_ATH_PCIE_MAX_PERF ||
	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
	    !ce_srng_based(hif_sc)) {
		/*
		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
		 * prevent sleep when we want to keep firmware always awake
		 * note: when we want to keep firmware always awake,
		 *       hif_target_sleep_state_adjust will point to a dummy
		 *       function, and hif_pci_target_sleep_state_adjust must
		 *       be called instead.
		 * note: bus type check is here because AHB bus is reusing
		 *       hif_pci_bus_configure code.
		 */
		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
			if (hif_pci_target_sleep_state_adjust(hif_sc,
					false, true) < 0) {
				status = -EACCES;
				goto disable_wlan;
			}
		}
	}

	/* todo: consider replacing this with an srng field */
	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA9574) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5332) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6122) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN9160) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
		hif_sc->per_ce_irq = true;
	}

	status = hif_config_ce(hif_sc);
	if (status)
		goto disable_wlan;

	if (hif_needs_bmi(hif_osc)) {
		status = hif_set_hia(hif_sc);
		if (status)
			goto unconfig_ce;

		hif_debug("hif_set_hia done");
	}

	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA9574) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5332) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6122) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN9160) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
		hif_debug("Skip irq config for PCI based 8074 target");
	else {
		status = hif_configure_irq(hif_sc);
		if (status < 0)
			goto unconfig_ce;
	}

	A_TARGET_ACCESS_UNLIKELY(hif_sc);

	return status;

unconfig_ce:
	hif_unconfig_ce(hif_sc);
disable_wlan:
	A_TARGET_ACCESS_UNLIKELY(hif_sc);
	hif_wlan_disable(hif_sc);

timer_free:
	qdf_timer_stop(&hif_state->sleep_timer);
	qdf_timer_free(&hif_state->sleep_timer);
	hif_state->sleep_timer_init = false;

	hif_err("Failed, status: %d", status);
	return status;
}

/**
 * hif_pci_close(): hif bus close
 * @hif_sc: hif context
 *
 * Return: n/a
 */
void hif_pci_close(struct hif_softc *hif_sc)
{
	hif_rtpm_close(hif_sc);
	hif_ce_close(hif_sc);
}

#define BAR_NUM 0

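/*
 * The legacy pci_set_dma_mask()/pci_set_consistent_dma_mask() wrappers
 * were removed from the kernel around v5.18, so newer kernels must use
 * the generic dma_set_mask()/dma_set_coherent_mask() helpers instead.
 */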
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
static inline int hif_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
{
	return dma_set_mask(&pci_dev->dev, mask);
}

static inline int hif_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
						u64 mask)
{
	return dma_set_coherent_mask(&pci_dev->dev, mask);
}
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
static inline int hif_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
{
	return pci_set_dma_mask(pci_dev, mask);
}

static inline int hif_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
						u64 mask)
{
	return pci_set_consistent_dma_mask(pci_dev, mask);
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
1826 
1827 static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
1828 				struct pci_dev *pdev,
1829 				const struct pci_device_id *id)
1830 {
1831 	void __iomem *mem;
1832 	int ret = 0;
1833 	uint16_t device_id = 0;
1834 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1835 
1836 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
1837 	if (device_id != id->device)  {
1838 		hif_err(
1839 		   "dev id mismatch, config id = 0x%x, probing id = 0x%x",
1840 		   device_id, id->device);
1841 		/* pci link is down, so returning with error code */
1842 		return -EIO;
1843 	}
1844 
1845 	/* FIXME: temp. commenting out assign_resource
1846 	 * call for dev_attach to work on 2.6.38 kernel
1847 	 */
1848 #if (!defined(__LINUX_ARM_ARCH__))
1849 	if (pci_assign_resource(pdev, BAR_NUM)) {
1850 		hif_err("pci_assign_resource error");
1851 		return -EIO;
1852 	}
1853 #endif
1854 	if (pci_enable_device(pdev)) {
1855 		hif_err("pci_enable_device error");
1856 		return -EIO;
1857 	}
1858 
1859 	/* Request MMIO resources */
1860 	ret = pci_request_region(pdev, BAR_NUM, "ath");
1861 	if (ret) {
1862 		hif_err("PCI MMIO reservation error");
1863 		ret = -EIO;
1864 		goto err_region;
1865 	}
1866 
1867 #ifdef CONFIG_ARM_LPAE
1868 	/* if CONFIG_ARM_LPAE is enabled, the 64-bit DMA mask must be set
1869 	 * even for 32-bit devices.
1870 	 */
1871 	ret =  hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1872 	if (ret) {
1873 		hif_err("Cannot enable 64-bit pci DMA");
1874 		goto err_dma;
1875 	}
1876 	ret = hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(64));
1877 	if (ret) {
1878 		hif_err("Cannot enable 64-bit DMA");
1879 		goto err_dma;
1880 	}
1881 #else
1882 	ret = hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1883 	if (ret) {
1884 		hif_err("Cannot enable 32-bit pci DMA");
1885 		goto err_dma;
1886 	}
1887 	ret = hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(32));
1888 	if (ret) {
1889 		hif_err("Cannot enable 32-bit coherent DMA!");
1890 		goto err_dma;
1891 	}
1892 #endif
1893 
1894 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
1895 
1896 	/* Set bus master bit in PCI_COMMAND to enable DMA */
1897 	pci_set_master(pdev);
1898 
1899 	/* Arrange for access to Target SoC registers. */
1900 	mem = pci_iomap(pdev, BAR_NUM, 0);
1901 	if (!mem) {
1902 		hif_err("PCI iomap error");
1903 		ret = -EIO;
1904 		goto err_iomap;
1905 	}
1906 
1907 	hif_info("*****BAR is %pK", (void *)mem);
1908 
1909 	sc->mem = mem;
1910 
1911 	/* Hawkeye emulation specific change */
1912 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
1913 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
1914 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
1915 		(device_id == RUMIM2M_DEVICE_ID_NODE3) ||
1916 		(device_id == RUMIM2M_DEVICE_ID_NODE4) ||
1917 		(device_id == RUMIM2M_DEVICE_ID_NODE5)) {
1918 		mem = mem + 0x0c000000;
1919 		sc->mem = mem;
1920 		hif_info("Changing PCI mem base to %pK", sc->mem);
1921 	}
1922 
1923 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
1924 	ol_sc->mem = mem;
1925 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
1926 	sc->pci_enabled = true;
1927 	return ret;
1928 
1929 err_iomap:
1930 	pci_clear_master(pdev);
1931 err_dma:
1932 	pci_release_region(pdev, BAR_NUM);
1933 err_region:
1934 	pci_disable_device(pdev);
1935 	return ret;
1936 }
1937 
1938 static int hif_enable_pci_pld(struct hif_pci_softc *sc,
1939 			      struct pci_dev *pdev,
1940 			      const struct pci_device_id *id)
1941 {
1942 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
1943 	sc->pci_enabled = true;
1944 	return 0;
1945 }
1946 
1947 
1948 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
1949 {
1950 	pci_disable_msi(sc->pdev);
1951 	pci_iounmap(sc->pdev, sc->mem);
1952 	pci_clear_master(sc->pdev);
1953 	pci_release_region(sc->pdev, BAR_NUM);
1954 	pci_disable_device(sc->pdev);
1955 }
1956 
1957 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
1958 
1959 static void hif_disable_pci(struct hif_pci_softc *sc)
1960 {
1961 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1962 
1963 	if (!ol_sc) {
1964 		hif_err("ol_sc = NULL");
1965 		return;
1966 	}
1967 	hif_pci_device_reset(sc);
1968 	sc->hif_pci_deinit(sc);
1969 
1970 	sc->mem = NULL;
1971 	ol_sc->mem = NULL;
1972 }
1973 
1974 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
1975 {
1976 	int ret = 0;
1977 	int targ_awake_limit = 500;
1978 #ifndef QCA_WIFI_3_0
1979 	uint32_t fw_indicator;
1980 #endif
1981 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1982 
1983 	/*
1984 	 * Verify that the Target was started cleanly.
1985 	 * The case where this is most likely is with an AUX-powered
1986 	 * Target and a Host in WoW mode. If the Host crashes,
1987 	 * loses power, or is restarted (without unloading the driver)
1988 	 * then the Target is left (aux) powered and running.  On a
1989 	 * subsequent driver load, the Target is in an unexpected state.
1990 	 * We try to catch that here in order to reset the Target and
1991 	 * retry the probe.
1992 	 */
1993 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
1994 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
1995 	while (!hif_targ_is_awake(scn, sc->mem)) {
1996 		if (0 == targ_awake_limit) {
1997 			hif_err("target awake timeout");
1998 			ret = -EAGAIN;
1999 			goto end;
2000 		}
2001 		qdf_mdelay(1);
2002 		targ_awake_limit--;
2003 	}
2004 
2005 #if PCIE_BAR0_READY_CHECKING
2006 	{
2007 		int wait_limit = 200;
2008 		/* Synchronization point: wait until BAR0 is configured */
2009 		while (wait_limit-- &&
2010 			   !(hif_read32_mb(sc, sc->mem +
2011 					  PCIE_LOCAL_BASE_ADDRESS +
2012 					  PCIE_SOC_RDY_STATUS_ADDRESS)
2013 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
2014 			qdf_mdelay(10);
2015 		}
2016 		if (wait_limit < 0) {
2017 			/* AR6320v1 doesn't support checking of BAR0
2018 			 * configuration; wait up to two sec for BAR0 ready
2019 			 */
2020 			hif_debug("AR6320v1 waits two sec for BAR0");
2021 		}
2022 	}
2023 #endif
2024 
2025 #ifndef QCA_WIFI_3_0
2026 	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
2027 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2028 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2029 
2030 	if (fw_indicator & FW_IND_INITIALIZED) {
2031 		hif_err("Target is in an unknown state. EAGAIN");
2032 		ret = -EAGAIN;
2033 		goto end;
2034 	}
2035 #endif
2036 
2037 end:
2038 	return ret;
2039 }
2040 
2041 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2042 {
2043 	int ret = 0;
2044 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2045 	uint32_t target_type = scn->target_info.target_type;
2046 
2047 	hif_info("E");
2048 
2049 	/* MSI is not supported, or MSI IRQ setup failed */
2050 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2051 	ret = request_irq(sc->pdev->irq,
2052 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2053 			  "wlan_pci", sc);
2054 	if (ret) {
2055 		hif_err("request_irq failed, ret: %d", ret);
2056 		goto end;
2057 	}
2058 	scn->wake_irq = sc->pdev->irq;
2059 	/* Use sc->irq instead of sc->pdev->irq
2060 	 * platform_device pdev doesn't have an irq field
2061 	 */
2062 	sc->irq = sc->pdev->irq;
2063 	/* Use Legacy PCI Interrupts */
2064 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2065 		  PCIE_INTR_ENABLE_ADDRESS),
2066 		  HOST_GROUP0_MASK);
2067 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2068 			       PCIE_INTR_ENABLE_ADDRESS));
2069 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2070 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2071 
2072 	if ((target_type == TARGET_TYPE_AR900B)  ||
2073 			(target_type == TARGET_TYPE_QCA9984) ||
2074 			(target_type == TARGET_TYPE_AR9888) ||
2075 			(target_type == TARGET_TYPE_QCA9888) ||
2076 			(target_type == TARGET_TYPE_AR6320V1) ||
2077 			(target_type == TARGET_TYPE_AR6320V2) ||
2078 			(target_type == TARGET_TYPE_AR6320V3)) {
2079 		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2080 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2081 	}
2082 end:
2083 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2084 			  "%s: X, ret = %d", __func__, ret);
2085 	return ret;
2086 }
2087 
2088 static int hif_ce_srng_free_irq(struct hif_softc *scn)
2089 {
2090 	int ret = 0;
2091 	int ce_id, irq;
2092 	uint32_t msi_data_start;
2093 	uint32_t msi_data_count;
2094 	uint32_t msi_irq_start;
2095 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2096 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
2097 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2098 
2099 	if (!pld_get_enable_intx(scn->qdf_dev->dev)) {
2100 		ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2101 						  &msi_data_count,
2102 						  &msi_data_start,
2103 						  &msi_irq_start);
2104 		if (ret)
2105 			return ret;
2106 	}
2107 
2108 	/* needs to match the ce_id -> irq data mapping
2109 	 * used in the srng parameter configuration
2110 	 */
2111 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2112 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
2113 			continue;
2114 
2115 		if (!ce_sc->tasklets[ce_id].inited)
2116 			continue;
2117 
2118 		irq = sc->ce_irq_num[ce_id];
2119 
2120 		hif_ce_irq_remove_affinity_hint(irq);
2121 
2122 		hif_debug("%s: (ce_id %d, irq %d)", __func__, ce_id, irq);
2123 
2124 		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
2125 	}
2126 
2127 	return ret;
2128 }
2129 
2130 void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2131 {
2132 	int i, j, irq;
2133 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2134 	struct hif_exec_context *hif_ext_group;
2135 
2136 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2137 		hif_ext_group = hif_state->hif_ext_group[i];
2138 		if (hif_ext_group->irq_requested) {
2139 			hif_ext_group->irq_requested = false;
2140 			for (j = 0; j < hif_ext_group->numirq; j++) {
2141 				irq = hif_ext_group->os_irq[j];
2142 				if (scn->irq_unlazy_disable) {
2143 					qdf_dev_clear_irq_status_flags(
2144 							irq,
2145 							QDF_IRQ_DISABLE_UNLAZY);
2146 				}
2147 				pfrm_free_irq(scn->qdf_dev->dev,
2148 					      irq, hif_ext_group);
2149 			}
2150 			hif_ext_group->numirq = 0;
2151 		}
2152 	}
2153 }
2154 
2155 /**
2156  * hif_pci_nointrs(): disable IRQ
2157  *
2158  * This function stops interrupt(s)
2159  *
2160  * @scn: struct hif_softc
2161  *
2162  * Return: none
2163  */
2164 void hif_pci_nointrs(struct hif_softc *scn)
2165 {
2166 	int i, ret;
2167 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2168 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2169 
2170 	scn->free_irq_done = true;
2171 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2172 
2173 	if (scn->request_irq_done == false)
2174 		return;
2175 
2176 	hif_pci_deconfigure_grp_irq(scn);
2177 
2178 	ret = hif_ce_srng_free_irq(scn);
2179 	if (ret != -EINVAL) {
2180 		/* ce irqs freed in hif_ce_srng_free_irq */
2181 
2182 		if (scn->wake_irq)
2183 			pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
2184 		scn->wake_irq = 0;
2185 	} else if (sc->num_msi_intrs > 0) {
2186 		/* MSI interrupt(s) */
2187 		for (i = 0; i < sc->num_msi_intrs; i++)
2188 			free_irq(sc->irq + i, sc);
2189 		sc->num_msi_intrs = 0;
2190 	} else {
2191 		/* Legacy PCI line interrupt
2192 		 * Use sc->irq instead of sc->pdev->irq
2193 		 * platform_device pdev doesn't have an irq field
2194 		 */
2195 		free_irq(sc->irq, sc);
2196 	}
2197 	scn->request_irq_done = false;
2198 }
2199 
2200 static inline
2201 bool hif_pci_default_link_up(struct hif_target_info *tgt_info)
2202 {
2203 	return ADRASTEA_BU &&
2204 	       (tgt_info->target_type != TARGET_TYPE_QCN7605);
2207 }
2208 /**
2209  * hif_pci_disable_bus(): disable the bus
2210  *
2211  * This function disables the bus
2212  *
2213  * @scn: hif context
2214  *
2215  * Return: none
2216  */
2217 void hif_pci_disable_bus(struct hif_softc *scn)
2218 {
2219 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2220 	struct pci_dev *pdev;
2221 	void __iomem *mem;
2222 	struct hif_target_info *tgt_info = &scn->target_info;
2223 
2224 	/* Attach did not succeed, all resources have been
2225 	 * freed in error handler
2226 	 */
2227 	if (!sc)
2228 		return;
2229 
2230 	pdev = sc->pdev;
2231 	if (hif_pci_default_link_up(tgt_info)) {
2232 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2233 
2234 		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2235 		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
2236 			       HOST_GROUP0_MASK);
2237 	}
2238 
2239 #if defined(CPU_WARM_RESET_WAR)
2240 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2241 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2242 	 * verified for AR9888_REV1
2243 	 */
2244 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2245 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2246 		hif_pci_device_warm_reset(sc);
2247 	else
2248 		hif_pci_device_reset(sc);
2249 #else
2250 	hif_pci_device_reset(sc);
2251 #endif
2252 	mem = (void __iomem *)sc->mem;
2253 	if (mem) {
2254 		hif_dump_pipe_debug_count(scn);
2255 		if (scn->athdiag_procfs_inited) {
2256 			athdiag_procfs_remove();
2257 			scn->athdiag_procfs_inited = false;
2258 		}
2259 		sc->hif_pci_deinit(sc);
2260 		scn->mem = NULL;
2261 	}
2262 	hif_info("X");
2263 }
2264 
2265 #define OL_ATH_PCI_PM_CONTROL 0x44
2266 
2267 #ifdef CONFIG_PLD_PCIE_CNSS
2268 /**
2269  * hif_pci_prevent_linkdown(): prevent or allow pcie linkdown
2270  * @flag: true prevents linkdown, false allows
2271  *
2272  * Calls into the platform driver to vote against taking down the
2273  * pcie link.
2274  *
2275  * Return: n/a
2276  */
2277 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2278 {
2279 	int errno;
2280 
2281 	hif_info("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2282 	hif_runtime_prevent_linkdown(scn, flag);
2283 
2284 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2285 	if (errno)
2286 		hif_err("Failed pld_wlan_pm_control; errno %d", errno);
2287 }
2288 #else
2289 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2290 {
2291 }
2292 #endif
2293 
2294 #ifdef CONFIG_PCI_LOW_POWER_INT_REG
2295 /**
2296  * hif_pci_config_low_power_int_register(): configure pci low power
2297  * interrupt register.
2298  * @enable: true to set the bits, false to clear them.
2299  *
2300  * Configure the bits INTR_L1SS and INTR_CLKPM of
2301  * PCIE_LOW_POWER_INT_MASK register.
2302  *
2303  * Return: n/a
2304  */
2305 static void hif_pci_config_low_power_int_register(struct hif_softc *scn,
2306 						  bool enable)
2307 {
2308 	void *address;
2309 	uint32_t value;
2310 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2311 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
2312 	uint32_t target_type = tgt_info->target_type;
2313 
2314 	/*
2315 	 * Only configure the bits INTR_L1SS and INTR_CLKPM of
2316 	 * PCIE_LOW_POWER_INT_MASK register for QCA6174 for high
2317 	 * consumption issue. NFA344A power consumption is above 80mA
2318 	 * after entering Modern Standby. But the power will drop to normal
2319 	 * after PERST# de-assert.
2320 	 */
2321 	if ((target_type == TARGET_TYPE_AR6320) ||
2322 	    (target_type == TARGET_TYPE_AR6320V1) ||
2323 	    (target_type == TARGET_TYPE_AR6320V2) ||
2324 	    (target_type == TARGET_TYPE_AR6320V3)) {
2325 		hif_info("Configure PCI low power int mask register");
2326 
2327 		address = scn->mem + PCIE_LOW_POWER_INT_MASK_OFFSET;
2328 
2329 		/* Configure bit3 INTR_L1SS */
2330 		value = hif_read32_mb(scn, address);
2331 		if (enable)
2332 			value |= INTR_L1SS;
2333 		else
2334 			value &= ~INTR_L1SS;
2335 		hif_write32_mb(scn, address, value);
2336 
2337 		/* Configure bit4 INTR_CLKPM */
2338 		value = hif_read32_mb(scn, address);
2339 		if (enable)
2340 			value |= INTR_CLKPM;
2341 		else
2342 			value &= ~INTR_CLKPM;
2343 		hif_write32_mb(scn, address, value);
2344 	}
2345 }
2346 #else
2347 static inline void hif_pci_config_low_power_int_register(struct hif_softc *scn,
2348 							 bool enable)
2349 {
2350 }
2351 #endif
2352 
2353 /**
2354  * hif_pci_bus_suspend(): prepare hif for suspend
2355  * @scn: hif context
2356  * Return: Errno
2357  */
2358 int hif_pci_bus_suspend(struct hif_softc *scn)
2359 {
2360 	QDF_STATUS ret;
2361 
2362 	hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
2363 
2364 	ret = hif_try_complete_tasks(scn);
2365 	if (QDF_IS_STATUS_ERROR(ret)) {
2366 		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2367 		return -EBUSY;
2368 	}
2369 
2370 	/*
2371 	 * In the unlikely case that the drain loops indefinitely, an
2372 	 * error is returned and the bus suspend is aborted.
2373 	 */
2374 	ret = hif_drain_fw_diag_ce(scn);
2375 	if (ret) {
2376 		hif_err("fw_diag_ce drain did not complete, aborting suspend");
2377 		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2378 		return -EBUSY;
2379 	}
2380 
2381 	/* Stop the HIF Sleep Timer */
2382 	hif_cancel_deferred_target_sleep(scn);
2383 
2384 	/*
2385 	 * Only the bits INTR_L1SS/INTR_CLKPM need to be cleared for suspend.
2386 	 * There is no need to set them again on resume, as firmware will
2387 	 * restore the bits after resume.
2388 	 */
2389 	hif_pci_config_low_power_int_register(scn, false);
2390 
2391 	scn->bus_suspended = true;
2392 
2393 	return 0;
2394 }
2395 
2396 #ifdef PCI_LINK_STATUS_SANITY
2397 /**
2398  * __hif_check_link_status() - API to check if PCIe link is active/not
2399  * @scn: HIF Context
2400  *
2401  * API reads the PCIe config space to verify if PCIe link training is
2402  * successful or not.
2403  *
2404  * Return: Success/Failure
2405  */
2406 static int __hif_check_link_status(struct hif_softc *scn)
2407 {
2408 	uint16_t dev_id = 0;
2409 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2410 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2411 
2412 	if (!sc) {
2413 		hif_err("HIF Bus Context is Invalid");
2414 		return -EINVAL;
2415 	}
2416 
2417 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2418 
2419 	if (dev_id == sc->devid)
2420 		return 0;
2421 
2422 	hif_err("Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2423 	       dev_id);
2424 
2425 	scn->recovery = true;
2426 
2427 	if (cbk && cbk->set_recovery_in_progress)
2428 		cbk->set_recovery_in_progress(cbk->context, true);
2429 	else
2430 		hif_err("Driver Global Recovery is not set");
2431 
2432 	pld_is_pci_link_down(sc->dev);
2433 	return -EACCES;
2434 }
2435 #else
2436 static inline int __hif_check_link_status(struct hif_softc *scn)
2437 {
2438 	return 0;
2439 }
2440 #endif
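
/*
 * Illustrative note (general PCIe behavior, not specific to this driver):
 * when the link is down, config-space reads typically return all ones,
 * so the device-id check above fails in a recognizable way. A sketch,
 * where handle_link_down() is a hypothetical helper:
 *
 *	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
 *	if (dev_id == 0xffff)
 *		handle_link_down();
 */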
2441 
2442 
2443 #ifdef HIF_BUS_LOG_INFO
2444 bool hif_log_pcie_info(struct hif_softc *scn, uint8_t *data,
2445 		       unsigned int *offset)
2446 {
2447 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2448 	struct hang_event_bus_info info = {0};
2449 	size_t size;
2450 
2451 	if (!sc) {
2452 		hif_err("HIF Bus Context is Invalid");
2453 		return false;
2454 	}
2455 
2456 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &info.dev_id);
2457 
2458 	size = sizeof(info);
2459 	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_BUS_INFO,
2460 			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);
2461 
2462 	if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
2463 		return false;
2464 
2465 	qdf_mem_copy(data + *offset, &info, size);
2466 	*offset = *offset + size;
2467 
2468 	if (info.dev_id == sc->devid)
2469 		return false;
2470 
2471 	qdf_recovery_reason_update(QCA_HANG_BUS_FAILURE);
2472 	qdf_get_bus_reg_dump(scn->qdf_dev->dev, data,
2473 			     (QDF_WLAN_HANG_FW_OFFSET - size));
2474 	return true;
2475 }
2476 #endif
2477 
2478 /**
2479  * hif_pci_bus_resume(): prepare hif for resume
2480  * @scn: hif context
2481  * Return: Errno
2482  */
2483 int hif_pci_bus_resume(struct hif_softc *scn)
2484 {
2485 	int errno;
2486 
2487 	scn->bus_suspended = false;
2488 
2489 	errno = __hif_check_link_status(scn);
2490 	if (errno)
2491 		return errno;
2492 
2493 	hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2494 
2495 	return 0;
2496 }
2497 
2498 /**
2499  * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2500  * @scn: hif context
2501  *
2502  * Ensure that if we received the wakeup message before the irq
2503  * was disabled that the message is processed before suspending.
2504  *
2505  * Return: 0 always
2506  */
2507 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2508 {
2509 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2510 		qdf_atomic_set(&scn->link_suspended, 1);
2511 
2512 	return 0;
2513 }
2514 
2515 /**
2516  * hif_pci_bus_resume_noirq() - clear the link suspended flag
2517  * @scn: hif context
2518  *
2519  * Clear the link suspended flag, since the PCIe link has already
2520  * been resumed by the time this function runs.
2521  *
2522  * Return: 0 always
2523  */
2524 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2525 {
2526 	/* A vote for link up can arrive in the middle of the ongoing resume
2527 	 * process, so clear the link suspend flag once
2528 	 * hif_bus_resume_noirq() succeeds, since the PCIe link is already
2529 	 * resumed by this time.
2530 	 */
2531 	qdf_atomic_set(&scn->link_suspended, 0);
2532 
2533 	return 0;
2534 }
2535 
2536 #if CONFIG_PCIE_64BIT_MSI
2537 static void hif_free_msi_ctx(struct hif_softc *scn)
2538 {
2539 	struct hif_pci_softc *sc = scn->hif_sc;
2540 	struct hif_msi_info *info = &sc->msi_info;
2541 	struct device *dev = scn->qdf_dev->dev;
2542 
2543 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
2544 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
2545 	info->magic = NULL;
2546 	info->magic_dma = 0;
2547 }
2548 #else
2549 static void hif_free_msi_ctx(struct hif_softc *scn)
2550 {
2551 }
2552 #endif
2553 
2554 void hif_pci_disable_isr(struct hif_softc *scn)
2555 {
2556 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2557 
2558 	hif_exec_kill(&scn->osc);
2559 	hif_nointrs(scn);
2560 	hif_free_msi_ctx(scn);
2561 	/* Cancel the pending tasklet */
2562 	ce_tasklet_kill(scn);
2563 	tasklet_kill(&sc->intr_tq);
2564 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
2565 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
2566 }
2567 
2568 /* Function to reset SoC */
2569 void hif_pci_reset_soc(struct hif_softc *hif_sc)
2570 {
2571 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
2572 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
2573 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
2574 
2575 #if defined(CPU_WARM_RESET_WAR)
2576 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2577 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2578 	 * verified for AR9888_REV1
2579 	 */
2580 	if (tgt_info->target_version == AR9888_REV2_VERSION)
2581 		hif_pci_device_warm_reset(sc);
2582 	else
2583 		hif_pci_device_reset(sc);
2584 #else
2585 	hif_pci_device_reset(sc);
2586 #endif
2587 }
2588 
2589 /**
2590  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
2591  * @sc: HIF PCIe Context
2592  *
2593  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
2594  *
2595  * Return: -EACCES to indicate failure to the caller
2596  */
2597 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
2598 {
2599 	uint16_t val = 0;
2600 	uint32_t bar = 0;
2601 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
2602 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2603 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
2604 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
2605 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2606 	A_target_id_t pci_addr = scn->mem;
2607 
2608 	hif_info("keep_awake_count = %d", hif_state->keep_awake_count);
2609 
2610 	pfrm_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
2611 
2612 	hif_info("PCI Vendor ID = 0x%04x", val);
2613 
2614 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
2615 
2616 	hif_info("PCI Device ID = 0x%04x", val);
2617 
2618 	pfrm_read_config_word(sc->pdev, PCI_COMMAND, &val);
2619 
2620 	hif_info("PCI Command = 0x%04x", val);
2621 
2622 	pfrm_read_config_word(sc->pdev, PCI_STATUS, &val);
2623 
2624 	hif_info("PCI Status = 0x%04x", val);
2625 
2626 	pfrm_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
2627 
2628 	hif_info("PCI BAR 0 = 0x%08x", bar);
2629 
2630 	hif_info("SOC_WAKE_ADDR 0x%08x",
2631 		hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
2632 				PCIE_SOC_WAKE_ADDRESS));
2633 
2634 	hif_info("RTC_STATE_ADDR 0x%08x",
2635 		hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
2636 							RTC_STATE_ADDRESS));
2637 
2638 	hif_info("wakeup target");
2639 
2640 	if (!cfg->enable_self_recovery)
2641 		QDF_BUG(0);
2642 
2643 	scn->recovery = true;
2644 
2645 	if (cbk->set_recovery_in_progress)
2646 		cbk->set_recovery_in_progress(cbk->context, true);
2647 
2648 	pld_is_pci_link_down(sc->dev);
2649 	return -EACCES;
2650 }
2651 
2652 /*
2653  * For now, we use simple on-demand sleep/wake.
2654  * Some possible improvements:
2655  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
2656  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
2657  *   Careful, though, these functions may be used by
2658  *  interrupt handlers ("atomic")
2659  *  -Don't use host_reg_table for this code; instead use values directly
2660  *  -Use a separate timer to track activity and allow Target to sleep only
2661  *   if it hasn't done anything for a while; may even want to delay some
2662  *   processing for a short while in order to "batch" (e.g.) transmit
2663  *   requests with completion processing into "windows of up time".  Costs
2664  *   some performance, but improves power utilization.
2665  *  -On some platforms, it might be possible to eliminate explicit
2666  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
2667  *   recover from the failure by forcing the Target awake.
2668  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
2669  *   overhead in some cases. Perhaps this makes more sense when
2670  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
2671  *   disabled.
2672  *  -It is possible to compile this code out and simply force the Target
2673  *   to remain awake.  That would yield optimal performance at the cost of
2674  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
2675  *
2676  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
2677  */
2678 /**
2679  * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
2680  * @scn: hif_softc pointer.
2681  * @sleep_ok: allow the target to go to sleep when true
2682  * @wait_for_it: when waking, wait until the target is verified awake
2683  *
2684  * Vote to keep the target awake, or allow it to sleep.
2685  *
2686  * Return: 0 on success, -EACCES on failure
2687  */
2688 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
2689 			      bool sleep_ok, bool wait_for_it)
2690 {
2691 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2692 	A_target_id_t pci_addr = scn->mem;
2693 	static int max_delay;
2694 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2695 	static int debug;

2696 	if (scn->recovery)
2697 		return -EACCES;
2698 
2699 	if (qdf_atomic_read(&scn->link_suspended)) {
2700 		hif_err("Invalid access, PCIe link is down");
2701 		debug = true;
2702 		QDF_ASSERT(0);
2703 		return -EACCES;
2704 	}
2705 
2706 	if (debug) {
2707 		wait_for_it = true;
2708 		hif_err("Invalid access, PCIe link is suspended");
2709 		QDF_ASSERT(0);
2710 	}
2711 
2712 	if (sleep_ok) {
2713 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2714 		hif_state->keep_awake_count--;
2715 		if (hif_state->keep_awake_count == 0) {
2716 			/* Allow sleep */
2717 			hif_state->verified_awake = false;
2718 			hif_state->sleep_ticks = qdf_system_ticks();
2719 		}
2720 		if (hif_state->fake_sleep == false) {
2721 			/* Set the Fake Sleep */
2722 			hif_state->fake_sleep = true;
2723 
2724 			/* Start the Sleep Timer */
2725 			qdf_timer_stop(&hif_state->sleep_timer);
2726 			qdf_timer_start(&hif_state->sleep_timer,
2727 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
2728 		}
2729 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2730 	} else {
2731 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2732 
2733 		if (hif_state->fake_sleep) {
2734 			hif_state->verified_awake = true;
2735 		} else {
2736 			if (hif_state->keep_awake_count == 0) {
2737 				/* Force AWAKE */
2738 				hif_write32_mb(sc, pci_addr +
2739 					      PCIE_LOCAL_BASE_ADDRESS +
2740 					      PCIE_SOC_WAKE_ADDRESS,
2741 					      PCIE_SOC_WAKE_V_MASK);
2742 			}
2743 		}
2744 		hif_state->keep_awake_count++;
2745 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2746 
2747 		if (wait_for_it && !hif_state->verified_awake) {
2748 #define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
2749 			int tot_delay = 0;
2750 			int curr_delay = 5;
2751 
2752 			for (;;) {
2753 				if (hif_targ_is_awake(scn, pci_addr)) {
2754 					hif_state->verified_awake = true;
2755 					break;
2756 				}
2757 				if (!hif_pci_targ_is_present(scn, pci_addr))
2758 					break;
2759 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
2760 					return hif_log_soc_wakeup_timeout(sc);
2761 
2762 				OS_DELAY(curr_delay);
2763 				tot_delay += curr_delay;
2764 
2765 				if (curr_delay < 50)
2766 					curr_delay += 5;
2767 			}
2768 
2769 			/*
2770 			 * NB: If Target has to come out of Deep Sleep,
2771 			 * this may take a few Msecs. Typically, though
2772 			 * this delay should be <30us.
2773 			 */
2774 			if (tot_delay > max_delay)
2775 				max_delay = tot_delay;
2776 		}
2777 	}
2778 
2779 	if (debug && hif_state->verified_awake) {
2780 		debug = 0;
2781 		hif_err("INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
2782 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2783 				PCIE_INTR_ENABLE_ADDRESS),
2784 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2785 				PCIE_INTR_CAUSE_ADDRESS),
2786 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2787 				CPU_INTR_ADDRESS),
2788 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2789 				PCIE_INTR_CLR_ADDRESS),
2790 			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
2791 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
2792 	}
2793 
2794 	return 0;
2795 }
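
/*
 * Usage sketch (illustrative only): callers typically bracket target
 * register accesses with wake/sleep votes layered on top of
 * hif_pci_target_sleep_state_adjust(). The Q_TARGET_ACCESS_BEGIN()/
 * Q_TARGET_ACCESS_END() macro names are assumed from the surrounding
 * HIF layer:
 *
 *	if (Q_TARGET_ACCESS_BEGIN(scn) == 0) {
 *		val = hif_read32_mb(scn, scn->mem + offset);
 *		Q_TARGET_ACCESS_END(scn);
 *	}
 */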
2796 
2797 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2798 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
2799 {
2800 	uint32_t value;
2801 	void *addr;
2802 
2803 	addr = scn->mem + offset;
2804 	value = hif_read32_mb(scn, addr);
2805 
2806 	{
2807 		unsigned long irq_flags;
2808 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2809 
2810 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2811 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2812 		pcie_access_log[idx].is_write = false;
2813 		pcie_access_log[idx].addr = addr;
2814 		pcie_access_log[idx].value = value;
2815 		pcie_access_log_seqnum++;
2816 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2817 	}
2818 
2819 	return value;
2820 }
2821 
2822 void
2823 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
2824 {
2825 	void *addr;
2826 
2827 	addr = scn->mem + (offset);
2828 	hif_write32_mb(scn, addr, value);
2829 
2830 	{
2831 		unsigned long irq_flags;
2832 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2833 
2834 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2835 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2836 		pcie_access_log[idx].is_write = true;
2837 		pcie_access_log[idx].addr = addr;
2838 		pcie_access_log[idx].value = value;
2839 		pcie_access_log_seqnum++;
2840 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2841 	}
2842 }
2843 
2844 /**
2845  * hif_target_dump_access_log() - dump access log
2846  *
2847  * dump access log
2848  *
2849  * Return: n/a
2850  */
2851 void hif_target_dump_access_log(void)
2852 {
2853 	int idx, len, start_idx, cur_idx;
2854 	unsigned long irq_flags;
2855 
2856 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2857 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
2858 		len = PCIE_ACCESS_LOG_NUM;
2859 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2860 	} else {
2861 		len = pcie_access_log_seqnum;
2862 		start_idx = 0;
2863 	}
2864 
2865 	for (idx = 0; idx < len; idx++) {
2866 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
2867 		hif_debug("idx:%d sn:%u wr:%d addr:%pK val:%u",
2868 		       idx,
2869 		       pcie_access_log[cur_idx].seqnum,
2870 		       pcie_access_log[cur_idx].is_write,
2871 		       pcie_access_log[cur_idx].addr,
2872 		       pcie_access_log[cur_idx].value);
2873 	}
2874 
2875 	pcie_access_log_seqnum = 0;
2876 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2877 }
2878 #endif
2879 
2880 #ifndef HIF_AHB
2881 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
2882 {
2883 	QDF_BUG(0);
2884 	return -EINVAL;
2885 }
2886 #endif
2887 
2888 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
2889 {
2890 	struct ce_tasklet_entry *tasklet_entry = context;
2891 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
2892 }
2893 extern const char *ce_name[];
2894 
2895 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
2896 {
2897 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
2898 
2899 	return pci_scn->ce_irq_num[ce_id];
2900 }
2901 
2902 /* hif_ce_srng_msi_irq_disable() - disable the irq for msi
2903  * @hif_sc: hif context
2904  * @ce_id: which ce to disable copy complete interrupts for
2905  *
2906  * since MSI interrupts are not level based, the system can function
2907  * without disabling these interrupts.  Interrupt mitigation can be
2908  * added here for better system performance.
2909  */
2910 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
2911 {
2912 	pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
2913 				hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2914 }
2915 
2916 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
2917 {
2918 	if (__hif_check_link_status(hif_sc))
2919 		return;
2920 
2921 	pfrm_enable_irq(hif_sc->qdf_dev->dev,
2922 			hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2923 }
2924 
2925 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
2926 {
2927 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2928 }
2929 
2930 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
2931 {
2932 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2933 }
2934 
2935 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
2936 /**
2937  * hif_ce_configure_legacyirq() - Configure CE interrupts
2938  * @scn: hif_softc pointer
2939  *
2940  * Configure CE legacy interrupts
2941  *
2942  * Return: int
2943  */
2944 static int hif_ce_configure_legacyirq(struct hif_softc *scn)
2945 {
2946 	int ret = 0;
2947 	int irq, ce_id;
2948 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2949 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
2950 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
2951 	int pci_slot;
2952 	qdf_device_t qdf_dev = scn->qdf_dev;
2953 
2954 	if (!pld_get_enable_intx(scn->qdf_dev->dev))
2955 		return -EINVAL;
2956 
2957 	scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
2958 	scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
2959 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
2960 
2961 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2962 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
2963 			continue;
2964 
2965 		if (host_ce_conf[ce_id].flags & CE_ATTR_INIT_ON_DEMAND)
2966 			continue;
2967 
2968 		ret = pfrm_get_irq(scn->qdf_dev->dev,
2969 				   (struct qdf_pfm_hndl *)qdf_dev->cnss_pdev,
2970 				   legacy_ic_irqname[ce_id], ce_id, &irq);
2971 		if (ret) {
2972 			dev_err(scn->qdf_dev->dev, "get irq failed\n");
2973 			ret = -EFAULT;
2974 			goto skip;
2975 		}
2976 
2977 		pci_slot = hif_get_pci_slot(scn);
2978 		qdf_scnprintf(ce_irqname[pci_slot][ce_id],
2979 			      DP_IRQ_NAME_LEN, "pci%d_ce_%u", pci_slot, ce_id);
2980 		pci_sc->ce_irq_num[ce_id] = irq;
2981 
2982 		ret = pfrm_request_irq(scn->qdf_dev->dev, irq,
2983 				       hif_ce_interrupt_handler,
2984 				       IRQF_SHARED,
2985 				       ce_irqname[pci_slot][ce_id],
2986 				       &ce_sc->tasklets[ce_id]);
2987 		if (ret) {
2988 			hif_err("error = %d", ret);
2989 			return -EINVAL;
2990 		}
2991 	}
2992 
2993 skip:
2994 	return ret;
2995 }
2996 #else
2997 /**
2998  * hif_ce_configure_legacyirq() - Configure CE interrupts
2999  * @scn: hif_softc pointer
3000  *
3001  * Configure CE legacy interrupts
3002  *
3003  * Return: int
3004  */
3005 static int hif_ce_configure_legacyirq(struct hif_softc *scn)
3006 {
3007 	return 0;
3008 }
3009 #endif
3010 
3011 int hif_ce_msi_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
3012 {
3013 	int ret = 0;
3014 	int irq;
3015 	uint32_t msi_data_start;
3016 	uint32_t msi_data_count;
3017 	unsigned int msi_data;
3018 	int irq_id;
3019 	uint32_t msi_irq_start;
3020 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3021 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3022 	int pci_slot;
3023 
3024 	if (ce_id >= CE_COUNT_MAX)
3025 		return -EINVAL;
3026 
3027 	/* do ce irq assignments */
3028 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3029 					  &msi_data_count, &msi_data_start,
3030 					  &msi_irq_start);
3031 
3032 	if (ret) {
3033 		hif_err("Failed to get CE msi config");
3034 		return -EINVAL;
3035 	}
3036 
3037 	irq_id = scn->int_assignment->msi_idx[ce_id];
3038 	/* needs to match the ce_id -> irq data mapping
3039 	 * used in the srng parameter configuration
3040 	 */
3041 	pci_slot = hif_get_pci_slot(scn);
3042 	msi_data = irq_id + msi_irq_start;
3043 	irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3044 	hif_debug("%s: (ce_id %d, irq_id %d, msi_data %d, irq %d tasklet %pK)",
3045 		  __func__, ce_id, irq_id, msi_data, irq,
3046 		  &ce_sc->tasklets[ce_id]);
3047 
3048 	/* implies the ce is also initialized */
3049 	if (!ce_sc->tasklets[ce_id].inited)
3050 		goto skip;
3051 
3052 	pci_sc->ce_irq_num[ce_id] = irq;
3053 
3054 	qdf_scnprintf(ce_irqname[pci_slot][ce_id],
3055 		      DP_IRQ_NAME_LEN, "pci%u_wlan_ce_%u",
3056 		      pci_slot, ce_id);
3057 
3058 	ret = pfrm_request_irq(scn->qdf_dev->dev,
3059 			       irq, hif_ce_interrupt_handler, IRQF_SHARED,
3060 			       ce_irqname[pci_slot][ce_id],
3061 			       &ce_sc->tasklets[ce_id]);
3062 	if (ret)
3063 		return -EINVAL;
3064 
3065 skip:
3066 	return ret;
3067 }
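
/*
 * Worked example (illustrative, with assumed values): if the platform
 * grants msi_irq_start = 1 and int_assignment->msi_idx[ce_id] = 3 for
 * this CE, then msi_data = 3 + 1 = 4 and the OS irq is the value
 * returned by pld_get_msi_irq(dev, 4). The same ce_id -> msi_data
 * mapping must be mirrored in the srng parameter configuration.
 */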
3068 
3069 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
3070 {
3071 	int ret;
3072 	int ce_id, irq;
3073 	uint32_t msi_data_start;
3074 	uint32_t msi_data_count;
3075 	uint32_t msi_irq_start;
3076 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3077 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
3078 
3079 	if (!scn->disable_wake_irq) {
3080 		/* do wake irq assignment */
3081 		ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
3082 						  &msi_data_count,
3083 						  &msi_data_start,
3084 						  &msi_irq_start);
3085 		if (ret)
3086 			return ret;
3087 
3088 		scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev,
3089 						msi_irq_start);
3090 		scn->wake_irq_type = HIF_PM_MSI_WAKE;
3091 
3092 		ret = pfrm_request_irq(scn->qdf_dev->dev, scn->wake_irq,
3093 				       hif_wake_interrupt_handler,
3094 				       IRQF_NO_SUSPEND, "wlan_wake_irq", scn);
3095 
3096 		if (ret)
3097 			return ret;
3098 	}
3099 
3100 	/* do ce irq assignments */
3101 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3102 					  &msi_data_count, &msi_data_start,
3103 					  &msi_irq_start);
3104 	if (ret)
3105 		goto free_wake_irq;
3106 
3107 	if (ce_srng_based(scn)) {
3108 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
3109 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
3110 	} else {
3111 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
3112 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
3113 	}
3114 
3115 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
3116 
3117 	/* needs to match the ce_id -> irq data mapping
3118 	 * used in the srng parameter configuration
3119 	 */
3120 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3121 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3122 			continue;
3123 
3124 		if (host_ce_conf[ce_id].flags & CE_ATTR_INIT_ON_DEMAND)
3125 			continue;
3126 
3127 		ret = hif_ce_msi_configure_irq_by_ceid(scn, ce_id);
3128 		if (ret)
3129 			goto free_irq;
3130 	}
3131 
3132 	return ret;
3133 
3134 free_irq:
3135 	/* the request_irq for the last ce_id failed so skip it. */
3136 	while (ce_id > 0 && ce_id < scn->ce_count) {
3137 		unsigned int msi_data;
3138 
3139 		ce_id--;
3140 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
3141 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3142 		pfrm_free_irq(scn->qdf_dev->dev,
3143 			      irq, &ce_sc->tasklets[ce_id]);
3144 	}
3145 
3146 free_wake_irq:
3147 	if (!scn->disable_wake_irq) {
3148 		pfrm_free_irq(scn->qdf_dev->dev,
3149 			      scn->wake_irq, scn);
3150 		scn->wake_irq = 0;
3151 		scn->wake_irq_type = HIF_PM_INVALID_WAKE;
3152 	}
3153 
3154 	return ret;
3155 }
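
/*
 * Worked example (illustrative, with assumed values): in the unwind path
 * above, with msi_data_count = 5 and msi_irq_start = 1, ce_id = 7 frees
 * msi_data = (7 % 5) + 1 = 3; CE vectors are shared modulo the number of
 * MSI data points granted for "CE".
 */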
3156 
3157 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3158 {
3159 	int i;
3160 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
3161 
3162 	for (i = 0; i < hif_ext_group->numirq; i++)
3163 		pfrm_disable_irq_nosync(scn->qdf_dev->dev,
3164 					hif_ext_group->os_irq[i]);
3165 }
3166 
3167 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3168 {
3169 	int i;
3170 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
3171 
3172 	for (i = 0; i < hif_ext_group->numirq; i++)
3173 		pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]);
3174 }
3175 
3176 /**
3177  * hif_pci_get_irq_name() - get irqname
3178  * This function maps an irq number to its
3179  * irq name.
3180  *
3181  * @irq_no: irq number
3182  *
3183  * Return: irq name
3184  */
3185 const char *hif_pci_get_irq_name(int irq_no)
3186 {
3187 	return "pci-dummy";
3188 }
3189 
3190 #if defined(FEATURE_IRQ_AFFINITY) || defined(HIF_CPU_PERF_AFFINE_MASK)
3191 void hif_pci_irq_set_affinity_hint(struct hif_exec_context *hif_ext_group,
3192 				   bool perf)
3193 {
3194 	int i, ret;
3195 	unsigned int cpus;
3196 	bool mask_set = false;
3197 	int cpu_cluster = perf ? CPU_CLUSTER_TYPE_PERF :
3198 						CPU_CLUSTER_TYPE_LITTLE;
3199 
3200 	for (i = 0; i < hif_ext_group->numirq; i++)
3201 		qdf_cpumask_clear(&hif_ext_group->new_cpu_mask[i]);
3202 
3203 	for (i = 0; i < hif_ext_group->numirq; i++) {
3204 		qdf_for_each_online_cpu(cpus) {
3205 			if (qdf_topology_physical_package_id(cpus) ==
3206 			    cpu_cluster) {
3207 				qdf_cpumask_set_cpu(cpus,
3208 						    &hif_ext_group->
3209 						    new_cpu_mask[i]);
3210 				mask_set = true;
3211 			}
3212 		}
3213 	}
3214 	for (i = 0; i < hif_ext_group->numirq; i++) {
3215 		if (mask_set) {
3216 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3217 						  IRQ_NO_BALANCING, 0);
3218 			ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i],
3219 						       (struct qdf_cpu_mask *)
3220 						       &hif_ext_group->
3221 						       new_cpu_mask[i]);
3222 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3223 						  0, IRQ_NO_BALANCING);
3224 			if (ret)
3225 				qdf_debug("Set affinity %*pbl fails for IRQ %d ",
3226 					  qdf_cpumask_pr_args(&hif_ext_group->
3227 							      new_cpu_mask[i]),
3228 					  hif_ext_group->os_irq[i]);
3229 		} else {
3230 			qdf_debug("Offline CPU: Set affinity fails for IRQ: %d",
3231 				  hif_ext_group->os_irq[i]);
3232 		}
3233 	}
3234 }
3235 #endif
3236 
3237 #ifdef HIF_CPU_PERF_AFFINE_MASK
3238 void hif_pci_ce_irq_set_affinity_hint(
3239 	struct hif_softc *scn)
3240 {
3241 	int ret;
3242 	unsigned int cpus;
3243 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3244 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3245 	struct CE_attr *host_ce_conf;
3246 	int ce_id;
3247 	qdf_cpu_mask ce_cpu_mask;
3248 
3249 	host_ce_conf = ce_sc->host_ce_config;
3250 	qdf_cpumask_clear(&ce_cpu_mask);
3251 
3252 	qdf_for_each_online_cpu(cpus) {
3253 		if (qdf_topology_physical_package_id(cpus) ==
3254 			CPU_CLUSTER_TYPE_PERF) {
3255 			qdf_cpumask_set_cpu(cpus,
3256 					    &ce_cpu_mask);
3257 		} else {
3258 			hif_err_rl("CPU %d is not in the perf cluster, skipping",
3259 				   cpus);
3260 		}
3261 	}
3262 	if (qdf_cpumask_empty(&ce_cpu_mask)) {
3263 		hif_err_rl("Empty cpu_mask, unable to set CE IRQ affinity");
3264 		return;
3265 	}
3266 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3267 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3268 			continue;
3269 		qdf_cpumask_clear(&pci_sc->ce_irq_cpu_mask[ce_id]);
3270 		qdf_cpumask_copy(&pci_sc->ce_irq_cpu_mask[ce_id],
3271 				 &ce_cpu_mask);
3272 		qdf_dev_modify_irq_status(pci_sc->ce_irq_num[ce_id],
3273 					  IRQ_NO_BALANCING, 0);
3274 		ret = qdf_dev_set_irq_affinity(
3275 			pci_sc->ce_irq_num[ce_id],
3276 			(struct qdf_cpu_mask *)&pci_sc->ce_irq_cpu_mask[ce_id]);
3277 		qdf_dev_modify_irq_status(pci_sc->ce_irq_num[ce_id],
3278 					  0, IRQ_NO_BALANCING);
3279 		if (ret)
3280 			hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
3281 				   qdf_cpumask_pr_args(
3282 					&pci_sc->ce_irq_cpu_mask[ce_id]),
3283 				   pci_sc->ce_irq_num[ce_id]);
3284 		else
3285 			hif_debug_rl("Set affinity %*pbl for CE IRQ: %d",
3286 				     qdf_cpumask_pr_args(
3287 					&pci_sc->ce_irq_cpu_mask[ce_id]),
3288 				     pci_sc->ce_irq_num[ce_id]);
3289 	}
3290 }
3291 #endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */
3292 
3293 #ifdef HIF_CPU_CLEAR_AFFINITY
3294 void hif_pci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
3295 					   int intr_ctxt_id, int cpu)
3296 {
3297 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3298 	struct hif_exec_context *hif_ext_group;
3299 	int i, ret;
3300 
3301 	if (intr_ctxt_id < hif_state->hif_num_extgroup) {
3302 		hif_ext_group = hif_state->hif_ext_group[intr_ctxt_id];
3303 
3304 		for (i = 0; i < hif_ext_group->numirq; i++) {
3305 			qdf_cpumask_setall(&hif_ext_group->new_cpu_mask[i]);
3306 			qdf_cpumask_clear_cpu(cpu,
3307 					      &hif_ext_group->new_cpu_mask[i]);
3308 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3309 						  IRQ_NO_BALANCING, 0);
3310 			ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i],
3311 						       (struct qdf_cpu_mask *)
3312 						       &hif_ext_group->
3313 						       new_cpu_mask[i]);
3314 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
3315 						  0, IRQ_NO_BALANCING);
3316 			if (ret)
3317 				hif_err("Set affinity %*pbl fails for IRQ %d ",
3318 					qdf_cpumask_pr_args(&hif_ext_group->
3319 							    new_cpu_mask[i]),
3320 					hif_ext_group->os_irq[i]);
3321 			else
3322 				hif_debug("Set affinity %*pbl for IRQ: %d",
3323 					  qdf_cpumask_pr_args(&hif_ext_group->
3324 							      new_cpu_mask[i]),
3325 					  hif_ext_group->os_irq[i]);
3326 		}
3327 	}
3328 }
3329 #endif
3330 
3331 void hif_pci_config_irq_affinity(struct hif_softc *scn)
3332 {
3333 	int i;
3334 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3335 	struct hif_exec_context *hif_ext_group;
3336 
3337 	hif_core_ctl_set_boost(true);
3338 	/* Set IRQ affinity for WLAN DP interrupts */
3339 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3340 		hif_ext_group = hif_state->hif_ext_group[i];
3341 		hif_pci_irq_set_affinity_hint(hif_ext_group, true);
3342 	}
3343 	/* Set IRQ affinity for CE interrupts */
3344 	hif_pci_ce_irq_set_affinity_hint(scn);
3345 }
3346 
3347 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
3348 /**
3349  * hif_grp_configure_legacyirq() - Configure DP interrupts
3350  * @scn: hif_softc pointer
3351  * @hif_ext_group: hif extended group pointer
3352  *
3353  * Configure DP legacy interrupts
3354  *
3355  * Return: int
3356  */
3357 static int hif_grp_configure_legacyirq(struct hif_softc *scn,
3358 				       struct hif_exec_context *hif_ext_group)
3359 {
3360 	int ret = 0;
3361 	int irq = 0;
3362 	int j;
3363 	int pci_slot;
3364 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3365 	struct pci_dev *pdev = sc->pdev;
3366 	qdf_device_t qdf_dev = scn->qdf_dev;
3367 
3368 	for (j = 0; j < hif_ext_group->numirq; j++) {
3369 		ret = pfrm_get_irq(&pdev->dev,
3370 				   (struct qdf_pfm_hndl *)qdf_dev->cnss_pdev,
3371 				   legacy_ic_irqname[hif_ext_group->irq[j]],
3372 				   hif_ext_group->irq[j], &irq);
3373 		if (ret) {
3374 			dev_err(&pdev->dev, "get irq failed\n");
3375 			return -EFAULT;
3376 		}
3377 		hif_ext_group->os_irq[j] = irq;
3378 	}
3379 
3380 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3381 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3382 	hif_ext_group->irq_name = &hif_pci_get_irq_name;
3383 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3384 
3385 	pci_slot = hif_get_pci_slot(scn);
3386 	for (j = 0; j < hif_ext_group->numirq; j++) {
3387 		irq = hif_ext_group->os_irq[j];
3388 		if (scn->irq_unlazy_disable)
3389 			qdf_dev_set_irq_status_flags(irq,
3390 						     QDF_IRQ_DISABLE_UNLAZY);
3391 
3392 		hif_debug("request_irq = %d for grp %d",
3393 			  irq, hif_ext_group->grp_id);
3394 
3395 		ret = pfrm_request_irq(scn->qdf_dev->dev, irq,
3396 				       hif_ext_group_interrupt_handler,
3397 				       IRQF_SHARED | IRQF_NO_SUSPEND,
3398 				       legacy_ic_irqname[hif_ext_group->irq[j]],
3399 				       hif_ext_group);
3400 		if (ret) {
3401 			hif_err("request_irq failed ret = %d", ret);
3402 			return -EFAULT;
3403 		}
3404 		hif_ext_group->os_irq[j] = irq;
3405 	}
3406 	hif_ext_group->irq_requested = true;
3407 	return 0;
3408 }
3409 #else
3410 /**
3411  * hif_grp_configure_legacyirq() - Configure DP interrupts
3412  * @scn: hif_softc pointer
3413  * @hif_ext_group: hif extended group pointer
3414  *
3415  * Configure DP legacy interrupts
3416  *
3417  * Return: int
3418  */
3419 static int hif_grp_configure_legacyirq(struct hif_softc *scn,
3420 				       struct hif_exec_context *hif_ext_group)
3421 {
3422 	return 0;
3423 }
3424 #endif
3425 
3426 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3427 			      struct hif_exec_context *hif_ext_group)
3428 {
3429 	int ret = 0;
3430 	int irq = 0;
3431 	int j;
3432 	int pci_slot;
3433 
3434 	if (pld_get_enable_intx(scn->qdf_dev->dev))
3435 		return hif_grp_configure_legacyirq(scn, hif_ext_group);
3436 
3437 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3438 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3439 	hif_ext_group->irq_name = &hif_pci_get_irq_name;
3440 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3441 
3442 	pci_slot = hif_get_pci_slot(scn);
3443 	for (j = 0; j < hif_ext_group->numirq; j++) {
3444 		irq = hif_ext_group->irq[j];
3445 		if (scn->irq_unlazy_disable)
3446 			qdf_dev_set_irq_status_flags(irq,
3447 						     QDF_IRQ_DISABLE_UNLAZY);
3448 
3449 		hif_debug("request_irq = %d for grp %d",
3450 			  irq, hif_ext_group->grp_id);
3451 
3452 		qdf_scnprintf(dp_irqname[pci_slot][hif_ext_group->grp_id],
3453 			      DP_IRQ_NAME_LEN, "pci%u_wlan_grp_dp_%u",
3454 			      pci_slot, hif_ext_group->grp_id);
3455 		ret = pfrm_request_irq(
3456 				scn->qdf_dev->dev, irq,
3457 				hif_ext_group_interrupt_handler,
3458 				IRQF_SHARED | IRQF_NO_SUSPEND,
3459 				dp_irqname[pci_slot][hif_ext_group->grp_id],
3460 				hif_ext_group);
3461 		if (ret) {
3462 			hif_err("request_irq failed ret = %d", ret);
3463 			return -EFAULT;
3464 		}
3465 		hif_ext_group->os_irq[j] = irq;
3466 	}
3467 	hif_ext_group->irq_requested = true;
3468 	return 0;
3469 }
3470 
3471 #ifdef FEATURE_IRQ_AFFINITY
3472 void hif_pci_set_grp_intr_affinity(struct hif_softc *scn,
3473 				   uint32_t grp_intr_bitmask, bool perf)
3474 {
3475 	int i;
3476 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3477 	struct hif_exec_context *hif_ext_group;
3478 
3479 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3480 		if (!(grp_intr_bitmask & BIT(i)))
3481 			continue;
3482 
3483 		hif_ext_group = hif_state->hif_ext_group[i];
3484 		hif_pci_irq_set_affinity_hint(hif_ext_group, perf);
3485 		qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
3486 	}
3487 }
3488 #endif
3489 
3490 #if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
3491 	defined(QCA_WIFI_KIWI))
3492 uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
3493 			    uint32_t offset)
3494 {
3495 	return hal_read32_mb(hif_sc->hal_soc, offset);
3496 }
3497 
3498 void hif_pci_reg_write32(struct hif_softc *hif_sc,
3499 			 uint32_t offset,
3500 			 uint32_t value)
3501 {
3502 	hal_write32_mb(hif_sc->hal_soc, offset, value);
3503 }
3504 #else
3505 /* TODO: Need to implement other chips carefully */
3506 uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
3507 			    uint32_t offset)
3508 {
3509 	return 0;
3510 }
3511 
3512 void hif_pci_reg_write32(struct hif_softc *hif_sc,
3513 			 uint32_t offset,
3514 			 uint32_t value)
3515 {
3516 }
3517 #endif
3518 
3519 /**
3520  * hif_configure_irq() - configure interrupt
3521  *
3522  * This function configures interrupt(s)
3523  *
3524  * @scn: hif context
3526  *
3527  * Return: 0 - for success
3528  */
3529 int hif_configure_irq(struct hif_softc *scn)
3530 {
3531 	int ret = 0;
3532 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3533 
3534 	hif_info("E");
3535 
3536 	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
3537 		scn->request_irq_done = false;
3538 		return 0;
3539 	}
3540 
3541 	hif_init_reschedule_tasklet_work(sc);
3542 
3543 	ret = hif_ce_msi_configure_irq(scn);
3544 	if (ret == 0)
3545 		goto end;
3547 
3548 	switch (scn->target_info.target_type) {
3549 	case TARGET_TYPE_QCA8074:
3550 	case TARGET_TYPE_QCA8074V2:
3551 	case TARGET_TYPE_QCA6018:
3552 	case TARGET_TYPE_QCA5018:
3553 	case TARGET_TYPE_QCA5332:
3554 	case TARGET_TYPE_QCA9574:
3555 	case TARGET_TYPE_QCN9160:
3556 		ret = hif_ahb_configure_irq(sc);
3557 		break;
3558 	case TARGET_TYPE_QCN9224:
3559 		ret = hif_ce_configure_legacyirq(scn);
3560 		break;
3561 	default:
3562 		ret = hif_pci_configure_legacy_irq(sc);
3563 		break;
3564 	}
3565 	if (ret < 0) {
3566 		hif_err("error = %d", ret);
3567 		return ret;
3568 	}
3569 end:
3570 	scn->request_irq_done = true;
3571 	return 0;
3572 }
3573 
3574 /**
3575  * hif_trigger_timer_irq() : Triggers interrupt on LF_Timer 0
3576  * @scn: hif control structure
3577  *
3578  * Sets the IRQ bit in the LF Timer Status Address to wake a
3579  * Peregrine/Swift target stuck in a polling loop in pcie_address_config in FW
3580  *
3581  * Return: none
3582  */
3583 static void hif_trigger_timer_irq(struct hif_softc *scn)
3584 {
3585 	int tmp;
3586 	/* Trigger IRQ on Peregrine/Swift by setting
3587 	 * IRQ Bit of LF_TIMER 0
3588 	 */
3589 	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3590 						SOC_LF_TIMER_STATUS0_ADDRESS));
3591 	/* Set Raw IRQ Bit */
3592 	tmp |= 1;
3593 	/* SOC_LF_TIMER_STATUS0 */
3594 	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3595 		       SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
3596 }
3597 
3598 /**
3599  * hif_target_sync() : ensure the target is ready
3600  * @scn: hif control structure
3601  *
3602  * Informs fw that we plan to use legacy interrupts so that
3603  * it can begin booting. Ensures that the fw finishes booting
3604  * before continuing. Should be called before trying to write
3605  * to the target's other registers for the first time.
3606  *
3607  * Return: none
3608  */
3609 static void hif_target_sync(struct hif_softc *scn)
3610 {
3611 	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3612 			    PCIE_INTR_ENABLE_ADDRESS),
3613 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3614 	/* read to flush pcie write */
3615 	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3616 			PCIE_INTR_ENABLE_ADDRESS));
3617 
3618 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3619 			PCIE_SOC_WAKE_ADDRESS,
3620 			PCIE_SOC_WAKE_V_MASK);
3621 	while (!hif_targ_is_awake(scn, scn->mem))
3622 		;
3623 
3624 	if (HAS_FW_INDICATOR) {
3625 		int wait_limit = 500;
3626 		int fw_ind = 0;
3627 		int retry_count = 0;
3628 		uint32_t target_type = scn->target_info.target_type;
3629 fw_retry:
3630 		hif_info("Loop checking FW signal");
3631 		while (1) {
3632 			fw_ind = hif_read32_mb(scn, scn->mem +
3633 					FW_INDICATOR_ADDRESS);
3634 			if (fw_ind & FW_IND_INITIALIZED)
3635 				break;
3636 			if (wait_limit-- < 0)
3637 				break;
3638 			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3639 			    PCIE_INTR_ENABLE_ADDRESS),
3640 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3641 			    /* read to flush pcie write */
3642 			(void)hif_read32_mb(scn, scn->mem +
3643 			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
3644 
3645 			qdf_mdelay(10);
3646 		}
3647 		if (wait_limit < 0) {
3648 			if (target_type == TARGET_TYPE_AR9888 &&
3649 			    retry_count++ < 2) {
3650 				hif_trigger_timer_irq(scn);
3651 				wait_limit = 500;
3652 				goto fw_retry;
3653 			}
3654 			hif_info("FW signal timed out");
3655 			qdf_assert_always(0);
3656 		} else {
3657 			hif_info("Got FW signal, retries = %x", 500-wait_limit);
3658 		}
3659 	}
3660 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3661 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3662 }
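/*
 * Illustrative ordering sketch (not a new API): in the legacy probe path
 * below, hif_pci_enable_bus() is expected to wake the target first and
 * only then call hif_target_sync(), so the FW_IND_INITIALIZED poll above
 * runs against an awake target (error handling elided):
 *
 *	ret = hif_pci_probe_tgt_wakeup(sc);
 *	...
 *	hif_target_sync(ol_sc);
 *	hif_vote_link_up(hif_hdl);
 */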
3663 
3664 static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
3665 				     struct device *dev)
3666 {
3667 	struct pld_soc_info info;
3668 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
3669 
3670 	pld_get_soc_info(dev, &info);
3671 	sc->mem = info.v_addr;
3672 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3673 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3674 	sc->device_version.family_number = info.device_version.family_number;
3675 	sc->device_version.device_number = info.device_version.device_number;
3676 	sc->device_version.major_version = info.device_version.major_version;
3677 	sc->device_version.minor_version = info.device_version.minor_version;
3678 
3679 	hif_info("fam num %u dev ver %u maj ver %u min ver %u",
3680 		 sc->device_version.family_number,
3681 		 sc->device_version.device_number,
3682 		 sc->device_version.major_version,
3683 		 sc->device_version.minor_version);
3684 
3685 	/* dev_mem_info[0] is for CMEM */
3686 	scn->cmem_start = info.dev_mem_info[0].start;
3687 	scn->cmem_size = info.dev_mem_info[0].size;
3688 	scn->target_info.target_version = info.soc_id;
3689 	scn->target_info.target_revision = 0;
3690 	scn->target_info.soc_version = info.device_version.major_version;
3691 }
3692 
3693 static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
3694 				       struct device *dev)
3695 {}
3696 
3697 static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
3698 				    int device_id)
3699 {
3700 	if (!pld_have_platform_driver_support(sc->dev))
3701 		return false;
3702 
3703 	switch (device_id) {
3704 	case QCA6290_DEVICE_ID:
3705 	case QCN9000_DEVICE_ID:
3706 	case QCN9224_DEVICE_ID:
3707 	case QCA6290_EMULATION_DEVICE_ID:
3708 	case QCA6390_DEVICE_ID:
3709 	case QCA6490_DEVICE_ID:
3710 	case AR6320_DEVICE_ID:
3711 	case QCN7605_DEVICE_ID:
3712 	case KIWI_DEVICE_ID:
3713 	case MANGO_DEVICE_ID:
3714 		return true;
3715 	}
3716 	return false;
3717 }
3718 
3719 static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
3720 					   int device_id)
3721 {
3722 	if (hif_is_pld_based_target(sc, device_id)) {
3723 		sc->hif_enable_pci = hif_enable_pci_pld;
3724 		sc->hif_pci_deinit = hif_pci_deinit_pld;
3725 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
3726 	} else {
3727 		sc->hif_enable_pci = hif_enable_pci_nopld;
3728 		sc->hif_pci_deinit = hif_pci_deinit_nopld;
3729 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
3730 	}
3731 }
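/*
 * Illustrative sketch: after the attach above, the probe path invokes
 * the bus ops only through these function pointers, so pld and non-pld
 * targets share one code path (error handling elided; see
 * hif_pci_enable_bus() below):
 *
 *	hif_pci_init_deinit_ops_attach(sc, id->device);
 *	sc->hif_pci_get_soc_info(sc, dev);
 *	ret = sc->hif_enable_pci(sc, pdev, id);
 */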
3732 
3733 #ifdef HIF_REG_WINDOW_SUPPORT
3734 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3735 					       u32 target_type)
3736 {
3737 	switch (target_type) {
3738 	case TARGET_TYPE_QCN7605:
3739 	case TARGET_TYPE_QCA6490:
3740 	case TARGET_TYPE_QCA6390:
3741 	case TARGET_TYPE_KIWI:
3742 	case TARGET_TYPE_MANGO:
3743 		sc->use_register_windowing = true;
3744 		qdf_spinlock_create(&sc->register_access_lock);
3745 		sc->register_window = 0;
3746 		break;
3747 	default:
3748 		sc->use_register_windowing = false;
3749 	}
3750 }
3751 #else
3752 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3753 					       u32 target_type)
3754 {
3755 	sc->use_register_windowing = false;
3756 }
3757 #endif
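/*
 * Illustrative sketch (assumed, simplified; the real helpers live in the
 * register access layer): with use_register_windowing set, an access
 * helper is expected to select the window under register_access_lock
 * before dereferencing the offset. WINDOW_REG_ADDRESS and new_window
 * below are placeholders for the windowing scheme:
 *
 *	qdf_spin_lock_irqsave(&sc->register_access_lock);
 *	if (new_window != sc->register_window) {
 *		qdf_iowrite32(mem + WINDOW_REG_ADDRESS, new_window);
 *		sc->register_window = new_window;
 *	}
 *	val = qdf_ioread32(mem + windowed_offset);
 *	qdf_spin_unlock_irqrestore(&sc->register_access_lock);
 */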
3758 
3759 /**
3760  * hif_pci_enable_bus() - enable the PCI bus
3761  * @ol_sc: soft_sc struct
3762  * @dev: device pointer
3763  * @bdev: bus dev pointer
3764  * @bid: bus id pointer
3765  * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
3766  *
3767  * This function enables the bus
3768  *
3769  * Return: QDF_STATUS
3770  */
3771 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3772 			  struct device *dev, void *bdev,
3773 			  const struct hif_bus_id *bid,
3774 			  enum hif_enable_type type)
3775 {
3776 	int ret = 0;
3777 	uint32_t hif_type;
3778 	uint32_t target_type = TARGET_TYPE_UNKNOWN;
3779 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3780 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3781 	uint16_t revision_id = 0;
3782 	int probe_again = 0;
3783 	struct pci_dev *pdev = bdev;
3784 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3785 	struct hif_target_info *tgt_info;
3786 
3787 	if (!ol_sc) {
3788 		hif_err("hif_ctx is NULL");
3789 		return QDF_STATUS_E_NOMEM;
3790 	}
3791 	/* Following print is used by various tools to identify
3792 	 * WLAN SOC (e.g. crash dump analysis and reporting tool).
3793 	 */
3794 	hif_info("con_mode = 0x%x, WLAN_SOC_device_id = 0x%x",
3795 		 hif_get_conparam(ol_sc), id->device);
3796 
3797 	sc->pdev = pdev;
3798 	sc->dev = &pdev->dev;
3799 	sc->devid = id->device;
3800 	sc->cacheline_sz = dma_get_cache_alignment();
3801 	tgt_info = hif_get_target_info_handle(hif_hdl);
3802 	hif_pci_init_deinit_ops_attach(sc, id->device);
3803 	sc->hif_pci_get_soc_info(sc, dev);
3804 again:
3805 	ret = sc->hif_enable_pci(sc, pdev, id);
3806 	if (ret < 0) {
3807 		hif_err("hif_enable_pci error = %d", ret);
3808 		goto err_enable_pci;
3809 	}
3810 	hif_info("hif_enable_pci done");
3811 
3812 	/* Temporary FIX: disable ASPM on peregrine.
3813 	 * Will be removed after the OTP is programmed
3814 	 */
3815 	hif_disable_power_gating(hif_hdl);
3816 
3817 	device_disable_async_suspend(&pdev->dev);
3818 	pfrm_read_config_word(pdev, 0x08, &revision_id);
3819 
3820 	ret = hif_get_device_type(id->device, revision_id,
3821 						&hif_type, &target_type);
3822 	if (ret < 0) {
3823 		hif_err("Invalid device id/revision_id");
3824 		goto err_tgtstate;
3825 	}
3826 	hif_info("hif_type = 0x%x, target_type = 0x%x",
3827 		hif_type, target_type);
3828 
3829 	hif_register_tbl_attach(ol_sc, hif_type);
3830 	hif_target_register_tbl_attach(ol_sc, target_type);
3831 
3832 	hif_pci_init_reg_windowing_support(sc, target_type);
3833 
3834 	tgt_info->target_type = target_type;
3835 
3836 	/*
3837 	 * Disable unlazy interrupt registration for QCN9000 and QCN9224
3838 	 */
3839 	if (target_type == TARGET_TYPE_QCN9000 ||
3840 	    target_type == TARGET_TYPE_QCN9224)
3841 		ol_sc->irq_unlazy_disable = 1;
3842 
3843 	if (ce_srng_based(ol_sc)) {
3844 		hif_info("Skip tgt_wake up for srng devices");
3845 	} else {
3846 		ret = hif_pci_probe_tgt_wakeup(sc);
3847 		if (ret < 0) {
3848 			hif_err("hif_pci_probe_tgt_wakeup error = %d", ret);
3849 			if (ret == -EAGAIN)
3850 				probe_again++;
3851 			goto err_tgtstate;
3852 		}
3853 		hif_info("hif_pci_probe_tgt_wakeup done");
3854 	}
3855 
3856 	if (!ol_sc->mem_pa) {
3857 		hif_err("BAR0 uninitialized");
3858 		ret = -EIO;
3859 		goto err_tgtstate;
3860 	}
3861 
3862 	if (!ce_srng_based(ol_sc)) {
3863 		hif_target_sync(ol_sc);
3864 
3865 		if (hif_pci_default_link_up(tgt_info))
3866 			hif_vote_link_up(hif_hdl);
3867 	}
3868 
3869 	return QDF_STATUS_SUCCESS;
3870 
3871 err_tgtstate:
3872 	hif_disable_pci(sc);
3873 	sc->pci_enabled = false;
3874 	hif_err("hif_disable_pci done");
3875 	return QDF_STATUS_E_ABORTED;
3876 
3877 err_enable_pci:
3878 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3879 		int delay_time;
3880 
3881 		hif_info("pci reprobe");
3882 		/* 10, 40, 90, 100, 100, ... (ms) */
3883 		delay_time = min(100, 10 * (probe_again * probe_again));
3884 		qdf_mdelay(delay_time);
3885 		goto again;
3886 	}
3887 	return qdf_status_from_os_return(ret);
3888 }
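/*
 * Probe-retry backoff produced by the min() expression above, in ms
 * (assuming ATH_PCI_PROBE_RETRY_MAX allows this many attempts):
 *
 *	probe_again : 1   2   3    4    5
 *	delay_time  : 10  40  90  100  100
 */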
3889 
3890 /**
3891  * hif_pci_irq_enable() - re-enable interrupts after servicing ce_id
3892  * @scn: hif_softc
3893  * @ce_id: ce_id
3894  *
3895  * Return: void
3896  */
3897 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3898 {
3899 	uint32_t tmp = 1 << ce_id;
3900 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3901 
3902 	qdf_spin_lock_irqsave(&sc->irq_lock);
3903 	scn->ce_irq_summary &= ~tmp;
3904 	if (scn->ce_irq_summary == 0) {
3905 		/* Enable Legacy PCI line interrupts */
3906 		if (LEGACY_INTERRUPTS(sc) &&
3907 			(scn->target_status != TARGET_STATUS_RESET) &&
3908 			(!qdf_atomic_read(&scn->link_suspended))) {
3909 
3910 			hif_write32_mb(scn, scn->mem +
3911 				(SOC_CORE_BASE_ADDRESS |
3912 				PCIE_INTR_ENABLE_ADDRESS),
3913 				HOST_GROUP0_MASK);
3914 
3915 			hif_read32_mb(scn, scn->mem +
3916 					(SOC_CORE_BASE_ADDRESS |
3917 					PCIE_INTR_ENABLE_ADDRESS));
3918 		}
3919 	}
3920 	if (scn->hif_init_done == true)
3921 		Q_TARGET_ACCESS_END(scn);
3922 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
3923 
3924 	/* check for missed firmware crash */
3925 	hif_fw_interrupt_handler(0, scn);
3926 }
3927 
3928 /**
3929  * hif_pci_irq_disable() - disable interrupts before servicing ce_id
3930  * @scn: hif_softc
3931  * @ce_id: ce_id
3932  *
3933  * Only applicable to the legacy copy engine.
3934  *
3935  * Return: void
3936  */
3937 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3938 {
3939 	/* For Rome only need to wake up target */
3940 	/* target access is maintained until interrupts are re-enabled */
3941 	Q_TARGET_ACCESS_BEGIN(scn);
3942 }
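/*
 * Illustrative pairing (assumed from the two handlers above): the CE
 * service path brackets copy engine processing with the disable/enable
 * calls, so the target-access vote taken via Q_TARGET_ACCESS_BEGIN in
 * hif_pci_irq_disable() is dropped by Q_TARGET_ACCESS_END in
 * hif_pci_irq_enable() once hif_init_done is set:
 *
 *	hif_pci_irq_disable(scn, ce_id);
 *	...service copy engine ce_id...
 *	hif_pci_irq_enable(scn, ce_id);
 */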
3943 
3944 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3945 {
3946 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3947 
3948 	/* legacy case only has one irq */
3949 	return pci_scn->irq;
3950 }
3951 
3952 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
3953 {
3954 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3955 	struct hif_target_info *tgt_info;
3956 
3957 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
3958 
3959 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
3960 	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
3961 	    tgt_info->target_type == TARGET_TYPE_QCA6490 ||
3962 	    tgt_info->target_type == TARGET_TYPE_QCN7605 ||
3963 	    tgt_info->target_type == TARGET_TYPE_QCA8074 ||
3964 	    tgt_info->target_type == TARGET_TYPE_KIWI ||
3965 	    tgt_info->target_type == TARGET_TYPE_MANGO) {
3966 		/*
3967 		 * Need to consider offset's memtype for QCA6290/QCA8074,
3968 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
3969 		 * well initialized/defined.
3970 		 */
3971 		return 0;
3972 	}
3973 
3974 	if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE) ||
3975 	    (offset + sizeof(unsigned int) <= sc->mem_len)) {
3976 		return 0;
3977 	}
3978 
3979 	hif_info("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)",
3980 		offset, (uint32_t)(offset + sizeof(unsigned int)),
3981 		sc->mem_len);
3982 
3983 	return -EINVAL;
3984 }
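/*
 * Illustrative caller sketch (hypothetical): diagnostic accessors are
 * expected to validate an offset against the boundary check above
 * before dereferencing BAR memory:
 *
 *	if (hif_pci_addr_in_boundary(scn, offset) < 0)
 *		return -EINVAL;
 *	val = hif_read32_mb(scn, scn->mem + offset);
 */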
3985 
3986 /**
3987  * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
3988  * @scn: hif context
3989  *
3990  * Return: true if soc needs driver bmi otherwise false
3991  */
3992 bool hif_pci_needs_bmi(struct hif_softc *scn)
3993 {
3994 	return !ce_srng_based(scn);
3995 }
3996 
3997 #ifdef FORCE_WAKE
3998 #if defined(DEVICE_FORCE_WAKE_ENABLE) && !defined(CONFIG_PLD_PCIE_FW_SIM)
3999 
4000 /**
4001  * HIF_POLL_UMAC_WAKE - poll value indicating the UMAC is powered up.
4002  * Update this macro with the FW-defined value.
4003  */
4004 #define HIF_POLL_UMAC_WAKE 0x2
4005 
4006 /**
4007  * hif_force_wake_request(): Enable the force wake recipe
4008  * @hif_handle: HIF handle
4009  *
4010  * Bring MHI to M0 state and force wake the UMAC by asserting the
4011  * soc wake reg. Polls the scratch reg to check whether it is set to
4012  * HIF_POLL_UMAC_WAKE. The polled value may read 0x1 if the UMAC
4013  * is powered down.
4014  *
4015  * Return: 0 if the handshake succeeds; -EINVAL or -ETIMEDOUT on failure
4016  */
4017 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
4018 {
4019 	uint32_t timeout, value;
4020 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4021 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4022 	int ret, status = 0;
4023 
4024 	/* Prevent runtime PM or trigger resume firstly */
4025 	if (hif_rtpm_get(HIF_RTPM_GET_SYNC, HIF_RTPM_ID_FORCE_WAKE)) {
4026 		hif_err("runtime pm get failed");
4027 		return -EINVAL;
4028 	}
4029 
4030 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
4031 	if (qdf_in_interrupt())
4032 		timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
4033 	else
4034 		timeout = 0;
4035 
4036 	ret = pld_force_wake_request_sync(scn->qdf_dev->dev, timeout);
4037 	if (ret) {
4038 		hif_err("force wake request(timeout %u) send failed: %d",
4039 			timeout, ret);
4040 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
4041 		status = -EINVAL;
4042 		goto release_rtpm_ref;
4043 	}
4044 
4045 	/* If device's M1 state-change event races here, it can be ignored,
4046 	 * as the device is expected to immediately move from M2 to M0
4047 	 * without entering low power state.
4048 	 */
4049 	if (!pld_is_device_awake(scn->qdf_dev->dev))
4050 		hif_info("state-change event races, ignore");
4051 
4052 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
4053 	hif_write32_mb(scn, scn->mem + PCIE_REG_WAKE_UMAC_OFFSET, 1);
4054 	HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1);
4055 	/*
4056 	 * Reset the poll accumulator; the overall budget must satisfy
4057 	 * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME < 50 ms
4058 	 */
4059 	timeout = 0;
4060 	do {
4061 		value = hif_read32_mb(
4062 				scn, scn->mem +
4063 				PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG);
4064 		if (value == HIF_POLL_UMAC_WAKE)
4065 			break;
4066 		qdf_mdelay(FORCE_WAKE_DELAY_MS);
4067 		timeout += FORCE_WAKE_DELAY_MS;
4068 	} while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS);
4069 
4070 	if (value != HIF_POLL_UMAC_WAKE) {
4071 		hif_err("force wake handshake failed, reg value = 0x%x",
4072 			value);
4073 		HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1);
4074 		status = -ETIMEDOUT;
4075 		goto release_rtpm_ref;
4076 	}
4077 
4078 	HIF_STATS_INC(pci_scn, soc_force_wake_success, 1);
4079 	return 0;
4080 
4081 release_rtpm_ref:
4082 	/* Release runtime PM force wake */
4083 	ret = hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_FORCE_WAKE);
4084 	if (ret) {
4085 		hif_err("runtime pm put failure: %d", ret);
4086 		return ret;
4087 	}
4088 
4089 	return status;
4090 }
4091 
4092 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
4093 {
4094 	int ret;
4095 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4096 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4097 
4098 	/* Release umac force wake */
4099 	hif_write32_mb(scn, scn->mem + PCIE_REG_WAKE_UMAC_OFFSET, 0);
4100 
4101 	/* Release MHI force wake */
4102 	ret = pld_force_wake_release(scn->qdf_dev->dev);
4103 	if (ret) {
4104 		hif_err("pld force wake release failure");
4105 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
4106 		return ret;
4107 	}
4108 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
4109 
4110 	/* Release runtime PM force wake */
4111 	ret = hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_FORCE_WAKE);
4112 	if (ret) {
4113 		hif_err("runtime pm put failure");
4114 		return ret;
4115 	}
4116 
4117 	HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1);
4118 	return 0;
4119 }
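/*
 * Illustrative usage (assumed; error handling elided): request and
 * release are expected to be balanced around direct UMAC register
 * access, since the request path holds an RTPM vote until release:
 *
 *	if (hif_force_wake_request(hif_hdl))
 *		return;
 *	...access UMAC registers...
 *	hif_force_wake_release(hif_hdl);
 */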
4120 
4121 #else /* DEVICE_FORCE_WAKE_ENABLE */
4122 /** hif_force_wake_request() - MHI-only force wake; skips the PCIE
4123  * scratch register write/read handshake
4124  *
4125  * Return: 0 on success, -EINVAL on failure
4126  */
4127 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
4128 {
4129 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4130 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4131 	uint32_t timeout;
4132 	int ret;
4133 
4134 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
4135 
4136 	if (qdf_in_interrupt())
4137 		timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
4138 	else
4139 		timeout = 0;
4140 
4141 	ret = pld_force_wake_request_sync(scn->qdf_dev->dev, timeout);
4142 	if (ret) {
4143 		hif_err("force wake request(timeout %u) send failed: %d",
4144 			timeout, ret);
4145 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
4146 		return -EINVAL;
4147 	}
4148 
4149 	/* If device's M1 state-change event races here, it can be ignored,
4150 	 * as the device is expected to immediately move from M2 to M0
4151 	 * without entering low power state.
4152 	 */
4153 	if (!pld_is_device_awake(scn->qdf_dev->dev))
4154 		hif_info("state-change event races, ignore");
4155 
4156 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
4157 
4158 	return 0;
4159 }
4160 
4161 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
4162 {
4163 	int ret;
4164 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4165 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4166 
4167 	ret = pld_force_wake_release(scn->qdf_dev->dev);
4168 	if (ret) {
4169 		hif_err("force wake release failure");
4170 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
4171 		return ret;
4172 	}
4173 
4174 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
4175 	return 0;
4176 }
4177 #endif /* DEVICE_FORCE_WAKE_ENABLE */
4178 
4179 void hif_print_pci_stats(struct hif_pci_softc *pci_handle)
4180 {
4181 	hif_debug("mhi_force_wake_request_vote: %d",
4182 		  pci_handle->stats.mhi_force_wake_request_vote);
4183 	hif_debug("mhi_force_wake_failure: %d",
4184 		  pci_handle->stats.mhi_force_wake_failure);
4185 	hif_debug("mhi_force_wake_success: %d",
4186 		  pci_handle->stats.mhi_force_wake_success);
4187 	hif_debug("soc_force_wake_register_write_success: %d",
4188 		  pci_handle->stats.soc_force_wake_register_write_success);
4189 	hif_debug("soc_force_wake_failure: %d",
4190 		  pci_handle->stats.soc_force_wake_failure);
4191 	hif_debug("soc_force_wake_success: %d",
4192 		  pci_handle->stats.soc_force_wake_success);
4193 	hif_debug("mhi_force_wake_release_failure: %d",
4194 		  pci_handle->stats.mhi_force_wake_release_failure);
4195 	hif_debug("mhi_force_wake_release_success: %d",
4196 		  pci_handle->stats.mhi_force_wake_release_success);
4197 	hif_debug("soc_force_wake_release_success: %d",
4198 		  pci_handle->stats.soc_force_wake_release_success);
4199 }
4200 #endif /* FORCE_WAKE */
4201 
4202 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
4203 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
4204 {
4205 	return pld_prevent_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
4206 }
4207 
4208 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
4209 {
4210 	pld_allow_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
4211 }
4212 #endif
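/*
 * Illustrative pairing (assumed): a delayed register write flush is
 * expected to bracket its write burst with these calls so the PCIe link
 * cannot drop to L1 mid-flush:
 *
 *	if (hif_prevent_link_low_power_states(hif))
 *		return;
 *	...flush delayed register writes...
 *	hif_allow_link_low_power_states(hif);
 */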
4213