xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/pcie/if_pci.c (revision f28396d060cff5c6519f883cb28ae0116ce479f1)
1 /*
2  * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <linux/pci.h>
20 #include <linux/slab.h>
21 #include <linux/interrupt.h>
22 #include <linux/if_arp.h>
23 #ifdef CONFIG_PCI_MSM
24 #include <linux/msm_pcie.h>
25 #endif
26 #include "hif_io32.h"
27 #include "if_pci.h"
28 #include "hif.h"
29 #include "target_type.h"
30 #include "hif_main.h"
31 #include "ce_main.h"
32 #include "ce_api.h"
33 #include "ce_internal.h"
34 #include "ce_reg.h"
35 #include "ce_bmi.h"
36 #include "regtable.h"
37 #include "hif_hw_version.h"
38 #include <linux/debugfs.h>
39 #include <linux/seq_file.h>
40 #include "qdf_status.h"
41 #include "qdf_atomic.h"
42 #include "pld_common.h"
43 #include "mp_dev.h"
44 #include "hif_debug.h"
45 
46 #include "if_pci_internal.h"
47 #include "ce_tasklet.h"
48 #include "targaddrs.h"
49 #include "hif_exec.h"
50 
51 #include "pci_api.h"
52 #include "ahb_api.h"
53 
54 /* Maximum ms timeout for host to wake up target */
55 #define PCIE_WAKE_TIMEOUT 1000
56 #define RAMDUMP_EVENT_TIMEOUT 2500
57 
58 /* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
59  * PCIe data bus errors.
60  * As a workaround for this issue, the reset sequence is changed to
61  * use a Target CPU warm reset instead of SOC_GLOBAL_RESET.
62  */
63 #define CPU_WARM_RESET_WAR
64 
65 /*
66  * Top-level interrupt handler for all PCI interrupts from a Target.
67  * When a block of MSI interrupts is allocated, this top-level handler
68  * is not used; instead, we directly call the correct sub-handler.
69  */
70 struct ce_irq_reg_table {
71 	uint32_t irq_enable;
72 	uint32_t irq_status;
73 };
74 
75 #ifndef QCA_WIFI_3_0_ADRASTEA
76 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
77 {
78 }
79 #else
80 static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
81 {
82 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
83 	unsigned int target_enable0, target_enable1;
84 	unsigned int target_cause0, target_cause1;
85 
86 	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
87 	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
88 	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
89 	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);
90 
91 	if ((target_enable0 & target_cause0) ||
92 	    (target_enable1 & target_cause1)) {
93 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
94 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);
95 
96 		if (scn->notice_send)
97 			pld_intr_notify_q6(sc->dev);
98 	}
99 }
100 #endif
101 
102 
103 /**
104  * pci_dispatch_interrupt() - dispatch any pending copy engine interrupts
105  * @scn: hif_softc context
106  *
107  * Return: N/A
108  */
109 static void pci_dispatch_interrupt(struct hif_softc *scn)
110 {
111 	uint32_t intr_summary;
112 	int id;
113 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
114 
115 	if (scn->hif_init_done != true)
116 		return;
117 
118 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
119 		return;
120 
121 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
122 
123 	if (intr_summary == 0) {
124 		if ((scn->target_status != TARGET_STATUS_RESET) &&
125 			(!qdf_atomic_read(&scn->link_suspended))) {
126 
127 			hif_write32_mb(scn, scn->mem +
128 				(SOC_CORE_BASE_ADDRESS |
129 				PCIE_INTR_ENABLE_ADDRESS),
130 				HOST_GROUP0_MASK);
131 
132 			hif_read32_mb(scn, scn->mem +
133 					(SOC_CORE_BASE_ADDRESS |
134 					PCIE_INTR_ENABLE_ADDRESS));
135 		}
136 		Q_TARGET_ACCESS_END(scn);
137 		return;
138 	}
139 	Q_TARGET_ACCESS_END(scn);
140 
141 	scn->ce_irq_summary = intr_summary;
142 	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
143 		if (intr_summary & (1 << id)) {
144 			intr_summary &= ~(1 << id);
145 			ce_dispatch_interrupt(id,  &hif_state->tasklets[id]);
146 		}
147 	}
148 }
149 
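/**
 * hif_pci_legacy_ce_interrupt_handler() - top-level handler for legacy/line
 *	interrupts
 * @irq: irq number
 * @arg: hif_pci_softc context registered with request_irq()
 *
 * For legacy (non-MSI) interrupts this disables and clears the PCI line
 * interrupt, for non-Adrastea parts flushes the posted write with a
 * read-back (dumping PCI config and wake registers if the SoC returns
 * 0xdeadbeef), checks the firmware indicator for a pending SSR event and,
 * depending on the result, either schedules the wlan tasklet or dispatches
 * the pending copy engine interrupts.
 *
 * Return: IRQ_HANDLED
 */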
150 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
151 {
152 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
153 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
154 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
155 
156 	volatile int tmp;
157 	uint16_t val = 0;
158 	uint32_t bar0 = 0;
159 	uint32_t fw_indicator_address, fw_indicator;
160 	bool ssr_irq = false;
161 	unsigned int host_cause, host_enable;
162 
163 	if (LEGACY_INTERRUPTS(sc)) {
164 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
165 			return IRQ_HANDLED;
166 
167 		if (ADRASTEA_BU) {
168 			host_enable = hif_read32_mb(sc, sc->mem +
169 						    PCIE_INTR_ENABLE_ADDRESS);
170 			host_cause = hif_read32_mb(sc, sc->mem +
171 						   PCIE_INTR_CAUSE_ADDRESS);
172 			if (!(host_enable & host_cause)) {
173 				hif_pci_route_adrastea_interrupt(sc);
174 				return IRQ_HANDLED;
175 			}
176 		}
177 
178 		/* Clear Legacy PCI line interrupts
179 		 * IMPORTANT: the INTR_CLR register has to be set
180 		 * after INTR_ENABLE is set to 0,
181 		 * otherwise the interrupt cannot really be cleared
182 		 */
183 		hif_write32_mb(sc, sc->mem +
184 			      (SOC_CORE_BASE_ADDRESS |
185 			       PCIE_INTR_ENABLE_ADDRESS), 0);
186 
187 		hif_write32_mb(sc, sc->mem +
188 			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
189 			       ADRASTEA_BU ?
190 			       (host_enable & host_cause) :
191 			      HOST_GROUP0_MASK);
192 
193 		if (ADRASTEA_BU)
194 			hif_write32_mb(sc, sc->mem + 0x2f100c,
195 				       (host_cause >> 1));
196 
197 		/* IMPORTANT: this extra read transaction is required to
198 		 * flush the posted write buffer
199 		 */
200 		if (!ADRASTEA_BU) {
201 		tmp =
202 			hif_read32_mb(sc, sc->mem +
203 				     (SOC_CORE_BASE_ADDRESS |
204 				      PCIE_INTR_ENABLE_ADDRESS));
205 
206 		if (tmp == 0xdeadbeef) {
207 			HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
208 			       __func__);
209 
210 			pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
211 			HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
212 			       __func__, val);
213 
214 			pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
215 			HIF_ERROR("%s: PCI Device ID = 0x%04x",
216 			       __func__, val);
217 
218 			pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
219 			HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
220 			       val);
221 
222 			pci_read_config_word(sc->pdev, PCI_STATUS, &val);
223 			HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
224 			       val);
225 
226 			pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
227 					      &bar0);
228 			HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
229 			       bar0);
230 
231 			HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
232 				  __func__,
233 				  hif_read32_mb(sc, sc->mem +
234 						PCIE_LOCAL_BASE_ADDRESS
235 						+ RTC_STATE_ADDRESS));
236 			HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
237 				  __func__,
238 				  hif_read32_mb(sc, sc->mem +
239 						PCIE_LOCAL_BASE_ADDRESS
240 						+ PCIE_SOC_WAKE_ADDRESS));
241 			HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
242 				  __func__,
243 				  hif_read32_mb(sc, sc->mem + 0x80008),
244 				  hif_read32_mb(sc, sc->mem + 0x8000c));
245 			HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
246 				  __func__,
247 				  hif_read32_mb(sc, sc->mem + 0x80010),
248 				  hif_read32_mb(sc, sc->mem + 0x80014));
249 			HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
250 				  __func__,
251 				  hif_read32_mb(sc, sc->mem + 0x80018),
252 				  hif_read32_mb(sc, sc->mem + 0x8001c));
253 			QDF_BUG(0);
254 		}
255 
256 		PCI_CLR_CAUSE0_REGISTER(sc);
257 		}
258 
259 		if (HAS_FW_INDICATOR) {
260 			fw_indicator_address = hif_state->fw_indicator_address;
261 			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
262 			if ((fw_indicator != ~0) &&
263 			   (fw_indicator & FW_IND_EVENT_PENDING))
264 				ssr_irq = true;
265 		}
266 
267 		if (Q_TARGET_ACCESS_END(scn) < 0)
268 			return IRQ_HANDLED;
269 	}
270 	/* TBDXXX: Add support for WMAC */
271 
272 	if (ssr_irq) {
273 		sc->irq_event = irq;
274 		qdf_atomic_set(&scn->tasklet_from_intr, 1);
275 
276 		qdf_atomic_inc(&scn->active_tasklet_cnt);
277 		tasklet_schedule(&sc->intr_tq);
278 	} else {
279 		pci_dispatch_interrupt(scn);
280 	}
281 
282 	return IRQ_HANDLED;
283 }
284 
285 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
286 {
287 	return true;            /* FIX THIS */
288 }
289 
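/**
 * hif_get_irq_num() - copy the irq numbers used by this device into a buffer
 * @scn: hif context
 * @irq: caller-allocated array to be filled with irq numbers
 * @size: number of entries available in @irq
 *
 * With legacy interrupts or a single MSI, only irq[0] is filled and 1 is
 * returned; otherwise the MSI vectors starting at MSI_ASSIGN_CE_INITIAL are
 * returned, provided they fit in the caller's buffer.
 *
 * Return: number of irqs copied, or -EINVAL on a bad/too-small buffer
 */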
290 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
291 {
292 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
293 	int i = 0;
294 
295 	if (!irq || !size) {
296 		return -EINVAL;
297 	}
298 
299 	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
300 		irq[0] = sc->irq;
301 		return 1;
302 	}
303 
304 	if (sc->num_msi_intrs > size) {
305 		qdf_print("Not enough space in irq buffer to return irqs");
306 		return -EINVAL;
307 	}
308 
309 	for (i = 0; i < sc->num_msi_intrs; i++) {
310 		irq[i] = sc->irq +  i + MSI_ASSIGN_CE_INITIAL;
311 	}
312 
313 	return sc->num_msi_intrs;
314 }
315 
316 
317 /**
318  * hif_pci_cancel_deferred_target_sleep() - cancel the deferred target sleep
319  * @scn: hif_softc
320  *
321  * Return: void
322  */
323 #if CONFIG_ATH_PCIE_MAX_PERF == 0
324 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
325 {
326 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
327 	A_target_id_t pci_addr = scn->mem;
328 
329 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
330 	/*
331 	 * If the deferred sleep timer is running, cancel it
332 	 * and put the SoC into sleep.
333 	 */
334 	if (hif_state->fake_sleep == true) {
335 		qdf_timer_stop(&hif_state->sleep_timer);
336 		if (hif_state->verified_awake == false) {
337 			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
338 				      PCIE_SOC_WAKE_ADDRESS,
339 				      PCIE_SOC_WAKE_RESET);
340 		}
341 		hif_state->fake_sleep = false;
342 	}
343 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
344 }
345 #else
346 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
347 {
348 }
349 #endif
350 
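/*
 * Helpers for accessing registers in the PCIe local register space; the
 * offset passed in is relative to PCIE_LOCAL_BASE_ADDRESS.
 *
 * Illustrative usage (mirroring hif_pci_device_reset() below):
 *
 *	val = A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS);
 *	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
 *			       PCIE_SOC_WAKE_V_MASK);
 */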
351 #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
352 	hif_read32_mb(sc, (char *)(mem) + \
353 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
354 
355 #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
356 	hif_write32_mb(sc, ((char *)(mem) + \
357 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
358 
359 #ifdef QCA_WIFI_3_0
360 /**
361  * hif_targ_is_awake() - check to see if the target is awake
362  * @hif_ctx: hif context
363  *
364  * emulation never goes to sleep
365  *
366  * Return: true if target is awake
367  */
368 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
369 {
370 	return true;
371 }
372 #else
373 /**
374  * hif_targ_is_awake() - check to see if the target is awake
375  * @scn: hif context
376  *
377  * Return: true if the target's clocks are on
378  */
379 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
380 {
381 	uint32_t val;
382 
383 	if (scn->recovery)
384 		return false;
385 	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
386 		+ RTC_STATE_ADDRESS);
387 	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
388 }
389 #endif
390 
391 #define ATH_PCI_RESET_WAIT_MAX 10       /* Ms */
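/**
 * hif_pci_device_reset() - cold reset the target over PCIe
 * @sc: pci bus handle
 *
 * Forces the SoC awake, asserts SOC_GLOBAL_RESET while polling RTC_STATE
 * for the cold reset indication, de-asserts the reset and finally lets the
 * SoC go back to sleep.  Each poll loop is bounded by
 * ATH_PCI_RESET_WAIT_MAX iterations of 1 ms.
 */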
392 static void hif_pci_device_reset(struct hif_pci_softc *sc)
393 {
394 	void __iomem *mem = sc->mem;
395 	int i;
396 	uint32_t val;
397 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
398 
399 	if (!scn->hostdef)
400 		return;
401 
402 	/* NB: Don't check resetok here.  This form of reset
403 	 * is integral to correct operation.
404 	 */
405 
406 	if (!SOC_GLOBAL_RESET_ADDRESS)
407 		return;
408 
409 	if (!mem)
410 		return;
411 
412 	HIF_ERROR("%s: Reset Device", __func__);
413 
414 	/*
415 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
416 	 * writing WAKE_V, the Target may scribble over Host memory!
417 	 */
418 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
419 			       PCIE_SOC_WAKE_V_MASK);
420 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
421 		if (hif_targ_is_awake(scn, mem))
422 			break;
423 
424 		qdf_mdelay(1);
425 	}
426 
427 	/* Put Target, including PCIe, into RESET. */
428 	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
429 	val |= 1;
430 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
431 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
432 		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
433 		    RTC_STATE_COLD_RESET_MASK)
434 			break;
435 
436 		qdf_mdelay(1);
437 	}
438 
439 	/* Pull Target, including PCIe, out of RESET. */
440 	val &= ~1;
441 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
442 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
443 		if (!
444 		    (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
445 		     RTC_STATE_COLD_RESET_MASK))
446 			break;
447 
448 		qdf_mdelay(1);
449 	}
450 
451 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
452 			       PCIE_SOC_WAKE_RESET);
453 }
454 
455 /* CPU warm reset function
456  * Steps:
457  * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
458  * 2. Clear the FW_INDICATOR_ADDRESS - so the Target CPU initializes FW
459  *    correctly on WARM reset
460  * 3. Clear TARGET CPU LF timer interrupt
461  * 4. Reset all CEs to clear any pending CE transactions
462  * 5. Warm reset CPU
463  */
464 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
465 {
466 	void __iomem *mem = sc->mem;
467 	int i;
468 	uint32_t val;
469 	uint32_t fw_indicator;
470 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
471 
472 	/* NB: Don't check resetok here.  This form of reset is
473 	 * integral to correct operation.
474 	 */
475 
476 	if (!mem)
477 		return;
478 
479 	HIF_INFO_MED("%s: Target Warm Reset", __func__);
480 
481 	/*
482 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
483 	 * writing WAKE_V, the Target may scribble over Host memory!
484 	 */
485 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
486 			       PCIE_SOC_WAKE_V_MASK);
487 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
488 		if (hif_targ_is_awake(scn, mem))
489 			break;
490 		qdf_mdelay(1);
491 	}
492 
493 	/*
494 	 * Disable Pending interrupts
495 	 */
496 	val =
497 		hif_read32_mb(sc, mem +
498 			     (SOC_CORE_BASE_ADDRESS |
499 			      PCIE_INTR_CAUSE_ADDRESS));
500 	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
501 		    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
502 	/* Target CPU Intr Cause */
503 	val = hif_read32_mb(sc, mem +
504 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
505 	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);
506 
507 	val =
508 		hif_read32_mb(sc, mem +
509 			     (SOC_CORE_BASE_ADDRESS |
510 			      PCIE_INTR_ENABLE_ADDRESS));
511 	hif_write32_mb(sc, (mem +
512 		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
513 	hif_write32_mb(sc, (mem +
514 		       (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
515 		       HOST_GROUP0_MASK);
516 
517 	qdf_mdelay(100);
518 
519 	/* Clear FW_INDICATOR_ADDRESS */
520 	if (HAS_FW_INDICATOR) {
521 		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
522 		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
523 	}
524 
525 	/* Clear Target LF Timer interrupts */
526 	val =
527 		hif_read32_mb(sc, mem +
528 			     (RTC_SOC_BASE_ADDRESS +
529 			      SOC_LF_TIMER_CONTROL0_ADDRESS));
530 	HIF_INFO_MED("%s: addr 0x%x :  0x%x", __func__,
531 	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
532 	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
533 	hif_write32_mb(sc, mem +
534 		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
535 		      val);
536 
537 	/* Reset CE */
538 	val =
539 		hif_read32_mb(sc, mem +
540 			     (RTC_SOC_BASE_ADDRESS |
541 			      SOC_RESET_CONTROL_ADDRESS));
542 	val |= SOC_RESET_CONTROL_CE_RST_MASK;
543 	hif_write32_mb(sc, (mem +
544 		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
545 		      val);
546 	val =
547 		hif_read32_mb(sc, mem +
548 			     (RTC_SOC_BASE_ADDRESS |
549 			      SOC_RESET_CONTROL_ADDRESS));
550 	qdf_mdelay(10);
551 
552 	/* CE unreset */
553 	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
554 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
555 		       SOC_RESET_CONTROL_ADDRESS), val);
556 	val =
557 		hif_read32_mb(sc, mem +
558 			     (RTC_SOC_BASE_ADDRESS |
559 			      SOC_RESET_CONTROL_ADDRESS));
560 	qdf_mdelay(10);
561 
562 	/* Read Target CPU Intr Cause */
563 	val = hif_read32_mb(sc, mem +
564 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
565 	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
566 		    __func__, val);
567 
568 	/* CPU warm RESET */
569 	val =
570 		hif_read32_mb(sc, mem +
571 			     (RTC_SOC_BASE_ADDRESS |
572 			      SOC_RESET_CONTROL_ADDRESS));
573 	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
574 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
575 		       SOC_RESET_CONTROL_ADDRESS), val);
576 	val =
577 		hif_read32_mb(sc, mem +
578 			     (RTC_SOC_BASE_ADDRESS |
579 			      SOC_RESET_CONTROL_ADDRESS));
580 	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
581 		    __func__, val);
582 
583 	qdf_mdelay(100);
584 	HIF_INFO_MED("%s: Target Warm reset complete", __func__);
585 
586 }
587 
588 #ifndef QCA_WIFI_3_0
589 /* only applicable to legacy ce */
590 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
591 {
592 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
593 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
594 	void __iomem *mem = sc->mem;
595 	uint32_t val;
596 
597 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
598 		return ATH_ISR_NOSCHED;
599 	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
600 	if (Q_TARGET_ACCESS_END(scn) < 0)
601 		return ATH_ISR_SCHED;
602 
603 	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);
604 
605 	if (val & FW_IND_HELPER)
606 		return 0;
607 
608 	return 1;
609 }
610 #endif
611 
612 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
613 {
614 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
615 	uint16_t device_id = 0;
616 	uint32_t val;
617 	uint16_t timeout_count = 0;
618 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
619 
620 	/* Check device ID from PCIe configuration space for link status */
621 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
622 	if (device_id != sc->devid) {
623 		HIF_ERROR("%s: device ID does not match (read 0x%x, expect 0x%x)",
624 			  __func__, device_id, sc->devid);
625 		return -EACCES;
626 	}
627 
628 	/* Check PCIe local register for bar/memory access */
629 	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
630 			   RTC_STATE_ADDRESS);
631 	HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);
632 
633 	/* Try to wake up the target if it sleeps */
634 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
635 		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
636 	HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
637 		hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
638 		PCIE_SOC_WAKE_ADDRESS));
639 
640 	/* Check if the target can be woken up */
641 	while (!hif_targ_is_awake(scn, sc->mem)) {
642 		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
643 			HIF_ERROR("%s: wake up timeout, %08x, %08x",
644 				__func__,
645 				hif_read32_mb(sc, sc->mem +
646 					     PCIE_LOCAL_BASE_ADDRESS +
647 					     RTC_STATE_ADDRESS),
648 				hif_read32_mb(sc, sc->mem +
649 					     PCIE_LOCAL_BASE_ADDRESS +
650 					PCIE_SOC_WAKE_ADDRESS));
651 			return -EACCES;
652 		}
653 
654 		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
655 			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
656 
657 		qdf_mdelay(100);
658 		timeout_count += 100;
659 	}
660 
661 	/* Check Power register for SoC internal bus issues */
662 	val =
663 		hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
664 			     SOC_POWER_REG_OFFSET);
665 	HIF_INFO_MED("%s: Power register is %08x", __func__, val);
666 
667 	return 0;
668 }
669 
670 /**
671  * __hif_pci_dump_registers(): dump other PCI debug registers
672  * @scn: struct hif_softc
673  *
674  * This function dumps pci debug registers.  The parent function
675  * dumps the copy engine registers before calling this function.
676  *
677  * Return: void
678  */
679 static void __hif_pci_dump_registers(struct hif_softc *scn)
680 {
681 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
682 	void __iomem *mem = sc->mem;
683 	uint32_t val, i, j;
684 	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
685 	uint32_t ce_base;
686 
687 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
688 		return;
689 
690 	/* DEBUG_INPUT_SEL_SRC = 0x6 */
691 	val =
692 		hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
693 			     WLAN_DEBUG_INPUT_SEL_OFFSET);
694 	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
695 	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
696 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
697 		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);
698 
699 	/* DEBUG_CONTROL_ENABLE = 0x1 */
700 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
701 			   WLAN_DEBUG_CONTROL_OFFSET);
702 	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
703 	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
704 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
705 		      WLAN_DEBUG_CONTROL_OFFSET, val);
706 
707 	HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
708 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
709 			    WLAN_DEBUG_INPUT_SEL_OFFSET),
710 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
711 			    WLAN_DEBUG_CONTROL_OFFSET));
712 
713 	HIF_INFO_MED("%s: Debug CE", __func__);
714 	/* Loop CE debug output */
715 	/* AMBA_DEBUG_BUS_SEL = 0xc */
716 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
717 			    AMBA_DEBUG_BUS_OFFSET);
718 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
719 	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
720 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
721 		       val);
722 
723 	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
724 		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
725 		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
726 				   CE_WRAPPER_DEBUG_OFFSET);
727 		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
728 		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
729 		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
730 			      CE_WRAPPER_DEBUG_OFFSET, val);
731 
732 		HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
733 			    __func__, wrapper_idx[i],
734 			    hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
735 				AMBA_DEBUG_BUS_OFFSET),
736 			    hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
737 				CE_WRAPPER_DEBUG_OFFSET));
738 
739 		if (wrapper_idx[i] <= 7) {
740 			for (j = 0; j <= 5; j++) {
741 				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
742 				/* For (j=0~5) write CE_DEBUG_SEL = j */
743 				val =
744 					hif_read32_mb(sc, mem + ce_base +
745 						     CE_DEBUG_OFFSET);
746 				val &= ~CE_DEBUG_SEL_MASK;
747 				val |= CE_DEBUG_SEL_SET(j);
748 				hif_write32_mb(sc, mem + ce_base +
749 					       CE_DEBUG_OFFSET, val);
750 
751 				/* read (@gpio_athr_wlan_reg)
752 				 * WLAN_DEBUG_OUT_DATA
753 				 */
754 				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
755 						    + WLAN_DEBUG_OUT_OFFSET);
756 				val = WLAN_DEBUG_OUT_DATA_GET(val);
757 
758 				HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
759 					    __func__, j,
760 					    hif_read32_mb(sc, mem + ce_base +
761 						    CE_DEBUG_OFFSET), val);
762 			}
763 		} else {
764 			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
765 			val =
766 				hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
767 					     WLAN_DEBUG_OUT_OFFSET);
768 			val = WLAN_DEBUG_OUT_DATA_GET(val);
769 
770 			HIF_INFO_MED("%s: out: %x", __func__, val);
771 		}
772 	}
773 
774 	HIF_INFO_MED("%s: Debug PCIe:", __func__);
775 	/* Loop PCIe debug output */
776 	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
777 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
778 			    AMBA_DEBUG_BUS_OFFSET);
779 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
780 	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
781 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
782 		       AMBA_DEBUG_BUS_OFFSET, val);
783 
784 	for (i = 0; i <= 8; i++) {
785 		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
786 		val =
787 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
788 				     AMBA_DEBUG_BUS_OFFSET);
789 		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
790 		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
791 		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
792 			       AMBA_DEBUG_BUS_OFFSET, val);
793 
794 		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
795 		val =
796 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
797 				     WLAN_DEBUG_OUT_OFFSET);
798 		val = WLAN_DEBUG_OUT_DATA_GET(val);
799 
800 		HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
801 		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
802 				    WLAN_DEBUG_OUT_OFFSET), val,
803 		       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
804 				    WLAN_DEBUG_OUT_OFFSET));
805 	}
806 
807 	Q_TARGET_ACCESS_END(scn);
808 }
809 
810 /**
811  * hif_pci_dump_registers(): dump bus debug registers
812  * @hif_ctx: struct hif_softc
813  *
814  * This function dumps hif bus debug registers
815  *
816  * Return: 0 for success or error code
817  */
818 int hif_pci_dump_registers(struct hif_softc *hif_ctx)
819 {
820 	int status;
821 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
822 
823 	status = hif_dump_ce_registers(scn);
824 
825 	if (status)
826 		HIF_ERROR("%s: Dump CE Registers Failed", __func__);
827 
828 	/* dump non copy engine pci registers */
829 	__hif_pci_dump_registers(scn);
830 
831 	return 0;
832 }
833 
834 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
835 
836 /* worker thread to schedule wlan_tasklet in SLUB debug build */
837 static void reschedule_tasklet_work_handler(void *arg)
838 {
839 	struct hif_pci_softc *sc = arg;
840 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
841 
842 	if (!scn) {
843 		HIF_ERROR("%s: hif_softc is NULL\n", __func__);
844 		return;
845 	}
846 
847 	if (scn->hif_init_done == false) {
848 		HIF_ERROR("%s: wlan driver is unloaded", __func__);
849 		return;
850 	}
851 
852 	tasklet_schedule(&sc->intr_tq);
853 }
854 
855 /**
856  * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
857  * work
858  * @sc: HIF PCI Context
859  *
860  * Return: void
861  */
862 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
863 {
864 	qdf_create_work(0, &sc->reschedule_tasklet_work,
865 				reschedule_tasklet_work_handler, NULL);
866 }
867 #else
868 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
869 #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
870 
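/**
 * wlan_tasklet() - deferred (bottom half) handler for the legacy interrupt
 * @data: hif_pci_softc context cast to unsigned long
 *
 * Skipped while the driver is not fully initialized or the link is
 * suspended.  For non-Adrastea targets it runs the firmware interrupt
 * handler; on exit it clears the tasklet-from-interrupt flag and drops the
 * active tasklet count.
 */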
871 void wlan_tasklet(unsigned long data)
872 {
873 	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
874 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
875 
876 	if (scn->hif_init_done == false)
877 		goto end;
878 
879 	if (qdf_atomic_read(&scn->link_suspended))
880 		goto end;
881 
882 	if (!ADRASTEA_BU) {
883 		hif_fw_interrupt_handler(sc->irq_event, scn);
884 		if (scn->target_status == TARGET_STATUS_RESET)
885 			goto end;
886 	}
887 
888 end:
889 	qdf_atomic_set(&scn->tasklet_from_intr, 0);
890 	qdf_atomic_dec(&scn->active_tasklet_cnt);
891 }
892 
893 #ifdef FEATURE_RUNTIME_PM
894 static const char *hif_pm_runtime_state_to_string(uint32_t state)
895 {
896 	switch (state) {
897 	case HIF_PM_RUNTIME_STATE_NONE:
898 		return "INIT_STATE";
899 	case HIF_PM_RUNTIME_STATE_ON:
900 		return "ON";
901 	case HIF_PM_RUNTIME_STATE_RESUMING:
902 		return "RESUMING";
903 	case HIF_PM_RUNTIME_STATE_SUSPENDING:
904 		return "SUSPENDING";
905 	case HIF_PM_RUNTIME_STATE_SUSPENDED:
906 		return "SUSPENDED";
907 	default:
908 		return "INVALID STATE";
909 	}
910 }
911 
912 #define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
913 	seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
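/* Print one pm_stats counter as a labelled "<name>: <value>" line of the
 * runtime pm debugfs output.
 */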
914 /**
915  * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
916  * @sc: hif_pci_softc context
917  * @msg: log message
918  *
919  * log runtime pm stats when something seems off.
920  *
921  * Return: void
922  */
923 static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
924 {
925 	struct hif_pm_runtime_lock *ctx;
926 
927 	HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
928 			msg, atomic_read(&sc->dev->power.usage_count),
929 			hif_pm_runtime_state_to_string(
930 					atomic_read(&sc->pm_state)),
931 			sc->prevent_suspend_cnt);
932 
933 	HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
934 			sc->dev->power.runtime_status,
935 			sc->dev->power.runtime_error,
936 			sc->dev->power.disable_depth,
937 			sc->dev->power.autosuspend_delay);
938 
939 	HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
940 			sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
941 			sc->pm_stats.request_resume);
942 
943 	HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
944 			sc->pm_stats.allow_suspend,
945 			sc->pm_stats.prevent_suspend);
946 
947 	HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
948 			sc->pm_stats.prevent_suspend_timeout,
949 			sc->pm_stats.allow_suspend_timeout);
950 
951 	HIF_ERROR("Suspended: %u, resumed: %u count",
952 			sc->pm_stats.suspended,
953 			sc->pm_stats.resumed);
954 
955 	HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
956 			sc->pm_stats.suspend_err,
957 			sc->pm_stats.runtime_get_err);
958 
959 	HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");
960 
961 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
962 		HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
963 	}
964 
965 	WARN_ON(1);
966 }
967 
968 /**
969  * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
970  * @s: file to print to
971  * @data: unused
972  *
973  * debugging tool added to the debug fs for displaying runtimepm stats
974  *
975  * Return: 0
976  */
977 static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
978 {
979 	struct hif_pci_softc *sc = s->private;
980 	static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
981 		"SUSPENDING", "SUSPENDED"};
982 	unsigned int msecs_age;
983 	qdf_time_t usecs_age;
984 	int pm_state = atomic_read(&sc->pm_state);
985 	unsigned long timer_expires;
986 	struct hif_pm_runtime_lock *ctx;
987 
988 	seq_printf(s, "%30s: %s\n", "Runtime PM state",
989 		   autopm_state[pm_state]);
990 	seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
991 		   sc->pm_stats.last_resume_caller);
992 	seq_printf(s, "%30s: %pf\n", "Last Busy Marker",
993 		   sc->pm_stats.last_busy_marker);
994 
995 	usecs_age = qdf_get_log_timestamp_usecs() -
996 		sc->pm_stats.last_busy_timestamp;
997 	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp",
998 		   sc->pm_stats.last_busy_timestamp / 1000000,
999 		   sc->pm_stats.last_busy_timestamp % 1000000);
1000 	seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since",
1001 		   usecs_age / 1000000, usecs_age % 1000000);
1002 
1003 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
1004 		msecs_age = jiffies_to_msecs(jiffies -
1005 					     sc->pm_stats.suspend_jiffies);
1006 		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
1007 			   msecs_age / 1000, msecs_age % 1000);
1008 	}
1009 
1010 	seq_printf(s, "%30s: %d\n", "PM Usage count",
1011 		   atomic_read(&sc->dev->power.usage_count));
1012 
1013 	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
1014 		   sc->prevent_suspend_cnt);
1015 
1016 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
1017 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
1018 	HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
1019 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
1020 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
1021 	HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
1022 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
1023 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
1024 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
1025 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
1026 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);
1027 
1028 	timer_expires = sc->runtime_timer_expires;
1029 	if (timer_expires > 0) {
1030 		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
1031 		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
1032 			   msecs_age / 1000, msecs_age % 1000);
1033 	}
1034 
1035 	spin_lock_bh(&sc->runtime_lock);
1036 	if (list_empty(&sc->prevent_suspend_list)) {
1037 		spin_unlock_bh(&sc->runtime_lock);
1038 		return 0;
1039 	}
1040 
1041 	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
1042 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
1043 		seq_printf(s, "%s", ctx->name);
1044 		if (ctx->timeout)
1045 			seq_printf(s, "(%d ms)", ctx->timeout);
1046 		seq_puts(s, " ");
1047 	}
1048 	seq_puts(s, "\n");
1049 	spin_unlock_bh(&sc->runtime_lock);
1050 
1051 	return 0;
1052 }
1053 #undef HIF_PCI_RUNTIME_PM_STATS
1054 
1055 /**
1056  * hif_pci_runtime_pm_open() - open a debugfs file to access the runtime pm stats
1057  * @inode: inode of the debugfs entry
1058  * @file: file being opened
1059  *
1060  * Return: linux error code of single_open.
1061  */
1062 static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
1063 {
1064 	return single_open(file, hif_pci_pm_runtime_debugfs_show,
1065 			inode->i_private);
1066 }
1067 
1068 static const struct file_operations hif_pci_runtime_pm_fops = {
1069 	.owner          = THIS_MODULE,
1070 	.open           = hif_pci_runtime_pm_open,
1071 	.release        = single_release,
1072 	.read           = seq_read,
1073 	.llseek         = seq_lseek,
1074 };
1075 
1076 /**
1077  * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
1078  * @sc: pci context
1079  *
1080  * creates a debugfs entry to debug the runtime pm feature.
1081  */
1082 static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
1083 {
1084 	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
1085 					0400, NULL, sc,
1086 					&hif_pci_runtime_pm_fops);
1087 }
1088 
1089 /**
1090  * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
1091  * @sc: pci context
1092  *
1093  * removes the debugfs entry to debug the runtime pm feature.
1094  */
1095 static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
1096 {
1097 	debugfs_remove(sc->pm_dentry);
1098 }
1099 
1100 static void hif_runtime_init(struct device *dev, int delay)
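/**
 * hif_runtime_init() - hand the device over to kernel runtime PM
 * @dev: pci device
 * @delay: autosuspend delay in ms
 *
 * Configures autosuspend, allows runtime suspend, marks the device busy and
 * drops a usage-count reference (paired with the get in hif_runtime_exit())
 * so the device can actually idle; children are ignored for suspend
 * decisions.
 */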
1101 {
1102 	pm_runtime_set_autosuspend_delay(dev, delay);
1103 	pm_runtime_use_autosuspend(dev);
1104 	pm_runtime_allow(dev);
1105 	pm_runtime_mark_last_busy(dev);
1106 	pm_runtime_put_noidle(dev);
1107 	pm_suspend_ignore_children(dev, true);
1108 }
1109 
1110 static void hif_runtime_exit(struct device *dev)
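/**
 * hif_runtime_exit() - take the device back from kernel runtime PM
 * @dev: pci device
 *
 * Re-takes the usage-count reference dropped by hif_runtime_init() and
 * marks the device active so it will no longer be runtime suspended.
 */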
1111 {
1112 	pm_runtime_get_noresume(dev);
1113 	pm_runtime_set_active(dev);
1114 }
1115 
1116 static void hif_pm_runtime_lock_timeout_fn(void *data);
1117 
1118 /**
1119  * hif_pm_runtime_start(): start the runtime pm
1120  * @sc: pci context
1121  *
1122  * After this call, runtime pm will be active.
1123  */
1124 static void hif_pm_runtime_start(struct hif_pci_softc *sc)
1125 {
1126 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1127 	uint32_t mode = hif_get_conparam(ol_sc);
1128 
1129 	if (!ol_sc->hif_config.enable_runtime_pm) {
1130 		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
1131 		return;
1132 	}
1133 
1134 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
1135 	    mode == QDF_GLOBAL_MONITOR_MODE) {
1136 		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING/MONITOR mode\n",
1137 				__func__);
1138 		return;
1139 	}
1140 
1141 	qdf_timer_init(NULL, &sc->runtime_timer,
1142 		       hif_pm_runtime_lock_timeout_fn,
1143 		       sc, QDF_TIMER_TYPE_WAKE_APPS);
1144 
1145 	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
1146 			ol_sc->hif_config.runtime_pm_delay);
1147 
1148 	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
1149 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
1150 	hif_runtime_pm_debugfs_create(sc);
1151 }
1152 
1153 /**
1154  * hif_pm_runtime_stop(): stop runtime pm
1155  * @sc: pci context
1156  *
1157  * Turns off runtime pm and frees corresponding resources
1158  * that were acquired by hif_pm_runtime_start().
1159  */
1160 static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
1161 {
1162 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1163 	uint32_t mode = hif_get_conparam(ol_sc);
1164 
1165 	if (!ol_sc->hif_config.enable_runtime_pm)
1166 		return;
1167 
1168 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
1169 	    mode == QDF_GLOBAL_MONITOR_MODE)
1170 		return;
1171 
1172 	hif_runtime_exit(sc->dev);
1173 
1174 	hif_pm_runtime_sync_resume(GET_HIF_OPAQUE_HDL(sc));
1175 
1176 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1177 
1178 	hif_runtime_pm_debugfs_remove(sc);
1179 	qdf_timer_free(&sc->runtime_timer);
1180 	/* doesn't wait for pending traffic unlike cld-2.0 */
1181 }
1182 
1183 /**
1184  * hif_pm_runtime_open(): initialize runtime pm
1185  * @sc: pci data structure
1186  *
1187  * Early initialization
1188  */
1189 static void hif_pm_runtime_open(struct hif_pci_softc *sc)
1190 {
1191 	spin_lock_init(&sc->runtime_lock);
1192 
1193 	qdf_atomic_init(&sc->pm_state);
1194 	qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
1195 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1196 	INIT_LIST_HEAD(&sc->prevent_suspend_list);
1197 }
1198 
1199 /**
1200  * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
1201  * @sc: pci context
1202  *
1203  * Ensure we have only one vote against runtime suspend before closing
1204  * the runtime suspend feature.
1205  *
1206  * all gets by the wlan driver should have been returned
1207  * one vote should remain as part of cnss_runtime_exit
1208  *
1209  * needs to be revisited if we share the root complex.
1210  */
1211 static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
1212 {
1213 	struct hif_pm_runtime_lock *ctx, *tmp;
1214 
1215 	if (atomic_read(&sc->dev->power.usage_count) != 1)
1216 		hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
1217 	else
1218 		return;
1219 
1220 	spin_lock_bh(&sc->runtime_lock);
1221 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1222 		spin_unlock_bh(&sc->runtime_lock);
1223 		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
1224 		spin_lock_bh(&sc->runtime_lock);
1225 	}
1226 	spin_unlock_bh(&sc->runtime_lock);
1227 
1228 	/* ensure one and only one usage count so that when the wlan
1229 	 * driver is re-insmodded runtime pm won't be
1230 	 * disabled; this also ensures runtime pm doesn't get
1231 	 * broken by the count dropping below 1.
1232 	 */
1233 	if (atomic_read(&sc->dev->power.usage_count) <= 0)
1234 		atomic_set(&sc->dev->power.usage_count, 1);
1235 	while (atomic_read(&sc->dev->power.usage_count) > 1)
1236 		hif_pm_runtime_put_auto(sc->dev);
1237 }
1238 
1239 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
1240 					  struct hif_pm_runtime_lock *lock);
1241 
1242 /**
1243  * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
1244  * @sc: PCIe Context
1245  *
1246  * API is used to empty the runtime pm prevent suspend list.
1247  *
1248  * Return: void
1249  */
1250 static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
1251 {
1252 	struct hif_pm_runtime_lock *ctx, *tmp;
1253 
1254 	spin_lock_bh(&sc->runtime_lock);
1255 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1256 		__hif_pm_runtime_allow_suspend(sc, ctx);
1257 	}
1258 	spin_unlock_bh(&sc->runtime_lock);
1259 }
1260 
1261 /**
1262  * hif_pm_runtime_close(): close runtime pm
1263  * @sc: pci bus handle
1264  *
1265  * ensure runtime_pm is stopped before closing the driver
1266  */
1267 static void hif_pm_runtime_close(struct hif_pci_softc *sc)
1268 {
1269 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1270 
1271 	qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
1272 	if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
1273 		return;
1274 
1275 	hif_pm_runtime_stop(sc);
1276 
1277 	hif_is_recovery_in_progress(scn) ?
1278 		hif_pm_runtime_sanitize_on_ssr_exit(sc) :
1279 		hif_pm_runtime_sanitize_on_exit(sc);
1280 }
1281 
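/**
 * hif_pm_runtime_sync_resume() - runtime resume the bus synchronously
 * @hif_ctx: hif context
 *
 * No-op when runtime PM is not enabled for the device; otherwise records
 * the resume request in the pm stats and calls pm_runtime_resume(), which
 * blocks until the resume has completed.
 *
 * Return: -EINVAL on a NULL context, 0 if runtime PM is disabled, otherwise
 *	the pm_runtime_resume() result
 */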
1282 int hif_pm_runtime_sync_resume(struct hif_opaque_softc *hif_ctx)
1283 {
1284 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1285 	int pm_state;
1286 
1287 	if (!sc)
1288 		return -EINVAL;
1289 
1290 	if (!pm_runtime_enabled(sc->dev))
1291 		return 0;
1292 
1293 	pm_state = qdf_atomic_read(&sc->pm_state);
1294 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
1295 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
1296 		HIF_INFO("Runtime PM resume is requested by %ps",
1297 			 (void *)_RET_IP_);
1298 
1299 	sc->pm_stats.request_resume++;
1300 	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
1301 
1302 	return pm_runtime_resume(sc->dev);
1303 }
1304 #else
1305 static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
1306 static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
1307 static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
1308 static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
1309 #endif
1310 
1311 /**
1312  * hif_disable_power_gating() - disable HW power gating
1313  * @hif_ctx: hif context
1314  *
1315  * disables pcie L1 power states
1316  */
1317 static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1318 {
1319 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1320 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1321 
1322 	if (!scn) {
1323 		HIF_ERROR("%s: Could not disable ASPM, scn is null",
1324 		       __func__);
1325 		return;
1326 	}
1327 
1328 	/* Disable ASPM when pkt log is enabled */
1329 	pfrm_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1330 	pfrm_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1331 }
1332 
1333 /**
1334  * hif_enable_power_gating() - enable HW power gating
1335  * @hif_ctx: hif context
1336  *
1337  * enables pcie L1 power states
1338  */
1339 static void hif_enable_power_gating(struct hif_pci_softc *sc)
1340 {
1341 	if (!sc) {
1342 		HIF_ERROR("%s: Could not re-enable ASPM, sc is null",
1343 		       __func__);
1344 		return;
1345 	}
1346 
1347 	/* Re-enable ASPM after firmware/OTP download is complete */
1348 	pfrm_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1349 }
1350 
1351 /**
1352  * hif_pci_enable_power_management() - enable power management
1353  * @hif_sc: hif context
1354  *
1355  * Enables runtime pm, ASPM (PCIe, via hif_enable_power_gating) and re-enables
1356  * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
1357  *
1358  * note: epping mode does not call this function as it does not
1359  *       care about saving power.
1360  */
1361 void hif_pci_enable_power_management(struct hif_softc *hif_sc,
1362 				 bool is_packet_log_enabled)
1363 {
1364 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
1365 	uint32_t mode;
1366 
1367 	if (!pci_ctx) {
1368 		HIF_ERROR("%s, hif_ctx null", __func__);
1369 		return;
1370 	}
1371 
1372 	mode = hif_get_conparam(hif_sc);
1373 	if (mode == QDF_GLOBAL_FTM_MODE) {
1374 		HIF_INFO("%s: Enable power gating for FTM mode", __func__);
1375 		hif_enable_power_gating(pci_ctx);
1376 		return;
1377 	}
1378 
1379 	hif_pm_runtime_start(pci_ctx);
1380 
1381 	if (!is_packet_log_enabled)
1382 		hif_enable_power_gating(pci_ctx);
1383 
1384 	if (!CONFIG_ATH_PCIE_MAX_PERF &&
1385 	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
1386 	    !ce_srng_based(hif_sc)) {
1387 		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
1388 		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
1389 			HIF_ERROR("%s, failed to set target to sleep",
1390 				  __func__);
1391 	}
1392 }
1393 
1394 /**
1395  * hif_pci_disable_power_management() - disable power management
1396  * @hif_ctx: hif context
1397  *
1398  * Currently disables runtime pm. Should be updated to behave
1399  * if runtime pm is not started. Should be updated to take care
1400  * of aspm and soc sleep for driver load.
1401  */
1402 void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
1403 {
1404 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1405 
1406 	if (!pci_ctx) {
1407 		HIF_ERROR("%s, hif_ctx null", __func__);
1408 		return;
1409 	}
1410 
1411 	hif_pm_runtime_stop(pci_ctx);
1412 }
1413 
1414 void hif_pci_display_stats(struct hif_softc *hif_ctx)
1415 {
1416 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1417 
1418 	if (!pci_ctx) {
1419 		HIF_ERROR("%s, hif_ctx null", __func__);
1420 		return;
1421 	}
1422 	hif_display_ce_stats(hif_ctx);
1423 
1424 	hif_print_pci_stats(pci_ctx);
1425 }
1426 
1427 void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1428 {
1429 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1430 
1431 	if (!pci_ctx) {
1432 		HIF_ERROR("%s, hif_ctx null", __func__);
1433 		return;
1434 	}
1435 	hif_clear_ce_stats(&pci_ctx->ce_sc);
1436 }
1437 
1438 #define ATH_PCI_PROBE_RETRY_MAX 3
1439 /**
1440  * hif_pci_open(): open the PCI layer of the hif bus
1441  * @hif_ctx: hif context
1442  * @bus_type: bus type
1443  *
1444  * Return: QDF_STATUS
1445  */
1446 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
1447 {
1448 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1449 
1450 	hif_ctx->bus_type = bus_type;
1451 	hif_pm_runtime_open(sc);
1452 
1453 	qdf_spinlock_create(&sc->irq_lock);
1454 
1455 	return hif_ce_open(hif_ctx);
1456 }
1457 
1458 /**
1459  * hif_wake_target_cpu() - wake the target's cpu
1460  * @scn: hif context
1461  *
1462  * Send an interrupt to the device to wake up the Target CPU
1463  * so it has an opportunity to notice any changed state.
1464  */
1465 static void hif_wake_target_cpu(struct hif_softc *scn)
1466 {
1467 	QDF_STATUS rv;
1468 	uint32_t core_ctrl;
1469 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1470 
1471 	rv = hif_diag_read_access(hif_hdl,
1472 				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1473 				  &core_ctrl);
1474 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1475 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1476 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1477 
1478 	rv = hif_diag_write_access(hif_hdl,
1479 				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1480 				   core_ctrl);
1481 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1482 }
1483 
1484 /**
1485  * soc_wake_reset() - allow the target to go to sleep
1486  * @scn: hif_softc
1487  *
1488  * Clear the force wake register.  This is done by
1489  * hif_sleep_entry and by hif_pci_cancel_deferred_target_sleep().
1490  */
1491 static void soc_wake_reset(struct hif_softc *scn)
1492 {
1493 	hif_write32_mb(scn, scn->mem +
1494 		PCIE_LOCAL_BASE_ADDRESS +
1495 		PCIE_SOC_WAKE_ADDRESS,
1496 		PCIE_SOC_WAKE_RESET);
1497 }
1498 
1499 /**
1500  * hif_sleep_entry() - gate target sleep
1501  * @arg: hif context
1502  *
1503  * This function is the callback for the sleep timer.
1504  * Check if last force awake critical section was at least
1505  * HIF_MIN_SLEEP_INACTIVITY_TIME_MS ago.  If it was,
1506  * allow the target to go to sleep and cancel the sleep timer.
1507  * otherwise reschedule the sleep timer.
1508  */
1509 static void hif_sleep_entry(void *arg)
1510 {
1511 	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1512 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1513 	uint32_t idle_ms;
1514 
1515 	if (scn->recovery)
1516 		return;
1517 
1518 	if (hif_is_driver_unloading(scn))
1519 		return;
1520 
1521 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1522 	if (hif_state->fake_sleep) {
1523 		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1524 						    - hif_state->sleep_ticks);
1525 		if (!hif_state->verified_awake &&
1526 		    idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1527 			if (!qdf_atomic_read(&scn->link_suspended)) {
1528 				soc_wake_reset(scn);
1529 				hif_state->fake_sleep = false;
1530 			}
1531 		} else {
1532 			qdf_timer_stop(&hif_state->sleep_timer);
1533 			qdf_timer_start(&hif_state->sleep_timer,
1534 					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1535 		}
1536 	}
1537 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1538 }
1539 
1540 #define HIF_HIA_MAX_POLL_LOOP    1000000
1541 #define HIF_HIA_POLLING_DELAY_MS 10
1542 
1543 #ifdef QCA_HIF_HIA_EXTND
1544 
1545 static void hif_set_hia_extnd(struct hif_softc *scn)
1546 {
1547 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1548 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1549 	uint32_t target_type = tgt_info->target_type;
1550 
1551 	HIF_TRACE("%s: E", __func__);
1552 
1553 	if ((target_type == TARGET_TYPE_AR900B) ||
1554 			target_type == TARGET_TYPE_QCA9984 ||
1555 			target_type == TARGET_TYPE_QCA9888) {
1556 		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
1557 		 * in RTC space
1558 		 */
1559 		tgt_info->target_revision
1560 			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
1561 					+ CHIP_ID_ADDRESS));
1562 		qdf_print("chip_id 0x%x chip_revision 0x%x",
1563 			  target_type, tgt_info->target_revision);
1564 	}
1565 
1566 	{
1567 		uint32_t flag2_value = 0;
1568 		uint32_t flag2_targ_addr =
1569 			host_interest_item_address(target_type,
1570 			offsetof(struct host_interest_s, hi_skip_clock_init));
1571 
1572 		if ((ar900b_20_targ_clk != -1) &&
1573 			(frac != -1) && (intval != -1)) {
1574 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1575 				&flag2_value);
1576 			qdf_print("\n Setting clk_override");
1577 			flag2_value |= CLOCK_OVERRIDE;
1578 
1579 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1580 					flag2_value);
1581 			qdf_print("\n CLOCK PLL val set %d", flag2_value);
1582 		} else {
1583 			qdf_print("\n CLOCK PLL skipped");
1584 		}
1585 	}
1586 
1587 	if (target_type == TARGET_TYPE_AR900B
1588 			|| target_type == TARGET_TYPE_QCA9984
1589 			|| target_type == TARGET_TYPE_QCA9888) {
1590 
1591 		/* for AR9980_2.0, 300 mhz clock is used, right now we assume
1592 		 * this would be supplied through module parameters,
1593 		 * if not supplied assumed default or same behavior as 1.0.
1594 		 * Assume 1.0 clock can't be tuned, reset to defaults
1595 		 */
1596 
1597 		qdf_print(KERN_INFO
1598 			  "%s: setting the target pll frac %x intval %x",
1599 			  __func__, frac, intval);
1600 
1601 		/* do not touch frac, and int val, let them be default -1,
1602 		 * if desired, host can supply these through module params
1603 		 */
1604 		if (frac != -1 || intval != -1) {
1605 			uint32_t flag2_value = 0;
1606 			uint32_t flag2_targ_addr;
1607 
1608 			flag2_targ_addr =
1609 				host_interest_item_address(target_type,
1610 				offsetof(struct host_interest_s,
1611 					hi_clock_info));
1612 			hif_diag_read_access(hif_hdl,
1613 				flag2_targ_addr, &flag2_value);
1614 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1615 				  flag2_value);
1616 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1617 			qdf_print("\n INT Val %x  Address %x",
1618 				  intval, flag2_value + 4);
1619 			hif_diag_write_access(hif_hdl,
1620 					flag2_value + 4, intval);
1621 		} else {
1622 			qdf_print(KERN_INFO
1623 				  "%s: no frac provided, skipping pre-configuring PLL",
1624 				  __func__);
1625 		}
1626 
1627 		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
1628 		if ((target_type == TARGET_TYPE_AR900B)
1629 			&& (tgt_info->target_revision == AR900B_REV_2)
1630 			&& ar900b_20_targ_clk != -1) {
1631 			uint32_t flag2_value = 0;
1632 			uint32_t flag2_targ_addr;
1633 
1634 			flag2_targ_addr
1635 				= host_interest_item_address(target_type,
1636 					offsetof(struct host_interest_s,
1637 					hi_desired_cpu_speed_hz));
1638 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1639 							&flag2_value);
1640 			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
1641 				  flag2_value);
1642 			hif_diag_write_access(hif_hdl, flag2_value,
1643 				ar900b_20_targ_clk/*300000000u*/);
1644 		} else if (target_type == TARGET_TYPE_QCA9888) {
1645 			uint32_t flag2_targ_addr;
1646 
1647 			if (200000000u != qca9888_20_targ_clk) {
1648 				qca9888_20_targ_clk = 300000000u;
1649 				/* Setting the target clock speed to 300 mhz */
1650 			}
1651 
1652 			flag2_targ_addr
1653 				= host_interest_item_address(target_type,
1654 					offsetof(struct host_interest_s,
1655 					hi_desired_cpu_speed_hz));
1656 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1657 				qca9888_20_targ_clk);
1658 		} else {
1659 			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
1660 				  __func__);
1661 		}
1662 	} else {
1663 		if (frac != -1 || intval != -1) {
1664 			uint32_t flag2_value = 0;
1665 			uint32_t flag2_targ_addr =
1666 				host_interest_item_address(target_type,
1667 					offsetof(struct host_interest_s,
1668 							hi_clock_info));
1669 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1670 						&flag2_value);
1671 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1672 				  flag2_value);
1673 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1674 			qdf_print("\n INT Val %x  Address %x", intval,
1675 				  flag2_value + 4);
1676 			hif_diag_write_access(hif_hdl, flag2_value + 4,
1677 					      intval);
1678 		}
1679 	}
1680 }
1681 
1682 #else
1683 
1684 static void hif_set_hia_extnd(struct hif_softc *scn)
1685 {
1686 }
1687 
1688 #endif
1689 
1690 /**
1691  * hif_set_hia() - fill out the host interest area
1692  * @scn: hif context
1693  *
1694  * This is replaced by hif_wlan_enable for integrated targets.
1695  * This fills out the host interest area.  The firmware will
1696  * process these memory addresses when it is first brought out
1697  * of reset.
1698  *
1699  * Return: 0 for success.
1700  */
1701 static int hif_set_hia(struct hif_softc *scn)
1702 {
1703 	QDF_STATUS rv;
1704 	uint32_t interconnect_targ_addr = 0;
1705 	uint32_t pcie_state_targ_addr = 0;
1706 	uint32_t pipe_cfg_targ_addr = 0;
1707 	uint32_t svc_to_pipe_map = 0;
1708 	uint32_t pcie_config_flags = 0;
1709 	uint32_t flag2_value = 0;
1710 	uint32_t flag2_targ_addr = 0;
1711 #ifdef QCA_WIFI_3_0
1712 	uint32_t host_interest_area = 0;
1713 	uint8_t i;
1714 #else
1715 	uint32_t ealloc_value = 0;
1716 	uint32_t ealloc_targ_addr = 0;
1717 	uint8_t banks_switched = 1;
1718 	uint32_t chip_id;
1719 #endif
1720 	uint32_t pipe_cfg_addr;
1721 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1722 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1723 	uint32_t target_type = tgt_info->target_type;
1724 	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
1725 	static struct CE_pipe_config *target_ce_config;
1726 	struct service_to_pipe *target_service_to_ce_map;
1727 
1728 	HIF_TRACE("%s: E", __func__);
1729 
1730 	hif_get_target_ce_config(scn,
1731 				 &target_ce_config, &target_ce_config_sz,
1732 				 &target_service_to_ce_map,
1733 				 &target_service_to_ce_map_sz,
1734 				 NULL, NULL);
1735 
1736 	if (ADRASTEA_BU)
1737 		return QDF_STATUS_SUCCESS;
1738 
1739 #ifdef QCA_WIFI_3_0
1740 	i = 0;
1741 	while (i < HIF_HIA_MAX_POLL_LOOP) {
1742 		host_interest_area = hif_read32_mb(scn, scn->mem +
1743 						A_SOC_CORE_SCRATCH_0_ADDRESS);
1744 		if ((host_interest_area & 0x01) == 0) {
1745 			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1746 			host_interest_area = 0;
1747 			i++;
1748 			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1749 				HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1750 		} else {
1751 			host_interest_area &= (~0x01);
1752 			hif_write32_mb(scn, scn->mem + 0x113014, 0);
1753 			break;
1754 		}
1755 	}
1756 
1757 	if (i >= HIF_HIA_MAX_POLL_LOOP) {
1758 		HIF_ERROR("%s: hia polling timeout", __func__);
1759 		return -EIO;
1760 	}
1761 
1762 	if (host_interest_area == 0) {
1763 		HIF_ERROR("%s: host_interest_area = 0", __func__);
1764 		return -EIO;
1765 	}
1766 
1767 	interconnect_targ_addr = host_interest_area +
1768 			offsetof(struct host_interest_area_t,
1769 			hi_interconnect_state);
1770 
1771 	flag2_targ_addr = host_interest_area +
1772 			offsetof(struct host_interest_area_t, hi_option_flag2);
1773 
1774 #else
1775 	interconnect_targ_addr = hif_hia_item_address(target_type,
1776 		offsetof(struct host_interest_s, hi_interconnect_state));
1777 	ealloc_targ_addr = hif_hia_item_address(target_type,
1778 		offsetof(struct host_interest_s, hi_early_alloc));
1779 	flag2_targ_addr = hif_hia_item_address(target_type,
1780 		offsetof(struct host_interest_s, hi_option_flag2));
1781 #endif
1782 	/* Supply Target-side CE configuration */
1783 	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1784 			  &pcie_state_targ_addr);
1785 	if (rv != QDF_STATUS_SUCCESS) {
1786 		HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1787 			  __func__, interconnect_targ_addr, rv);
1788 		goto done;
1789 	}
1790 	if (pcie_state_targ_addr == 0) {
1791 		rv = QDF_STATUS_E_FAILURE;
1792 		HIF_ERROR("%s: pcie state addr is 0", __func__);
1793 		goto done;
1794 	}
1795 	pipe_cfg_addr = pcie_state_targ_addr +
1796 			  offsetof(struct pcie_state_s,
1797 			  pipe_cfg_addr);
1798 	rv = hif_diag_read_access(hif_hdl,
1799 			  pipe_cfg_addr,
1800 			  &pipe_cfg_targ_addr);
1801 	if (rv != QDF_STATUS_SUCCESS) {
1802 		HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1803 			__func__, pipe_cfg_addr, rv);
1804 		goto done;
1805 	}
1806 	if (pipe_cfg_targ_addr == 0) {
1807 		rv = QDF_STATUS_E_FAILURE;
1808 		HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1809 		goto done;
1810 	}
1811 
1812 	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1813 			(uint8_t *) target_ce_config,
1814 			target_ce_config_sz);
1815 
1816 	if (rv != QDF_STATUS_SUCCESS) {
1817 		HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1818 		goto done;
1819 	}
1820 
1821 	rv = hif_diag_read_access(hif_hdl,
1822 			  pcie_state_targ_addr +
1823 			  offsetof(struct pcie_state_s,
1824 			   svc_to_pipe_map),
1825 			  &svc_to_pipe_map);
1826 	if (rv != QDF_STATUS_SUCCESS) {
1827 		HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1828 		goto done;
1829 	}
1830 	if (svc_to_pipe_map == 0) {
1831 		rv = QDF_STATUS_E_FAILURE;
1832 		HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1833 		goto done;
1834 	}
1835 
1836 	rv = hif_diag_write_mem(hif_hdl,
1837 			svc_to_pipe_map,
1838 			(uint8_t *) target_service_to_ce_map,
1839 			target_service_to_ce_map_sz);
1840 	if (rv != QDF_STATUS_SUCCESS) {
1841 		HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1842 		goto done;
1843 	}
1844 
1845 	rv = hif_diag_read_access(hif_hdl,
1846 			pcie_state_targ_addr +
1847 			offsetof(struct pcie_state_s,
1848 			config_flags),
1849 			&pcie_config_flags);
1850 	if (rv != QDF_STATUS_SUCCESS) {
1851 		HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1852 		goto done;
1853 	}
1854 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1855 	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1856 #else
1857 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1858 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1859 	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1860 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1861 	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1862 #endif
1863 	rv = hif_diag_write_mem(hif_hdl,
1864 			pcie_state_targ_addr +
1865 			offsetof(struct pcie_state_s,
1866 			config_flags),
1867 			(uint8_t *) &pcie_config_flags,
1868 			sizeof(pcie_config_flags));
1869 	if (rv != QDF_STATUS_SUCCESS) {
1870 		HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1871 		goto done;
1872 	}
1873 
1874 #ifndef QCA_WIFI_3_0
1875 	/* configure early allocation */
1876 	ealloc_targ_addr = hif_hia_item_address(target_type,
1877 						offsetof(
1878 						struct host_interest_s,
1879 						hi_early_alloc));
1880 
1881 	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1882 			&ealloc_value);
1883 	if (rv != QDF_STATUS_SUCCESS) {
1884 		HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1885 		goto done;
1886 	}
1887 
1888 	/* 1 bank is switched to IRAM, except ROME 1.0 */
1889 	ealloc_value |=
1890 		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1891 		 HI_EARLY_ALLOC_MAGIC_MASK);
1892 
1893 	rv = hif_diag_read_access(hif_hdl,
1894 			  CHIP_ID_ADDRESS |
1895 			  RTC_SOC_BASE_ADDRESS, &chip_id);
1896 	if (rv != QDF_STATUS_SUCCESS) {
1897 		HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1898 		goto done;
1899 	}
1900 	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1901 		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1902 		switch (CHIP_ID_REVISION_GET(chip_id)) {
1903 		case 0x2:       /* ROME 1.3 */
1904 			/* 2 banks are switched to IRAM */
1905 			banks_switched = 2;
1906 			break;
1907 		case 0x4:       /* ROME 2.1 */
1908 		case 0x5:       /* ROME 2.2 */
1909 			banks_switched = 6;
1910 			break;
1911 		case 0x8:       /* ROME 3.0 */
1912 		case 0x9:       /* ROME 3.1 */
1913 		case 0xA:       /* ROME 3.2 */
1914 			banks_switched = 9;
1915 			break;
1916 		case 0x0:       /* ROME 1.0 */
1917 		case 0x1:       /* ROME 1.1 */
1918 		default:
1919 			/* 3 banks are switched to IRAM */
1920 			banks_switched = 3;
1921 			break;
1922 		}
1923 	}
1924 
1925 	ealloc_value |=
1926 		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1927 		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1928 
1929 	rv = hif_diag_write_access(hif_hdl,
1930 				ealloc_targ_addr,
1931 				ealloc_value);
1932 	if (rv != QDF_STATUS_SUCCESS) {
1933 		HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1934 		goto done;
1935 	}
1936 #endif
1937 	if ((target_type == TARGET_TYPE_AR900B)
1938 			|| (target_type == TARGET_TYPE_QCA9984)
1939 			|| (target_type == TARGET_TYPE_QCA9888)
1940 			|| (target_type == TARGET_TYPE_AR9888)) {
1941 		hif_set_hia_extnd(scn);
1942 	}
1943 
1944 	/* Tell Target to proceed with initialization */
1945 	flag2_targ_addr = hif_hia_item_address(target_type,
1946 						offsetof(
1947 						struct host_interest_s,
1948 						hi_option_flag2));
1949 
1950 	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1951 			  &flag2_value);
1952 	if (rv != QDF_STATUS_SUCCESS) {
1953 		HIF_ERROR("%s: get option val (%d)", __func__, rv);
1954 		goto done;
1955 	}
1956 
1957 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1958 	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1959 			   flag2_value);
1960 	if (rv != QDF_STATUS_SUCCESS) {
1961 		HIF_ERROR("%s: set option val (%d)", __func__, rv);
1962 		goto done;
1963 	}
1964 
1965 	hif_wake_target_cpu(scn);
1966 
1967 done:
1968 
1969 	return rv;
1970 }
1971 
1972 /**
1973  * hif_pci_bus_configure() - configure the pcie bus
1974  * @hif_sc: pointer to the hif context.
1975  *
1976  * Return: 0 for success. nonzero for failure.
1977  */
1978 int hif_pci_bus_configure(struct hif_softc *hif_sc)
1979 {
1980 	int status = 0;
1981 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1982 	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
1983 
1984 	hif_ce_prepare_config(hif_sc);
1985 
1986 	/* initialize sleep state adjust variables */
1987 	hif_state->sleep_timer_init = true;
1988 	hif_state->keep_awake_count = 0;
1989 	hif_state->fake_sleep = false;
1990 	hif_state->sleep_ticks = 0;
1991 
1992 	qdf_timer_init(NULL, &hif_state->sleep_timer,
1993 			       hif_sleep_entry, (void *)hif_state,
1994 			       QDF_TIMER_TYPE_WAKE_APPS);
1995 	hif_state->sleep_timer_init = true;
1996 
1997 	status = hif_wlan_enable(hif_sc);
1998 	if (status) {
1999 		HIF_ERROR("%s: hif_wlan_enable error = %d",
2000 			  __func__, status);
2001 		goto timer_free;
2002 	}
2003 
2004 	A_TARGET_ACCESS_LIKELY(hif_sc);
2005 
2006 	if ((CONFIG_ATH_PCIE_MAX_PERF ||
2007 	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
2008 	    !ce_srng_based(hif_sc)) {
2009 		/*
2010 		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
2011 		 * prevent sleep when we want to keep firmware always awake
2012 		 * note: when we want to keep firmware always awake,
2013 		 *       hif_target_sleep_state_adjust will point to a dummy
2014 		 *       function, and hif_pci_target_sleep_state_adjust must
2015 		 *       be called instead.
2016 		 * note: bus type check is here because AHB bus is reusing
2017 		 *       hif_pci_bus_configure code.
2018 		 */
2019 		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
2020 			if (hif_pci_target_sleep_state_adjust(hif_sc,
2021 					false, true) < 0) {
2022 				status = -EACCES;
2023 				goto disable_wlan;
2024 			}
2025 		}
2026 	}
2027 
2028 	/* todo: consider replacing this with an srng field */
2029 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
2030 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
2031 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
2032 	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
2033 		hif_sc->per_ce_irq = true;
2034 	}
2035 
2036 	status = hif_config_ce(hif_sc);
2037 	if (status)
2038 		goto disable_wlan;
2039 
2040 	if (hif_needs_bmi(hif_osc)) {
2041 		status = hif_set_hia(hif_sc);
2042 		if (status)
2043 			goto unconfig_ce;
2044 
2045 		HIF_INFO_MED("%s: hif_set_hia done", __func__);
2046 
2047 	}
2048 
2049 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
2050 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
2051 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
2052 	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
2053 		HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
2054 						__func__);
2055 	else {
2056 		status = hif_configure_irq(hif_sc);
2057 		if (status < 0)
2058 			goto unconfig_ce;
2059 	}
2060 
2061 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2062 
2063 	return status;
2064 
2065 unconfig_ce:
2066 	hif_unconfig_ce(hif_sc);
2067 disable_wlan:
2068 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2069 	hif_wlan_disable(hif_sc);
2070 
2071 timer_free:
2072 	qdf_timer_stop(&hif_state->sleep_timer);
2073 	qdf_timer_free(&hif_state->sleep_timer);
2074 	hif_state->sleep_timer_init = false;
2075 
2076 	HIF_ERROR("%s: failed, status = %d", __func__, status);
2077 	return status;
2078 }
2079 
2080 /**
2081  * hif_pci_close(): close the pcie bus to the target
2082  *
2083  * Return: n/a
2084  */
2085 void hif_pci_close(struct hif_softc *hif_sc)
2086 {
2087 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
2088 
2089 	hif_pm_runtime_close(hif_pci_sc);
2090 	hif_ce_close(hif_sc);
2091 }
2092 
2093 #define BAR_NUM 0
2094 
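/**
 * hif_enable_pci_nopld() - enable the PCI device without platform driver help
 * @sc: pci softc
 * @pdev: pci device
 * @id: device id from the probe table
 *
 * Enables the device, reserves and iomaps BAR 0 for Target register access,
 * programs the DMA masks, applies the L1SS disable quirk and enables bus
 * mastering. Used when no pld (platform driver) support is available.
 *
 * Return: 0 on success, negative errno on failure (partially acquired
 * resources are released internally).
 */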
2095 static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
2096 				struct pci_dev *pdev,
2097 				const struct pci_device_id *id)
2098 {
2099 	void __iomem *mem;
2100 	int ret = 0;
2101 	uint16_t device_id = 0;
2102 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2103 
2104 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
2105 	if (device_id != id->device)  {
2106 		HIF_ERROR(
2107 		   "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
2108 		   __func__, device_id, id->device);
2109 		/* pci link is down, so returning with error code */
2110 		return -EIO;
2111 	}
2112 
2113 	/* FIXME: temp. commenting out assign_resource
2114 	 * call for dev_attach to work on 2.6.38 kernel
2115 	 */
2116 #if (!defined(__LINUX_ARM_ARCH__))
2117 	if (pci_assign_resource(pdev, BAR_NUM)) {
2118 		HIF_ERROR("%s: pci_assign_resource error", __func__);
2119 		return -EIO;
2120 	}
2121 #endif
2122 	if (pci_enable_device(pdev)) {
2123 		HIF_ERROR("%s: pci_enable_device error",
2124 			   __func__);
2125 		return -EIO;
2126 	}
2127 
2128 	/* Request MMIO resources */
2129 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2130 	if (ret) {
2131 		HIF_ERROR("%s: PCI MMIO reservation error", __func__);
2132 		ret = -EIO;
2133 		goto err_region;
2134 	}
2135 
2136 #ifdef CONFIG_ARM_LPAE
2137 	/* if CONFIG_ARM_LPAE is enabled, a 64-bit DMA mask must be set
2138 	 * even for 32-bit devices.
2139 	 */
2140 	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2141 	if (ret) {
2142 		HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
2143 		goto err_dma;
2144 	}
2145 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2146 	if (ret) {
2147 		HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
2148 		goto err_dma;
2149 	}
2150 #else
2151 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2152 	if (ret) {
2153 		HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
2154 		goto err_dma;
2155 	}
2156 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2157 	if (ret) {
2158 		HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
2159 			   __func__);
2160 		goto err_dma;
2161 	}
2162 #endif
2163 
2164 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2165 
2166 	/* Set bus master bit in PCI_COMMAND to enable DMA */
2167 	pci_set_master(pdev);
2168 
2169 	/* Arrange for access to Target SoC registers. */
2170 	mem = pci_iomap(pdev, BAR_NUM, 0);
2171 	if (!mem) {
2172 		HIF_ERROR("%s: PCI iomap error", __func__);
2173 		ret = -EIO;
2174 		goto err_iomap;
2175 	}
2176 
2177 	HIF_INFO("*****BAR is %pK\n", (void *)mem);
2178 
2179 	sc->mem = mem;
2180 
2181 	/* Hawkeye emulation specific change */
2182 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
2183 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
2184 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
2185 		(device_id == RUMIM2M_DEVICE_ID_NODE3) ||
2186 		(device_id == RUMIM2M_DEVICE_ID_NODE4) ||
2187 		(device_id == RUMIM2M_DEVICE_ID_NODE5)) {
2188 		mem = mem + 0x0c000000;
2189 		sc->mem = mem;
2190 		HIF_INFO("%s: Changing PCI mem base to %pK\n",
2191 			__func__, sc->mem);
2192 	}
2193 
2194 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
2195 	ol_sc->mem = mem;
2196 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
2197 	sc->pci_enabled = true;
2198 	return ret;
2199 
2200 err_iomap:
2201 	pci_clear_master(pdev);
2202 err_dma:
2203 	pci_release_region(pdev, BAR_NUM);
2204 err_region:
2205 	pci_disable_device(pdev);
2206 	return ret;
2207 }
2208 
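/**
 * hif_enable_pci_pld() - enable path when the platform driver owns the device
 * @sc: pci softc
 * @pdev: pci device
 * @id: device id from the probe table
 *
 * When pld support is present the platform driver is expected to have already
 * enumerated and enabled the device, so only the L1SS disable quirk is applied
 * here and the softc is marked as pci-enabled.
 *
 * Return: 0
 */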
2209 static int hif_enable_pci_pld(struct hif_pci_softc *sc,
2210 			      struct pci_dev *pdev,
2211 			      const struct pci_device_id *id)
2212 {
2213 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2214 	sc->pci_enabled = true;
2215 	return 0;
2216 }
2217 
2218 
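/**
 * hif_pci_deinit_nopld() - undo hif_enable_pci_nopld()
 * @sc: pci softc
 *
 * Disables MSI, unmaps BAR 0, releases the region, clears bus mastering and
 * disables the PCI device.
 */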
2219 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
2220 {
2221 	pci_disable_msi(sc->pdev);
2222 	pci_iounmap(sc->pdev, sc->mem);
2223 	pci_clear_master(sc->pdev);
2224 	pci_release_region(sc->pdev, BAR_NUM);
2225 	pci_disable_device(sc->pdev);
2226 }
2227 
2228 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
2229 
2230 static void hif_disable_pci(struct hif_pci_softc *sc)
2231 {
2232 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2233 
2234 	if (!ol_sc) {
2235 		HIF_ERROR("%s: ol_sc = NULL", __func__);
2236 		return;
2237 	}
2238 	hif_pci_device_reset(sc);
2239 	sc->hif_pci_deinit(sc);
2240 
2241 	sc->mem = NULL;
2242 	ol_sc->mem = NULL;
2243 }
2244 
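/**
 * hif_pci_probe_tgt_wakeup() - wake the Target during probe
 * @sc: pci softc
 *
 * Forces PCIE_SOC_WAKE and polls (up to ~500 ms) for the Target to report
 * itself awake.  For pre-QCA_WIFI_3_0 targets the firmware indicator is also
 * checked: if FW_IND_INITIALIZED is already set, the Target was presumably
 * left running by a previous session, so -EAGAIN is returned and the caller
 * can reset the Target and retry the probe.
 *
 * Return: 0 on success, -EAGAIN on timeout or unexpected Target state.
 */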
2245 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
2246 {
2247 	int ret = 0;
2248 	int targ_awake_limit = 500;
2249 #ifndef QCA_WIFI_3_0
2250 	uint32_t fw_indicator;
2251 #endif
2252 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2253 
2254 	/*
2255 	 * Verify that the Target was started cleanly.
2256 	 * The case where this is most likely is with an AUX-powered
2257 	 * Target and a Host in WoW mode. If the Host crashes,
2258 	 * loses power, or is restarted (without unloading the driver)
2259 	 * then the Target is left (aux) powered and running.  On a
2260 	 * subsequent driver load, the Target is in an unexpected state.
2261 	 * We try to catch that here in order to reset the Target and
2262 	 * retry the probe.
2263 	 */
2264 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2265 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2266 	while (!hif_targ_is_awake(scn, sc->mem)) {
2267 		if (0 == targ_awake_limit) {
2268 			HIF_ERROR("%s: target awake timeout", __func__);
2269 			ret = -EAGAIN;
2270 			goto end;
2271 		}
2272 		qdf_mdelay(1);
2273 		targ_awake_limit--;
2274 	}
2275 
2276 #if PCIE_BAR0_READY_CHECKING
2277 	{
2278 		int wait_limit = 200;
2279 		/* Synchronization point: wait the BAR0 is configured */
2280 		while (wait_limit-- &&
2281 			   !(hif_read32_mb(sc, sc->mem +
2282 					  PCIE_LOCAL_BASE_ADDRESS +
2283 					  PCIE_SOC_RDY_STATUS_ADDRESS)
2284 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
2285 			qdf_mdelay(10);
2286 		}
2287 		if (wait_limit < 0) {
2288 			/* AR6320v1 doesn't support checking of BAR0
2289 			 * configuration, takes two sec to wait for BAR0 ready
2290 			 */
2291 			HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
2292 				    __func__);
2293 		}
2294 	}
2295 #endif
2296 
2297 #ifndef QCA_WIFI_3_0
2298 	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
2299 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2300 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2301 
2302 	if (fw_indicator & FW_IND_INITIALIZED) {
2303 		HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
2304 			   __func__);
2305 		ret = -EAGAIN;
2306 		goto end;
2307 	}
2308 #endif
2309 
2310 end:
2311 	return ret;
2312 }
2313 
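/**
 * hif_pci_configure_legacy_irq() - fall back to a shared legacy interrupt
 * @sc: pci softc
 *
 * Used when MSI is unavailable or MSI setup failed.  Initializes the CE
 * tasklet, requests the shared legacy line interrupt, records it as the wake
 * irq and unmasks the HOST_GROUP0 interrupt sources in the Target.
 *
 * Return: 0 on success, or the request_irq() errno on failure.
 */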
2314 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2315 {
2316 	int ret = 0;
2317 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2318 	uint32_t target_type = scn->target_info.target_type;
2319 
2320 	HIF_TRACE("%s: E", __func__);
2321 
2322 	/* MSI is not supported, or MSI IRQ setup failed */
2323 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2324 	ret = request_irq(sc->pdev->irq,
2325 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2326 			  "wlan_pci", sc);
2327 	if (ret) {
2328 		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
2329 		goto end;
2330 	}
2331 	scn->wake_irq = sc->pdev->irq;
2332 	/* Use sc->irq instead of sc->pdev->irq;
2333 	 * platform_device pdev doesn't have an irq field
2334 	 */
2335 	sc->irq = sc->pdev->irq;
2336 	/* Use Legacy PCI Interrupts */
2337 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2338 		  PCIE_INTR_ENABLE_ADDRESS),
2339 		  HOST_GROUP0_MASK);
2340 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2341 			       PCIE_INTR_ENABLE_ADDRESS));
2342 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2343 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2344 
2345 	if ((target_type == TARGET_TYPE_IPQ4019) ||
2346 			(target_type == TARGET_TYPE_AR900B)  ||
2347 			(target_type == TARGET_TYPE_QCA9984) ||
2348 			(target_type == TARGET_TYPE_AR9888) ||
2349 			(target_type == TARGET_TYPE_QCA9888) ||
2350 			(target_type == TARGET_TYPE_AR6320V1) ||
2351 			(target_type == TARGET_TYPE_AR6320V2) ||
2352 			(target_type == TARGET_TYPE_AR6320V3)) {
2353 		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2354 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2355 	}
2356 end:
2357 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2358 			  "%s: X, ret = %d", __func__, ret);
2359 	return ret;
2360 }
2361 
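/**
 * hif_ce_srng_msi_free_irq() - free the per-CE MSI interrupts
 * @scn: hif context
 *
 * Releases each copy-engine MSI using the same ce_id -> msi_data mapping that
 * was used when the interrupts were requested.
 *
 * Return: 0 on success, or the pld_get_user_msi_assignment() errno
 * (e.g. -EINVAL when MSIs were never assigned, which the caller treats as
 * "fall back to the other interrupt types").
 */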
2362 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
2363 {
2364 	int ret;
2365 	int ce_id, irq;
2366 	uint32_t msi_data_start;
2367 	uint32_t msi_data_count;
2368 	uint32_t msi_irq_start;
2369 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2370 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
2371 
2372 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2373 					    &msi_data_count, &msi_data_start,
2374 					    &msi_irq_start);
2375 	if (ret)
2376 		return ret;
2377 
2378 	/* needs to match the ce_id -> irq data mapping
2379 	 * used in the srng parameter configuration
2380 	 */
2381 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2382 		unsigned int msi_data;
2383 
2384 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
2385 			continue;
2386 
2387 		if (!ce_sc->tasklets[ce_id].inited)
2388 			continue;
2389 
2390 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
2391 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2392 
2393 		hif_debug("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
2394 			  ce_id, msi_data, irq);
2395 
2396 		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
2397 	}
2398 
2399 	return ret;
2400 }
2401 
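/**
 * hif_pci_deconfigure_grp_irq() - free the ext group interrupts
 * @scn: hif context
 *
 * Releases every os irq that was requested for the hif ext groups and marks
 * the groups as no longer having interrupts requested.
 */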
2402 static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2403 {
2404 	int i, j, irq;
2405 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2406 	struct hif_exec_context *hif_ext_group;
2407 
2408 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2409 		hif_ext_group = hif_state->hif_ext_group[i];
2410 		if (hif_ext_group->irq_requested) {
2411 			hif_ext_group->irq_requested = false;
2412 			for (j = 0; j < hif_ext_group->numirq; j++) {
2413 				irq = hif_ext_group->os_irq[j];
2414 				pfrm_free_irq(scn->qdf_dev->dev,
2415 					      irq, hif_ext_group);
2416 			}
2417 			hif_ext_group->numirq = 0;
2418 		}
2419 	}
2420 }
2421 
2422 /**
2423  * hif_pci_nointrs(): disable IRQs
2424  *
2425  * This function stops and frees the interrupt(s) registered for this bus.
2426  *
2427  * @scn: struct hif_softc
2428  *
2429  * Return: none
2430  */
2431 void hif_pci_nointrs(struct hif_softc *scn)
2432 {
2433 	int i, ret;
2434 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2435 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2436 
2437 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2438 
2439 	if (scn->request_irq_done == false)
2440 		return;
2441 
2442 	hif_pci_deconfigure_grp_irq(scn);
2443 
2444 	ret = hif_ce_srng_msi_free_irq(scn);
2445 	if (ret != -EINVAL) {
2446 		/* ce irqs freed in hif_ce_srng_msi_free_irq */
2447 
2448 		if (scn->wake_irq)
2449 			pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
2450 		scn->wake_irq = 0;
2451 	} else if (sc->num_msi_intrs > 0) {
2452 		/* MSI interrupt(s) */
2453 		for (i = 0; i < sc->num_msi_intrs; i++)
2454 			free_irq(sc->irq + i, sc);
2455 		sc->num_msi_intrs = 0;
2456 	} else {
2457 		/* Legacy PCI line interrupt
2458 		 * Use sc->irq instead of sc->pdev->irq;
2459 		 * platform_device pdev doesn't have an irq field
2460 		 */
2461 		free_irq(sc->irq, sc);
2462 	}
2463 	scn->request_irq_done = false;
2464 }
2465 
2466 /**
2467  * hif_pci_disable_bus(): disable the pcie bus
2468  *
2469  * This function resets the target and disables the bus
2470  *
2471  * @scn: hif context
2472  *
2473  * Return: none
2474  */
2475 void hif_pci_disable_bus(struct hif_softc *scn)
2476 {
2477 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2478 	struct pci_dev *pdev;
2479 	void __iomem *mem;
2480 	struct hif_target_info *tgt_info = &scn->target_info;
2481 
2482 	/* Attach did not succeed, all resources have been
2483 	 * freed in error handler
2484 	 */
2485 	if (!sc)
2486 		return;
2487 
2488 	pdev = sc->pdev;
2489 	if (ADRASTEA_BU) {
2490 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2491 
2492 		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2493 		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
2494 			       HOST_GROUP0_MASK);
2495 	}
2496 
2497 #if defined(CPU_WARM_RESET_WAR)
2498 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2499 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2500 	 * verified for AR9888_REV1
2501 	 */
2502 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2503 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2504 		hif_pci_device_warm_reset(sc);
2505 	else
2506 		hif_pci_device_reset(sc);
2507 #else
2508 	hif_pci_device_reset(sc);
2509 #endif
2510 	mem = (void __iomem *)sc->mem;
2511 	if (mem) {
2512 		hif_dump_pipe_debug_count(scn);
2513 		if (scn->athdiag_procfs_inited) {
2514 			athdiag_procfs_remove();
2515 			scn->athdiag_procfs_inited = false;
2516 		}
2517 		sc->hif_pci_deinit(sc);
2518 		scn->mem = NULL;
2519 	}
2520 	HIF_INFO("%s: X", __func__);
2521 }
2522 
2523 #define OL_ATH_PCI_PM_CONTROL 0x44
2524 
2525 #ifdef FEATURE_RUNTIME_PM
2526 /**
2527  * hif_runtime_prevent_linkdown() - prevent or allow a runtime suspend from occurring
2528  * @scn: hif context
2529  * @flag: prevent linkdown if true otherwise allow
2530  *
2531  * this api should only be called as part of bus prevent linkdown
2532  */
2533 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2534 {
2535 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2536 
2537 	if (flag)
2538 		qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
2539 	else
2540 		qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
2541 }
2542 #else
2543 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2544 {
2545 }
2546 #endif
2547 
2548 #if defined(CONFIG_PCI_MSM)
2549 /**
2550  * hif_pci_prevent_linkdown(): prevent or allow linkdown
2551  * @flag: true prevents linkdown, false allows
2552  *
2553  * Calls into the platform driver to vote against taking down the
2554  * pcie link.
2555  *
2556  * Return: n/a
2557  */
2558 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2559 {
2560 	int errno;
2561 
2562 	HIF_INFO("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2563 	hif_runtime_prevent_linkdown(scn, flag);
2564 
2565 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2566 	if (errno)
2567 		HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
2568 			  __func__, errno);
2569 }
2570 #else
2571 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2572 {
2573 	HIF_INFO("wlan: %s pcie power collapse", (flag ? "disable" : "enable"));
2574 	hif_runtime_prevent_linkdown(scn, flag);
2575 }
2576 #endif
2577 
2578 /**
2579  * hif_pci_bus_suspend(): prepare hif for suspend
2580  *
2581  * Return: Errno
2582  */
2583 int hif_pci_bus_suspend(struct hif_softc *scn)
2584 {
2585 	hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
2586 
2587 	if (hif_drain_tasklets(scn)) {
2588 		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2589 		return -EBUSY;
2590 	}
2591 
2592 	/* Stop the HIF Sleep Timer */
2593 	hif_cancel_deferred_target_sleep(scn);
2594 
2595 	return 0;
2596 }
2597 
2598 /**
2599  * __hif_check_link_status() - API to check whether the PCIe link is active
2600  * @scn: HIF Context
2601  *
2602  * API reads the PCIe config space to verify if PCIe link training is
2603  * successful or not.
2604  *
2605  * Return: Success/Failure
2606  */
2607 static int __hif_check_link_status(struct hif_softc *scn)
2608 {
2609 	uint16_t dev_id = 0;
2610 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2611 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2612 
2613 	if (!sc) {
2614 		HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
2615 		return -EINVAL;
2616 	}
2617 
2618 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2619 
2620 	if (dev_id == sc->devid)
2621 		return 0;
2622 
2623 	HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2624 	       __func__, dev_id);
2625 
2626 	scn->recovery = true;
2627 
2628 	if (cbk && cbk->set_recovery_in_progress)
2629 		cbk->set_recovery_in_progress(cbk->context, true);
2630 	else
2631 		HIF_ERROR("%s: Driver Global Recovery is not set", __func__);
2632 
2633 	pld_is_pci_link_down(sc->dev);
2634 	return -EACCES;
2635 }
2636 
2637 /**
2638  * hif_pci_bus_resume(): prepare hif for resume
2639  *
2640  * Return: Errno
2641  */
2642 int hif_pci_bus_resume(struct hif_softc *scn)
2643 {
2644 	int errno;
2645 
2646 	errno = __hif_check_link_status(scn);
2647 	if (errno)
2648 		return errno;
2649 
2650 	hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2651 
2652 	return 0;
2653 }
2654 
2655 /**
2656  * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2657  * @scn: hif context
2658  *
2659  * Ensure that if we received the wakeup message before the irq
2660  * Ensure that if the wakeup message was received before the irq
2661  * was disabled, the message is processed before suspending.
2662  * Return: -EBUSY if we fail to flush the tasklets.
2663  */
2664 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2665 {
2666 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2667 		qdf_atomic_set(&scn->link_suspended, 1);
2668 
2669 	hif_apps_wake_irq_enable(GET_HIF_OPAQUE_HDL(scn));
2670 
2671 	return 0;
2672 }
2673 
2674 /**
2675  * hif_pci_bus_resume_noirq() - re-enable wake handling after resume
2676  * @scn: hif context
2677  *
2678  * Disable the dedicated wake irq and clear the link_suspended flag now
2679  * that normal interrupts are available again.
2680  *
2681  * Return: 0 (always succeeds).
2682  */
2683 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2684 {
2685 	hif_apps_wake_irq_disable(GET_HIF_OPAQUE_HDL(scn));
2686 
2687 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2688 		qdf_atomic_set(&scn->link_suspended, 0);
2689 
2690 	return 0;
2691 }
2692 
2693 #ifdef FEATURE_RUNTIME_PM
2694 /**
2695  * __hif_runtime_pm_set_state(): utility function
2696  * @state: state to set
2697  *
2698  * indexes into the runtime pm state and sets it.
2699  */
2700 static void __hif_runtime_pm_set_state(struct hif_softc *scn,
2701 				enum hif_pm_runtime_state state)
2702 {
2703 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2704 
2705 	if (!sc) {
2706 		HIF_ERROR("%s: HIF_CTX not initialized",
2707 		       __func__);
2708 		return;
2709 	}
2710 
2711 	qdf_atomic_set(&sc->pm_state, state);
2712 }
2713 
2714 /**
2715  * hif_runtime_pm_set_state_on(): adjust runtime pm state
2716  *
2717  * Notify hif that the runtime pm state should be on
2718  */
2719 static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
2720 {
2721 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
2722 }
2723 
2724 /**
2725  * hif_runtime_pm_set_state_resuming(): adjust runtime pm state
2726  *
2727  * Notify hif that a runtime pm resume has started
2728  */
2729 static void hif_runtime_pm_set_state_resuming(struct hif_softc *scn)
2730 {
2731 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_RESUMING);
2732 }
2733 
2734 /**
2735  * hif_runtime_pm_set_state_suspending(): adjust runtime pm state
2736  *
2737  * Notify hif that a runtime pm suspend has started
2738  */
2739 static void hif_runtime_pm_set_state_suspending(struct hif_softc *scn)
2740 {
2741 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDING);
2742 }
2743 
2744 /**
2745  * hif_runtime_pm_set_state_suspended():  adjust runtime pm state
2746  *
2747  * Notify hif that a runtime suspend attempt has been completed successfully
2748  */
2749 static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
2750 {
2751 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
2752 }
2753 
2754 /**
2755  * hif_log_runtime_suspend_success() - log a successful runtime suspend
2756  */
2757 static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
2758 {
2759 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2760 
2761 	if (!sc)
2762 		return;
2763 
2764 	sc->pm_stats.suspended++;
2765 	sc->pm_stats.suspend_jiffies = jiffies;
2766 }
2767 
2768 /**
2769  * hif_log_runtime_suspend_failure() - log a failed runtime suspend
2770  *
2771  * log a failed runtime suspend
2772  * mark last busy to prevent immediate runtime suspend
2773  */
2774 static void hif_log_runtime_suspend_failure(void *hif_ctx)
2775 {
2776 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2777 
2778 	if (!sc)
2779 		return;
2780 
2781 	sc->pm_stats.suspend_err++;
2782 }
2783 
2784 /**
2785  * hif_log_runtime_resume_success() - log a successful runtime resume
2786  *
2787  * log a successful runtime resume
2788  * mark last busy to prevent immediate runtime suspend
2789  */
2790 static void hif_log_runtime_resume_success(void *hif_ctx)
2791 {
2792 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2793 
2794 	if (!sc)
2795 		return;
2796 
2797 	sc->pm_stats.resumed++;
2798 }
2799 
2800 /**
2801  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
2802  *
2803  * Record the failure.
2804  * mark last busy to delay a retry.
2805  * adjust the runtime_pm state.
2806  */
2807 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
2808 {
2809 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2810 
2811 	hif_log_runtime_suspend_failure(hif_ctx);
2812 	hif_pm_runtime_mark_last_busy(hif_ctx);
2813 	hif_runtime_pm_set_state_on(scn);
2814 }
2815 
2816 /**
2817  * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
2818  *
2819  * Makes sure that the pci link will be taken down by the suspend operation.
2820  * If the hif layer is configured to leave the bus on, runtime suspend will
2821  * not save any power.
2822  *
2823  * Set the runtime suspend state to in progress.
2824  *
2825  * Return: -EINVAL if the bus won't go down, otherwise 0
2826  */
2827 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
2828 {
2829 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2830 
2831 	if (!hif_can_suspend_link(hif_ctx)) {
2832 		HIF_ERROR("Runtime PM not supported for link up suspend");
2833 		return -EINVAL;
2834 	}
2835 
2836 	hif_runtime_pm_set_state_suspending(scn);
2837 	return 0;
2838 }
2839 
2840 /**
2841  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
2842  *
2843  * Record the success.
2844  * adjust the runtime_pm state
2845  */
2846 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
2847 {
2848 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2849 
2850 	hif_runtime_pm_set_state_suspended(scn);
2851 	hif_log_runtime_suspend_success(scn);
2852 }
2853 
2854 /**
2855  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
2856  *
2857  * update the runtime pm state.
2858  */
2859 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
2860 {
2861 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2862 
2863 	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
2864 	hif_runtime_pm_set_state_resuming(scn);
2865 }
2866 
2867 /**
2868  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
2869  *
2870  * record the success.
2871  * adjust the runtime_pm state
2872  */
2873 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
2874 {
2875 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2876 
2877 	hif_log_runtime_resume_success(hif_ctx);
2878 	hif_pm_runtime_mark_last_busy(hif_ctx);
2879 	hif_runtime_pm_set_state_on(scn);
2880 }
2881 
2882 /**
2883  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
2884  *
2885  * Return: 0 for success and non-zero error code for failure
2886  */
2887 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
2888 {
2889 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2890 	int errno;
2891 
2892 	errno = hif_bus_suspend(hif_ctx);
2893 	if (errno) {
2894 		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
2895 		return errno;
2896 	}
2897 
2898 	hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1);
2899 
2900 	errno = hif_bus_suspend_noirq(hif_ctx);
2901 	if (errno) {
2902 		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
2903 		hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
2904 		goto bus_resume;
2905 	}
2906 
2907 	qdf_atomic_set(&sc->pm_dp_rx_busy, 0);
2908 
2909 	return 0;
2910 
2911 bus_resume:
2912 	QDF_BUG(!hif_bus_resume(hif_ctx));
2913 
2914 	return errno;
2915 }
2916 
2917 /**
2918  * hif_fastpath_resume() - resume fastpath for runtime pm
2919  *
2920  * ensure that the fastpath write index register is up to date
2921  * since runtime pm may cause ce_send_fast to skip the register
2922  * write.
2923  *
2924  * fastpath only applicable to legacy copy engine
2925  */
2926 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
2927 {
2928 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2929 	struct CE_state *ce_state;
2930 
2931 	if (!scn)
2932 		return;
2933 
2934 	if (scn->fastpath_mode_on) {
2935 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
2936 			return;
2937 
2938 		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
2939 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
2940 
2941 		/* war_ce_src_ring_write_idx_set */
2942 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
2943 				ce_state->src_ring->write_index);
2944 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
2945 		Q_TARGET_ACCESS_END(scn);
2946 	}
2947 }
2948 
2949 /**
2950  * hif_runtime_resume() - do the bus resume part of a runtime resume
2951  *
2952  *  Return: 0 for success and non-zero error code for failure
2953  */
2954 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
2955 {
2956 	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
2957 	QDF_BUG(!hif_bus_resume(hif_ctx));
2958 	return 0;
2959 }
2960 #endif /* #ifdef FEATURE_RUNTIME_PM */
2961 
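/*
 * hif_free_msi_ctx() - release the MSI "magic" DMA cookie
 *
 * Only needed when 64-bit MSI support is compiled in; frees the small
 * consistent DMA buffer used as the MSI magic value and clears the
 * bookkeeping in the msi_info structure.  The non-64-bit build below is an
 * empty stub.
 */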
2962 #if CONFIG_PCIE_64BIT_MSI
2963 static void hif_free_msi_ctx(struct hif_softc *scn)
2964 {
2965 	struct hif_pci_softc *sc = scn->hif_sc;
2966 	struct hif_msi_info *info = &sc->msi_info;
2967 	struct device *dev = scn->qdf_dev->dev;
2968 
2969 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
2970 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
2971 	info->magic = NULL;
2972 	info->magic_dma = 0;
2973 }
2974 #else
2975 static void hif_free_msi_ctx(struct hif_softc *scn)
2976 {
2977 }
2978 #endif
2979 
2980 void hif_pci_disable_isr(struct hif_softc *scn)
2981 {
2982 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2983 
2984 	hif_exec_kill(&scn->osc);
2985 	hif_nointrs(scn);
2986 	hif_free_msi_ctx(scn);
2987 	/* Cancel the pending tasklet */
2988 	ce_tasklet_kill(scn);
2989 	tasklet_kill(&sc->intr_tq);
2990 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
2991 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
2992 }
2993 
2994 /* Function to reset SoC */
2995 void hif_pci_reset_soc(struct hif_softc *hif_sc)
2996 {
2997 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
2998 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
2999 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
3000 
3001 #if defined(CPU_WARM_RESET_WAR)
3002 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
3003 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
3004 	 * verified for AR9888_REV1
3005 	 */
3006 	if (tgt_info->target_version == AR9888_REV2_VERSION)
3007 		hif_pci_device_warm_reset(sc);
3008 	else
3009 		hif_pci_device_reset(sc);
3010 #else
3011 	hif_pci_device_reset(sc);
3012 #endif
3013 }
3014 
3015 #ifdef CONFIG_PCI_MSM
3016 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
3017 {
3018 	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
3019 	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
3020 }
3021 #else
3022 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}
3023 #endif
3024 
3025 /**
3026  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
3027  * @sc: HIF PCIe Context
3028  *
3029  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
3030  *
3031  * Return: Failure to caller
3032  */
3033 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
3034 {
3035 	uint16_t val = 0;
3036 	uint32_t bar = 0;
3037 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
3038 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
3039 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
3040 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
3041 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
3042 	A_target_id_t pci_addr = scn->mem;
3043 
3044 	HIF_ERROR("%s: keep_awake_count = %d",
3045 			__func__, hif_state->keep_awake_count);
3046 
3047 	pfrm_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
3048 
3049 	HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
3050 
3051 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
3052 
3053 	HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
3054 
3055 	pfrm_read_config_word(sc->pdev, PCI_COMMAND, &val);
3056 
3057 	HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
3058 
3059 	pfrm_read_config_word(sc->pdev, PCI_STATUS, &val);
3060 
3061 	HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
3062 
3063 	pfrm_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
3064 
3065 	HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
3066 
3067 	HIF_ERROR("%s: SOC_WAKE_ADDR 0x%08x", __func__,
3068 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3069 						PCIE_SOC_WAKE_ADDRESS));
3070 
3071 	HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
3072 			hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3073 							RTC_STATE_ADDRESS));
3074 
3075 	HIF_ERROR("%s:error, wakeup target", __func__);
3076 	hif_msm_pcie_debug_info(sc);
3077 
3078 	if (!cfg->enable_self_recovery)
3079 		QDF_BUG(0);
3080 
3081 	scn->recovery = true;
3082 
3083 	if (cbk->set_recovery_in_progress)
3084 		cbk->set_recovery_in_progress(cbk->context, true);
3085 
3086 	pld_is_pci_link_down(sc->dev);
3087 	return -EACCES;
3088 }
3089 
3090 /*
3091  * For now, we use simple on-demand sleep/wake.
3092  * Some possible improvements:
3093  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
3094  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
3095  *   Careful, though, these functions may be used by
3096  *  interrupt handlers ("atomic")
3097  *  -Don't use host_reg_table for this code; instead use values directly
3098  *  -Use a separate timer to track activity and allow Target to sleep only
3099  *   if it hasn't done anything for a while; may even want to delay some
3100  *   processing for a short while in order to "batch" (e.g.) transmit
3101  *   requests with completion processing into "windows of up time".  Costs
3102  *   some performance, but improves power utilization.
3103  *  -On some platforms, it might be possible to eliminate explicit
3104  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
3105  *   recover from the failure by forcing the Target awake.
3106  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
3107  *   overhead in some cases. Perhaps this makes more sense when
3108  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
3109  *   disabled.
3110  *  -It is possible to compile this code out and simply force the Target
3111  *   to remain awake.  That would yield optimal performance at the cost of
3112  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
3113  *
3114  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
3115  */
3116 /**
3117  * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
3118  * @scn: hif_softc pointer.
3119  * @sleep_ok: true to allow the Target to sleep, false to keep it awake
3120  * @wait_for_it: when waking, wait until the Target is verifiably awake
3121  *
3122  * Adjust the Target sleep state, optionally waiting for it to wake up.
3123  *
3124  * Return: int
3125  */
3126 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
3127 			      bool sleep_ok, bool wait_for_it)
3128 {
3129 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3130 	A_target_id_t pci_addr = scn->mem;
3131 	static int max_delay;
3132 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3133 	static int debug;

3134 	if (scn->recovery)
3135 		return -EACCES;
3136 
3137 	if (qdf_atomic_read(&scn->link_suspended)) {
3138 		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
3139 		debug = true;
3140 		QDF_ASSERT(0);
3141 		return -EACCES;
3142 	}
3143 
3144 	if (debug) {
3145 		wait_for_it = true;
3146 		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
3147 				__func__);
3148 		QDF_ASSERT(0);
3149 	}
3150 
3151 	if (sleep_ok) {
3152 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3153 		hif_state->keep_awake_count--;
3154 		if (hif_state->keep_awake_count == 0) {
3155 			/* Allow sleep */
3156 			hif_state->verified_awake = false;
3157 			hif_state->sleep_ticks = qdf_system_ticks();
3158 		}
3159 		if (hif_state->fake_sleep == false) {
3160 			/* Set the Fake Sleep */
3161 			hif_state->fake_sleep = true;
3162 
3163 			/* Start the Sleep Timer */
3164 			qdf_timer_stop(&hif_state->sleep_timer);
3165 			qdf_timer_start(&hif_state->sleep_timer,
3166 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
3167 		}
3168 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3169 	} else {
3170 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3171 
3172 		if (hif_state->fake_sleep) {
3173 			hif_state->verified_awake = true;
3174 		} else {
3175 			if (hif_state->keep_awake_count == 0) {
3176 				/* Force AWAKE */
3177 				hif_write32_mb(sc, pci_addr +
3178 					      PCIE_LOCAL_BASE_ADDRESS +
3179 					      PCIE_SOC_WAKE_ADDRESS,
3180 					      PCIE_SOC_WAKE_V_MASK);
3181 			}
3182 		}
3183 		hif_state->keep_awake_count++;
3184 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3185 
3186 		if (wait_for_it && !hif_state->verified_awake) {
3187 #define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
3188 			int tot_delay = 0;
3189 			int curr_delay = 5;
3190 
3191 			for (;; ) {
3192 				if (hif_targ_is_awake(scn, pci_addr)) {
3193 					hif_state->verified_awake = true;
3194 					break;
3195 				}
3196 				if (!hif_pci_targ_is_present(scn, pci_addr))
3197 					break;
3198 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
3199 					return hif_log_soc_wakeup_timeout(sc);
3200 
3201 				OS_DELAY(curr_delay);
3202 				tot_delay += curr_delay;
3203 
3204 				if (curr_delay < 50)
3205 					curr_delay += 5;
3206 			}
3207 
3208 			/*
3209 			 * NB: If Target has to come out of Deep Sleep,
3210 			 * this may take a few Msecs. Typically, though
3211 			 * this delay should be <30us.
3212 			 */
3213 			if (tot_delay > max_delay)
3214 				max_delay = tot_delay;
3215 		}
3216 	}
3217 
3218 	if (debug && hif_state->verified_awake) {
3219 		debug = 0;
3220 		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
3221 			__func__,
3222 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3223 				PCIE_INTR_ENABLE_ADDRESS),
3224 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3225 				PCIE_INTR_CAUSE_ADDRESS),
3226 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3227 				CPU_INTR_ADDRESS),
3228 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
3229 				PCIE_INTR_CLR_ADDRESS),
3230 			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
3231 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
3232 	}
3233 
3234 	return 0;
3235 }
3236 
3237 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
3238 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
3239 {
3240 	uint32_t value;
3241 	void *addr;
3242 
3243 	addr = scn->mem + offset;
3244 	value = hif_read32_mb(scn, addr);
3245 
3246 	{
3247 		unsigned long irq_flags;
3248 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3249 
3250 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3251 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3252 		pcie_access_log[idx].is_write = false;
3253 		pcie_access_log[idx].addr = addr;
3254 		pcie_access_log[idx].value = value;
3255 		pcie_access_log_seqnum++;
3256 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3257 	}
3258 
3259 	return value;
3260 }
3261 
3262 void
3263 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
3264 {
3265 	void *addr;
3266 
3267 	addr = scn->mem + (offset);
3268 	hif_write32_mb(scn, addr, value);
3269 
3270 	{
3271 		unsigned long irq_flags;
3272 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3273 
3274 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3275 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3276 		pcie_access_log[idx].is_write = true;
3277 		pcie_access_log[idx].addr = addr;
3278 		pcie_access_log[idx].value = value;
3279 		pcie_access_log_seqnum++;
3280 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3281 	}
3282 }
3283 
3284 /**
3285  * hif_target_dump_access_log() - dump access log
3286  *
3287  * dump access log
3288  *
3289  * Return: n/a
3290  */
3291 void hif_target_dump_access_log(void)
3292 {
3293 	int idx, len, start_idx, cur_idx;
3294 	unsigned long irq_flags;
3295 
3296 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3297 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
3298 		len = PCIE_ACCESS_LOG_NUM;
3299 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3300 	} else {
3301 		len = pcie_access_log_seqnum;
3302 		start_idx = 0;
3303 	}
3304 
3305 	for (idx = 0; idx < len; idx++) {
3306 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
3307 		HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
3308 		       __func__, idx,
3309 		       pcie_access_log[cur_idx].seqnum,
3310 		       pcie_access_log[cur_idx].is_write,
3311 		       pcie_access_log[cur_idx].addr,
3312 		       pcie_access_log[cur_idx].value);
3313 	}
3314 
3315 	pcie_access_log_seqnum = 0;
3316 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3317 }
3318 #endif
3319 
3320 #ifndef HIF_AHB
3321 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
3322 {
3323 	QDF_BUG(0);
3324 	return -EINVAL;
3325 }
3326 
3327 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
3328 {
3329 	QDF_BUG(0);
3330 	return -EINVAL;
3331 }
3332 #endif
3333 
3334 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
3335 {
3336 	struct ce_tasklet_entry *tasklet_entry = context;
3337 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
3338 }
3339 extern const char *ce_name[];
3340 
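/**
 * hif_ce_msi_map_ce_to_irq() - map a copy engine id to its MSI irq
 * @scn: hif context
 * @ce_id: copy engine id
 *
 * Return: the Linux irq number recorded for @ce_id when the CE MSIs were
 * requested in hif_ce_msi_configure_irq().
 */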
3341 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3342 {
3343 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3344 
3345 	return pci_scn->ce_msi_irq_num[ce_id];
3346 }
3347 
3348 /* hif_ce_srng_msi_irq_disable() - disable the irq for msi
3349  * @hif_sc: hif context
3350  * @ce_id: which ce to disable copy complete interrupts for
3351  *
3352  * since MSI interrupts are not level based, the system can function
3353  * without disabling these interrupts.  Interrupt mitigation can be
3354  * added here for better system performance.
3355  */
3356 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3357 {
3358 	pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
3359 				hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3360 }
3361 
3362 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3363 {
3364 	pfrm_enable_irq(hif_sc->qdf_dev->dev,
3365 			hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3366 }
3367 
3368 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3369 {
3370 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3371 }
3372 
3373 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3374 {
3375 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3376 }
3377 
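/**
 * hif_ce_msi_configure_irq() - request the wake and per-CE MSI interrupts
 * @scn: hif context
 *
 * Requests the "WAKE" MSI (unless wake irqs are disabled), selects the srng
 * or legacy CE enable/disable ops, and then requests one MSI per copy engine
 * using the ce_id -> msi_data mapping shared with the srng configuration.
 * On any failure the already requested irqs are freed before returning.
 *
 * Return: 0 on success, negative errno on failure.
 */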
3378 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
3379 {
3380 	int ret;
3381 	int ce_id, irq;
3382 	uint32_t msi_data_start;
3383 	uint32_t msi_data_count;
3384 	uint32_t msi_irq_start;
3385 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3386 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3387 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
3388 
3389 	if (!scn->disable_wake_irq) {
3390 		/* do wake irq assignment */
3391 		ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
3392 						  &msi_data_count,
3393 						  &msi_data_start,
3394 						  &msi_irq_start);
3395 		if (ret)
3396 			return ret;
3397 
3398 		scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev,
3399 						msi_irq_start);
3400 
3401 		ret = pfrm_request_irq(scn->qdf_dev->dev, scn->wake_irq,
3402 				       hif_wake_interrupt_handler,
3403 				       IRQF_NO_SUSPEND, "wlan_wake_irq", scn);
3404 
3405 		if (ret)
3406 			return ret;
3407 	}
3408 
3409 	/* do ce irq assignments */
3410 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3411 					    &msi_data_count, &msi_data_start,
3412 					    &msi_irq_start);
3413 	if (ret)
3414 		goto free_wake_irq;
3415 
3416 	if (ce_srng_based(scn)) {
3417 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
3418 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
3419 	} else {
3420 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
3421 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
3422 	}
3423 
3424 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
3425 
3426 	/* needs to match the ce_id -> irq data mapping
3427 	 * used in the srng parameter configuration
3428 	 */
3429 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3430 		unsigned int msi_data = (ce_id % msi_data_count) +
3431 			msi_irq_start;
3432 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3433 			continue;
3434 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3435 		HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)",
3436 			 __func__, ce_id, msi_data, irq,
3437 			 &ce_sc->tasklets[ce_id]);
3438 
3439 		/* implies the ce is also initialized */
3440 		if (!ce_sc->tasklets[ce_id].inited)
3441 			continue;
3442 
3443 		pci_sc->ce_msi_irq_num[ce_id] = irq;
3444 		ret = pfrm_request_irq(scn->qdf_dev->dev,
3445 				       irq, hif_ce_interrupt_handler,
3446 				       IRQF_SHARED,
3447 				       ce_name[ce_id],
3448 				       &ce_sc->tasklets[ce_id]);
3449 		if (ret)
3450 			goto free_irq;
3451 	}
3452 
3453 	return ret;
3454 
3455 free_irq:
3456 	/* the request_irq for the last ce_id failed so skip it. */
3457 	while (ce_id > 0 && ce_id < scn->ce_count) {
3458 		unsigned int msi_data;
3459 
3460 		ce_id--;
3461 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
3462 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3463 		pfrm_free_irq(scn->qdf_dev->dev,
3464 			      irq, &ce_sc->tasklets[ce_id]);
3465 	}
3466 
3467 free_wake_irq:
3468 	if (!scn->disable_wake_irq) {
3469 		pfrm_free_irq(scn->qdf_dev->dev,
3470 			      scn->wake_irq, scn->qdf_dev->dev);
3471 		scn->wake_irq = 0;
3472 	}
3473 
3474 	return ret;
3475 }
3476 
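/*
 * hif_exec_grp_irq_disable()/hif_exec_grp_irq_enable() - mask or unmask all
 * os irqs belonging to one hif ext group; installed as the group's
 * irq_disable/irq_enable callbacks by hif_pci_configure_grp_irq().
 */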
3477 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3478 {
3479 	int i;
3480 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
3481 
3482 	for (i = 0; i < hif_ext_group->numirq; i++)
3483 		pfrm_disable_irq_nosync(scn->qdf_dev->dev,
3484 					hif_ext_group->os_irq[i]);
3485 }
3486 
3487 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3488 {
3489 	int i;
3490 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
3491 
3492 	for (i = 0; i < hif_ext_group->numirq; i++)
3493 		pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]);
3494 }
3495 
3496 /**
3497  * hif_pci_get_irq_name() - get irqname
3498  * This function maps an irq number to an irq name;
3499  * the pci implementation returns a fixed dummy name.
3500  *
3501  * @irq_no: irq number
3502  *
3503  * Return: irq name
3504  */
3505 const char *hif_pci_get_irq_name(int irq_no)
3506 {
3507 	return "pci-dummy";
3508 }
3509 
3510 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3511 			      struct hif_exec_context *hif_ext_group)
3512 {
3513 	int ret = 0;
3514 	int irq = 0;
3515 	int j;
3516 
3517 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3518 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3519 	hif_ext_group->irq_name = &hif_pci_get_irq_name;
3520 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3521 
3522 	for (j = 0; j < hif_ext_group->numirq; j++) {
3523 		irq = hif_ext_group->irq[j];
3524 
3525 		hif_info("request_irq = %d for grp %d",
3526 			 irq, hif_ext_group->grp_id);
3527 		ret = pfrm_request_irq(
3528 				scn->qdf_dev->dev, irq,
3529 				hif_ext_group_interrupt_handler,
3530 				IRQF_SHARED | IRQF_NO_SUSPEND,
3531 				"wlan_EXT_GRP",
3532 				hif_ext_group);
3533 		if (ret) {
3534 			HIF_ERROR("%s: request_irq failed ret = %d",
3535 				  __func__, ret);
3536 			return -EFAULT;
3537 		}
3538 		hif_ext_group->os_irq[j] = irq;
3539 	}
3540 	hif_ext_group->irq_requested = true;
3541 	return 0;
3542 }
3543 
3544 /**
3545  * hif_configure_irq() - configure interrupt
3546  *
3547  * This function configures interrupt(s)
3548  *
3549  * @scn: hif context
3551  *
3552  * Return: 0 - for success
3553  */
3554 int hif_configure_irq(struct hif_softc *scn)
3555 {
3556 	int ret = 0;
3557 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3558 
3559 	HIF_TRACE("%s: E", __func__);
3560 
3561 	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
3562 		scn->request_irq_done = false;
3563 		return 0;
3564 	}
3565 
3566 	hif_init_reschedule_tasklet_work(sc);
3567 
3568 	ret = hif_ce_msi_configure_irq(scn);
3569 	if (ret == 0)
3570 		goto end;
3572 
3573 	switch (scn->target_info.target_type) {
3574 	case TARGET_TYPE_IPQ4019:
3575 		ret = hif_ahb_configure_legacy_irq(sc);
3576 		break;
3577 	case TARGET_TYPE_QCA8074:
3578 	case TARGET_TYPE_QCA8074V2:
3579 	case TARGET_TYPE_QCA6018:
3580 		ret = hif_ahb_configure_irq(sc);
3581 		break;
3582 	default:
3583 		ret = hif_pci_configure_legacy_irq(sc);
3584 		break;
3585 	}
3586 	if (ret < 0) {
3587 		HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
3588 			__func__, ret);
3589 		return ret;
3590 	}
3591 end:
3592 	scn->request_irq_done = true;
3593 	return 0;
3594 }
3595 
3596 /**
3597  * hif_trigger_timer_irq() : Triggers interrupt on LF_Timer 0
3598  * @scn: hif control structure
3599  *
3600  * Sets the IRQ bit in LF Timer Status Address to wake peregrine/swift
3601  * stuck at a polling loop in pcie_address_config in FW
3602  *
3603  * Return: none
3604  */
3605 static void hif_trigger_timer_irq(struct hif_softc *scn)
3606 {
3607 	int tmp;
3608 	/* Trigger IRQ on Peregrine/Swift by setting
3609 	 * IRQ Bit of LF_TIMER 0
3610 	 */
3611 	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3612 						SOC_LF_TIMER_STATUS0_ADDRESS));
3613 	/* Set Raw IRQ Bit */
3614 	tmp |= 1;
3615 	/* SOC_LF_TIMER_STATUS0 */
3616 	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3617 		       SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
3618 }
3619 
3620 /**
3621  * hif_target_sync() - ensure the target is ready
3622  * @scn: hif control structure
3623  *
3624  * Informs the fw that we plan to use legacy interrupts so that
3625  * it can begin booting, and waits until the fw finishes booting
3626  * before continuing. Should be called before trying to write
3627  * to the target's other registers for the first time.
3628  *
3629  * Return: none
3630  */
3631 static void hif_target_sync(struct hif_softc *scn)
3632 {
3633 	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3634 			    PCIE_INTR_ENABLE_ADDRESS),
3635 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3636 	/* read to flush pcie write */
3637 	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3638 			PCIE_INTR_ENABLE_ADDRESS));
3639 
3640 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3641 			PCIE_SOC_WAKE_ADDRESS,
3642 			PCIE_SOC_WAKE_V_MASK);
3643 	while (!hif_targ_is_awake(scn, scn->mem))
3644 		;
3645 
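	/*
	 * Wait for the firmware to report FW_IND_INITIALIZED. The interrupt
	 * enable write is repeated on every pass of the loop below, and an
	 * AR9888 target additionally gets an LF timer kick plus a bounded
	 * number of retries before the wait is declared failed.
	 */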
3646 	if (HAS_FW_INDICATOR) {
3647 		int wait_limit = 500;
3648 		int fw_ind = 0;
3649 		int retry_count = 0;
3650 		uint32_t target_type = scn->target_info.target_type;
3651 fw_retry:
3652 		HIF_TRACE("%s: Loop checking FW signal", __func__);
3653 		while (1) {
3654 			fw_ind = hif_read32_mb(scn, scn->mem +
3655 					FW_INDICATOR_ADDRESS);
3656 			if (fw_ind & FW_IND_INITIALIZED)
3657 				break;
3658 			if (wait_limit-- < 0)
3659 				break;
3660 			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3661 			    PCIE_INTR_ENABLE_ADDRESS),
3662 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3663 			    /* read to flush pcie write */
3664 			(void)hif_read32_mb(scn, scn->mem +
3665 			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
3666 
3667 			qdf_mdelay(10);
3668 		}
3669 		if (wait_limit < 0) {
3670 			if (target_type == TARGET_TYPE_AR9888 &&
3671 			    retry_count++ < 2) {
3672 				hif_trigger_timer_irq(scn);
3673 				wait_limit = 500;
3674 				goto fw_retry;
3675 			}
3676 			HIF_TRACE("%s: FW signal timed out",
3677 					__func__);
3678 			qdf_assert_always(0);
3679 		} else {
3680 			HIF_TRACE("%s: Got FW signal, retries = %x",
3681 					__func__, 500-wait_limit);
3682 		}
3683 	}
3684 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3685 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3686 }
3687 
3688 static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
3689 				     struct device *dev)
3690 {
3691 	struct pld_soc_info info;
3692 
3693 	pld_get_soc_info(dev, &info);
3694 	sc->mem = info.v_addr;
3695 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3696 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3697 }
3698 
3699 static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
3700 				       struct device *dev)
3701 {}
3702 
3703 static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
3704 				    int device_id)
3705 {
3706 	if (!pld_have_platform_driver_support(sc->dev))
3707 		return false;
3708 
3709 	switch (device_id) {
3710 	case QCA6290_DEVICE_ID:
3711 	case QCN9000_DEVICE_ID:
3712 	case QCA6290_EMULATION_DEVICE_ID:
3713 	case QCA6390_DEVICE_ID:
3714 	case QCA6490_DEVICE_ID:
3715 	case AR6320_DEVICE_ID:
3716 	case QCN7605_DEVICE_ID:
3717 		return true;
3718 	}
3719 	return false;
3720 }
3721 
3722 static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
3723 					   int device_id)
3724 {
3725 	if (hif_is_pld_based_target(sc, device_id)) {
3726 		sc->hif_enable_pci = hif_enable_pci_pld;
3727 		sc->hif_pci_deinit = hif_pci_deinit_pld;
3728 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
3729 	} else {
3730 		sc->hif_enable_pci = hif_enable_pci_nopld;
3731 		sc->hif_pci_deinit = hif_pci_deinit_nopld;
3732 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
3733 	}
3734 }
3735 
3736 #ifdef HIF_REG_WINDOW_SUPPORT
3737 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3738 					       u32 target_type)
3739 {
3740 	switch (target_type) {
3741 	case TARGET_TYPE_QCN7605:
3742 		sc->use_register_windowing = true;
3743 		qdf_spinlock_create(&sc->register_access_lock);
3744 		sc->register_window = 0;
3745 		break;
3746 	default:
3747 		sc->use_register_windowing = false;
3748 	}
3749 }
3750 #else
3751 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3752 					       u32 target_type)
3753 {
3754 	sc->use_register_windowing = false;
3755 }
3756 #endif
3757 
3758 /**
3759  * hif_pci_enable_bus() - enable bus
3760  * @ol_sc: soft_sc struct
3761  * @dev: device pointer
3762  * @bdev: bus dev pointer
3763  * @bid: bus id pointer
3764  * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
3765  *
3766  * This function enables the PCI bus and prepares the target.
3767  *
3768  * Return: QDF_STATUS
3769  */
3770 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3771 			  struct device *dev, void *bdev,
3772 			  const struct hif_bus_id *bid,
3773 			  enum hif_enable_type type)
3774 {
3775 	int ret = 0;
3776 	uint32_t hif_type, target_type;
3777 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3778 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3779 	uint16_t revision_id = 0;
3780 	int probe_again = 0;
3781 	struct pci_dev *pdev = bdev;
3782 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3783 	struct hif_target_info *tgt_info;
3784 
3785 	if (!ol_sc) {
3786 		HIF_ERROR("%s: hif_ctx is NULL", __func__);
3787 		return QDF_STATUS_E_NOMEM;
3788 	}
3789 
3790 	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
3791 		  __func__, hif_get_conparam(ol_sc), id->device);
3792 
3793 	sc->pdev = pdev;
3794 	sc->dev = &pdev->dev;
3795 	sc->devid = id->device;
3796 	sc->cacheline_sz = dma_get_cache_alignment();
3797 	tgt_info = hif_get_target_info_handle(hif_hdl);
3798 	hif_pci_init_deinit_ops_attach(sc, id->device);
3799 	sc->hif_pci_get_soc_info(sc, dev);
3800 again:
3801 	ret = sc->hif_enable_pci(sc, pdev, id);
3802 	if (ret < 0) {
3803 		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
3804 		       __func__, ret);
3805 		goto err_enable_pci;
3806 	}
3807 	HIF_TRACE("%s: hif_enable_pci done", __func__);
3808 
3809 	/* Temporary FIX: disable ASPM on peregrine.
3810 	 * Will be removed after the OTP is programmed
3811 	 */
3812 	hif_disable_power_gating(hif_hdl);
3813 
3814 	device_disable_async_suspend(&pdev->dev);
3815 	pfrm_read_config_word(pdev, 0x08, &revision_id);
3816 
3817 	ret = hif_get_device_type(id->device, revision_id,
3818 						&hif_type, &target_type);
3819 	if (ret < 0) {
3820 		HIF_ERROR("%s: invalid device id/revision_id", __func__);
3821 		goto err_tgtstate;
3822 	}
3823 	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
3824 		  __func__, hif_type, target_type);
3825 
3826 	hif_register_tbl_attach(ol_sc, hif_type);
3827 	hif_target_register_tbl_attach(ol_sc, target_type);
3828 
3829 	hif_pci_init_reg_windowing_support(sc, target_type);
3830 
3831 	tgt_info->target_type = target_type;
3832 
3833 	if (ce_srng_based(ol_sc)) {
3834 		HIF_TRACE("%s: Skip target wakeup for srng devices", __func__);
3835 	} else {
3836 		ret = hif_pci_probe_tgt_wakeup(sc);
3837 		if (ret < 0) {
3838 			HIF_ERROR("%s: ERROR - hif_pci_probe_tgt_wakeup error = %d",
3839 					__func__, ret);
3840 			if (ret == -EAGAIN)
3841 				probe_again++;
3842 			goto err_tgtstate;
3843 		}
3844 		HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
3845 	}
3846 
3847 	if (!ol_sc->mem_pa) {
3848 		HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
3849 		ret = -EIO;
3850 		goto err_tgtstate;
3851 	}
3852 
3853 	if (!ce_srng_based(ol_sc)) {
3854 		hif_target_sync(ol_sc);
3855 
3856 		if (ADRASTEA_BU)
3857 			hif_vote_link_up(hif_hdl);
3858 	}
3859 
3860 	return QDF_STATUS_SUCCESS;
3861 
3862 err_tgtstate:
3863 	hif_disable_pci(sc);
3864 	sc->pci_enabled = false;
3865 	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
3866 	return QDF_STATUS_E_ABORTED;
3867 
3868 err_enable_pci:
3869 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3870 		int delay_time;
3871 
3872 		HIF_INFO("%s: pci reprobe", __func__);
3873 		/* 10, 40, 90, 100, 100, ... */
3874 		delay_time = min(100, 10 * (probe_again * probe_again));
3875 		qdf_mdelay(delay_time);
3876 		goto again;
3877 	}
3878 	return ret;
3879 }
3880 
3881 /**
3882  * hif_pci_irq_enable() - enable copy engine interrupt
3883  * @scn: hif_softc
3884  * @ce_id: ce_id
3885  *
3886  * Return: void
3887  */
3888 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3889 {
3890 	uint32_t tmp = 1 << ce_id;
3891 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3892 
3893 	qdf_spin_lock_irqsave(&sc->irq_lock);
3894 	scn->ce_irq_summary &= ~tmp;
3895 	if (scn->ce_irq_summary == 0) {
3896 		/* Enable Legacy PCI line interrupts */
3897 		if (LEGACY_INTERRUPTS(sc) &&
3898 			(scn->target_status != TARGET_STATUS_RESET) &&
3899 			(!qdf_atomic_read(&scn->link_suspended))) {
3900 
3901 			hif_write32_mb(scn, scn->mem +
3902 				(SOC_CORE_BASE_ADDRESS |
3903 				PCIE_INTR_ENABLE_ADDRESS),
3904 				HOST_GROUP0_MASK);
3905 
3906 			hif_read32_mb(scn, scn->mem +
3907 					(SOC_CORE_BASE_ADDRESS |
3908 					PCIE_INTR_ENABLE_ADDRESS));
3909 		}
3910 	}
3911 	if (scn->hif_init_done)
3912 		Q_TARGET_ACCESS_END(scn);
3913 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
3914 
3915 	/* check for missed firmware crash */
3916 	hif_fw_interrupt_handler(0, scn);
3917 }
3918 
3919 /**
3920  * hif_pci_irq_disable() - disable copy engine interrupt
3921  * @scn: hif_softc
3922  * @ce_id: ce_id
3923  *
3924  * only applicable to legacy copy engine...
3925  *
3926  * Return: void
3927  */
3928 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3929 {
3930 	/* For Rome only need to wake up target */
3931 	/* target access is maintained until interrupts are re-enabled */
3932 	Q_TARGET_ACCESS_BEGIN(scn);
3933 }
3934 
3935 #ifdef FEATURE_RUNTIME_PM
3936 /**
3937  * hif_pm_runtime_get_sync() - do a get operation with sync resume
3938  *
3939  * A get operation will prevent a runtime suspend until a corresponding
3940  * put is done. Unlike hif_pm_runtime_get(), this API will do a sync
3941  * resume instead of requesting a resume if it is runtime PM suspended
3942  * so it can only be called in non-atomic context.
3943  *
3944  * @hif_ctx: pointer of HIF context
3945  *
3946  * Return: 0 if it is runtime PM resumed otherwise an error code.
3947  */
3948 int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx)
3949 {
3950 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3951 	int pm_state;
3952 	int ret;
3953 
3954 	if (!sc)
3955 		return -EINVAL;
3956 
3957 	if (!pm_runtime_enabled(sc->dev))
3958 		return 0;
3959 
3960 	pm_state = qdf_atomic_read(&sc->pm_state);
3961 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
3962 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
3963 		hif_info_high("Runtime PM resume is requested by %ps",
3964 			      (void *)_RET_IP_);
3965 
3966 	sc->pm_stats.runtime_get++;
3967 	ret = pm_runtime_get_sync(sc->dev);
3968 
3969 	/* Get can return 1 if the device is already active, just return
3970 	 * success in that case.
3971 	 */
3972 	if (ret > 0)
3973 		ret = 0;
3974 
3975 	if (ret) {
3976 		sc->pm_stats.runtime_get_err++;
3977 		hif_err("Runtime PM Get Sync error in pm_state: %d, ret: %d",
3978 			qdf_atomic_read(&sc->pm_state), ret);
3979 		hif_pm_runtime_put(hif_ctx);
3980 	}
3981 
3982 	return ret;
3983 }
3984 
3985 /**
3986  * hif_pm_runtime_put_sync_suspend() - do a put operation with sync suspend
3987  *
3988  * This API will do a runtime put operation followed by a sync suspend if usage
3989  * count is 0 so it can only be called in non-atomic context.
3990  *
3991  * @hif_ctx: pointer of HIF context
3992  *
3993  * Return: 0 for success otherwise an error code
3994  */
3995 int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx)
3996 {
3997 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3998 	int usage_count, pm_state;
3999 	char *err = NULL;
4000 
4001 	if (!sc)
4002 		return -EINVAL;
4003 
4004 	if (!pm_runtime_enabled(sc->dev))
4005 		return 0;
4006 
4007 	usage_count = atomic_read(&sc->dev->power.usage_count);
4008 	if (usage_count == 1) {
4009 		pm_state = qdf_atomic_read(&sc->pm_state);
4010 		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
4011 			err = "Ignore unexpected Put as runtime PM is disabled";
4012 	} else if (usage_count == 0) {
4013 		err = "Put without a Get Operation";
4014 	}
4015 
4016 	if (err) {
4017 		hif_pci_runtime_pm_warn(sc, err);
4018 		return -EINVAL;
4019 	}
4020 
4021 	sc->pm_stats.runtime_put++;
4022 	return pm_runtime_put_sync_suspend(sc->dev);
4023 }
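
/*
 * Example (a minimal sketch, not taken from a real caller): code that
 * must be certain the bus is awake before a blocking operation can pair
 * the sync variants above; "hif_hdl" is a hypothetical handle owned by
 * the caller and error handling is elided.
 *
 *	if (hif_pm_runtime_get_sync(hif_hdl) == 0) {
 *		... access the device in non-atomic context ...
 *		hif_pm_runtime_put_sync_suspend(hif_hdl);
 *	}
 */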
4024 
4025 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
4026 {
4027 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4028 	int pm_state;
4029 
4030 	if (!sc)
4031 		return -EINVAL;
4032 
4033 	if (!pm_runtime_enabled(sc->dev))
4034 		return 0;
4035 
4036 	pm_state = qdf_atomic_read(&sc->pm_state);
4037 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
4038 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
4039 		HIF_INFO("Runtime PM resume is requested by %ps",
4040 			 (void *)_RET_IP_);
4041 
4042 	sc->pm_stats.request_resume++;
4043 	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
4044 
4045 	return hif_pm_request_resume(sc->dev);
4046 }
4047 
4048 void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx)
4049 {
4050 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4051 
4052 	if (!sc)
4053 		return;
4054 
4055 	sc->pm_stats.last_busy_marker = (void *)_RET_IP_;
4056 	sc->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs();
4057 
4058 	return pm_runtime_mark_last_busy(sc->dev);
4059 }
4060 
4061 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
4062 {
4063 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4064 
4065 	if (!sc)
4066 		return;
4067 
4068 	if (!pm_runtime_enabled(sc->dev))
4069 		return;
4070 
4071 	sc->pm_stats.runtime_get++;
4072 	pm_runtime_get_noresume(sc->dev);
4073 }
4074 
4075 /**
4076  * hif_pm_runtime_get() - do a get operation on the device
4077  * @hif_ctx: pointer of HIF context
4078  *
4079  * A get operation will prevent a runtime suspend until a
4080  * corresponding put is done. This API should be used when sending
4081  * data.
4082  *
4083  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
4084  * THIS API WILL ONLY REQUEST THE RESUME AND WILL NOT DO A GET!!!
4085  *
4086  * Return: 0 if the bus is up and a get has been issued,
4087  *   otherwise an error code.
4088  */
4088 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
4089 {
4090 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4091 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4092 	int ret;
4093 	int pm_state;
4094 
4095 	if (!scn) {
4096 		hif_err("Could not do runtime get, scn is null");
4097 		return -EFAULT;
4098 	}
4099 
4100 	if (!pm_runtime_enabled(sc->dev))
4101 		return 0;
4102 
4103 	pm_state = qdf_atomic_read(&sc->pm_state);
4104 
4105 	if (pm_state  == HIF_PM_RUNTIME_STATE_ON ||
4106 			pm_state == HIF_PM_RUNTIME_STATE_NONE) {
4107 		sc->pm_stats.runtime_get++;
4108 		ret = __hif_pm_runtime_get(sc->dev);
4109 
4110 		/* Get can return 1 if the device is already active, just return
4111 		 * success in that case
4112 		 */
4113 		if (ret > 0)
4114 			ret = 0;
4115 
4116 		if (ret)
4117 			hif_pm_runtime_put(hif_ctx);
4118 
4119 		if (ret && ret != -EINPROGRESS) {
4120 			sc->pm_stats.runtime_get_err++;
4121 			hif_err("Runtime Get PM Error in pm_state:%d ret: %d",
4122 				qdf_atomic_read(&sc->pm_state), ret);
4123 		}
4124 
4125 		return ret;
4126 	}
4127 
4128 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
4129 	    pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) {
4130 		hif_info_high("Runtime PM resume is requested by %ps",
4131 			      (void *)_RET_IP_);
4132 		ret = -EAGAIN;
4133 	} else {
4134 		ret = -EBUSY;
4135 	}
4136 
4137 	sc->pm_stats.request_resume++;
4138 	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
4139 	hif_pm_request_resume(sc->dev);
4140 
4141 	return ret;
4142 }
4143 
4144 /**
4145  * hif_pm_runtime_put() - do a put operation on the device
4146  * @hif_ctx: pointer of HIF context
4147  *
4148  * A put operation will allow a runtime suspend after a corresponding
4149  * get was done. This API should be used when sending data.
4150  *
4151  * This API will return a failure if runtime PM is stopped, or if it
4152  * would decrement the usage count below 0.
4153  *
4154  * Return: 0 if the put is performed
4155  */
4155 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
4156 {
4157 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4158 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4159 	int pm_state, usage_count;
4160 	char *error = NULL;
4161 
4162 	if (!scn) {
4163 		HIF_ERROR("%s: Could not do runtime put, scn is null",
4164 				__func__);
4165 		return -EFAULT;
4166 	}
4167 
4168 	if (!pm_runtime_enabled(sc->dev))
4169 		return 0;
4170 
4171 	usage_count = atomic_read(&sc->dev->power.usage_count);
4172 
4173 	if (usage_count == 1) {
4174 		pm_state = qdf_atomic_read(&sc->pm_state);
4175 
4176 		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
4177 			error = "Ignoring unexpected put when runtime pm is disabled";
4178 
4179 	} else if (usage_count == 0) {
4180 		error = "PUT Without a Get Operation";
4181 	}
4182 
4183 	if (error) {
4184 		hif_pci_runtime_pm_warn(sc, error);
4185 		return -EINVAL;
4186 	}
4187 
4188 	sc->pm_stats.runtime_put++;
4189 
4190 	hif_pm_runtime_mark_last_busy(hif_ctx);
4191 	hif_pm_runtime_put_auto(sc->dev);
4192 
4193 	return 0;
4194 }
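
/*
 * Example usage (a sketch, assuming a hypothetical TX path): the data
 * path brackets each send with a get/put pair. When the bus is
 * suspended, hif_pm_runtime_get() only requests a resume, so the
 * caller is expected to defer the frame and retry later.
 *
 *	ret = hif_pm_runtime_get(hif_hdl);
 *	if (ret == 0) {
 *		... hand the frame to the copy engine ...
 *		hif_pm_runtime_put(hif_hdl);
 *	} else if (ret == -EAGAIN || ret == -EBUSY) {
 *		... bus is suspending/suspended, defer the frame ...
 *	}
 */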
4195 
4196 /**
4197  * hif_pm_runtime_put_noidle() - do a put operation with no idle
4198  *
4199  * This API will do a runtime put no idle operation
4200  *
4201  * @hif_ctx: pointer of HIF context
4202  *
4203  * Return: 0 for success otherwise an error code
4204  */
4205 int hif_pm_runtime_put_noidle(struct hif_opaque_softc *hif_ctx)
4206 {
4207 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4208 	int usage_count, pm_state;
4209 	char *err = NULL;
4210 
4211 	if (!sc)
4212 		return -EINVAL;
4213 
4214 	if (!pm_runtime_enabled(sc->dev))
4215 		return 0;
4216 
4217 	usage_count = atomic_read(&sc->dev->power.usage_count);
4218 	if (usage_count == 1) {
4219 		pm_state = qdf_atomic_read(&sc->pm_state);
4220 		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
4221 			err = "Ignore unexpected Put as runtime PM is disabled";
4222 	} else if (usage_count == 0) {
4223 		err = "Put without a Get Operation";
4224 	}
4225 
4226 	if (err) {
4227 		hif_pci_runtime_pm_warn(sc, err);
4228 		return -EINVAL;
4229 	}
4230 
4231 	sc->pm_stats.runtime_put++;
4232 	pm_runtime_put_noidle(sc->dev);
4233 
4234 	return 0;
4235 }
4236 
4237 /**
4238  * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
4239  *                                      reason
4240  * @hif_sc: pci context
4241  * @lock: runtime_pm lock being acquired
4242  *
4243  * Return: 0 if successful.
4244  */
4245 static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
4246 		*hif_sc, struct hif_pm_runtime_lock *lock)
4247 {
4248 	int ret = 0;
4249 
4250 	/*
4251 	 * We shouldn't set context->timeout to zero here while the
4252 	 * context is active, as the timeout APIs may be called back to
4253 	 * back for the same context,
4254 	 * e.g. echo "1=T:10:T:20" > /d/cnss_runtime_pm.
4255 	 * context->timeout is set to zero in hif_pm_runtime_prevent_suspend
4256 	 * instead, to ensure the timeout version is no longer active and the
4257 	 * list entry of this context is deleted during allow suspend.
4258 	 */
4259 	if (lock->active)
4260 		return 0;
4261 
4262 	ret = __hif_pm_runtime_get(hif_sc->dev);
4263 
4264 	/*
4265 	 * ret can be -EINPROGRESS if the runtime status is RPM_RESUMING or
4266 	 * RPM_SUSPENDING. Any other negative value is an error.
4267 	 * We shouldn't do a runtime_put here, as allow suspend is called
4268 	 * with this context at a later point and the usage count is
4269 	 * decremented there, so the suspend stays prevented until then.
4270 	 */
4271 
4272 	if (ret < 0 && ret != -EINPROGRESS) {
4273 		hif_sc->pm_stats.runtime_get_err++;
4274 		hif_pci_runtime_pm_warn(hif_sc,
4275 				"Prevent Suspend Runtime PM Error");
4276 	}
4277 
4278 	hif_sc->prevent_suspend_cnt++;
4279 
4280 	lock->active = true;
4281 
4282 	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);
4283 
4284 	hif_sc->pm_stats.prevent_suspend++;
4285 
4286 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4287 		hif_pm_runtime_state_to_string(
4288 			qdf_atomic_read(&hif_sc->pm_state)),
4289 					ret);
4290 
4291 	return ret;
4292 }
4293 
4294 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
4295 		struct hif_pm_runtime_lock *lock)
4296 {
4297 	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(hif_sc);
4298 	int ret = 0;
4299 	int usage_count;
4300 
4301 	if (hif_sc->prevent_suspend_cnt == 0)
4302 		return ret;
4303 
4304 	if (!lock->active)
4305 		return ret;
4306 
4307 	usage_count = atomic_read(&hif_sc->dev->power.usage_count);
4308 
4309 	/*
4310 	 * During Driver unload, platform driver increments the usage
4311 	 * count to prevent any runtime suspend getting called.
4312 	 * So during driver load in HIF_PM_RUNTIME_STATE_NONE state the
4313 	 * usage_count should be one. Ideally this shouldn't happen as
4314 	 * context->active should be active for allow suspend to happen
4315 	 * Handling this case here to prevent any failures.
4316 	 */
4317 	if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
4318 				&& usage_count == 1) || usage_count == 0) {
4319 		hif_pci_runtime_pm_warn(hif_sc,
4320 				"Allow without a prevent suspend");
4321 		return -EINVAL;
4322 	}
4323 
4324 	list_del(&lock->list);
4325 
4326 	hif_sc->prevent_suspend_cnt--;
4327 
4328 	lock->active = false;
4329 	lock->timeout = 0;
4330 
4331 	hif_pm_runtime_mark_last_busy(hif_ctx);
4332 	ret = hif_pm_runtime_put_auto(hif_sc->dev);
4333 
4334 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4335 		hif_pm_runtime_state_to_string(
4336 			qdf_atomic_read(&hif_sc->pm_state)),
4337 					ret);
4338 
4339 	hif_sc->pm_stats.allow_suspend++;
4340 	return ret;
4341 }
4342 
4343 /**
4344  * hif_pm_runtime_lock_timeout_fn() - runtime lock timeout callback
4345  * @data: callback data that is the pci context
4346  *
4347  * If runtime locks are acquired with a timeout, this function releases
4348  * the locks when the last runtime lock expires.
4349  *
4350  * Return: none
4351  */
4352 static void hif_pm_runtime_lock_timeout_fn(void *data)
4353 {
4354 	struct hif_pci_softc *hif_sc = data;
4355 	unsigned long timer_expires;
4356 	struct hif_pm_runtime_lock *context, *temp;
4357 
4358 	spin_lock_bh(&hif_sc->runtime_lock);
4359 
4360 	timer_expires = hif_sc->runtime_timer_expires;
4361 
4362 	/* Make sure we are not called too early; this should take care of
4363 	 * the following case:
4364 	 *
4365 	 * CPU0                         CPU1 (timeout function)
4366 	 * ----                         ----------------------
4367 	 * spin_lock_irq
4368 	 *                              timeout function called
4369 	 *
4370 	 * mod_timer()
4371 	 *
4372 	 * spin_unlock_irq
4373 	 *                              spin_lock_irq
4374 	 */
4375 	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
4376 		hif_sc->runtime_timer_expires = 0;
4377 		list_for_each_entry_safe(context, temp,
4378 				&hif_sc->prevent_suspend_list, list) {
4379 			if (context->timeout) {
4380 				__hif_pm_runtime_allow_suspend(hif_sc, context);
4381 				hif_sc->pm_stats.allow_suspend_timeout++;
4382 			}
4383 		}
4384 	}
4385 
4386 	spin_unlock_bh(&hif_sc->runtime_lock);
4387 }
4388 
4389 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
4390 		struct hif_pm_runtime_lock *data)
4391 {
4392 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4393 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4394 	struct hif_pm_runtime_lock *context = data;
4395 
4396 	if (!sc->hif_config.enable_runtime_pm)
4397 		return 0;
4398 
4399 	if (!context)
4400 		return -EINVAL;
4401 
4402 	if (in_irq())
4403 		WARN_ON(1);
4404 
4405 	spin_lock_bh(&hif_sc->runtime_lock);
4406 	context->timeout = 0;
4407 	__hif_pm_runtime_prevent_suspend(hif_sc, context);
4408 	spin_unlock_bh(&hif_sc->runtime_lock);
4409 
4410 	return 0;
4411 }
4412 
4413 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
4414 				struct hif_pm_runtime_lock *data)
4415 {
4416 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4417 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4418 	struct hif_pm_runtime_lock *context = data;
4419 
4420 	if (!sc->hif_config.enable_runtime_pm)
4421 		return 0;
4422 
4423 	if (!context)
4424 		return -EINVAL;
4425 
4426 	if (in_irq())
4427 		WARN_ON(1);
4428 
4429 	spin_lock_bh(&hif_sc->runtime_lock);
4430 
4431 	__hif_pm_runtime_allow_suspend(hif_sc, context);
4432 
4433 	/* The list can also be empty when there was a single context
4434 	 * in the list and the allow suspend above came in before the
4435 	 * timer expired, so the context has already been deleted from
4436 	 * the list.
4437 	 * When the list is empty, the prevent_suspend count is zero.
4438 	 */
4439 	if (hif_sc->prevent_suspend_cnt == 0 &&
4440 			hif_sc->runtime_timer_expires > 0) {
4441 		qdf_timer_free(&hif_sc->runtime_timer);
4442 		hif_sc->runtime_timer_expires = 0;
4443 	}
4444 
4445 	spin_unlock_bh(&hif_sc->runtime_lock);
4446 
4447 	return 0;
4448 }
4449 
4450 /**
4451  * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
4452  * @ol_sc: HIF context
4453  * @lock: which lock is being acquired
4454  * @delay: Timeout in milliseconds
4455  *
4456  * Prevent runtime suspend with a timeout after which runtime suspend would be
4457  * allowed. This API uses a single timer to allow the suspend, and the
4458  * timer is modified if the timeout is changed before it fires.
4459  * If the timeout does not exceed the autosuspend delay, mark_last_busy
4460  * is used instead of starting the timer.
4461  *
4462  * It is wise to try not to use this API and correct the design if possible.
4463  *
4464  * Return: 0 on success and negative error code on failure
4465  */
4466 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
4467 		struct hif_pm_runtime_lock *lock, unsigned int delay)
4468 {
4469 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4470 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);
4471 
4472 	int ret = 0;
4473 	unsigned long expires;
4474 	struct hif_pm_runtime_lock *context = lock;
4475 
4476 	if (hif_is_load_or_unload_in_progress(sc)) {
4477 		HIF_ERROR("%s: Load/unload in progress, ignore!",
4478 				__func__);
4479 		return -EINVAL;
4480 	}
4481 
4482 	if (hif_is_recovery_in_progress(sc)) {
4483 		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
4484 		return -EINVAL;
4485 	}
4486 
4487 	if (!sc->hif_config.enable_runtime_pm)
4488 		return 0;
4489 
4490 	if (!context)
4491 		return -EINVAL;
4492 
4493 	if (in_irq())
4494 		WARN_ON(1);
4495 
4496 	/*
4497 	 * Don't use internal timer if the timeout is less than auto suspend
4498 	 * delay.
4499 	 */
4500 	if (delay <= hif_sc->dev->power.autosuspend_delay) {
4501 		hif_pm_request_resume(hif_sc->dev);
4502 		hif_pm_runtime_mark_last_busy(ol_sc);
4503 		return ret;
4504 	}
4505 
4506 	expires = jiffies + msecs_to_jiffies(delay);
4507 	expires += !expires;
4508 
4509 	spin_lock_bh(&hif_sc->runtime_lock);
4510 
4511 	context->timeout = delay;
4512 	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
4513 	hif_sc->pm_stats.prevent_suspend_timeout++;
4514 
4515 	/* Modify the timer only if new timeout is after already configured
4516 	 * timeout
4517 	 */
4518 	if (time_after(expires, hif_sc->runtime_timer_expires)) {
4519 		qdf_timer_mod(&hif_sc->runtime_timer, delay);
4520 		hif_sc->runtime_timer_expires = expires;
4521 	}
4522 
4523 	spin_unlock_bh(&hif_sc->runtime_lock);
4524 
4525 	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
4526 		hif_pm_runtime_state_to_string(
4527 			qdf_atomic_read(&hif_sc->pm_state)),
4528 					delay, ret);
4529 
4530 	return ret;
4531 }
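
/*
 * Example (an illustrative sketch only): a caller that needs the bus to
 * stay up for roughly 50 ms after queuing work can use the timeout
 * variant and let the timer, rather than an explicit allow call, drop
 * the vote; "scan_lock" is a hypothetical qdf_runtime_lock_t owned by
 * the caller.
 *
 *	hif_pm_runtime_prevent_suspend_timeout(hif_hdl, scan_lock.lock, 50);
 *	... queue the work that must complete while the bus is awake ...
 */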
4532 
4533 /**
4534  * hif_runtime_lock_init() - API to initialize Runtime PM context
4535  * @lock: QDF runtime lock to hold the context
4536  * @name: Context name
4537  *
4538  * Allocates the caller's Runtime PM context and stores it in @lock.
4539  *
4540  * Return: 0 on success, -ENOMEM on allocation failure
4541  */
4542 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
4543 {
4544 	struct hif_pm_runtime_lock *context;
4545 
4546 	HIF_INFO("Initializing Runtime PM wakelock %s", name);
4547 
4548 	context = qdf_mem_malloc(sizeof(*context));
4549 	if (!context)
4550 		return -ENOMEM;
4551 
4552 	context->name = name ? name : "Default";
4553 	lock->lock = context;
4554 
4555 	return 0;
4556 }
4557 
4558 /**
4559  * hif_runtime_lock_deinit() - free the runtime PM context
4560  * @hif_ctx: HIF context
4561  * @data: Runtime PM context
4562  * Return: void
4563  */
4564 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
4565 			     struct hif_pm_runtime_lock *data)
4566 {
4567 	struct hif_pm_runtime_lock *context = data;
4568 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4569 
4570 	if (!context) {
4571 		HIF_ERROR("Runtime PM wakelock context is NULL");
4572 		return;
4573 	}
4574 
4575 	HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);
4576 
4577 	/*
4578 	 * Ensure to delete the context list entry and reduce the usage count
4579 	 * before freeing the context if context is active.
4580 	 */
4581 	if (sc) {
4582 		spin_lock_bh(&sc->runtime_lock);
4583 		__hif_pm_runtime_allow_suspend(sc, context);
4584 		spin_unlock_bh(&sc->runtime_lock);
4585 	}
4586 
4587 	qdf_mem_free(context);
4588 }
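
/*
 * Putting the lock API together (a minimal sketch; "wow_lock" and
 * "hif_hdl" are hypothetical caller-owned names and error handling is
 * omitted):
 *
 *	qdf_runtime_lock_t wow_lock;
 *
 *	hif_runtime_lock_init(&wow_lock, "wow");
 *	hif_pm_runtime_prevent_suspend(hif_hdl, wow_lock.lock);
 *	... critical section that must not be runtime suspended ...
 *	hif_pm_runtime_allow_suspend(hif_hdl, wow_lock.lock);
 *	hif_runtime_lock_deinit(hif_hdl, wow_lock.lock);
 */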
4589 
4590 /**
4591  * hif_pm_runtime_is_suspended() - API to check if driver has runtime suspended
4592  * @hif_ctx: HIF context
4593  *
4594  * Return: true for runtime suspended, otherwise false
4595  */
4596 bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
4597 {
4598 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4599 
4600 	return qdf_atomic_read(&sc->pm_state) ==
4601 		HIF_PM_RUNTIME_STATE_SUSPENDED;
4602 }
4603 
4604 /**
4605  * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr
4606  * @hif_ctx: HIF context
4607  *
4608  * monitor_wake_intr variable can be used to indicate if driver expects wake
4609  * MSI for runtime PM
4610  *
4611  * Return: monitor_wake_intr variable
4612  */
4613 int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
4614 {
4615 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4616 
4617 	return qdf_atomic_read(&sc->monitor_wake_intr);
4618 }
4619 
4620 /**
4621  * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr
4622  * @hif_ctx: HIF context
4623  * @val: value to set
4624  *
4625  * monitor_wake_intr variable can be used to indicate if driver expects wake
4626  * MSI for runtime PM
4627  *
4628  * Return: void
4629  */
4630 void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
4631 					  int val)
4632 {
4633 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4634 
4635 	qdf_atomic_set(&sc->monitor_wake_intr, val);
4636 }
4637 
4638 /**
4639  * hif_pm_runtime_mark_dp_rx_busy() - Set last busy mark by data path
4640  * @hif_ctx: HIF context
4641  *
4642  * Return: void
4643  */
4644 void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
4645 {
4646 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4647 
4648 	if (!sc)
4649 		return;
4650 
4651 	qdf_atomic_set(&sc->pm_dp_rx_busy, 1);
4652 	sc->dp_last_busy_timestamp = qdf_get_log_timestamp_usecs();
4653 
4654 	hif_pm_runtime_mark_last_busy(hif_ctx);
4655 }
4656 
4657 /**
4658  * hif_pm_runtime_is_dp_rx_busy() - Check if last busy mark was by dp rx
4659  * @hif_ctx: HIF context
4660  *
4661  * Return: dp rx busy set value
4662  */
4663 int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
4664 {
4665 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4666 
4667 	if (!sc)
4668 		return 0;
4669 
4670 	return qdf_atomic_read(&sc->pm_dp_rx_busy);
4671 }
4672 
4673 /**
4674  * hif_pm_runtime_get_dp_rx_busy_mark() - Get timestamp of last dp rx busy mark
4675  * @hif_ctx: HIF context
4676  *
4677  * Return: timestamp of last mark busy by dp rx
4678  */
4679 qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
4680 {
4681 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4682 
4683 	if (!sc)
4684 		return 0;
4685 
4686 	return sc->dp_last_busy_timestamp;
4687 }
4688 
4689 #endif /* FEATURE_RUNTIME_PM */
4690 
4691 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
4692 {
4693 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4694 
4695 	/* legacy case only has one irq */
4696 	return pci_scn->irq;
4697 }
4698 
4699 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
4700 {
4701 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
4702 	struct hif_target_info *tgt_info;
4703 
4704 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
4705 
4706 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
4707 	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
4708 	    tgt_info->target_type == TARGET_TYPE_QCA6490 ||
4709 	    tgt_info->target_type == TARGET_TYPE_QCA8074) {
4710 		/*
4711 		 * Need to consider offset's memtype for QCA6290/QCA8074,
4712 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
4713 		 * well initialized/defined.
4714 		 */
4715 		return 0;
4716 	}
4717 
4718 	if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE)
4719 		 || (offset + sizeof(unsigned int) <= sc->mem_len)) {
4720 		return 0;
4721 	}
4722 
4723 	HIF_TRACE("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)\n",
4724 		  offset, (uint32_t)(offset + sizeof(unsigned int)),
4725 		  sc->mem_len);
4726 
4727 	return -EINVAL;
4728 }
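
/*
 * Example (a sketch under the assumption that diagnostic reads gate on
 * this check): a caller would validate the offset before touching the
 * mapped BAR, e.g.
 *
 *	if (hif_pci_addr_in_boundary(scn, offset) == 0)
 *		value = hif_read32_mb(scn, sc->mem + offset);
 */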
4729 
4730 /**
4731  * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
4732  * @scn: hif context
4733  *
4734  * Return: true if soc needs driver bmi otherwise false
4735  */
4736 bool hif_pci_needs_bmi(struct hif_softc *scn)
4737 {
4738 	return !ce_srng_based(scn);
4739 }
4740 
4741 #ifdef FORCE_WAKE
4742 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
4743 {
4744 	uint32_t timeout = 0, value;
4745 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4746 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4747 
4748 	if (pld_force_wake_request(scn->qdf_dev->dev)) {
4749 		hif_err("force wake request send failed");
4750 		return -EINVAL;
4751 	}
4752 
4753 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
4754 	while (!pld_is_device_awake(scn->qdf_dev->dev) &&
4755 	       timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS) {
4756 		qdf_mdelay(FORCE_WAKE_DELAY_MS);
4757 		timeout += FORCE_WAKE_DELAY_MS;
4758 	}
4759 
4760 	if (pld_is_device_awake(scn->qdf_dev->dev) <= 0) {
4761 		hif_err("Unable to wake up mhi");
4762 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
4763 		return -EINVAL;
4764 	}
4765 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
4766 	hif_write32_mb(scn,
4767 		       scn->mem +
4768 		       PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG,
4769 		       0);
4770 	hif_write32_mb(scn,
4771 		       scn->mem +
4772 		       PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG,
4773 		       1);
4774 
4775 	HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1);
4776 	/*
4777 	 * do not reset the timeout
4778 	 * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME < 50 ms
4779 	 */
4780 	do {
4781 		value =
4782 		hif_read32_mb(scn,
4783 			      scn->mem +
4784 			      PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG);
4785 		if (value)
4786 			break;
4787 		qdf_mdelay(FORCE_WAKE_DELAY_MS);
4788 		timeout += FORCE_WAKE_DELAY_MS;
4789 	} while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS);
4790 
4791 	if (!value) {
4792 		hif_err("failed handshake mechanism");
4793 		HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1);
4794 		return -ETIMEDOUT;
4795 	}
4796 
4797 	HIF_STATS_INC(pci_scn, soc_force_wake_success, 1);
4798 
4799 	return 0;
4800 }
4801 
4802 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
4803 {
4804 	int ret;
4805 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4806 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4807 
4808 	ret = pld_force_wake_release(scn->qdf_dev->dev);
4809 	if (ret) {
4810 		hif_err("force wake release failure");
4811 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
4812 		return ret;
4813 	}
4814 
4815 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
4816 	hif_write32_mb(scn,
4817 		       scn->mem +
4818 		       PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG,
4819 		       0);
4820 	HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1);
4821 	return 0;
4822 }
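
/*
 * Example usage (a sketch, not taken from a real caller): register
 * accesses that must reach the target while the link may be in a low
 * power state are expected to be bracketed by a force wake vote.
 *
 *	if (hif_force_wake_request(hif_hdl) == 0) {
 *		... read or write target registers safely ...
 *		hif_force_wake_release(hif_hdl);
 *	}
 */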
4823 
4824 void hif_print_pci_stats(struct hif_pci_softc *pci_handle)
4825 {
4826 	hif_debug("mhi_force_wake_request_vote: %d",
4827 		  pci_handle->stats.mhi_force_wake_request_vote);
4828 	hif_debug("mhi_force_wake_failure: %d",
4829 		  pci_handle->stats.mhi_force_wake_failure);
4830 	hif_debug("mhi_force_wake_success: %d",
4831 		  pci_handle->stats.mhi_force_wake_success);
4832 	hif_debug("soc_force_wake_register_write_success: %d",
4833 		  pci_handle->stats.soc_force_wake_register_write_success);
4834 	hif_debug("soc_force_wake_failure: %d",
4835 		  pci_handle->stats.soc_force_wake_failure);
4836 	hif_debug("soc_force_wake_success: %d",
4837 		  pci_handle->stats.soc_force_wake_success);
4838 	hif_debug("mhi_force_wake_release_failure: %d",
4839 		  pci_handle->stats.mhi_force_wake_release_failure);
4840 	hif_debug("mhi_force_wake_release_success: %d",
4841 		  pci_handle->stats.mhi_force_wake_release_success);
4842 	hif_debug("oc_force_wake_release_success: %d",
4843 		  pci_handle->stats.soc_force_wake_release_success);
4844 }
4845 #endif /* FORCE_WAKE */
4846 
4847