xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/pcie/if_pci.c (revision 4865edfd190c086bbe2c69aae12a8226f877b91e)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <linux/pci.h>
20 #include <linux/slab.h>
21 #include <linux/interrupt.h>
22 #include <linux/if_arp.h>
23 #ifdef CONFIG_PCI_MSM
24 #include <linux/msm_pcie.h>
25 #endif
26 #include "hif_io32.h"
27 #include "if_pci.h"
28 #include "hif.h"
29 #include "target_type.h"
30 #include "hif_main.h"
31 #include "ce_main.h"
32 #include "ce_api.h"
33 #include "ce_internal.h"
34 #include "ce_reg.h"
35 #include "ce_bmi.h"
36 #include "regtable.h"
37 #include "hif_hw_version.h"
38 #include <linux/debugfs.h>
39 #include <linux/seq_file.h>
40 #include "qdf_status.h"
41 #include "qdf_atomic.h"
42 #include "pld_common.h"
43 #include "mp_dev.h"
44 #include "hif_debug.h"
45 
46 #include "if_pci_internal.h"
47 #include "ce_tasklet.h"
48 #include "targaddrs.h"
49 #include "hif_exec.h"
50 
51 #include "pci_api.h"
52 #include "ahb_api.h"
53 
54 /* Maximum ms timeout for host to wake up target */
55 #define PCIE_WAKE_TIMEOUT 1000
56 #define RAMDUMP_EVENT_TIMEOUT 2500
57 
58 /* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
59  * PCIe data bus error
60  * As workaround for this issue - changing the reset sequence to
61  * use TargetCPU warm reset * instead of SOC_GLOBAL_RESET
62  */
63 #define CPU_WARM_RESET_WAR
64 
65 #ifdef CONFIG_WIN
66 extern int32_t frac, intval, ar900b_20_targ_clk, qca9888_20_targ_clk;
67 #endif
68 
69 /*
70  * Top-level interrupt handler for all PCI interrupts from a Target.
71  * When a block of MSI interrupts is allocated, this top-level handler
72  * is not used; instead, we directly call the correct sub-handler.
73  */
74 struct ce_irq_reg_table {
75 	uint32_t irq_enable;
76 	uint32_t irq_status;
77 };
78 
79 #ifndef QCA_WIFI_3_0_ADRASTEA
80 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
81 {
82 }
83 #else
84 void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
85 {
86 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
87 	unsigned int target_enable0, target_enable1;
88 	unsigned int target_cause0, target_cause1;
89 
90 	target_enable0 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_0);
91 	target_enable1 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_1);
92 	target_cause0 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_0);
93 	target_cause1 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_1);
94 
95 	if ((target_enable0 & target_cause0) ||
96 	    (target_enable1 & target_cause1)) {
97 		hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_0, 0);
98 		hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_1, 0);
99 
100 		if (scn->notice_send)
101 			pld_intr_notify_q6(sc->dev);
102 	}
103 }
104 #endif
105 
106 
107 /**
108  * pci_dispatch_ce_irq() - pci_dispatch_ce_irq
109  * @scn: scn
110  *
111  * Return: N/A
112  */
113 static void pci_dispatch_interrupt(struct hif_softc *scn)
114 {
115 	uint32_t intr_summary;
116 	int id;
117 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
118 
119 	if (scn->hif_init_done != true)
120 		return;
121 
122 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
123 		return;
124 
125 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
126 
127 	if (intr_summary == 0) {
128 		if ((scn->target_status != TARGET_STATUS_RESET) &&
129 			(!qdf_atomic_read(&scn->link_suspended))) {
130 
131 			hif_write32_mb(scn->mem +
132 				(SOC_CORE_BASE_ADDRESS |
133 				PCIE_INTR_ENABLE_ADDRESS),
134 				HOST_GROUP0_MASK);
135 
136 			hif_read32_mb(scn->mem +
137 					(SOC_CORE_BASE_ADDRESS |
138 					PCIE_INTR_ENABLE_ADDRESS));
139 		}
140 		Q_TARGET_ACCESS_END(scn);
141 		return;
142 	}
143 	Q_TARGET_ACCESS_END(scn);
144 
145 	scn->ce_irq_summary = intr_summary;
146 	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
147 		if (intr_summary & (1 << id)) {
148 			intr_summary &= ~(1 << id);
149 			ce_dispatch_interrupt(id,  &hif_state->tasklets[id]);
150 		}
151 	}
152 }
153 
154 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
155 {
156 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
157 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
158 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
159 
160 	volatile int tmp;
161 	uint16_t val = 0;
162 	uint32_t bar0 = 0;
163 	uint32_t fw_indicator_address, fw_indicator;
164 	bool ssr_irq = false;
165 	unsigned int host_cause, host_enable;
166 
167 	if (LEGACY_INTERRUPTS(sc)) {
168 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
169 			return IRQ_HANDLED;
170 
171 		if (ADRASTEA_BU) {
172 			host_enable = hif_read32_mb(sc->mem +
173 						    PCIE_INTR_ENABLE_ADDRESS);
174 			host_cause = hif_read32_mb(sc->mem +
175 						   PCIE_INTR_CAUSE_ADDRESS);
176 			if (!(host_enable & host_cause)) {
177 				hif_pci_route_adrastea_interrupt(sc);
178 				return IRQ_HANDLED;
179 			}
180 		}
181 
182 		/* Clear Legacy PCI line interrupts
183 		 * IMPORTANT: INTR_CLR regiser has to be set
184 		 * after INTR_ENABLE is set to 0,
185 		 * otherwise interrupt can not be really cleared
186 		 */
187 		hif_write32_mb(sc->mem +
188 			      (SOC_CORE_BASE_ADDRESS |
189 			       PCIE_INTR_ENABLE_ADDRESS), 0);
190 
191 		hif_write32_mb(sc->mem +
192 			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
193 			       ADRASTEA_BU ?
194 			       (host_enable & host_cause) :
195 			      HOST_GROUP0_MASK);
196 
197 		if (ADRASTEA_BU)
198 			hif_write32_mb(sc->mem + 0x2f100c, (host_cause >> 1));
199 
200 		/* IMPORTANT: this extra read transaction is required to
201 		 * flush the posted write buffer
202 		 */
203 		if (!ADRASTEA_BU) {
204 		tmp =
205 			hif_read32_mb(sc->mem +
206 				     (SOC_CORE_BASE_ADDRESS |
207 				      PCIE_INTR_ENABLE_ADDRESS));
208 
209 		if (tmp == 0xdeadbeef) {
210 			HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
211 			       __func__);
212 
213 			pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
214 			HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
215 			       __func__, val);
216 
217 			pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
218 			HIF_ERROR("%s: PCI Device ID = 0x%04x",
219 			       __func__, val);
220 
221 			pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
222 			HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
223 			       val);
224 
225 			pci_read_config_word(sc->pdev, PCI_STATUS, &val);
226 			HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
227 			       val);
228 
229 			pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
230 					      &bar0);
231 			HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
232 			       bar0);
233 
234 			HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
235 				  __func__,
236 				  hif_read32_mb(sc->mem +
237 						PCIE_LOCAL_BASE_ADDRESS
238 						+ RTC_STATE_ADDRESS));
239 			HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
240 				  __func__,
241 				  hif_read32_mb(sc->mem +
242 						PCIE_LOCAL_BASE_ADDRESS
243 						+ PCIE_SOC_WAKE_ADDRESS));
244 			HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
245 				  __func__,
246 				  hif_read32_mb(sc->mem + 0x80008),
247 				  hif_read32_mb(sc->mem + 0x8000c));
248 			HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
249 				  __func__,
250 				  hif_read32_mb(sc->mem + 0x80010),
251 				  hif_read32_mb(sc->mem + 0x80014));
252 			HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
253 				  __func__,
254 				  hif_read32_mb(sc->mem + 0x80018),
255 				  hif_read32_mb(sc->mem + 0x8001c));
256 			QDF_BUG(0);
257 		}
258 
259 		PCI_CLR_CAUSE0_REGISTER(sc);
260 		}
261 
262 		if (HAS_FW_INDICATOR) {
263 			fw_indicator_address = hif_state->fw_indicator_address;
264 			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
265 			if ((fw_indicator != ~0) &&
266 			   (fw_indicator & FW_IND_EVENT_PENDING))
267 				ssr_irq = true;
268 		}
269 
270 		if (Q_TARGET_ACCESS_END(scn) < 0)
271 			return IRQ_HANDLED;
272 	}
273 	/* TBDXXX: Add support for WMAC */
274 
275 	if (ssr_irq) {
276 		sc->irq_event = irq;
277 		qdf_atomic_set(&scn->tasklet_from_intr, 1);
278 
279 		qdf_atomic_inc(&scn->active_tasklet_cnt);
280 		tasklet_schedule(&sc->intr_tq);
281 	} else {
282 		pci_dispatch_interrupt(scn);
283 	}
284 
285 	return IRQ_HANDLED;
286 }
287 
288 static irqreturn_t hif_pci_msi_fw_handler(int irq, void *arg)
289 {
290 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
291 
292 	(irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, arg);
293 
294 	return IRQ_HANDLED;
295 }
296 
297 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
298 {
299 	return 1;               /* FIX THIS */
300 }
301 
302 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
303 {
304 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
305 	int i = 0;
306 
307 	if (!irq || !size) {
308 		return -EINVAL;
309 	}
310 
311 	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
312 		irq[0] = sc->irq;
313 		return 1;
314 	}
315 
316 	if (sc->num_msi_intrs > size) {
317 		qdf_print("Not enough space in irq buffer to return irqs\n");
318 		return -EINVAL;
319 	}
320 
321 	for (i = 0; i < sc->num_msi_intrs; i++) {
322 		irq[i] = sc->irq +  i + MSI_ASSIGN_CE_INITIAL;
323 	}
324 
325 	return sc->num_msi_intrs;
326 }
327 
328 
329 /**
330  * hif_pci_cancel_deferred_target_sleep() - cancels the defered target sleep
331  * @scn: hif_softc
332  *
333  * Return: void
334  */
335 #if CONFIG_ATH_PCIE_MAX_PERF == 0
336 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
337 {
338 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
339 	A_target_id_t pci_addr = scn->mem;
340 
341 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
342 	/*
343 	 * If the deferred sleep timer is running cancel it
344 	 * and put the soc into sleep.
345 	 */
346 	if (hif_state->fake_sleep == true) {
347 		qdf_timer_stop(&hif_state->sleep_timer);
348 		if (hif_state->verified_awake == false) {
349 			hif_write32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
350 				      PCIE_SOC_WAKE_ADDRESS,
351 				      PCIE_SOC_WAKE_RESET);
352 		}
353 		hif_state->fake_sleep = false;
354 	}
355 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
356 }
357 #else
358 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
359 {
360 }
361 #endif
362 
363 #define A_PCIE_LOCAL_REG_READ(mem, addr) \
364 	hif_read32_mb((char *)(mem) + \
365 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
366 
367 #define A_PCIE_LOCAL_REG_WRITE(mem, addr, val) \
368 	hif_write32_mb(((char *)(mem) + \
369 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
370 
371 #ifdef QCA_WIFI_3_0
372 /**
373  * hif_targ_is_awake() - check to see if the target is awake
374  * @hif_ctx: hif context
375  *
376  * emulation never goes to sleep
377  *
378  * Return: true if target is awake
379  */
380 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
381 {
382 	return true;
383 }
384 #else
385 /**
386  * hif_targ_is_awake() - check to see if the target is awake
387  * @hif_ctx: hif context
388  *
389  * Return: true if the targets clocks are on
390  */
391 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
392 {
393 	uint32_t val;
394 
395 	if (scn->recovery)
396 		return false;
397 	val = hif_read32_mb(mem + PCIE_LOCAL_BASE_ADDRESS
398 		+ RTC_STATE_ADDRESS);
399 	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
400 }
401 #endif
402 
403 #define ATH_PCI_RESET_WAIT_MAX 10       /* Ms */
404 static void hif_pci_device_reset(struct hif_pci_softc *sc)
405 {
406 	void __iomem *mem = sc->mem;
407 	int i;
408 	uint32_t val;
409 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
410 
411 	if (!scn->hostdef)
412 		return;
413 
414 	/* NB: Don't check resetok here.  This form of reset
415 	 * is integral to correct operation.
416 	 */
417 
418 	if (!SOC_GLOBAL_RESET_ADDRESS)
419 		return;
420 
421 	if (!mem)
422 		return;
423 
424 	HIF_ERROR("%s: Reset Device", __func__);
425 
426 	/*
427 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
428 	 * writing WAKE_V, the Target may scribble over Host memory!
429 	 */
430 	A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS,
431 			       PCIE_SOC_WAKE_V_MASK);
432 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
433 		if (hif_targ_is_awake(scn, mem))
434 			break;
435 
436 		qdf_mdelay(1);
437 	}
438 
439 	/* Put Target, including PCIe, into RESET. */
440 	val = A_PCIE_LOCAL_REG_READ(mem, SOC_GLOBAL_RESET_ADDRESS);
441 	val |= 1;
442 	A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val);
443 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
444 		if (A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) &
445 		    RTC_STATE_COLD_RESET_MASK)
446 			break;
447 
448 		qdf_mdelay(1);
449 	}
450 
451 	/* Pull Target, including PCIe, out of RESET. */
452 	val &= ~1;
453 	A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val);
454 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
455 		if (!
456 		    (A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) &
457 		     RTC_STATE_COLD_RESET_MASK))
458 			break;
459 
460 		qdf_mdelay(1);
461 	}
462 
463 	A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
464 }
465 
466 /* CPU warm reset function
467  * Steps:
468  * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
469  * 2. Clear the FW_INDICATOR_ADDRESS -so Traget CPU intializes FW
470  *    correctly on WARM reset
471  * 3. Clear TARGET CPU LF timer interrupt
472  * 4. Reset all CEs to clear any pending CE tarnsactions
473  * 5. Warm reset CPU
474  */
475 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
476 {
477 	void __iomem *mem = sc->mem;
478 	int i;
479 	uint32_t val;
480 	uint32_t fw_indicator;
481 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
482 
483 	/* NB: Don't check resetok here.  This form of reset is
484 	 * integral to correct operation.
485 	 */
486 
487 	if (!mem)
488 		return;
489 
490 	HIF_INFO_MED("%s: Target Warm Reset", __func__);
491 
492 	/*
493 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
494 	 * writing WAKE_V, the Target may scribble over Host memory!
495 	 */
496 	A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS,
497 			       PCIE_SOC_WAKE_V_MASK);
498 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
499 		if (hif_targ_is_awake(scn, mem))
500 			break;
501 		qdf_mdelay(1);
502 	}
503 
504 	/*
505 	 * Disable Pending interrupts
506 	 */
507 	val =
508 		hif_read32_mb(mem +
509 			     (SOC_CORE_BASE_ADDRESS |
510 			      PCIE_INTR_CAUSE_ADDRESS));
511 	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
512 		    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
513 	/* Target CPU Intr Cause */
514 	val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
515 	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);
516 
517 	val =
518 		hif_read32_mb(mem +
519 			     (SOC_CORE_BASE_ADDRESS |
520 			      PCIE_INTR_ENABLE_ADDRESS));
521 	hif_write32_mb((mem +
522 		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
523 	hif_write32_mb((mem + (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
524 		      HOST_GROUP0_MASK);
525 
526 	qdf_mdelay(100);
527 
528 	/* Clear FW_INDICATOR_ADDRESS */
529 	if (HAS_FW_INDICATOR) {
530 		fw_indicator = hif_read32_mb(mem + FW_INDICATOR_ADDRESS);
531 		hif_write32_mb(mem + FW_INDICATOR_ADDRESS, 0);
532 	}
533 
534 	/* Clear Target LF Timer interrupts */
535 	val =
536 		hif_read32_mb(mem +
537 			     (RTC_SOC_BASE_ADDRESS +
538 			      SOC_LF_TIMER_CONTROL0_ADDRESS));
539 	HIF_INFO_MED("%s: addr 0x%x :  0x%x", __func__,
540 	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
541 	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
542 	hif_write32_mb(mem +
543 		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
544 		      val);
545 
546 	/* Reset CE */
547 	val =
548 		hif_read32_mb(mem +
549 			     (RTC_SOC_BASE_ADDRESS |
550 			      SOC_RESET_CONTROL_ADDRESS));
551 	val |= SOC_RESET_CONTROL_CE_RST_MASK;
552 	hif_write32_mb((mem +
553 		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
554 		      val);
555 	val =
556 		hif_read32_mb(mem +
557 			     (RTC_SOC_BASE_ADDRESS |
558 			      SOC_RESET_CONTROL_ADDRESS));
559 	qdf_mdelay(10);
560 
561 	/* CE unreset */
562 	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
563 	hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS),
564 		      val);
565 	val =
566 		hif_read32_mb(mem +
567 			     (RTC_SOC_BASE_ADDRESS |
568 			      SOC_RESET_CONTROL_ADDRESS));
569 	qdf_mdelay(10);
570 
571 	/* Read Target CPU Intr Cause */
572 	val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
573 	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
574 		    __func__, val);
575 
576 	/* CPU warm RESET */
577 	val =
578 		hif_read32_mb(mem +
579 			     (RTC_SOC_BASE_ADDRESS |
580 			      SOC_RESET_CONTROL_ADDRESS));
581 	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
582 	hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS),
583 		      val);
584 	val =
585 		hif_read32_mb(mem +
586 			     (RTC_SOC_BASE_ADDRESS |
587 			      SOC_RESET_CONTROL_ADDRESS));
588 	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
589 		    __func__, val);
590 
591 	qdf_mdelay(100);
592 	HIF_INFO_MED("%s: Target Warm reset complete", __func__);
593 
594 }
595 
596 #ifndef QCA_WIFI_3_0
597 /* only applicable to legacy ce */
598 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
599 {
600 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
601 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
602 	void __iomem *mem = sc->mem;
603 	uint32_t val;
604 
605 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
606 		return ATH_ISR_NOSCHED;
607 	val = hif_read32_mb(mem + FW_INDICATOR_ADDRESS);
608 	if (Q_TARGET_ACCESS_END(scn) < 0)
609 		return ATH_ISR_SCHED;
610 
611 	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);
612 
613 	if (val & FW_IND_HELPER)
614 		return 0;
615 
616 	return 1;
617 }
618 #endif
619 
620 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
621 {
622 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
623 	uint16_t device_id = 0;
624 	uint32_t val;
625 	uint16_t timeout_count = 0;
626 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
627 
628 	/* Check device ID from PCIe configuration space for link status */
629 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
630 	if (device_id != sc->devid) {
631 		HIF_ERROR("%s: device ID does match (read 0x%x, expect 0x%x)",
632 			  __func__, device_id, sc->devid);
633 		return -EACCES;
634 	}
635 
636 	/* Check PCIe local register for bar/memory access */
637 	val = hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
638 			   RTC_STATE_ADDRESS);
639 	HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);
640 
641 	/* Try to wake up taget if it sleeps */
642 	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
643 		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
644 	HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
645 		hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
646 		PCIE_SOC_WAKE_ADDRESS));
647 
648 	/* Check if taget can be woken up */
649 	while (!hif_targ_is_awake(scn, sc->mem)) {
650 		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
651 			HIF_ERROR("%s: wake up timeout, %08x, %08x",
652 				__func__,
653 				hif_read32_mb(sc->mem +
654 					     PCIE_LOCAL_BASE_ADDRESS +
655 					     RTC_STATE_ADDRESS),
656 				hif_read32_mb(sc->mem +
657 					     PCIE_LOCAL_BASE_ADDRESS +
658 					PCIE_SOC_WAKE_ADDRESS));
659 			return -EACCES;
660 		}
661 
662 		hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
663 			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
664 
665 		qdf_mdelay(100);
666 		timeout_count += 100;
667 	}
668 
669 	/* Check Power register for SoC internal bus issues */
670 	val =
671 		hif_read32_mb(sc->mem + RTC_SOC_BASE_ADDRESS +
672 			     SOC_POWER_REG_OFFSET);
673 	HIF_INFO_MED("%s: Power register is %08x", __func__, val);
674 
675 	return 0;
676 }
677 
678 /**
679  * __hif_pci_dump_registers(): dump other PCI debug registers
680  * @scn: struct hif_softc
681  *
682  * This function dumps pci debug registers.  The parrent function
683  * dumps the copy engine registers before calling this function.
684  *
685  * Return: void
686  */
687 static void __hif_pci_dump_registers(struct hif_softc *scn)
688 {
689 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
690 	void __iomem *mem = sc->mem;
691 	uint32_t val, i, j;
692 	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
693 	uint32_t ce_base;
694 
695 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
696 		return;
697 
698 	/* DEBUG_INPUT_SEL_SRC = 0x6 */
699 	val =
700 		hif_read32_mb(mem + GPIO_BASE_ADDRESS +
701 			     WLAN_DEBUG_INPUT_SEL_OFFSET);
702 	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
703 	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
704 	hif_write32_mb(mem + GPIO_BASE_ADDRESS + WLAN_DEBUG_INPUT_SEL_OFFSET,
705 		      val);
706 
707 	/* DEBUG_CONTROL_ENABLE = 0x1 */
708 	val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
709 			   WLAN_DEBUG_CONTROL_OFFSET);
710 	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
711 	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
712 	hif_write32_mb(mem + GPIO_BASE_ADDRESS +
713 		      WLAN_DEBUG_CONTROL_OFFSET, val);
714 
715 	HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
716 	       hif_read32_mb(mem + GPIO_BASE_ADDRESS +
717 			    WLAN_DEBUG_INPUT_SEL_OFFSET),
718 	       hif_read32_mb(mem + GPIO_BASE_ADDRESS +
719 			    WLAN_DEBUG_CONTROL_OFFSET));
720 
721 	HIF_INFO_MED("%s: Debug CE", __func__);
722 	/* Loop CE debug output */
723 	/* AMBA_DEBUG_BUS_SEL = 0xc */
724 	val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET);
725 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
726 	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
727 	hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val);
728 
729 	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
730 		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
731 		val = hif_read32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
732 				   CE_WRAPPER_DEBUG_OFFSET);
733 		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
734 		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
735 		hif_write32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
736 			      CE_WRAPPER_DEBUG_OFFSET, val);
737 
738 		HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
739 			    __func__, wrapper_idx[i],
740 			    hif_read32_mb(mem + GPIO_BASE_ADDRESS +
741 				AMBA_DEBUG_BUS_OFFSET),
742 			    hif_read32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
743 				CE_WRAPPER_DEBUG_OFFSET));
744 
745 		if (wrapper_idx[i] <= 7) {
746 			for (j = 0; j <= 5; j++) {
747 				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
748 				/* For (j=0~5) write CE_DEBUG_SEL = j */
749 				val =
750 					hif_read32_mb(mem + ce_base +
751 						     CE_DEBUG_OFFSET);
752 				val &= ~CE_DEBUG_SEL_MASK;
753 				val |= CE_DEBUG_SEL_SET(j);
754 				hif_write32_mb(mem + ce_base + CE_DEBUG_OFFSET,
755 					      val);
756 
757 				/* read (@gpio_athr_wlan_reg)
758 				 * WLAN_DEBUG_OUT_DATA
759 				 */
760 				val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
761 						   WLAN_DEBUG_OUT_OFFSET);
762 				val = WLAN_DEBUG_OUT_DATA_GET(val);
763 
764 				HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
765 					    __func__, j,
766 					    hif_read32_mb(mem + ce_base +
767 						    CE_DEBUG_OFFSET), val);
768 			}
769 		} else {
770 			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
771 			val =
772 				hif_read32_mb(mem + GPIO_BASE_ADDRESS +
773 					     WLAN_DEBUG_OUT_OFFSET);
774 			val = WLAN_DEBUG_OUT_DATA_GET(val);
775 
776 			HIF_INFO_MED("%s: out: %x", __func__, val);
777 		}
778 	}
779 
780 	HIF_INFO_MED("%s: Debug PCIe:", __func__);
781 	/* Loop PCIe debug output */
782 	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
783 	val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET);
784 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
785 	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
786 	hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val);
787 
788 	for (i = 0; i <= 8; i++) {
789 		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
790 		val =
791 			hif_read32_mb(mem + GPIO_BASE_ADDRESS +
792 				     AMBA_DEBUG_BUS_OFFSET);
793 		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
794 		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
795 		hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
796 			      val);
797 
798 		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
799 		val =
800 			hif_read32_mb(mem + GPIO_BASE_ADDRESS +
801 				     WLAN_DEBUG_OUT_OFFSET);
802 		val = WLAN_DEBUG_OUT_DATA_GET(val);
803 
804 		HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
805 		       hif_read32_mb(mem + GPIO_BASE_ADDRESS +
806 				    WLAN_DEBUG_OUT_OFFSET), val,
807 		       hif_read32_mb(mem + GPIO_BASE_ADDRESS +
808 				    WLAN_DEBUG_OUT_OFFSET));
809 	}
810 
811 	Q_TARGET_ACCESS_END(scn);
812 }
813 
814 /**
815  * hif_dump_registers(): dump bus debug registers
816  * @scn: struct hif_opaque_softc
817  *
818  * This function dumps hif bus debug registers
819  *
820  * Return: 0 for success or error code
821  */
822 int hif_pci_dump_registers(struct hif_softc *hif_ctx)
823 {
824 	int status;
825 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
826 
827 	status = hif_dump_ce_registers(scn);
828 
829 	if (status)
830 		HIF_ERROR("%s: Dump CE Registers Failed", __func__);
831 
832 	/* dump non copy engine pci registers */
833 	__hif_pci_dump_registers(scn);
834 
835 	return 0;
836 }
837 
838 /*
839  * Handler for a per-engine interrupt on a PARTICULAR CE.
840  * This is used in cases where each CE has a private
841  * MSI interrupt.
842  */
843 static irqreturn_t ce_per_engine_handler(int irq, void *arg)
844 {
845 	int CE_id = irq - MSI_ASSIGN_CE_INITIAL;
846 
847 	/*
848 	 * NOTE: We are able to derive CE_id from irq because we
849 	 * use a one-to-one mapping for CE's 0..5.
850 	 * CE's 6 & 7 do not use interrupts at all.
851 	 *
852 	 * This mapping must be kept in sync with the mapping
853 	 * used by firmware.
854 	 */
855 
856 	ce_per_engine_service(arg, CE_id);
857 
858 	return IRQ_HANDLED;
859 }
860 
861 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
862 
863 /* worker thread to schedule wlan_tasklet in SLUB debug build */
864 static void reschedule_tasklet_work_handler(void *arg)
865 {
866 	struct hif_pci_softc *sc = arg;
867 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
868 
869 	if (!scn) {
870 		HIF_ERROR("%s: hif_softc is NULL\n", __func__);
871 		return;
872 	}
873 
874 	if (scn->hif_init_done == false) {
875 		HIF_ERROR("%s: wlan driver is unloaded", __func__);
876 		return;
877 	}
878 
879 	tasklet_schedule(&sc->intr_tq);
880 }
881 
882 /**
883  * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
884  * work
885  * @sc: HIF PCI Context
886  *
887  * Return: void
888  */
889 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
890 {
891 	qdf_create_work(0, &sc->reschedule_tasklet_work,
892 				reschedule_tasklet_work_handler, NULL);
893 }
894 #else
895 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
896 #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
897 
898 void wlan_tasklet(unsigned long data)
899 {
900 	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
901 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
902 
903 	if (scn->hif_init_done == false)
904 		goto end;
905 
906 	if (qdf_atomic_read(&scn->link_suspended))
907 		goto end;
908 
909 	if (!ADRASTEA_BU) {
910 		(irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
911 		if (scn->target_status == TARGET_STATUS_RESET)
912 			goto end;
913 	}
914 
915 end:
916 	qdf_atomic_set(&scn->tasklet_from_intr, 0);
917 	qdf_atomic_dec(&scn->active_tasklet_cnt);
918 }
919 
920 #ifdef FEATURE_RUNTIME_PM
921 static const char *hif_pm_runtime_state_to_string(uint32_t state)
922 {
923 	switch (state) {
924 	case HIF_PM_RUNTIME_STATE_NONE:
925 		return "INIT_STATE";
926 	case HIF_PM_RUNTIME_STATE_ON:
927 		return "ON";
928 	case HIF_PM_RUNTIME_STATE_INPROGRESS:
929 		return "INPROGRESS";
930 	case HIF_PM_RUNTIME_STATE_SUSPENDED:
931 		return "SUSPENDED";
932 	default:
933 		return "INVALID STATE";
934 	}
935 }
936 
937 #define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
938 	seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
939 /**
940  * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
941  * @sc: hif_pci_softc context
942  * @msg: log message
943  *
944  * log runtime pm stats when something seems off.
945  *
946  * Return: void
947  */
948 static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
949 {
950 	struct hif_pm_runtime_lock *ctx;
951 
952 	HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
953 			msg, atomic_read(&sc->dev->power.usage_count),
954 			hif_pm_runtime_state_to_string(
955 					atomic_read(&sc->pm_state)),
956 			sc->prevent_suspend_cnt);
957 
958 	HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
959 			sc->dev->power.runtime_status,
960 			sc->dev->power.runtime_error,
961 			sc->dev->power.disable_depth,
962 			sc->dev->power.autosuspend_delay);
963 
964 	HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
965 			sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
966 			sc->pm_stats.request_resume);
967 
968 	HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
969 			sc->pm_stats.allow_suspend,
970 			sc->pm_stats.prevent_suspend);
971 
972 	HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
973 			sc->pm_stats.prevent_suspend_timeout,
974 			sc->pm_stats.allow_suspend_timeout);
975 
976 	HIF_ERROR("Suspended: %u, resumed: %u count",
977 			sc->pm_stats.suspended,
978 			sc->pm_stats.resumed);
979 
980 	HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
981 			sc->pm_stats.suspend_err,
982 			sc->pm_stats.runtime_get_err);
983 
984 	HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");
985 
986 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
987 		HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
988 	}
989 
990 	WARN_ON(1);
991 }
992 
993 /**
994  * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
995  * @s: file to print to
996  * @data: unused
997  *
998  * debugging tool added to the debug fs for displaying runtimepm stats
999  *
1000  * Return: 0
1001  */
1002 static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
1003 {
1004 	struct hif_pci_softc *sc = s->private;
1005 	static const char * const autopm_state[] = {"NONE", "ON", "INPROGRESS",
1006 		"SUSPENDED"};
1007 	unsigned int msecs_age;
1008 	int pm_state = atomic_read(&sc->pm_state);
1009 	unsigned long timer_expires;
1010 	struct hif_pm_runtime_lock *ctx;
1011 
1012 	seq_printf(s, "%30s: %s\n", "Runtime PM state",
1013 			autopm_state[pm_state]);
1014 	seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
1015 			sc->pm_stats.last_resume_caller);
1016 
1017 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
1018 		msecs_age = jiffies_to_msecs(
1019 				jiffies - sc->pm_stats.suspend_jiffies);
1020 		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
1021 				msecs_age / 1000, msecs_age % 1000);
1022 	}
1023 
1024 	seq_printf(s, "%30s: %d\n", "PM Usage count",
1025 			atomic_read(&sc->dev->power.usage_count));
1026 
1027 	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
1028 			sc->prevent_suspend_cnt);
1029 
1030 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
1031 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
1032 	HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
1033 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
1034 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
1035 	HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
1036 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
1037 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
1038 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
1039 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
1040 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);
1041 
1042 	timer_expires = sc->runtime_timer_expires;
1043 	if (timer_expires > 0) {
1044 		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
1045 		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
1046 				msecs_age / 1000, msecs_age % 1000);
1047 	}
1048 
1049 	spin_lock_bh(&sc->runtime_lock);
1050 	if (list_empty(&sc->prevent_suspend_list)) {
1051 		spin_unlock_bh(&sc->runtime_lock);
1052 		return 0;
1053 	}
1054 
1055 	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
1056 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
1057 		seq_printf(s, "%s", ctx->name);
1058 		if (ctx->timeout)
1059 			seq_printf(s, "(%d ms)", ctx->timeout);
1060 		seq_puts(s, " ");
1061 	}
1062 	seq_puts(s, "\n");
1063 	spin_unlock_bh(&sc->runtime_lock);
1064 
1065 	return 0;
1066 }
1067 #undef HIF_PCI_RUNTIME_PM_STATS
1068 
1069 /**
1070  * hif_pci_autopm_open() - open a debug fs file to access the runtime pm stats
1071  * @inode
1072  * @file
1073  *
1074  * Return: linux error code of single_open.
1075  */
1076 static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
1077 {
1078 	return single_open(file, hif_pci_pm_runtime_debugfs_show,
1079 			inode->i_private);
1080 }
1081 
1082 static const struct file_operations hif_pci_runtime_pm_fops = {
1083 	.owner          = THIS_MODULE,
1084 	.open           = hif_pci_runtime_pm_open,
1085 	.release        = single_release,
1086 	.read           = seq_read,
1087 	.llseek         = seq_lseek,
1088 };
1089 
1090 /**
1091  * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
1092  * @sc: pci context
1093  *
1094  * creates a debugfs entry to debug the runtime pm feature.
1095  */
1096 static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
1097 {
1098 	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
1099 					0400, NULL, sc,
1100 					&hif_pci_runtime_pm_fops);
1101 }
1102 
1103 /**
1104  * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
1105  * @sc: pci context
1106  *
1107  * removes the debugfs entry to debug the runtime pm feature.
1108  */
1109 static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
1110 {
1111 	debugfs_remove(sc->pm_dentry);
1112 }
1113 
1114 static void hif_runtime_init(struct device *dev, int delay)
1115 {
1116 	pm_runtime_set_autosuspend_delay(dev, delay);
1117 	pm_runtime_use_autosuspend(dev);
1118 	pm_runtime_allow(dev);
1119 	pm_runtime_mark_last_busy(dev);
1120 	pm_runtime_put_noidle(dev);
1121 	pm_suspend_ignore_children(dev, true);
1122 }
1123 
1124 static void hif_runtime_exit(struct device *dev)
1125 {
1126 	pm_runtime_get_noresume(dev);
1127 	pm_runtime_set_active(dev);
1128 }
1129 
1130 static void hif_pm_runtime_lock_timeout_fn(unsigned long data);
1131 
1132 /**
1133  * hif_pm_runtime_start(): start the runtime pm
1134  * @sc: pci context
1135  *
1136  * After this call, runtime pm will be active.
1137  */
1138 static void hif_pm_runtime_start(struct hif_pci_softc *sc)
1139 {
1140 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1141 	uint32_t mode = hif_get_conparam(ol_sc);
1142 
1143 	if (!ol_sc->hif_config.enable_runtime_pm) {
1144 		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
1145 		return;
1146 	}
1147 
1148 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode)) {
1149 		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
1150 				__func__);
1151 		return;
1152 	}
1153 
1154 	setup_timer(&sc->runtime_timer, hif_pm_runtime_lock_timeout_fn,
1155 			(unsigned long)sc);
1156 
1157 	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
1158 			ol_sc->hif_config.runtime_pm_delay);
1159 
1160 	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
1161 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
1162 	hif_runtime_pm_debugfs_create(sc);
1163 }
1164 
1165 /**
1166  * hif_pm_runtime_stop(): stop runtime pm
1167  * @sc: pci context
1168  *
1169  * Turns off runtime pm and frees corresponding resources
1170  * that were acquired by hif_runtime_pm_start().
1171  */
1172 static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
1173 {
1174 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1175 	uint32_t mode = hif_get_conparam(ol_sc);
1176 
1177 	if (!ol_sc->hif_config.enable_runtime_pm)
1178 		return;
1179 
1180 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode))
1181 		return;
1182 
1183 	hif_runtime_exit(sc->dev);
1184 	hif_pm_runtime_resume(sc->dev);
1185 
1186 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1187 
1188 	hif_runtime_pm_debugfs_remove(sc);
1189 	del_timer_sync(&sc->runtime_timer);
1190 	/* doesn't wait for penting trafic unlike cld-2.0 */
1191 }
1192 
1193 /**
1194  * hif_pm_runtime_open(): initialize runtime pm
1195  * @sc: pci data structure
1196  *
1197  * Early initialization
1198  */
1199 static void hif_pm_runtime_open(struct hif_pci_softc *sc)
1200 {
1201 	spin_lock_init(&sc->runtime_lock);
1202 
1203 	qdf_atomic_init(&sc->pm_state);
1204 	qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
1205 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1206 	INIT_LIST_HEAD(&sc->prevent_suspend_list);
1207 }
1208 
1209 /**
1210  * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
1211  * @sc: pci context
1212  *
1213  * Ensure we have only one vote against runtime suspend before closing
1214  * the runtime suspend feature.
1215  *
1216  * all gets by the wlan driver should have been returned
1217  * one vote should remain as part of cnss_runtime_exit
1218  *
1219  * needs to be revisited if we share the root complex.
1220  */
1221 static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
1222 {
1223 	struct hif_pm_runtime_lock *ctx, *tmp;
1224 
1225 	if (atomic_read(&sc->dev->power.usage_count) != 1)
1226 		hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
1227 	else
1228 		return;
1229 
1230 	spin_lock_bh(&sc->runtime_lock);
1231 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1232 		spin_unlock_bh(&sc->runtime_lock);
1233 		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
1234 		spin_lock_bh(&sc->runtime_lock);
1235 	}
1236 	spin_unlock_bh(&sc->runtime_lock);
1237 
1238 	/* ensure 1 and only 1 usage count so that when the wlan
1239 	 * driver is re-insmodded runtime pm won't be
1240 	 * disabled also ensures runtime pm doesn't get
1241 	 * broken on by being less than 1.
1242 	 */
1243 	if (atomic_read(&sc->dev->power.usage_count) <= 0)
1244 		atomic_set(&sc->dev->power.usage_count, 1);
1245 	while (atomic_read(&sc->dev->power.usage_count) > 1)
1246 		hif_pm_runtime_put_auto(sc->dev);
1247 }
1248 
1249 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
1250 					  struct hif_pm_runtime_lock *lock);
1251 
1252 /**
1253  * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
1254  * @sc: PCIe Context
1255  *
1256  * API is used to empty the runtime pm prevent suspend list.
1257  *
1258  * Return: void
1259  */
1260 static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
1261 {
1262 	struct hif_pm_runtime_lock *ctx, *tmp;
1263 
1264 	spin_lock_bh(&sc->runtime_lock);
1265 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1266 		__hif_pm_runtime_allow_suspend(sc, ctx);
1267 	}
1268 	spin_unlock_bh(&sc->runtime_lock);
1269 }
1270 
1271 /**
1272  * hif_pm_runtime_close(): close runtime pm
1273  * @sc: pci bus handle
1274  *
1275  * ensure runtime_pm is stopped before closing the driver
1276  */
1277 static void hif_pm_runtime_close(struct hif_pci_softc *sc)
1278 {
1279 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1280 
1281 	qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
1282 	if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
1283 		return;
1284 
1285 	hif_pm_runtime_stop(sc);
1286 
1287 	hif_is_recovery_in_progress(scn) ?
1288 		hif_pm_runtime_sanitize_on_ssr_exit(sc) :
1289 		hif_pm_runtime_sanitize_on_exit(sc);
1290 }
1291 #else
1292 static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
1293 static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
1294 static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
1295 static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
1296 #endif
1297 
1298 /**
1299  * hif_disable_power_gating() - disable HW power gating
1300  * @hif_ctx: hif context
1301  *
1302  * disables pcie L1 power states
1303  */
1304 static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1305 {
1306 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1307 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1308 
1309 	if (NULL == scn) {
1310 		HIF_ERROR("%s: Could not disable ASPM scn is null",
1311 		       __func__);
1312 		return;
1313 	}
1314 
1315 	/* Disable ASPM when pkt log is enabled */
1316 	pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1317 	pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1318 }
1319 
1320 /**
1321  * hif_enable_power_gating() - enable HW power gating
1322  * @hif_ctx: hif context
1323  *
1324  * enables pcie L1 power states
1325  */
1326 static void hif_enable_power_gating(struct hif_pci_softc *sc)
1327 {
1328 	if (NULL == sc) {
1329 		HIF_ERROR("%s: Could not disable ASPM scn is null",
1330 		       __func__);
1331 		return;
1332 	}
1333 
1334 	/* Re-enable ASPM after firmware/OTP download is complete */
1335 	pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1336 }
1337 
1338 /**
1339  * hif_enable_power_management() - enable power management
1340  * @hif_ctx: hif context
1341  *
1342  * Enables runtime pm, aspm(PCI.. hif_enable_power_gating) and re-enabling
1343  * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
1344  *
1345  * note: epping mode does not call this function as it does not
1346  *       care about saving power.
1347  */
1348 void hif_pci_enable_power_management(struct hif_softc *hif_sc,
1349 				 bool is_packet_log_enabled)
1350 {
1351 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
1352 
1353 	if (pci_ctx == NULL) {
1354 		HIF_ERROR("%s, hif_ctx null", __func__);
1355 		return;
1356 	}
1357 
1358 	hif_pm_runtime_start(pci_ctx);
1359 
1360 	if (!is_packet_log_enabled)
1361 		hif_enable_power_gating(pci_ctx);
1362 
1363 	if (!CONFIG_ATH_PCIE_MAX_PERF &&
1364 	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
1365 	    !ce_srng_based(hif_sc)) {
1366 		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
1367 		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
1368 			HIF_ERROR("%s, failed to set target to sleep",
1369 				  __func__);
1370 	}
1371 }
1372 
1373 /**
1374  * hif_disable_power_management() - disable power management
1375  * @hif_ctx: hif context
1376  *
1377  * Currently disables runtime pm. Should be updated to behave
1378  * if runtime pm is not started. Should be updated to take care
1379  * of aspm and soc sleep for driver load.
1380  */
1381 void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
1382 {
1383 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1384 
1385 	if (pci_ctx == NULL) {
1386 		HIF_ERROR("%s, hif_ctx null", __func__);
1387 		return;
1388 	}
1389 
1390 	hif_pm_runtime_stop(pci_ctx);
1391 }
1392 
1393 void hif_pci_display_stats(struct hif_softc *hif_ctx)
1394 {
1395 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1396 
1397 	if (pci_ctx == NULL) {
1398 		HIF_ERROR("%s, hif_ctx null", __func__);
1399 		return;
1400 	}
1401 	hif_display_ce_stats(&pci_ctx->ce_sc);
1402 }
1403 
1404 void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1405 {
1406 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1407 
1408 	if (pci_ctx == NULL) {
1409 		HIF_ERROR("%s, hif_ctx null", __func__);
1410 		return;
1411 	}
1412 	hif_clear_ce_stats(&pci_ctx->ce_sc);
1413 }
1414 
1415 #define ATH_PCI_PROBE_RETRY_MAX 3
1416 /**
1417  * hif_bus_open(): hif_bus_open
1418  * @scn: scn
1419  * @bus_type: bus type
1420  *
1421  * Return: n/a
1422  */
1423 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
1424 {
1425 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1426 
1427 	hif_ctx->bus_type = bus_type;
1428 	hif_pm_runtime_open(sc);
1429 
1430 	qdf_spinlock_create(&sc->irq_lock);
1431 
1432 	return hif_ce_open(hif_ctx);
1433 }
1434 
1435 /**
1436  * hif_wake_target_cpu() - wake the target's cpu
1437  * @scn: hif context
1438  *
1439  * Send an interrupt to the device to wake up the Target CPU
1440  * so it has an opportunity to notice any changed state.
1441  */
1442 static void hif_wake_target_cpu(struct hif_softc *scn)
1443 {
1444 	QDF_STATUS rv;
1445 	uint32_t core_ctrl;
1446 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1447 
1448 	rv = hif_diag_read_access(hif_hdl,
1449 				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1450 				  &core_ctrl);
1451 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1452 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1453 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1454 
1455 	rv = hif_diag_write_access(hif_hdl,
1456 				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1457 				   core_ctrl);
1458 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1459 }
1460 
1461 /**
1462  * soc_wake_reset() - allow the target to go to sleep
1463  * @scn: hif_softc
1464  *
1465  * Clear the force wake register.  This is done by
1466  * hif_sleep_entry and cancel defered timer sleep.
1467  */
1468 static void soc_wake_reset(struct hif_softc *scn)
1469 {
1470 	hif_write32_mb(scn->mem +
1471 		PCIE_LOCAL_BASE_ADDRESS +
1472 		PCIE_SOC_WAKE_ADDRESS,
1473 		PCIE_SOC_WAKE_RESET);
1474 }
1475 
1476 /**
1477  * hif_sleep_entry() - gate target sleep
1478  * @arg: hif context
1479  *
1480  * This function is the callback for the sleep timer.
1481  * Check if last force awake critical section was at least
1482  * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago.  if it was,
1483  * allow the target to go to sleep and cancel the sleep timer.
1484  * otherwise reschedule the sleep timer.
1485  */
1486 static void hif_sleep_entry(void *arg)
1487 {
1488 	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1489 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1490 	uint32_t idle_ms;
1491 
1492 	if (scn->recovery)
1493 		return;
1494 
1495 	if (hif_is_driver_unloading(scn))
1496 		return;
1497 
1498 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1499 	if (hif_state->verified_awake == false) {
1500 		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1501 						    - hif_state->sleep_ticks);
1502 		if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1503 			if (!qdf_atomic_read(&scn->link_suspended)) {
1504 				soc_wake_reset(scn);
1505 				hif_state->fake_sleep = false;
1506 			}
1507 		} else {
1508 			qdf_timer_stop(&hif_state->sleep_timer);
1509 			qdf_timer_start(&hif_state->sleep_timer,
1510 				    HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1511 		}
1512 	} else {
1513 		qdf_timer_stop(&hif_state->sleep_timer);
1514 		qdf_timer_start(&hif_state->sleep_timer,
1515 					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1516 	}
1517 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1518 }
1519 
1520 #define HIF_HIA_MAX_POLL_LOOP    1000000
1521 #define HIF_HIA_POLLING_DELAY_MS 10
1522 
1523 #ifdef CONFIG_WIN
1524 static void hif_set_hia_extnd(struct hif_softc *scn)
1525 {
1526 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1527 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1528 	uint32_t target_type = tgt_info->target_type;
1529 
1530 	HIF_TRACE("%s: E", __func__);
1531 
1532 	if ((target_type == TARGET_TYPE_AR900B) ||
1533 			target_type == TARGET_TYPE_QCA9984 ||
1534 			target_type == TARGET_TYPE_QCA9888) {
1535 		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
1536 		 * in RTC space
1537 		 */
1538 		tgt_info->target_revision
1539 			= CHIP_ID_REVISION_GET(hif_read32_mb(scn->mem
1540 					+ CHIP_ID_ADDRESS));
1541 		qdf_print(KERN_INFO"chip_id 0x%x chip_revision 0x%x\n",
1542 			target_type, tgt_info->target_revision);
1543 	}
1544 
1545 	{
1546 		uint32_t flag2_value = 0;
1547 		uint32_t flag2_targ_addr =
1548 			host_interest_item_address(target_type,
1549 			offsetof(struct host_interest_s, hi_skip_clock_init));
1550 
1551 		if ((ar900b_20_targ_clk != -1) &&
1552 			(frac != -1) && (intval != -1)) {
1553 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1554 				&flag2_value);
1555 			qdf_print("\n Setting clk_override\n");
1556 			flag2_value |= CLOCK_OVERRIDE;
1557 
1558 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1559 					flag2_value);
1560 			qdf_print("\n CLOCK PLL val set %d\n", flag2_value);
1561 		} else {
1562 			qdf_print(KERN_INFO"\n CLOCK PLL skipped\n");
1563 		}
1564 	}
1565 
1566 	if (target_type == TARGET_TYPE_AR900B
1567 			|| target_type == TARGET_TYPE_QCA9984
1568 			|| target_type == TARGET_TYPE_QCA9888) {
1569 
1570 		/* for AR9980_2.0, 300 mhz clock is used, right now we assume
1571 		 * this would be supplied through module parameters,
1572 		 * if not supplied assumed default or same behavior as 1.0.
1573 		 * Assume 1.0 clock can't be tuned, reset to defaults
1574 		 */
1575 
1576 		qdf_print(KERN_INFO
1577 			  "%s: setting the target pll frac %x intval %x\n",
1578 			  __func__, frac, intval);
1579 
1580 		/* do not touch frac, and int val, let them be default -1,
1581 		 * if desired, host can supply these through module params
1582 		 */
1583 		if (frac != -1 || intval != -1) {
1584 			uint32_t flag2_value = 0;
1585 			uint32_t flag2_targ_addr;
1586 
1587 			flag2_targ_addr =
1588 				host_interest_item_address(target_type,
1589 				offsetof(struct host_interest_s,
1590 					hi_clock_info));
1591 			hif_diag_read_access(hif_hdl,
1592 				flag2_targ_addr, &flag2_value);
1593 			qdf_print("\n ====> FRAC Val %x Address %x\n", frac,
1594 				flag2_value);
1595 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1596 			qdf_print("\n INT Val %x  Address %x\n",
1597 				intval, flag2_value + 4);
1598 			hif_diag_write_access(hif_hdl,
1599 					flag2_value + 4, intval);
1600 		} else {
1601 			qdf_print(KERN_INFO
1602 				  "%s: no frac provided, skipping pre-configuring PLL\n",
1603 				  __func__);
1604 		}
1605 
1606 		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
1607 		if ((target_type == TARGET_TYPE_AR900B)
1608 			&& (tgt_info->target_revision == AR900B_REV_2)
1609 			&& ar900b_20_targ_clk != -1) {
1610 			uint32_t flag2_value = 0;
1611 			uint32_t flag2_targ_addr;
1612 
1613 			flag2_targ_addr
1614 				= host_interest_item_address(target_type,
1615 					offsetof(struct host_interest_s,
1616 					hi_desired_cpu_speed_hz));
1617 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1618 							&flag2_value);
1619 			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x\n",
1620 				  flag2_value);
1621 			hif_diag_write_access(hif_hdl, flag2_value,
1622 				ar900b_20_targ_clk/*300000000u*/);
1623 		} else if (target_type == TARGET_TYPE_QCA9888) {
1624 			uint32_t flag2_targ_addr;
1625 
1626 			if (200000000u != qca9888_20_targ_clk) {
1627 				qca9888_20_targ_clk = 300000000u;
1628 				/* Setting the target clock speed to 300 mhz */
1629 			}
1630 
1631 			flag2_targ_addr
1632 				= host_interest_item_address(target_type,
1633 					offsetof(struct host_interest_s,
1634 					hi_desired_cpu_speed_hz));
1635 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1636 				qca9888_20_targ_clk);
1637 		} else {
1638 			qdf_print(KERN_INFO"%s: targ_clk is not provided, skipping pre-configuring PLL\n",
1639 				  __func__);
1640 		}
1641 	} else {
1642 		if (frac != -1 || intval != -1) {
1643 			uint32_t flag2_value = 0;
1644 			uint32_t flag2_targ_addr =
1645 				host_interest_item_address(target_type,
1646 					offsetof(struct host_interest_s,
1647 							hi_clock_info));
1648 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1649 						&flag2_value);
1650 			qdf_print("\n ====> FRAC Val %x Address %x\n", frac,
1651 							flag2_value);
1652 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1653 			qdf_print("\n INT Val %x  Address %x\n", intval,
1654 							flag2_value + 4);
1655 			hif_diag_write_access(hif_hdl, flag2_value + 4,
1656 					intval);
1657 		}
1658 	}
1659 }
1660 
1661 #else
1662 
1663 static void hif_set_hia_extnd(struct hif_softc *scn)
1664 {
1665 }
1666 
1667 #endif
1668 
1669 /**
1670  * hif_set_hia() - fill out the host interest area
1671  * @scn: hif context
1672  *
1673  * This is replaced by hif_wlan_enable for integrated targets.
1674  * This fills out the host interest area.  The firmware will
1675  * process these memory addresses when it is first brought out
1676  * of reset.
1677  *
1678  * Return: 0 for success.
1679  */
1680 static int hif_set_hia(struct hif_softc *scn)
1681 {
1682 	QDF_STATUS rv;
1683 	uint32_t interconnect_targ_addr = 0;
1684 	uint32_t pcie_state_targ_addr = 0;
1685 	uint32_t pipe_cfg_targ_addr = 0;
1686 	uint32_t svc_to_pipe_map = 0;
1687 	uint32_t pcie_config_flags = 0;
1688 	uint32_t flag2_value = 0;
1689 	uint32_t flag2_targ_addr = 0;
1690 #ifdef QCA_WIFI_3_0
1691 	uint32_t host_interest_area = 0;
1692 	uint8_t i;
1693 #else
1694 	uint32_t ealloc_value = 0;
1695 	uint32_t ealloc_targ_addr = 0;
1696 	uint8_t banks_switched = 1;
1697 	uint32_t chip_id;
1698 #endif
1699 	uint32_t pipe_cfg_addr;
1700 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1701 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1702 	uint32_t target_type = tgt_info->target_type;
1703 	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
1704 	static struct CE_pipe_config *target_ce_config;
1705 	struct service_to_pipe *target_service_to_ce_map;
1706 
1707 	HIF_TRACE("%s: E", __func__);
1708 
1709 	hif_get_target_ce_config(scn,
1710 				 &target_ce_config, &target_ce_config_sz,
1711 				 &target_service_to_ce_map,
1712 				 &target_service_to_ce_map_sz,
1713 				 NULL, NULL);
1714 
1715 	if (ADRASTEA_BU)
1716 		return QDF_STATUS_SUCCESS;
1717 
1718 #ifdef QCA_WIFI_3_0
1719 	i = 0;
1720 	while (i < HIF_HIA_MAX_POLL_LOOP) {
1721 		host_interest_area = hif_read32_mb(scn->mem +
1722 						A_SOC_CORE_SCRATCH_0_ADDRESS);
1723 		if ((host_interest_area & 0x01) == 0) {
1724 			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1725 			host_interest_area = 0;
1726 			i++;
1727 			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1728 				HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1729 		} else {
1730 			host_interest_area &= (~0x01);
1731 			hif_write32_mb(scn->mem + 0x113014, 0);
1732 			break;
1733 		}
1734 	}
1735 
1736 	if (i >= HIF_HIA_MAX_POLL_LOOP) {
1737 		HIF_ERROR("%s: hia polling timeout", __func__);
1738 		return -EIO;
1739 	}
1740 
1741 	if (host_interest_area == 0) {
1742 		HIF_ERROR("%s: host_interest_area = 0", __func__);
1743 		return -EIO;
1744 	}
1745 
1746 	interconnect_targ_addr = host_interest_area +
1747 			offsetof(struct host_interest_area_t,
1748 			hi_interconnect_state);
1749 
1750 	flag2_targ_addr = host_interest_area +
1751 			offsetof(struct host_interest_area_t, hi_option_flag2);
1752 
1753 #else
1754 	interconnect_targ_addr = hif_hia_item_address(target_type,
1755 		offsetof(struct host_interest_s, hi_interconnect_state));
1756 	ealloc_targ_addr = hif_hia_item_address(target_type,
1757 		offsetof(struct host_interest_s, hi_early_alloc));
1758 	flag2_targ_addr = hif_hia_item_address(target_type,
1759 		offsetof(struct host_interest_s, hi_option_flag2));
1760 #endif
1761 	/* Supply Target-side CE configuration */
1762 	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1763 			  &pcie_state_targ_addr);
1764 	if (rv != QDF_STATUS_SUCCESS) {
1765 		HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1766 			  __func__, interconnect_targ_addr, rv);
1767 		goto done;
1768 	}
1769 	if (pcie_state_targ_addr == 0) {
1770 		rv = QDF_STATUS_E_FAILURE;
1771 		HIF_ERROR("%s: pcie state addr is 0", __func__);
1772 		goto done;
1773 	}
1774 	pipe_cfg_addr = pcie_state_targ_addr +
1775 			  offsetof(struct pcie_state_s,
1776 			  pipe_cfg_addr);
1777 	rv = hif_diag_read_access(hif_hdl,
1778 			  pipe_cfg_addr,
1779 			  &pipe_cfg_targ_addr);
1780 	if (rv != QDF_STATUS_SUCCESS) {
1781 		HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1782 			__func__, pipe_cfg_addr, rv);
1783 		goto done;
1784 	}
1785 	if (pipe_cfg_targ_addr == 0) {
1786 		rv = QDF_STATUS_E_FAILURE;
1787 		HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1788 		goto done;
1789 	}
1790 
1791 	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1792 			(uint8_t *) target_ce_config,
1793 			target_ce_config_sz);
1794 
1795 	if (rv != QDF_STATUS_SUCCESS) {
1796 		HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1797 		goto done;
1798 	}
1799 
1800 	rv = hif_diag_read_access(hif_hdl,
1801 			  pcie_state_targ_addr +
1802 			  offsetof(struct pcie_state_s,
1803 			   svc_to_pipe_map),
1804 			  &svc_to_pipe_map);
1805 	if (rv != QDF_STATUS_SUCCESS) {
1806 		HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1807 		goto done;
1808 	}
1809 	if (svc_to_pipe_map == 0) {
1810 		rv = QDF_STATUS_E_FAILURE;
1811 		HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1812 		goto done;
1813 	}
1814 
1815 	rv = hif_diag_write_mem(hif_hdl,
1816 			svc_to_pipe_map,
1817 			(uint8_t *) target_service_to_ce_map,
1818 			target_service_to_ce_map_sz);
1819 	if (rv != QDF_STATUS_SUCCESS) {
1820 		HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1821 		goto done;
1822 	}
1823 
1824 	rv = hif_diag_read_access(hif_hdl,
1825 			pcie_state_targ_addr +
1826 			offsetof(struct pcie_state_s,
1827 			config_flags),
1828 			&pcie_config_flags);
1829 	if (rv != QDF_STATUS_SUCCESS) {
1830 		HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1831 		goto done;
1832 	}
1833 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1834 	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1835 #else
1836 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1837 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1838 	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1839 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1840 	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1841 #endif
1842 	rv = hif_diag_write_mem(hif_hdl,
1843 			pcie_state_targ_addr +
1844 			offsetof(struct pcie_state_s,
1845 			config_flags),
1846 			(uint8_t *) &pcie_config_flags,
1847 			sizeof(pcie_config_flags));
1848 	if (rv != QDF_STATUS_SUCCESS) {
1849 		HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1850 		goto done;
1851 	}
1852 
1853 #ifndef QCA_WIFI_3_0
1854 	/* configure early allocation */
1855 	ealloc_targ_addr = hif_hia_item_address(target_type,
1856 						offsetof(
1857 						struct host_interest_s,
1858 						hi_early_alloc));
1859 
1860 	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1861 			&ealloc_value);
1862 	if (rv != QDF_STATUS_SUCCESS) {
1863 		HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1864 		goto done;
1865 	}
1866 
1867 	/* 1 bank is switched to IRAM, except ROME 1.0 */
1868 	ealloc_value |=
1869 		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1870 		 HI_EARLY_ALLOC_MAGIC_MASK);
1871 
1872 	rv = hif_diag_read_access(hif_hdl,
1873 			  CHIP_ID_ADDRESS |
1874 			  RTC_SOC_BASE_ADDRESS, &chip_id);
1875 	if (rv != QDF_STATUS_SUCCESS) {
1876 		HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1877 		goto done;
1878 	}
1879 	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1880 		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1881 		switch (CHIP_ID_REVISION_GET(chip_id)) {
1882 		case 0x2:       /* ROME 1.3 */
1883 			/* 2 banks are switched to IRAM */
1884 			banks_switched = 2;
1885 			break;
1886 		case 0x4:       /* ROME 2.1 */
1887 		case 0x5:       /* ROME 2.2 */
1888 			banks_switched = 6;
1889 			break;
1890 		case 0x8:       /* ROME 3.0 */
1891 		case 0x9:       /* ROME 3.1 */
1892 		case 0xA:       /* ROME 3.2 */
1893 			banks_switched = 9;
1894 			break;
1895 		case 0x0:       /* ROME 1.0 */
1896 		case 0x1:       /* ROME 1.1 */
1897 		default:
1898 			/* 3 banks are switched to IRAM */
1899 			banks_switched = 3;
1900 			break;
1901 		}
1902 	}
1903 
1904 	ealloc_value |=
1905 		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1906 		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1907 
1908 	rv = hif_diag_write_access(hif_hdl,
1909 				ealloc_targ_addr,
1910 				ealloc_value);
1911 	if (rv != QDF_STATUS_SUCCESS) {
1912 		HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1913 		goto done;
1914 	}
1915 #endif
1916 	if ((target_type == TARGET_TYPE_AR900B)
1917 			|| (target_type == TARGET_TYPE_QCA9984)
1918 			|| (target_type == TARGET_TYPE_QCA9888)
1919 			|| (target_type == TARGET_TYPE_AR9888)) {
1920 		hif_set_hia_extnd(scn);
1921 	}
1922 
1923 	/* Tell Target to proceed with initialization */
1924 	flag2_targ_addr = hif_hia_item_address(target_type,
1925 						offsetof(
1926 						struct host_interest_s,
1927 						hi_option_flag2));
1928 
1929 	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1930 			  &flag2_value);
1931 	if (rv != QDF_STATUS_SUCCESS) {
1932 		HIF_ERROR("%s: get option val (%d)", __func__, rv);
1933 		goto done;
1934 	}
1935 
1936 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1937 	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1938 			   flag2_value);
1939 	if (rv != QDF_STATUS_SUCCESS) {
1940 		HIF_ERROR("%s: set option val (%d)", __func__, rv);
1941 		goto done;
1942 	}
1943 
1944 	hif_wake_target_cpu(scn);
1945 
1946 done:
1947 
1948 	return rv;
1949 }
1950 
1951 /**
1952  * hif_bus_configure() - configure the pcie bus
1953  * @hif_sc: pointer to the hif context.
1954  *
1955  * return: 0 for success. nonzero for failure.
1956  */
1957 int hif_pci_bus_configure(struct hif_softc *hif_sc)
1958 {
1959 	int status = 0;
1960 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1961 	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
1962 
1963 	hif_ce_prepare_config(hif_sc);
1964 
1965 	/* initialize sleep state adjust variables */
1966 	hif_state->sleep_timer_init = true;
1967 	hif_state->keep_awake_count = 0;
1968 	hif_state->fake_sleep = false;
1969 	hif_state->sleep_ticks = 0;
1970 
1971 	qdf_timer_init(NULL, &hif_state->sleep_timer,
1972 			       hif_sleep_entry, (void *)hif_state,
1973 			       QDF_TIMER_TYPE_WAKE_APPS);
1974 	hif_state->sleep_timer_init = true;
1975 
1976 	status = hif_wlan_enable(hif_sc);
1977 	if (status) {
1978 		HIF_ERROR("%s: hif_wlan_enable error = %d",
1979 			  __func__, status);
1980 		goto timer_free;
1981 	}
1982 
1983 	A_TARGET_ACCESS_LIKELY(hif_sc);
1984 
1985 	if ((CONFIG_ATH_PCIE_MAX_PERF ||
1986 	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
1987 	    !ce_srng_based(hif_sc)) {
1988 		/*
1989 		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
1990 		 * prevent sleep when we want to keep firmware always awake
1991 		 * note: when we want to keep firmware always awake,
1992 		 *       hif_target_sleep_state_adjust will point to a dummy
1993 		 *       function, and hif_pci_target_sleep_state_adjust must
1994 		 *       be called instead.
1995 		 * note: bus type check is here because AHB bus is reusing
1996 		 *       hif_pci_bus_configure code.
1997 		 */
1998 		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
1999 			if (hif_pci_target_sleep_state_adjust(hif_sc,
2000 					false, true) < 0) {
2001 				status = -EACCES;
2002 				goto disable_wlan;
2003 			}
2004 		}
2005 	}
2006 
2007 	/* todo: consider replacing this with an srng field */
2008 	if ((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) &&
2009 			(hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
2010 		hif_sc->per_ce_irq = true;
2011 	}
2012 
2013 	status = hif_config_ce(hif_sc);
2014 	if (status)
2015 		goto disable_wlan;
2016 
2017 	/* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
2018 	if (hif_needs_bmi(hif_osc)) {
2019 		status = hif_set_hia(hif_sc);
2020 		if (status)
2021 			goto unconfig_ce;
2022 
2023 		HIF_INFO_MED("%s: hif_set_hia done", __func__);
2024 
2025 		hif_register_bmi_callbacks(hif_sc);
2026 	}
2027 
2028 	if ((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) &&
2029 			(hif_sc->bus_type == QDF_BUS_TYPE_PCI))
2030 		HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
2031 						__func__);
2032 	else {
2033 		status = hif_configure_irq(hif_sc);
2034 		if (status < 0)
2035 			goto unconfig_ce;
2036 	}
2037 
2038 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2039 
2040 	return status;
2041 
2042 unconfig_ce:
2043 	hif_unconfig_ce(hif_sc);
2044 disable_wlan:
2045 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2046 	hif_wlan_disable(hif_sc);
2047 
2048 timer_free:
2049 	qdf_timer_stop(&hif_state->sleep_timer);
2050 	qdf_timer_free(&hif_state->sleep_timer);
2051 	hif_state->sleep_timer_init = false;
2052 
2053 	HIF_ERROR("%s: failed, status = %d", __func__, status);
2054 	return status;
2055 }
2056 
2057 /**
2058  * hif_bus_close(): hif_bus_close
2059  *
2060  * Return: n/a
2061  */
2062 void hif_pci_close(struct hif_softc *hif_sc)
2063 {
2064 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
2065 
2066 	hif_pm_runtime_close(hif_pci_sc);
2067 	hif_ce_close(hif_sc);
2068 }
2069 
2070 #define BAR_NUM 0
2071 
2072 #ifndef CONFIG_PLD_PCIE_INIT
2073 static int hif_enable_pci(struct hif_pci_softc *sc,
2074 			  struct pci_dev *pdev,
2075 			  const struct pci_device_id *id)
2076 {
2077 	void __iomem *mem;
2078 	int ret = 0;
2079 	uint16_t device_id = 0;
2080 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2081 
2082 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
2083 	if (device_id != id->device)  {
2084 		HIF_ERROR(
2085 		   "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
2086 		   __func__, device_id, id->device);
2087 		/* pci link is down, so returing with error code */
2088 		return -EIO;
2089 	}
2090 
2091 	/* FIXME: temp. commenting out assign_resource
2092 	 * call for dev_attach to work on 2.6.38 kernel
2093 	 */
2094 #if (!defined(__LINUX_ARM_ARCH__))
2095 	if (pci_assign_resource(pdev, BAR_NUM)) {
2096 		HIF_ERROR("%s: pci_assign_resource error", __func__);
2097 		return -EIO;
2098 	}
2099 #endif
2100 	if (pci_enable_device(pdev)) {
2101 		HIF_ERROR("%s: pci_enable_device error",
2102 			   __func__);
2103 		return -EIO;
2104 	}
2105 
2106 	/* Request MMIO resources */
2107 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2108 	if (ret) {
2109 		HIF_ERROR("%s: PCI MMIO reservation error", __func__);
2110 		ret = -EIO;
2111 		goto err_region;
2112 	}
2113 
2114 #ifdef CONFIG_ARM_LPAE
2115 	/* if CONFIG_ARM_LPAE is enabled, we have to set 64 bits mask
2116 	 * for 32 bits device also.
2117 	 */
2118 	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2119 	if (ret) {
2120 		HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
2121 		goto err_dma;
2122 	}
2123 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2124 	if (ret) {
2125 		HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
2126 		goto err_dma;
2127 	}
2128 #else
2129 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2130 	if (ret) {
2131 		HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
2132 		goto err_dma;
2133 	}
2134 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2135 	if (ret) {
2136 		HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
2137 			   __func__);
2138 		goto err_dma;
2139 	}
2140 #endif
2141 
2142 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2143 
2144 	/* Set bus master bit in PCI_COMMAND to enable DMA */
2145 	pci_set_master(pdev);
2146 
2147 	/* Arrange for access to Target SoC registers. */
2148 	mem = pci_iomap(pdev, BAR_NUM, 0);
2149 	if (!mem) {
2150 		HIF_ERROR("%s: PCI iomap error", __func__);
2151 		ret = -EIO;
2152 		goto err_iomap;
2153 	}
2154 
2155 	pr_err("*****BAR is %pK\n", mem);
2156 
2157 	sc->mem = mem;
2158 
2159 	HIF_INFO("%s, mem after pci_iomap:%pK\n",
2160 	       __func__, sc->mem);
2161 
2162 	/* Hawkeye emulation specific change */
2163 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
2164 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
2165 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
2166 		(device_id == RUMIM2M_DEVICE_ID_NODE3)) {
2167 		mem = mem + 0x0c000000;
2168 		sc->mem = mem;
2169 		HIF_INFO("%s: Changing PCI mem base to %pK\n",
2170 			__func__, sc->mem);
2171 	}
2172 
2173 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
2174 	ol_sc->mem = mem;
2175 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
2176 	sc->pci_enabled = true;
2177 	return ret;
2178 
2179 err_iomap:
2180 	pci_clear_master(pdev);
2181 err_dma:
2182 	pci_release_region(pdev, BAR_NUM);
2183 err_region:
2184 	pci_disable_device(pdev);
2185 	return ret;
2186 }
2187 #else
2188 static int hif_enable_pci(struct hif_pci_softc *sc,
2189 			  struct pci_dev *pdev,
2190 			  const struct pci_device_id *id)
2191 {
2192 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2193 	sc->pci_enabled = true;
2194 	return 0;
2195 }
2196 #endif
2197 
2198 
2199 #ifndef CONFIG_PLD_PCIE_INIT
2200 static inline void hif_pci_deinit(struct hif_pci_softc *sc)
2201 {
2202 	pci_iounmap(sc->pdev, sc->mem);
2203 	pci_clear_master(sc->pdev);
2204 	pci_release_region(sc->pdev, BAR_NUM);
2205 	pci_disable_device(sc->pdev);
2206 }
2207 #else
2208 static inline void hif_pci_deinit(struct hif_pci_softc *sc) {}
2209 #endif
2210 
2211 static void hif_disable_pci(struct hif_pci_softc *sc)
2212 {
2213 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2214 
2215 	if (ol_sc == NULL) {
2216 		HIF_ERROR("%s: ol_sc = NULL", __func__);
2217 		return;
2218 	}
2219 	hif_pci_device_reset(sc);
2220 
2221 	hif_pci_deinit(sc);
2222 
2223 	sc->mem = NULL;
2224 	ol_sc->mem = NULL;
2225 }
2226 
2227 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
2228 {
2229 	int ret = 0;
2230 	int targ_awake_limit = 500;
2231 #ifndef QCA_WIFI_3_0
2232 	uint32_t fw_indicator;
2233 #endif
2234 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2235 
2236 	/*
2237 	 * Verify that the Target was started cleanly.*
2238 	 * The case where this is most likely is with an AUX-powered
2239 	 * Target and a Host in WoW mode. If the Host crashes,
2240 	 * loses power, or is restarted (without unloading the driver)
2241 	 * then the Target is left (aux) powered and running.  On a
2242 	 * subsequent driver load, the Target is in an unexpected state.
2243 	 * We try to catch that here in order to reset the Target and
2244 	 * retry the probe.
2245 	 */
2246 	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2247 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2248 	while (!hif_targ_is_awake(scn, sc->mem)) {
2249 		if (0 == targ_awake_limit) {
2250 			HIF_ERROR("%s: target awake timeout", __func__);
2251 			ret = -EAGAIN;
2252 			goto end;
2253 		}
2254 		qdf_mdelay(1);
2255 		targ_awake_limit--;
2256 	}
2257 
2258 #if PCIE_BAR0_READY_CHECKING
2259 	{
2260 		int wait_limit = 200;
2261 		/* Synchronization point: wait the BAR0 is configured */
2262 		while (wait_limit-- &&
2263 			   !(hif_read32_mb(sc->mem +
2264 					  PCIE_LOCAL_BASE_ADDRESS +
2265 					  PCIE_SOC_RDY_STATUS_ADDRESS)
2266 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
2267 			qdf_mdelay(10);
2268 		}
2269 		if (wait_limit < 0) {
2270 			/* AR6320v1 doesn't support checking of BAR0
2271 			 * configuration, takes one sec to wait BAR0 ready
2272 			 */
2273 			HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
2274 				    __func__);
2275 		}
2276 	}
2277 #endif
2278 
2279 #ifndef QCA_WIFI_3_0
2280 	fw_indicator = hif_read32_mb(sc->mem + FW_INDICATOR_ADDRESS);
2281 	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2282 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2283 
2284 	if (fw_indicator & FW_IND_INITIALIZED) {
2285 		HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
2286 			   __func__);
2287 		ret = -EAGAIN;
2288 		goto end;
2289 	}
2290 #endif
2291 
2292 end:
2293 	return ret;
2294 }
2295 
2296 static void wlan_tasklet_msi(unsigned long data)
2297 {
2298 	struct hif_tasklet_entry *entry = (struct hif_tasklet_entry *)data;
2299 	struct hif_pci_softc *sc = (struct hif_pci_softc *) entry->hif_handler;
2300 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2301 
2302 	if (scn->hif_init_done == false)
2303 		goto irq_handled;
2304 
2305 	if (qdf_atomic_read(&scn->link_suspended))
2306 		goto irq_handled;
2307 
2308 	qdf_atomic_inc(&scn->active_tasklet_cnt);
2309 
2310 	if (entry->id == HIF_MAX_TASKLET_NUM) {
2311 		/* the last tasklet is for fw IRQ */
2312 		(irqreturn_t)hif_fw_interrupt_handler(sc->irq_event, scn);
2313 		if (scn->target_status == TARGET_STATUS_RESET)
2314 			goto irq_handled;
2315 	} else if (entry->id < scn->ce_count) {
2316 		ce_per_engine_service(scn, entry->id);
2317 	} else {
2318 		HIF_ERROR("%s: ERROR - invalid CE_id = %d",
2319 		       __func__, entry->id);
2320 	}
2321 	return;
2322 
2323 irq_handled:
2324 	qdf_atomic_dec(&scn->active_tasklet_cnt);
2325 
2326 }
2327 
2328 /* deprecated */
2329 static int hif_configure_msi(struct hif_pci_softc *sc)
2330 {
2331 	int ret = 0;
2332 	int num_msi_desired;
2333 	int rv = -1;
2334 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2335 
2336 	HIF_TRACE("%s: E", __func__);
2337 
2338 	num_msi_desired = MSI_NUM_REQUEST; /* Multiple MSI */
2339 	if (num_msi_desired < 1) {
2340 		HIF_ERROR("%s: MSI is not configured", __func__);
2341 		return -EINVAL;
2342 	}
2343 
2344 	if (num_msi_desired > 1) {
2345 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
2346 		rv = pci_enable_msi_range(sc->pdev, num_msi_desired,
2347 						num_msi_desired);
2348 #else
2349 		rv = pci_enable_msi_block(sc->pdev, num_msi_desired);
2350 #endif
2351 	}
2352 	HIF_TRACE("%s: num_msi_desired = %d, available_msi = %d",
2353 		  __func__, num_msi_desired, rv);
2354 
2355 	if (rv == 0 || rv >= HIF_MAX_TASKLET_NUM) {
2356 		int i;
2357 
2358 		sc->num_msi_intrs = HIF_MAX_TASKLET_NUM;
2359 		sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].hif_handler =
2360 			(void *)sc;
2361 		sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].id =
2362 			HIF_MAX_TASKLET_NUM;
2363 		tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
2364 			 (unsigned long)&sc->tasklet_entries[
2365 			 HIF_MAX_TASKLET_NUM-1]);
2366 		ret = request_irq(sc->pdev->irq + MSI_ASSIGN_FW,
2367 				  hif_pci_msi_fw_handler,
2368 				  IRQF_SHARED, "wlan_pci", sc);
2369 		if (ret) {
2370 			HIF_ERROR("%s: request_irq failed", __func__);
2371 			goto err_intr;
2372 		}
2373 		for (i = 0; i <= scn->ce_count; i++) {
2374 			sc->tasklet_entries[i].hif_handler = (void *)sc;
2375 			sc->tasklet_entries[i].id = i;
2376 			tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
2377 				 (unsigned long)&sc->tasklet_entries[i]);
2378 			ret = request_irq((sc->pdev->irq +
2379 					   i + MSI_ASSIGN_CE_INITIAL),
2380 					  ce_per_engine_handler, IRQF_SHARED,
2381 					  "wlan_pci", sc);
2382 			if (ret) {
2383 				HIF_ERROR("%s: request_irq failed", __func__);
2384 				goto err_intr;
2385 			}
2386 		}
2387 	} else if (rv > 0) {
2388 		HIF_TRACE("%s: use single msi", __func__);
2389 
2390 		ret = pci_enable_msi(sc->pdev);
2391 		if (ret < 0) {
2392 			HIF_ERROR("%s: single MSI allocation failed",
2393 				  __func__);
2394 			/* Try for legacy PCI line interrupts */
2395 			sc->num_msi_intrs = 0;
2396 		} else {
2397 			sc->num_msi_intrs = 1;
2398 			tasklet_init(&sc->intr_tq,
2399 				wlan_tasklet, (unsigned long)sc);
2400 			ret = request_irq(sc->pdev->irq,
2401 					 hif_pci_legacy_ce_interrupt_handler,
2402 					  IRQF_SHARED, "wlan_pci", sc);
2403 			if (ret) {
2404 				HIF_ERROR("%s: request_irq failed", __func__);
2405 				goto err_intr;
2406 			}
2407 		}
2408 	} else {
2409 		sc->num_msi_intrs = 0;
2410 		ret = -EIO;
2411 		HIF_ERROR("%s: do not support MSI, rv = %d", __func__, rv);
2412 	}
2413 	ret = pci_enable_msi(sc->pdev);
2414 	if (ret < 0) {
2415 		HIF_ERROR("%s: single MSI interrupt allocation failed",
2416 			  __func__);
2417 		/* Try for legacy PCI line interrupts */
2418 		sc->num_msi_intrs = 0;
2419 	} else {
2420 		sc->num_msi_intrs = 1;
2421 		tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2422 		ret = request_irq(sc->pdev->irq,
2423 				  hif_pci_legacy_ce_interrupt_handler,
2424 				  IRQF_SHARED, "wlan_pci", sc);
2425 		if (ret) {
2426 			HIF_ERROR("%s: request_irq failed", __func__);
2427 			goto err_intr;
2428 		}
2429 	}
2430 
2431 	if (ret == 0) {
2432 		hif_write32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
2433 			  PCIE_INTR_ENABLE_ADDRESS),
2434 			  HOST_GROUP0_MASK);
2435 		hif_write32_mb(sc->mem +
2436 			  PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
2437 			  PCIE_SOC_WAKE_RESET);
2438 	}
2439 	HIF_TRACE("%s: X, ret = %d", __func__, ret);
2440 
2441 	return ret;
2442 
2443 err_intr:
2444 	if (sc->num_msi_intrs >= 1)
2445 		pci_disable_msi(sc->pdev);
2446 	return ret;
2447 }
2448 
2449 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2450 {
2451 	int ret = 0;
2452 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2453 	uint32_t target_type = scn->target_info.target_type;
2454 
2455 	HIF_TRACE("%s: E", __func__);
2456 
2457 	/* do notn support MSI or MSI IRQ failed */
2458 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2459 	ret = request_irq(sc->pdev->irq,
2460 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2461 			  "wlan_pci", sc);
2462 	if (ret) {
2463 		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
2464 		goto end;
2465 	}
2466 	scn->wake_irq = sc->pdev->irq;
2467 	/* Use sc->irq instead of sc->pdev-irq
2468 	 * platform_device pdev doesn't have an irq field
2469 	 */
2470 	sc->irq = sc->pdev->irq;
2471 	/* Use Legacy PCI Interrupts */
2472 	hif_write32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
2473 		  PCIE_INTR_ENABLE_ADDRESS),
2474 		  HOST_GROUP0_MASK);
2475 	hif_read32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
2476 			       PCIE_INTR_ENABLE_ADDRESS));
2477 	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2478 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2479 
2480 	if ((target_type == TARGET_TYPE_IPQ4019) ||
2481 			(target_type == TARGET_TYPE_AR900B)  ||
2482 			(target_type == TARGET_TYPE_QCA9984) ||
2483 			(target_type == TARGET_TYPE_AR9888) ||
2484 			(target_type == TARGET_TYPE_QCA9888) ||
2485 			(target_type == TARGET_TYPE_AR6320V1) ||
2486 			(target_type == TARGET_TYPE_AR6320V2) ||
2487 			(target_type == TARGET_TYPE_AR6320V3)) {
2488 		hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2489 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2490 	}
2491 end:
2492 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2493 			  "%s: X, ret = %d", __func__, ret);
2494 	return ret;
2495 }
2496 
2497 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
2498 {
2499 	int ret;
2500 	int ce_id, irq;
2501 	uint32_t msi_data_start;
2502 	uint32_t msi_data_count;
2503 	uint32_t msi_irq_start;
2504 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2505 
2506 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2507 					    &msi_data_count, &msi_data_start,
2508 					    &msi_irq_start);
2509 	if (ret)
2510 		return ret;
2511 
2512 	/* needs to match the ce_id -> irq data mapping
2513 	 * used in the srng parameter configuration
2514 	 */
2515 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2516 		unsigned int msi_data;
2517 
2518 		if (!ce_sc->tasklets[ce_id].inited)
2519 			continue;
2520 
2521 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
2522 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2523 
2524 		HIF_INFO("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
2525 			  ce_id, msi_data, irq);
2526 
2527 		free_irq(irq, &ce_sc->tasklets[ce_id]);
2528 	}
2529 
2530 	return ret;
2531 }
2532 
2533 static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2534 {
2535 	int i, j, irq;
2536 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2537 	struct hif_exec_context *hif_ext_group;
2538 
2539 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2540 		hif_ext_group = hif_state->hif_ext_group[i];
2541 		if (hif_ext_group->irq_requested) {
2542 			hif_ext_group->irq_requested = false;
2543 			for (j = 0; j < hif_ext_group->numirq; j++) {
2544 				irq = hif_ext_group->os_irq[j];
2545 				free_irq(irq, hif_ext_group);
2546 			}
2547 			hif_ext_group->numirq = 0;
2548 		}
2549 	}
2550 }
2551 
2552 /**
2553  * hif_nointrs(): disable IRQ
2554  *
2555  * This function stops interrupt(s)
2556  *
2557  * @scn: struct hif_softc
2558  *
2559  * Return: none
2560  */
2561 void hif_pci_nointrs(struct hif_softc *scn)
2562 {
2563 	int i, ret;
2564 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2565 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2566 
2567 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2568 
2569 	if (scn->request_irq_done == false)
2570 		return;
2571 
2572 	hif_pci_deconfigure_grp_irq(scn);
2573 
2574 	ret = hif_ce_srng_msi_free_irq(scn);
2575 	if (ret != -EINVAL) {
2576 		/* ce irqs freed in hif_ce_srng_msi_free_irq */
2577 
2578 		if (scn->wake_irq)
2579 			free_irq(scn->wake_irq, scn);
2580 		scn->wake_irq = 0;
2581 	} else if (sc->num_msi_intrs > 0) {
2582 		/* MSI interrupt(s) */
2583 		for (i = 0; i < sc->num_msi_intrs; i++)
2584 			free_irq(sc->irq + i, sc);
2585 		sc->num_msi_intrs = 0;
2586 	} else {
2587 		/* Legacy PCI line interrupt
2588 		 * Use sc->irq instead of sc->pdev-irq
2589 		 * platform_device pdev doesn't have an irq field
2590 		 */
2591 		free_irq(sc->irq, sc);
2592 	}
2593 	scn->request_irq_done = false;
2594 }
2595 
2596 /**
2597  * hif_disable_bus(): hif_disable_bus
2598  *
2599  * This function disables the bus
2600  *
2601  * @bdev: bus dev
2602  *
2603  * Return: none
2604  */
2605 void hif_pci_disable_bus(struct hif_softc *scn)
2606 {
2607 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2608 	struct pci_dev *pdev;
2609 	void __iomem *mem;
2610 	struct hif_target_info *tgt_info = &scn->target_info;
2611 
2612 	/* Attach did not succeed, all resources have been
2613 	 * freed in error handler
2614 	 */
2615 	if (!sc)
2616 		return;
2617 
2618 	pdev = sc->pdev;
2619 	if (ADRASTEA_BU) {
2620 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2621 
2622 		hif_write32_mb(sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2623 		hif_write32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS,
2624 			       HOST_GROUP0_MASK);
2625 	}
2626 
2627 #if defined(CPU_WARM_RESET_WAR)
2628 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2629 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2630 	 * verified for AR9888_REV1
2631 	 */
2632 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2633 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2634 		hif_pci_device_warm_reset(sc);
2635 	else
2636 		hif_pci_device_reset(sc);
2637 #else
2638 	hif_pci_device_reset(sc);
2639 #endif
2640 	mem = (void __iomem *)sc->mem;
2641 	if (mem) {
2642 #ifndef CONFIG_PLD_PCIE_INIT
2643 		pci_disable_msi(pdev);
2644 #endif
2645 		hif_dump_pipe_debug_count(scn);
2646 		if (scn->athdiag_procfs_inited) {
2647 			athdiag_procfs_remove();
2648 			scn->athdiag_procfs_inited = false;
2649 		}
2650 		hif_pci_deinit(sc);
2651 		scn->mem = NULL;
2652 	}
2653 	HIF_INFO("%s: X", __func__);
2654 }
2655 
2656 #define OL_ATH_PCI_PM_CONTROL 0x44
2657 
2658 #ifdef FEATURE_RUNTIME_PM
2659 /**
2660  * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occuring
2661  * @scn: hif context
2662  * @flag: prevent linkdown if true otherwise allow
2663  *
2664  * this api should only be called as part of bus prevent linkdown
2665  */
2666 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2667 {
2668 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2669 
2670 	if (flag)
2671 		qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
2672 	else
2673 		qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
2674 }
2675 #else
2676 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2677 {
2678 }
2679 #endif
2680 
2681 #if defined(CONFIG_PCI_MSM)
2682 /**
2683  * hif_bus_prevent_linkdown(): allow or permit linkdown
2684  * @flag: true prevents linkdown, false allows
2685  *
2686  * Calls into the platform driver to vote against taking down the
2687  * pcie link.
2688  *
2689  * Return: n/a
2690  */
2691 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2692 {
2693 	int errno;
2694 
2695 	HIF_DBG("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2696 	hif_runtime_prevent_linkdown(scn, flag);
2697 
2698 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2699 	if (errno)
2700 		HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
2701 			  __func__, errno);
2702 }
2703 #else
2704 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2705 {
2706 	HIF_DBG("wlan: %s pcie power collapse",
2707 			(flag ? "disable" : "enable"));
2708 	hif_runtime_prevent_linkdown(scn, flag);
2709 }
2710 #endif
2711 
2712 static int hif_mark_wake_irq_wakeable(struct hif_softc *scn)
2713 {
2714 	int errno;
2715 
2716 	errno = enable_irq_wake(scn->wake_irq);
2717 	if (errno) {
2718 		HIF_ERROR("%s: Failed to mark wake IRQ: %d", __func__, errno);
2719 		return errno;
2720 	}
2721 
2722 	return 0;
2723 }
2724 
2725 /**
2726  * hif_pci_bus_suspend(): prepare hif for suspend
2727  *
2728  * Enables pci bus wake irq based on link suspend voting.
2729  *
2730  * Return: 0 for success and non-zero error code for failure
2731  */
2732 int hif_pci_bus_suspend(struct hif_softc *scn)
2733 {
2734 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2735 		return 0;
2736 
2737 	/* pci link is staying up; enable wake irq */
2738 	return hif_mark_wake_irq_wakeable(scn);
2739 }
2740 
2741 /**
2742  * __hif_check_link_status() - API to check if PCIe link is active/not
2743  * @scn: HIF Context
2744  *
2745  * API reads the PCIe config space to verify if PCIe link training is
2746  * successful or not.
2747  *
2748  * Return: Success/Failure
2749  */
2750 static int __hif_check_link_status(struct hif_softc *scn)
2751 {
2752 	uint16_t dev_id = 0;
2753 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2754 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2755 
2756 	if (!sc) {
2757 		HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
2758 		return -EINVAL;
2759 	}
2760 
2761 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2762 
2763 	if (dev_id == sc->devid)
2764 		return 0;
2765 
2766 	HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2767 	       __func__, dev_id);
2768 
2769 	scn->recovery = true;
2770 
2771 	if (cbk && cbk->set_recovery_in_progress)
2772 		cbk->set_recovery_in_progress(cbk->context, true);
2773 	else
2774 		HIF_ERROR("%s: Driver Global Recovery is not set", __func__);
2775 
2776 	pld_is_pci_link_down(sc->dev);
2777 	return -EACCES;
2778 }
2779 
2780 static int hif_unmark_wake_irq_wakeable(struct hif_softc *scn)
2781 {
2782 	int errno;
2783 
2784 	errno = disable_irq_wake(scn->wake_irq);
2785 	if (errno) {
2786 		HIF_ERROR("%s: Failed to unmark wake IRQ: %d", __func__, errno);
2787 		return errno;
2788 	}
2789 
2790 	return 0;
2791 }
2792 
2793 /**
2794  * hif_pci_bus_resume(): prepare hif for resume
2795  *
2796  * Disables pci bus wake irq based on link suspend voting.
2797  *
2798  * Return: 0 for success and non-zero error code for failure
2799  */
2800 int hif_pci_bus_resume(struct hif_softc *scn)
2801 {
2802 	int ret;
2803 
2804 	ret = __hif_check_link_status(scn);
2805 	if (ret)
2806 		return ret;
2807 
2808 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2809 		return 0;
2810 
2811 	/* pci link is up; disable wake irq */
2812 	return hif_unmark_wake_irq_wakeable(scn);
2813 }
2814 
2815 /**
2816  * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2817  * @scn: hif context
2818  *
2819  * Ensure that if we received the wakeup message before the irq
2820  * was disabled that the message is pocessed before suspending.
2821  *
2822  * Return: -EBUSY if we fail to flush the tasklets.
2823  */
2824 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2825 {
2826 	if (hif_drain_tasklets(scn) != 0)
2827 		return -EBUSY;
2828 
2829 	/* Stop the HIF Sleep Timer */
2830 	hif_cancel_deferred_target_sleep(scn);
2831 
2832 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2833 		qdf_atomic_set(&scn->link_suspended, 1);
2834 
2835 	return 0;
2836 }
2837 
2838 /**
2839  * hif_pci_bus_resume_noirq() - ensure there are no pending transactions
2840  * @scn: hif context
2841  *
2842  * Ensure that if we received the wakeup message before the irq
2843  * was disabled that the message is pocessed before suspending.
2844  *
2845  * Return: -EBUSY if we fail to flush the tasklets.
2846  */
2847 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2848 {
2849 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2850 		qdf_atomic_set(&scn->link_suspended, 0);
2851 
2852 	return 0;
2853 }
2854 
2855 #ifdef FEATURE_RUNTIME_PM
2856 /**
2857  * __hif_runtime_pm_set_state(): utility function
2858  * @state: state to set
2859  *
2860  * indexes into the runtime pm state and sets it.
2861  */
2862 static void __hif_runtime_pm_set_state(struct hif_softc *scn,
2863 				enum hif_pm_runtime_state state)
2864 {
2865 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2866 
2867 	if (NULL == sc) {
2868 		HIF_ERROR("%s: HIF_CTX not initialized",
2869 		       __func__);
2870 		return;
2871 	}
2872 
2873 	qdf_atomic_set(&sc->pm_state, state);
2874 }
2875 
2876 /**
2877  * hif_runtime_pm_set_state_inprogress(): adjust runtime pm state
2878  *
2879  * Notify hif that a runtime pm opperation has started
2880  */
2881 static void hif_runtime_pm_set_state_inprogress(struct hif_softc *scn)
2882 {
2883 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_INPROGRESS);
2884 }
2885 
2886 /**
2887  * hif_runtime_pm_set_state_on():  adjust runtime pm state
2888  *
2889  * Notify hif that a the runtime pm state should be on
2890  */
2891 static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
2892 {
2893 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
2894 }
2895 
2896 /**
2897  * hif_runtime_pm_set_state_suspended():  adjust runtime pm state
2898  *
2899  * Notify hif that a runtime suspend attempt has been completed successfully
2900  */
2901 static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
2902 {
2903 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
2904 }
2905 
2906 /**
2907  * hif_log_runtime_suspend_success() - log a successful runtime suspend
2908  */
2909 static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
2910 {
2911 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2912 
2913 	if (sc == NULL)
2914 		return;
2915 
2916 	sc->pm_stats.suspended++;
2917 	sc->pm_stats.suspend_jiffies = jiffies;
2918 }
2919 
2920 /**
2921  * hif_log_runtime_suspend_failure() - log a failed runtime suspend
2922  *
2923  * log a failed runtime suspend
2924  * mark last busy to prevent immediate runtime suspend
2925  */
2926 static void hif_log_runtime_suspend_failure(void *hif_ctx)
2927 {
2928 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2929 
2930 	if (sc == NULL)
2931 		return;
2932 
2933 	sc->pm_stats.suspend_err++;
2934 }
2935 
2936 /**
2937  * hif_log_runtime_resume_success() - log a successful runtime resume
2938  *
2939  * log a successfull runtime resume
2940  * mark last busy to prevent immediate runtime suspend
2941  */
2942 static void hif_log_runtime_resume_success(void *hif_ctx)
2943 {
2944 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2945 
2946 	if (sc == NULL)
2947 		return;
2948 
2949 	sc->pm_stats.resumed++;
2950 }
2951 
2952 /**
2953  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
2954  *
2955  * Record the failure.
2956  * mark last busy to delay a retry.
2957  * adjust the runtime_pm state.
2958  */
2959 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
2960 {
2961 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2962 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2963 
2964 	hif_log_runtime_suspend_failure(hif_ctx);
2965 	if (hif_pci_sc != NULL)
2966 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2967 	hif_runtime_pm_set_state_on(scn);
2968 }
2969 
2970 /**
2971  * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
2972  *
2973  * Makes sure that the pci link will be taken down by the suspend opperation.
2974  * If the hif layer is configured to leave the bus on, runtime suspend will
2975  * not save any power.
2976  *
2977  * Set the runtime suspend state to in progress.
2978  *
2979  * return -EINVAL if the bus won't go down.  otherwise return 0
2980  */
2981 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
2982 {
2983 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2984 
2985 	if (!hif_can_suspend_link(hif_ctx)) {
2986 		HIF_ERROR("Runtime PM not supported for link up suspend");
2987 		return -EINVAL;
2988 	}
2989 
2990 	hif_runtime_pm_set_state_inprogress(scn);
2991 	return 0;
2992 }
2993 
2994 /**
2995  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
2996  *
2997  * Record the success.
2998  * adjust the runtime_pm state
2999  */
3000 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
3001 {
3002 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3003 
3004 	hif_runtime_pm_set_state_suspended(scn);
3005 	hif_log_runtime_suspend_success(scn);
3006 }
3007 
3008 /**
3009  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
3010  *
3011  * update the runtime pm state.
3012  */
3013 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
3014 {
3015 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3016 
3017 	hif_runtime_pm_set_state_inprogress(scn);
3018 }
3019 
3020 /**
3021  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
3022  *
3023  * record the success.
3024  * adjust the runtime_pm state
3025  */
3026 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
3027 {
3028 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
3029 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3030 
3031 	hif_log_runtime_resume_success(hif_ctx);
3032 	if (hif_pci_sc != NULL)
3033 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
3034 	hif_runtime_pm_set_state_on(scn);
3035 }
3036 
3037 /**
3038  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
3039  *
3040  * Return: 0 for success and non-zero error code for failure
3041  */
3042 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
3043 {
3044 	int errno;
3045 
3046 	errno = hif_bus_suspend(hif_ctx);
3047 	if (errno) {
3048 		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
3049 		return errno;
3050 	}
3051 
3052 	errno = hif_apps_irqs_disable(hif_ctx);
3053 	if (errno) {
3054 		HIF_ERROR("%s: failed disable irqs: %d", __func__, errno);
3055 		goto bus_resume;
3056 	}
3057 
3058 	errno = hif_bus_suspend_noirq(hif_ctx);
3059 	if (errno) {
3060 		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
3061 		goto irqs_enable;
3062 	}
3063 
3064 	/* link should always be down; skip enable wake irq */
3065 
3066 	return 0;
3067 
3068 irqs_enable:
3069 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
3070 
3071 bus_resume:
3072 	QDF_BUG(!hif_bus_resume(hif_ctx));
3073 
3074 	return errno;
3075 }
3076 
3077 /**
3078  * hif_fastpath_resume() - resume fastpath for runtimepm
3079  *
3080  * ensure that the fastpath write index register is up to date
3081  * since runtime pm may cause ce_send_fast to skip the register
3082  * write.
3083  *
3084  * fastpath only applicable to legacy copy engine
3085  */
3086 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
3087 {
3088 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3089 	struct CE_state *ce_state;
3090 
3091 	if (!scn)
3092 		return;
3093 
3094 	if (scn->fastpath_mode_on) {
3095 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
3096 			return;
3097 
3098 		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
3099 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
3100 
3101 		/*war_ce_src_ring_write_idx_set */
3102 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
3103 				ce_state->src_ring->write_index);
3104 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
3105 		Q_TARGET_ACCESS_END(scn);
3106 	}
3107 }
3108 
3109 /**
3110  * hif_runtime_resume() - do the bus resume part of a runtime resume
3111  *
3112  *  Return: 0 for success and non-zero error code for failure
3113  */
3114 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
3115 {
3116 	/* link should always be down; skip disable wake irq */
3117 
3118 	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
3119 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
3120 	QDF_BUG(!hif_bus_resume(hif_ctx));
3121 	return 0;
3122 }
3123 #endif /* #ifdef FEATURE_RUNTIME_PM */
3124 
3125 #if CONFIG_PCIE_64BIT_MSI
3126 static void hif_free_msi_ctx(struct hif_softc *scn)
3127 {
3128 	struct hif_pci_softc *sc = scn->hif_sc;
3129 	struct hif_msi_info *info = &sc->msi_info;
3130 	struct device *dev = scn->qdf_dev->dev;
3131 
3132 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
3133 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
3134 	info->magic = NULL;
3135 	info->magic_dma = 0;
3136 }
3137 #else
3138 static void hif_free_msi_ctx(struct hif_softc *scn)
3139 {
3140 }
3141 #endif
3142 
3143 void hif_pci_disable_isr(struct hif_softc *scn)
3144 {
3145 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3146 
3147 	hif_exec_kill(&scn->osc);
3148 	hif_nointrs(scn);
3149 	hif_free_msi_ctx(scn);
3150 	/* Cancel the pending tasklet */
3151 	ce_tasklet_kill(scn);
3152 	tasklet_kill(&sc->intr_tq);
3153 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
3154 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
3155 }
3156 
3157 /* Function to reset SoC */
3158 void hif_pci_reset_soc(struct hif_softc *hif_sc)
3159 {
3160 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
3161 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
3162 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
3163 
3164 #if defined(CPU_WARM_RESET_WAR)
3165 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
3166 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
3167 	 * verified for AR9888_REV1
3168 	 */
3169 	if (tgt_info->target_version == AR9888_REV2_VERSION)
3170 		hif_pci_device_warm_reset(sc);
3171 	else
3172 		hif_pci_device_reset(sc);
3173 #else
3174 	hif_pci_device_reset(sc);
3175 #endif
3176 }
3177 
3178 #ifdef CONFIG_PCI_MSM
3179 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
3180 {
3181 	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
3182 	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
3183 }
3184 #else
3185 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {};
3186 #endif
3187 
3188 /**
3189  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
3190  * @sc: HIF PCIe Context
3191  *
3192  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
3193  *
3194  * Return: Failure to caller
3195  */
3196 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
3197 {
3198 	uint16_t val = 0;
3199 	uint32_t bar = 0;
3200 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
3201 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
3202 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
3203 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
3204 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
3205 	A_target_id_t pci_addr = scn->mem;
3206 
3207 	HIF_ERROR("%s: keep_awake_count = %d",
3208 			__func__, hif_state->keep_awake_count);
3209 
3210 	pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
3211 
3212 	HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
3213 
3214 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
3215 
3216 	HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
3217 
3218 	pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
3219 
3220 	HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
3221 
3222 	pci_read_config_word(sc->pdev, PCI_STATUS, &val);
3223 
3224 	HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
3225 
3226 	pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
3227 
3228 	HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
3229 
3230 	HIF_ERROR("%s: SOC_WAKE_ADDR 0%08x", __func__,
3231 			hif_read32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3232 						PCIE_SOC_WAKE_ADDRESS));
3233 
3234 	HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
3235 			hif_read32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3236 							RTC_STATE_ADDRESS));
3237 
3238 	HIF_ERROR("%s:error, wakeup target", __func__);
3239 	hif_msm_pcie_debug_info(sc);
3240 
3241 	if (!cfg->enable_self_recovery)
3242 		QDF_BUG(0);
3243 
3244 	scn->recovery = true;
3245 
3246 	if (cbk->set_recovery_in_progress)
3247 		cbk->set_recovery_in_progress(cbk->context, true);
3248 
3249 	pld_is_pci_link_down(sc->dev);
3250 	return -EACCES;
3251 }
3252 
3253 /*
3254  * For now, we use simple on-demand sleep/wake.
3255  * Some possible improvements:
3256  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
3257  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
3258  *   Careful, though, these functions may be used by
3259  *  interrupt handlers ("atomic")
3260  *  -Don't use host_reg_table for this code; instead use values directly
3261  *  -Use a separate timer to track activity and allow Target to sleep only
3262  *   if it hasn't done anything for a while; may even want to delay some
3263  *   processing for a short while in order to "batch" (e.g.) transmit
3264  *   requests with completion processing into "windows of up time".  Costs
3265  *   some performance, but improves power utilization.
3266  *  -On some platforms, it might be possible to eliminate explicit
3267  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
3268  *   recover from the failure by forcing the Target awake.
3269  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
3270  *   overhead in some cases. Perhaps this makes more sense when
3271  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
3272  *   disabled.
3273  *  -It is possible to compile this code out and simply force the Target
3274  *   to remain awake.  That would yield optimal performance at the cost of
3275  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
3276  *
3277  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
3278  */
3279 /**
3280  * hif_target_sleep_state_adjust() - on-demand sleep/wake
3281  * @scn: hif_softc pointer.
3282  * @sleep_ok: bool
3283  * @wait_for_it: bool
3284  *
3285  * Output the pipe error counts of each pipe to log file
3286  *
3287  * Return: int
3288  */
3289 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
3290 			      bool sleep_ok, bool wait_for_it)
3291 {
3292 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3293 	A_target_id_t pci_addr = scn->mem;
3294 	static int max_delay;
3295 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3296 	static int debug;
3297 	if (scn->recovery)
3298 		return -EACCES;
3299 
3300 	if (qdf_atomic_read(&scn->link_suspended)) {
3301 		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
3302 		debug = true;
3303 		QDF_ASSERT(0);
3304 		return -EACCES;
3305 	}
3306 
3307 	if (debug) {
3308 		wait_for_it = true;
3309 		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
3310 				__func__);
3311 		QDF_ASSERT(0);
3312 	}
3313 
3314 	if (sleep_ok) {
3315 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3316 		hif_state->keep_awake_count--;
3317 		if (hif_state->keep_awake_count == 0) {
3318 			/* Allow sleep */
3319 			hif_state->verified_awake = false;
3320 			hif_state->sleep_ticks = qdf_system_ticks();
3321 		}
3322 		if (hif_state->fake_sleep == false) {
3323 			/* Set the Fake Sleep */
3324 			hif_state->fake_sleep = true;
3325 
3326 			/* Start the Sleep Timer */
3327 			qdf_timer_stop(&hif_state->sleep_timer);
3328 			qdf_timer_start(&hif_state->sleep_timer,
3329 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
3330 		}
3331 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3332 	} else {
3333 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3334 
3335 		if (hif_state->fake_sleep) {
3336 			hif_state->verified_awake = true;
3337 		} else {
3338 			if (hif_state->keep_awake_count == 0) {
3339 				/* Force AWAKE */
3340 				hif_write32_mb(pci_addr +
3341 					      PCIE_LOCAL_BASE_ADDRESS +
3342 					      PCIE_SOC_WAKE_ADDRESS,
3343 					      PCIE_SOC_WAKE_V_MASK);
3344 			}
3345 		}
3346 		hif_state->keep_awake_count++;
3347 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3348 
3349 		if (wait_for_it && !hif_state->verified_awake) {
3350 #define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8Ms */
3351 			int tot_delay = 0;
3352 			int curr_delay = 5;
3353 
3354 			for (;; ) {
3355 				if (hif_targ_is_awake(scn, pci_addr)) {
3356 					hif_state->verified_awake = true;
3357 					break;
3358 				}
3359 				if (!hif_pci_targ_is_present(scn, pci_addr))
3360 					break;
3361 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
3362 					return hif_log_soc_wakeup_timeout(sc);
3363 
3364 				OS_DELAY(curr_delay);
3365 				tot_delay += curr_delay;
3366 
3367 				if (curr_delay < 50)
3368 					curr_delay += 5;
3369 			}
3370 
3371 			/*
3372 			 * NB: If Target has to come out of Deep Sleep,
3373 			 * this may take a few Msecs. Typically, though
3374 			 * this delay should be <30us.
3375 			 */
3376 			if (tot_delay > max_delay)
3377 				max_delay = tot_delay;
3378 		}
3379 	}
3380 
3381 	if (debug && hif_state->verified_awake) {
3382 		debug = 0;
3383 		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
3384 			__func__,
3385 			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
3386 				PCIE_INTR_ENABLE_ADDRESS),
3387 			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
3388 				PCIE_INTR_CAUSE_ADDRESS),
3389 			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
3390 				CPU_INTR_ADDRESS),
3391 			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
3392 				PCIE_INTR_CLR_ADDRESS),
3393 			hif_read32_mb(sc->mem + CE_WRAPPER_BASE_ADDRESS +
3394 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
3395 	}
3396 
3397 	return 0;
3398 }
3399 
3400 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
3401 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
3402 {
3403 	uint32_t value;
3404 	void *addr;
3405 
3406 	addr = scn->mem + offset;
3407 	value = hif_read32_mb(addr);
3408 
3409 	{
3410 		unsigned long irq_flags;
3411 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3412 
3413 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3414 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3415 		pcie_access_log[idx].is_write = false;
3416 		pcie_access_log[idx].addr = addr;
3417 		pcie_access_log[idx].value = value;
3418 		pcie_access_log_seqnum++;
3419 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3420 	}
3421 
3422 	return value;
3423 }
3424 
3425 void
3426 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
3427 {
3428 	void *addr;
3429 
3430 	addr = scn->mem + (offset);
3431 	hif_write32_mb(addr, value);
3432 
3433 	{
3434 		unsigned long irq_flags;
3435 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3436 
3437 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3438 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3439 		pcie_access_log[idx].is_write = true;
3440 		pcie_access_log[idx].addr = addr;
3441 		pcie_access_log[idx].value = value;
3442 		pcie_access_log_seqnum++;
3443 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3444 	}
3445 }
3446 
3447 /**
3448  * hif_target_dump_access_log() - dump access log
3449  *
3450  * dump access log
3451  *
3452  * Return: n/a
3453  */
3454 void hif_target_dump_access_log(void)
3455 {
3456 	int idx, len, start_idx, cur_idx;
3457 	unsigned long irq_flags;
3458 
3459 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3460 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
3461 		len = PCIE_ACCESS_LOG_NUM;
3462 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3463 	} else {
3464 		len = pcie_access_log_seqnum;
3465 		start_idx = 0;
3466 	}
3467 
3468 	for (idx = 0; idx < len; idx++) {
3469 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
3470 		HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
3471 		       __func__, idx,
3472 		       pcie_access_log[cur_idx].seqnum,
3473 		       pcie_access_log[cur_idx].is_write,
3474 		       pcie_access_log[cur_idx].addr,
3475 		       pcie_access_log[cur_idx].value);
3476 	}
3477 
3478 	pcie_access_log_seqnum = 0;
3479 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3480 }
3481 #endif
3482 
3483 #ifndef HIF_AHB
3484 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
3485 {
3486 	QDF_BUG(0);
3487 	return -EINVAL;
3488 }
3489 
3490 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
3491 {
3492 	QDF_BUG(0);
3493 	return -EINVAL;
3494 }
3495 #endif
3496 
3497 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
3498 {
3499 	struct ce_tasklet_entry *tasklet_entry = context;
3500 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
3501 }
3502 extern const char *ce_name[];
3503 
3504 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3505 {
3506 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3507 
3508 	return pci_scn->ce_msi_irq_num[ce_id];
3509 }
3510 
3511 /* hif_srng_msi_irq_disable() - disable the irq for msi
3512  * @hif_sc: hif context
3513  * @ce_id: which ce to disable copy complete interrupts for
3514  *
3515  * since MSI interrupts are not level based, the system can function
3516  * without disabling these interrupts.  Interrupt mitigation can be
3517  * added here for better system performance.
3518  */
3519 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3520 {
3521 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3522 }
3523 
3524 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3525 {
3526 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3527 }
3528 
3529 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3530 {}
3531 
3532 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3533 {}
3534 
3535 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
3536 {
3537 	int ret;
3538 	int ce_id, irq;
3539 	uint32_t msi_data_start;
3540 	uint32_t msi_data_count;
3541 	uint32_t msi_irq_start;
3542 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3543 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3544 
3545 	/* do wake irq assignment */
3546 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
3547 					  &msi_data_count, &msi_data_start,
3548 					  &msi_irq_start);
3549 	if (ret)
3550 		return ret;
3551 
3552 	scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_irq_start);
3553 	ret = request_irq(scn->wake_irq, hif_wake_interrupt_handler, 0,
3554 			  "wlan_wake_irq", scn);
3555 	if (ret)
3556 		return ret;
3557 
3558 	/* do ce irq assignments */
3559 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3560 					    &msi_data_count, &msi_data_start,
3561 					    &msi_irq_start);
3562 	if (ret)
3563 		goto free_wake_irq;
3564 
3565 	if (ce_srng_based(scn)) {
3566 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
3567 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
3568 	} else {
3569 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
3570 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
3571 	}
3572 
3573 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
3574 
3575 	/* needs to match the ce_id -> irq data mapping
3576 	 * used in the srng parameter configuration
3577 	 */
3578 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3579 		unsigned int msi_data = (ce_id % msi_data_count) +
3580 			msi_irq_start;
3581 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3582 		HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)",
3583 			 __func__, ce_id, msi_data, irq,
3584 			 &ce_sc->tasklets[ce_id]);
3585 
3586 		/* implies the ce is also initialized */
3587 		if (!ce_sc->tasklets[ce_id].inited)
3588 			continue;
3589 
3590 		pci_sc->ce_msi_irq_num[ce_id] = irq;
3591 		ret = request_irq(irq, hif_ce_interrupt_handler,
3592 				  IRQF_SHARED,
3593 				  ce_name[ce_id],
3594 				  &ce_sc->tasklets[ce_id]);
3595 		if (ret)
3596 			goto free_irq;
3597 	}
3598 
3599 	return ret;
3600 
3601 free_irq:
3602 	/* the request_irq for the last ce_id failed so skip it. */
3603 	while (ce_id > 0 && ce_id < scn->ce_count) {
3604 		unsigned int msi_data;
3605 
3606 		ce_id--;
3607 		msi_data = (ce_id % msi_data_count) + msi_data_start;
3608 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3609 		free_irq(irq, &ce_sc->tasklets[ce_id]);
3610 	}
3611 
3612 free_wake_irq:
3613 	free_irq(scn->wake_irq, scn->qdf_dev->dev);
3614 	scn->wake_irq = 0;
3615 
3616 	return ret;
3617 }
3618 
3619 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3620 {
3621 	int i;
3622 
3623 	for (i = 0; i < hif_ext_group->numirq; i++)
3624 		disable_irq_nosync(hif_ext_group->os_irq[i]);
3625 }
3626 
3627 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3628 {
3629 	int i;
3630 
3631 	for (i = 0; i < hif_ext_group->numirq; i++)
3632 		enable_irq(hif_ext_group->os_irq[i]);
3633 }
3634 
3635 
3636 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3637 			      struct hif_exec_context *hif_ext_group)
3638 {
3639 	int ret = 0;
3640 	int irq = 0;
3641 	int j;
3642 
3643 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3644 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3645 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3646 
3647 	for (j = 0; j < hif_ext_group->numirq; j++) {
3648 		irq = hif_ext_group->irq[j];
3649 
3650 		HIF_DBG("%s: request_irq = %d for grp %d",
3651 			  __func__, irq, hif_ext_group->grp_id);
3652 		ret = request_irq(irq,
3653 				  hif_ext_group_interrupt_handler,
3654 				  IRQF_SHARED, "wlan_EXT_GRP",
3655 				  hif_ext_group);
3656 		if (ret) {
3657 			HIF_ERROR("%s: request_irq failed ret = %d",
3658 				  __func__, ret);
3659 			return -EFAULT;
3660 		}
3661 		hif_ext_group->os_irq[j] = irq;
3662 	}
3663 	hif_ext_group->irq_requested = true;
3664 	return 0;
3665 }
3666 
3667 /**
3668  * hif_configure_irq() - configure interrupt
3669  *
3670  * This function configures interrupt(s)
3671  *
3672  * @sc: PCIe control struct
3673  * @hif_hdl: struct HIF_CE_state
3674  *
3675  * Return: 0 - for success
3676  */
3677 int hif_configure_irq(struct hif_softc *scn)
3678 {
3679 	int ret = 0;
3680 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3681 
3682 	HIF_TRACE("%s: E", __func__);
3683 	if (scn->polled_mode_on) {
3684 		scn->request_irq_done = false;
3685 		return 0;
3686 	}
3687 
3688 	hif_init_reschedule_tasklet_work(sc);
3689 
3690 	ret = hif_ce_msi_configure_irq(scn);
3691 	if (ret == 0) {
3692 		goto end;
3693 	}
3694 
3695 	if (ENABLE_MSI) {
3696 		ret = hif_configure_msi(sc);
3697 		if (ret == 0)
3698 			goto end;
3699 	}
3700 	/* MSI failed. Try legacy irq */
3701 	switch (scn->target_info.target_type) {
3702 	case TARGET_TYPE_IPQ4019:
3703 		ret = hif_ahb_configure_legacy_irq(sc);
3704 		break;
3705 	case TARGET_TYPE_QCA8074:
3706 		ret = hif_ahb_configure_irq(sc);
3707 		break;
3708 	default:
3709 		ret = hif_pci_configure_legacy_irq(sc);
3710 		break;
3711 	}
3712 	if (ret < 0) {
3713 		HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
3714 			__func__, ret);
3715 		return ret;
3716 	}
3717 end:
3718 	scn->request_irq_done = true;
3719 	return 0;
3720 }
3721 
3722 /**
3723  * hif_target_sync() : ensure the target is ready
3724  * @scn: hif controll structure
3725  *
3726  * Informs fw that we plan to use legacy interupts so that
3727  * it can begin booting. Ensures that the fw finishes booting
3728  * before continuing. Should be called before trying to write
3729  * to the targets other registers for the first time.
3730  *
3731  * Return: none
3732  */
3733 static void hif_target_sync(struct hif_softc *scn)
3734 {
3735 	hif_write32_mb(scn->mem+(SOC_CORE_BASE_ADDRESS |
3736 				PCIE_INTR_ENABLE_ADDRESS),
3737 				PCIE_INTR_FIRMWARE_MASK);
3738 
3739 	hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3740 			PCIE_SOC_WAKE_ADDRESS,
3741 			PCIE_SOC_WAKE_V_MASK);
3742 	while (!hif_targ_is_awake(scn, scn->mem))
3743 		;
3744 
3745 	if (HAS_FW_INDICATOR) {
3746 		int wait_limit = 500;
3747 		int fw_ind = 0;
3748 
3749 		HIF_TRACE("%s: Loop checking FW signal", __func__);
3750 		while (1) {
3751 			fw_ind = hif_read32_mb(scn->mem +
3752 					FW_INDICATOR_ADDRESS);
3753 			if (fw_ind & FW_IND_INITIALIZED)
3754 				break;
3755 			if (wait_limit-- < 0)
3756 				break;
3757 			hif_write32_mb(scn->mem+(SOC_CORE_BASE_ADDRESS |
3758 				PCIE_INTR_ENABLE_ADDRESS),
3759 				PCIE_INTR_FIRMWARE_MASK);
3760 
3761 			qdf_mdelay(10);
3762 		}
3763 		if (wait_limit < 0)
3764 			HIF_TRACE("%s: FW signal timed out",
3765 					__func__);
3766 		else
3767 			HIF_TRACE("%s: Got FW signal, retries = %x",
3768 					__func__, 500-wait_limit);
3769 	}
3770 	hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3771 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3772 }
3773 
3774 #ifdef CONFIG_PLD_PCIE_INIT
3775 static void hif_pci_get_soc_info(struct hif_pci_softc *sc, struct device *dev)
3776 {
3777 	struct pld_soc_info info;
3778 
3779 	pld_get_soc_info(dev, &info);
3780 	sc->mem = info.v_addr;
3781 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3782 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3783 }
3784 #else
3785 static void hif_pci_get_soc_info(struct hif_pci_softc *sc, struct device *dev)
3786 {}
3787 #endif
3788 
3789 /**
3790  * hif_enable_bus(): enable bus
3791  *
3792  * This function enables the bus
3793  *
3794  * @ol_sc: soft_sc struct
3795  * @dev: device pointer
3796  * @bdev: bus dev pointer
3797  * bid: bus id pointer
3798  * type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
3799  * Return: QDF_STATUS
3800  */
3801 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3802 			  struct device *dev, void *bdev,
3803 			  const struct hif_bus_id *bid,
3804 			  enum hif_enable_type type)
3805 {
3806 	int ret = 0;
3807 	uint32_t hif_type, target_type;
3808 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3809 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3810 	uint16_t revision_id;
3811 	int probe_again = 0;
3812 	struct pci_dev *pdev = bdev;
3813 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3814 	struct hif_target_info *tgt_info;
3815 
3816 	if (!ol_sc) {
3817 		HIF_ERROR("%s: hif_ctx is NULL", __func__);
3818 		return QDF_STATUS_E_NOMEM;
3819 	}
3820 
3821 	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
3822 		  __func__, hif_get_conparam(ol_sc), id->device);
3823 
3824 	sc->pdev = pdev;
3825 	sc->dev = &pdev->dev;
3826 	sc->devid = id->device;
3827 	sc->cacheline_sz = dma_get_cache_alignment();
3828 	tgt_info = hif_get_target_info_handle(hif_hdl);
3829 	hif_pci_get_soc_info(sc, dev);
3830 again:
3831 	ret = hif_enable_pci(sc, pdev, id);
3832 	if (ret < 0) {
3833 		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
3834 		       __func__, ret);
3835 		goto err_enable_pci;
3836 	}
3837 	HIF_TRACE("%s: hif_enable_pci done", __func__);
3838 
3839 	/* Temporary FIX: disable ASPM on peregrine.
3840 	 * Will be removed after the OTP is programmed
3841 	 */
3842 	hif_disable_power_gating(hif_hdl);
3843 
3844 	device_disable_async_suspend(&pdev->dev);
3845 	pci_read_config_word(pdev, 0x08, &revision_id);
3846 
3847 	ret = hif_get_device_type(id->device, revision_id,
3848 						&hif_type, &target_type);
3849 	if (ret < 0) {
3850 		HIF_ERROR("%s: invalid device id/revision_id", __func__);
3851 		goto err_tgtstate;
3852 	}
3853 	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
3854 		  __func__, hif_type, target_type);
3855 
3856 	hif_register_tbl_attach(ol_sc, hif_type);
3857 	hif_target_register_tbl_attach(ol_sc, target_type);
3858 
3859 	tgt_info->target_type = target_type;
3860 
3861 	if (ce_srng_based(ol_sc)) {
3862 		HIF_TRACE("%s:Skip tgt_wake up for srng devices\n", __func__);
3863 	} else {
3864 		ret = hif_pci_probe_tgt_wakeup(sc);
3865 		if (ret < 0) {
3866 			HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d",
3867 					__func__, ret);
3868 			if (ret == -EAGAIN)
3869 				probe_again++;
3870 			goto err_tgtstate;
3871 		}
3872 		HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
3873 	}
3874 
3875 	if (!ol_sc->mem_pa) {
3876 		HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
3877 		ret = -EIO;
3878 		goto err_tgtstate;
3879 	}
3880 
3881 	if (!ce_srng_based(ol_sc)) {
3882 		hif_target_sync(ol_sc);
3883 
3884 		if (ADRASTEA_BU)
3885 			hif_vote_link_up(hif_hdl);
3886 	}
3887 
3888 	return 0;
3889 
3890 err_tgtstate:
3891 	hif_disable_pci(sc);
3892 	sc->pci_enabled = false;
3893 	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
3894 	return QDF_STATUS_E_ABORTED;
3895 
3896 err_enable_pci:
3897 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3898 		int delay_time;
3899 
3900 		HIF_INFO("%s: pci reprobe", __func__);
3901 		/* 10, 40, 90, 100, 100, ... */
3902 		delay_time = max(100, 10 * (probe_again * probe_again));
3903 		qdf_mdelay(delay_time);
3904 		goto again;
3905 	}
3906 	return ret;
3907 }
3908 
3909 /**
3910  * hif_pci_irq_enable() - ce_irq_enable
3911  * @scn: hif_softc
3912  * @ce_id: ce_id
3913  *
3914  * Return: void
3915  */
3916 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3917 {
3918 	uint32_t tmp = 1 << ce_id;
3919 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3920 
3921 	qdf_spin_lock_irqsave(&sc->irq_lock);
3922 	scn->ce_irq_summary &= ~tmp;
3923 	if (scn->ce_irq_summary == 0) {
3924 		/* Enable Legacy PCI line interrupts */
3925 		if (LEGACY_INTERRUPTS(sc) &&
3926 			(scn->target_status != TARGET_STATUS_RESET) &&
3927 			(!qdf_atomic_read(&scn->link_suspended))) {
3928 
3929 			hif_write32_mb(scn->mem +
3930 				(SOC_CORE_BASE_ADDRESS |
3931 				PCIE_INTR_ENABLE_ADDRESS),
3932 				HOST_GROUP0_MASK);
3933 
3934 			hif_read32_mb(scn->mem +
3935 					(SOC_CORE_BASE_ADDRESS |
3936 					PCIE_INTR_ENABLE_ADDRESS));
3937 		}
3938 	}
3939 	if (scn->hif_init_done == true)
3940 		Q_TARGET_ACCESS_END(scn);
3941 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
3942 
3943 	/* check for missed firmware crash */
3944 	hif_fw_interrupt_handler(0, scn);
3945 }
3946 
3947 /**
3948  * hif_pci_irq_disable() - ce_irq_disable
3949  * @scn: hif_softc
3950  * @ce_id: ce_id
3951  *
3952  * only applicable to legacy copy engine...
3953  *
3954  * Return: void
3955  */
3956 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3957 {
3958 	/* For Rome only need to wake up target */
3959 	/* target access is maintained untill interrupts are re-enabled */
3960 	Q_TARGET_ACCESS_BEGIN(scn);
3961 }
3962 
3963 #ifdef FEATURE_RUNTIME_PM
3964 
3965 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
3966 {
3967 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3968 
3969 	if (NULL == sc)
3970 		return;
3971 
3972 	sc->pm_stats.runtime_get++;
3973 	pm_runtime_get_noresume(sc->dev);
3974 }
3975 
3976 /**
3977  * hif_pm_runtime_get() - do a get opperation on the device
3978  *
3979  * A get opperation will prevent a runtime suspend untill a
3980  * corresponding put is done.  This api should be used when sending
3981  * data.
3982  *
3983  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
3984  * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
3985  *
3986  * return: success if the bus is up and a get has been issued
3987  *   otherwise an error code.
3988  */
3989 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
3990 {
3991 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3992 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3993 	int ret;
3994 	int pm_state;
3995 
3996 	if (NULL == scn) {
3997 		HIF_ERROR("%s: Could not do runtime get, scn is null",
3998 				__func__);
3999 		return -EFAULT;
4000 	}
4001 
4002 	pm_state = qdf_atomic_read(&sc->pm_state);
4003 
4004 	if (pm_state  == HIF_PM_RUNTIME_STATE_ON ||
4005 			pm_state == HIF_PM_RUNTIME_STATE_NONE) {
4006 		sc->pm_stats.runtime_get++;
4007 		ret = __hif_pm_runtime_get(sc->dev);
4008 
4009 		/* Get can return 1 if the device is already active, just return
4010 		 * success in that case
4011 		 */
4012 		if (ret > 0)
4013 			ret = 0;
4014 
4015 		if (ret)
4016 			hif_pm_runtime_put(hif_ctx);
4017 
4018 		if (ret && ret != -EINPROGRESS) {
4019 			sc->pm_stats.runtime_get_err++;
4020 			HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d",
4021 				__func__, qdf_atomic_read(&sc->pm_state), ret);
4022 		}
4023 
4024 		return ret;
4025 	}
4026 
4027 	sc->pm_stats.request_resume++;
4028 	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
4029 	ret = hif_pm_request_resume(sc->dev);
4030 
4031 	return -EAGAIN;
4032 }
4033 
4034 /**
4035  * hif_pm_runtime_put() - do a put opperation on the device
4036  *
4037  * A put opperation will allow a runtime suspend after a corresponding
4038  * get was done.  This api should be used when sending data.
4039  *
4040  * This api will return a failure if runtime pm is stopped
4041  * This api will return failure if it would decrement the usage count below 0.
4042  *
4043  * return: QDF_STATUS_SUCCESS if the put is performed
4044  */
4045 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
4046 {
4047 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4048 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4049 	int pm_state, usage_count;
4050 	char *error = NULL;
4051 
4052 	if (NULL == scn) {
4053 		HIF_ERROR("%s: Could not do runtime put, scn is null",
4054 				__func__);
4055 		return -EFAULT;
4056 	}
4057 	usage_count = atomic_read(&sc->dev->power.usage_count);
4058 
4059 	if (usage_count == 1) {
4060 		pm_state = qdf_atomic_read(&sc->pm_state);
4061 
4062 		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
4063 			error = "Ignoring unexpected put when runtime pm is disabled";
4064 
4065 	} else if (usage_count == 0) {
4066 		error = "PUT Without a Get Operation";
4067 	}
4068 
4069 	if (error) {
4070 		hif_pci_runtime_pm_warn(sc, error);
4071 		return -EINVAL;
4072 	}
4073 
4074 	sc->pm_stats.runtime_put++;
4075 
4076 	hif_pm_runtime_mark_last_busy(sc->dev);
4077 	hif_pm_runtime_put_auto(sc->dev);
4078 
4079 	return 0;
4080 }
4081 
4082 
4083 /**
4084  * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
4085  *                                      reason
4086  * @hif_sc: pci context
4087  * @lock: runtime_pm lock being acquired
4088  *
4089  * Return 0 if successful.
4090  */
4091 static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
4092 		*hif_sc, struct hif_pm_runtime_lock *lock)
4093 {
4094 	int ret = 0;
4095 
4096 	/*
4097 	 * We shouldn't be setting context->timeout to zero here when
4098 	 * context is active as we will have a case where Timeout API's
4099 	 * for the same context called back to back.
4100 	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
4101 	 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
4102 	 * API to ensure the timeout version is no more active and
4103 	 * list entry of this context will be deleted during allow suspend.
4104 	 */
4105 	if (lock->active)
4106 		return 0;
4107 
4108 	ret = __hif_pm_runtime_get(hif_sc->dev);
4109 
4110 	/**
4111 	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
4112 	 * RPM_SUSPENDING. Any other negative value is an error.
4113 	 * We shouldn't be do runtime_put here as in later point allow
4114 	 * suspend gets called with the the context and there the usage count
4115 	 * is decremented, so suspend will be prevented.
4116 	 */
4117 
4118 	if (ret < 0 && ret != -EINPROGRESS) {
4119 		hif_sc->pm_stats.runtime_get_err++;
4120 		hif_pci_runtime_pm_warn(hif_sc,
4121 				"Prevent Suspend Runtime PM Error");
4122 	}
4123 
4124 	hif_sc->prevent_suspend_cnt++;
4125 
4126 	lock->active = true;
4127 
4128 	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);
4129 
4130 	hif_sc->pm_stats.prevent_suspend++;
4131 
4132 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4133 		hif_pm_runtime_state_to_string(
4134 			qdf_atomic_read(&hif_sc->pm_state)),
4135 					ret);
4136 
4137 	return ret;
4138 }
4139 
4140 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
4141 		struct hif_pm_runtime_lock *lock)
4142 {
4143 	int ret = 0;
4144 	int usage_count;
4145 
4146 	if (hif_sc->prevent_suspend_cnt == 0)
4147 		return ret;
4148 
4149 	if (!lock->active)
4150 		return ret;
4151 
4152 	usage_count = atomic_read(&hif_sc->dev->power.usage_count);
4153 
4154 	/*
4155 	 * During Driver unload, platform driver increments the usage
4156 	 * count to prevent any runtime suspend getting called.
4157 	 * So during driver load in HIF_PM_RUNTIME_STATE_NONE state the
4158 	 * usage_count should be one. Ideally this shouldn't happen as
4159 	 * context->active should be active for allow suspend to happen
4160 	 * Handling this case here to prevent any failures.
4161 	 */
4162 	if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
4163 				&& usage_count == 1) || usage_count == 0) {
4164 		hif_pci_runtime_pm_warn(hif_sc,
4165 				"Allow without a prevent suspend");
4166 		return -EINVAL;
4167 	}
4168 
4169 	list_del(&lock->list);
4170 
4171 	hif_sc->prevent_suspend_cnt--;
4172 
4173 	lock->active = false;
4174 	lock->timeout = 0;
4175 
4176 	hif_pm_runtime_mark_last_busy(hif_sc->dev);
4177 	ret = hif_pm_runtime_put_auto(hif_sc->dev);
4178 
4179 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4180 		hif_pm_runtime_state_to_string(
4181 			qdf_atomic_read(&hif_sc->pm_state)),
4182 					ret);
4183 
4184 	hif_sc->pm_stats.allow_suspend++;
4185 	return ret;
4186 }
4187 
4188 /**
4189  * hif_pm_runtime_lock_timeout_fn() - callback the runtime lock timeout
4190  * @data: calback data that is the pci context
4191  *
4192  * if runtime locks are acquired with a timeout, this function releases
4193  * the locks when the last runtime lock expires.
4194  *
4195  * dummy implementation until lock acquisition is implemented.
4196  */
4197 static void hif_pm_runtime_lock_timeout_fn(unsigned long data)
4198 {
4199 	struct hif_pci_softc *hif_sc = (struct hif_pci_softc *)data;
4200 	unsigned long timer_expires;
4201 	struct hif_pm_runtime_lock *context, *temp;
4202 
4203 	spin_lock_bh(&hif_sc->runtime_lock);
4204 
4205 	timer_expires = hif_sc->runtime_timer_expires;
4206 
4207 	/* Make sure we are not called too early, this should take care of
4208 	 * following case
4209 	 *
4210 	 * CPU0                         CPU1 (timeout function)
4211 	 * ----                         ----------------------
4212 	 * spin_lock_irq
4213 	 *                              timeout function called
4214 	 *
4215 	 * mod_timer()
4216 	 *
4217 	 * spin_unlock_irq
4218 	 *                              spin_lock_irq
4219 	 */
4220 	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
4221 		hif_sc->runtime_timer_expires = 0;
4222 		list_for_each_entry_safe(context, temp,
4223 				&hif_sc->prevent_suspend_list, list) {
4224 			if (context->timeout) {
4225 				__hif_pm_runtime_allow_suspend(hif_sc, context);
4226 				hif_sc->pm_stats.allow_suspend_timeout++;
4227 			}
4228 		}
4229 	}
4230 
4231 	spin_unlock_bh(&hif_sc->runtime_lock);
4232 }
4233 
4234 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
4235 		struct hif_pm_runtime_lock *data)
4236 {
4237 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4238 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4239 	struct hif_pm_runtime_lock *context = data;
4240 
4241 	if (!sc->hif_config.enable_runtime_pm)
4242 		return 0;
4243 
4244 	if (!context)
4245 		return -EINVAL;
4246 
4247 	if (in_irq())
4248 		WARN_ON(1);
4249 
4250 	spin_lock_bh(&hif_sc->runtime_lock);
4251 	context->timeout = 0;
4252 	__hif_pm_runtime_prevent_suspend(hif_sc, context);
4253 	spin_unlock_bh(&hif_sc->runtime_lock);
4254 
4255 	return 0;
4256 }
4257 
4258 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
4259 				struct hif_pm_runtime_lock *data)
4260 {
4261 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4262 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4263 	struct hif_pm_runtime_lock *context = data;
4264 
4265 	if (!sc->hif_config.enable_runtime_pm)
4266 		return 0;
4267 
4268 	if (!context)
4269 		return -EINVAL;
4270 
4271 	if (in_irq())
4272 		WARN_ON(1);
4273 
4274 	spin_lock_bh(&hif_sc->runtime_lock);
4275 
4276 	__hif_pm_runtime_allow_suspend(hif_sc, context);
4277 
4278 	/* The list can be empty as well in cases where
4279 	 * we have one context in the list and the allow
4280 	 * suspend came before the timer expires and we delete
4281 	 * context above from the list.
4282 	 * When list is empty prevent_suspend count will be zero.
4283 	 */
4284 	if (hif_sc->prevent_suspend_cnt == 0 &&
4285 			hif_sc->runtime_timer_expires > 0) {
4286 		del_timer(&hif_sc->runtime_timer);
4287 		hif_sc->runtime_timer_expires = 0;
4288 	}
4289 
4290 	spin_unlock_bh(&hif_sc->runtime_lock);
4291 
4292 	return 0;
4293 }
4294 
4295 /**
4296  * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
4297  * @ol_sc: HIF context
4298  * @lock: which lock is being acquired
4299  * @delay: Timeout in milliseconds
4300  *
4301  * Prevent runtime suspend with a timeout after which runtime suspend would be
4302  * allowed. This API uses a single timer to allow the suspend and timer is
4303  * modified if the timeout is changed before timer fires.
4304  * If the timeout is less than autosuspend_delay then use mark_last_busy instead
4305  * of starting the timer.
4306  *
4307  * It is wise to try not to use this API and correct the design if possible.
4308  *
4309  * Return: 0 on success and negative error code on failure
4310  */
4311 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
4312 		struct hif_pm_runtime_lock *lock, unsigned int delay)
4313 {
4314 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4315 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);
4316 
4317 	int ret = 0;
4318 	unsigned long expires;
4319 	struct hif_pm_runtime_lock *context = lock;
4320 
4321 	if (hif_is_load_or_unload_in_progress(sc)) {
4322 		HIF_ERROR("%s: Load/unload in progress, ignore!",
4323 				__func__);
4324 		return -EINVAL;
4325 	}
4326 
4327 	if (hif_is_recovery_in_progress(sc)) {
4328 		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
4329 		return -EINVAL;
4330 	}
4331 
4332 	if (!sc->hif_config.enable_runtime_pm)
4333 		return 0;
4334 
4335 	if (!context)
4336 		return -EINVAL;
4337 
4338 	if (in_irq())
4339 		WARN_ON(1);
4340 
4341 	/*
4342 	 * Don't use internal timer if the timeout is less than auto suspend
4343 	 * delay.
4344 	 */
4345 	if (delay <= hif_sc->dev->power.autosuspend_delay) {
4346 		hif_pm_request_resume(hif_sc->dev);
4347 		hif_pm_runtime_mark_last_busy(hif_sc->dev);
4348 		return ret;
4349 	}
4350 
4351 	expires = jiffies + msecs_to_jiffies(delay);
4352 	expires += !expires;
4353 
4354 	spin_lock_bh(&hif_sc->runtime_lock);
4355 
4356 	context->timeout = delay;
4357 	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
4358 	hif_sc->pm_stats.prevent_suspend_timeout++;
4359 
4360 	/* Modify the timer only if new timeout is after already configured
4361 	 * timeout
4362 	 */
4363 	if (time_after(expires, hif_sc->runtime_timer_expires)) {
4364 		mod_timer(&hif_sc->runtime_timer, expires);
4365 		hif_sc->runtime_timer_expires = expires;
4366 	}
4367 
4368 	spin_unlock_bh(&hif_sc->runtime_lock);
4369 
4370 	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
4371 		hif_pm_runtime_state_to_string(
4372 			qdf_atomic_read(&hif_sc->pm_state)),
4373 					delay, ret);
4374 
4375 	return ret;
4376 }
4377 
4378 /**
4379  * hif_runtime_lock_init() - API to initialize Runtime PM context
4380  * @name: Context name
4381  *
4382  * This API initializes the Runtime PM context of the caller and
4383  * return the pointer.
4384  *
4385  * Return: None
4386  */
4387 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
4388 {
4389 	struct hif_pm_runtime_lock *context;
4390 
4391 	HIF_INFO("Initializing Runtime PM wakelock %s", name);
4392 
4393 	context = qdf_mem_malloc(sizeof(*context));
4394 	if (!context) {
4395 		HIF_ERROR("%s: No memory for Runtime PM wakelock context",
4396 			  __func__);
4397 		return -ENOMEM;
4398 	}
4399 
4400 	context->name = name ? name : "Default";
4401 	lock->lock = context;
4402 
4403 	return 0;
4404 }
4405 
4406 /**
4407  * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
4408  * @data: Runtime PM context
4409  *
4410  * Return: void
4411  */
4412 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
4413 			     struct hif_pm_runtime_lock *data)
4414 {
4415 	struct hif_pm_runtime_lock *context = data;
4416 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4417 
4418 	if (!context) {
4419 		HIF_ERROR("Runtime PM wakelock context is NULL");
4420 		return;
4421 	}
4422 
4423 	HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);
4424 
4425 	/*
4426 	 * Ensure to delete the context list entry and reduce the usage count
4427 	 * before freeing the context if context is active.
4428 	 */
4429 	if (sc) {
4430 		spin_lock_bh(&sc->runtime_lock);
4431 		__hif_pm_runtime_allow_suspend(sc, context);
4432 		spin_unlock_bh(&sc->runtime_lock);
4433 	}
4434 
4435 	qdf_mem_free(context);
4436 }
4437 #endif /* FEATURE_RUNTIME_PM */
4438 
4439 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
4440 {
4441 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4442 
4443 	/* legacy case only has one irq */
4444 	return pci_scn->irq;
4445 }
4446 
4447 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
4448 {
4449 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
4450 	struct hif_target_info *tgt_info;
4451 
4452 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
4453 
4454 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
4455 	    tgt_info->target_type == TARGET_TYPE_QCA8074) {
4456 		/*
4457 		 * Need to consider offset's memtype for QCA6290/QCA8074,
4458 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
4459 		 * well initialized/defined.
4460 		 */
4461 		return 0;
4462 	}
4463 
4464 	if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE)
4465 		 || (offset + sizeof(unsigned int) <= sc->mem_len)) {
4466 		return 0;
4467 	}
4468 
4469 	HIF_TRACE("Refusing to read memory at 0x%x - 0x%lx (max 0x%zx)\n",
4470 		 offset, offset + sizeof(unsigned int), sc->mem_len);
4471 
4472 	return -EINVAL;
4473 }
4474 
4475 /**
4476  * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
4477  * @scn: hif context
4478  *
4479  * Return: true if soc needs driver bmi otherwise false
4480  */
4481 bool hif_pci_needs_bmi(struct hif_softc *scn)
4482 {
4483 	return !ce_srng_based(scn);
4484 }
4485