xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/pcie/if_pci.c (revision 3149adf58a329e17232a4c0e58d460d025edd55a)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
5  *
6  *
7  * Permission to use, copy, modify, and/or distribute this software for
8  * any purpose with or without fee is hereby granted, provided that the
9  * above copyright notice and this permission notice appear in all
10  * copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
13  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
14  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
15  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
16  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
17  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
18  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
19  * PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 /*
23  * This file was originally distributed by Qualcomm Atheros, Inc.
24  * under proprietary terms before Copyright ownership was assigned
25  * to the Linux Foundation.
26  */
27 
28 #include <linux/pci.h>
29 #include <linux/slab.h>
30 #include <linux/interrupt.h>
31 #include <linux/if_arp.h>
32 #ifdef CONFIG_PCI_MSM
33 #include <linux/msm_pcie.h>
34 #endif
35 #include "hif_io32.h"
36 #include "if_pci.h"
37 #include "hif.h"
38 #include "target_type.h"
39 #include "hif_main.h"
40 #include "ce_main.h"
41 #include "ce_api.h"
42 #include "ce_internal.h"
43 #include "ce_reg.h"
44 #include "ce_bmi.h"
45 #include "regtable.h"
46 #include "hif_hw_version.h"
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include "qdf_status.h"
50 #include "qdf_atomic.h"
51 #include "pld_common.h"
52 #include "mp_dev.h"
53 #include "hif_debug.h"
54 
55 #include "if_pci_internal.h"
56 #include "ce_tasklet.h"
57 #include "targaddrs.h"
58 #include "hif_exec.h"
59 
60 #include "pci_api.h"
61 #include "ahb_api.h"
62 
63 /* Maximum ms timeout for host to wake up target */
64 #define PCIE_WAKE_TIMEOUT 1000
65 #define RAMDUMP_EVENT_TIMEOUT 2500
66 
67 /* Setting SOC_GLOBAL_RESET during driver unload causes an intermittent
68  * PCIe data bus error.
69  * As a workaround for this issue, the reset sequence is changed to
70  * use a Target CPU warm reset instead of SOC_GLOBAL_RESET.
71  */
72 #define CPU_WARM_RESET_WAR
73 
74 #ifdef CONFIG_WIN
75 extern int32_t frac, intval, ar900b_20_targ_clk, qca9888_20_targ_clk;
76 #endif
77 
78 /*
79  * Top-level interrupt handler for all PCI interrupts from a Target.
80  * When a block of MSI interrupts is allocated, this top-level handler
81  * is not used; instead, we directly call the correct sub-handler.
82  */
83 struct ce_irq_reg_table {
84 	uint32_t irq_enable;
85 	uint32_t irq_status;
86 };
87 
88 #ifndef QCA_WIFI_3_0_ADRASTEA
89 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
90 {
91 }
92 #else
93 void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
94 {
95 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
96 	unsigned int target_enable0, target_enable1;
97 	unsigned int target_cause0, target_cause1;
98 
99 	target_enable0 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_0);
100 	target_enable1 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_1);
101 	target_cause0 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_0);
102 	target_cause1 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_1);
103 
104 	if ((target_enable0 & target_cause0) ||
105 	    (target_enable1 & target_cause1)) {
106 		hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_0, 0);
107 		hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_1, 0);
108 
109 		if (scn->notice_send)
110 			pld_intr_notify_q6(sc->dev);
111 	}
112 }
113 #endif
114 
115 
116 /**
117  * pci_dispatch_interrupt() - dispatch CE interrupts to per-CE tasklets
118  * @scn: hif context
119  *
120  * Return: none
121  */
122 static void pci_dispatch_interrupt(struct hif_softc *scn)
123 {
124 	uint32_t intr_summary;
125 	int id;
126 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
127 
128 	if (scn->hif_init_done != true)
129 		return;
130 
131 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
132 		return;
133 
134 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
135 
136 	if (intr_summary == 0) {
137 		if ((scn->target_status != TARGET_STATUS_RESET) &&
138 			(!qdf_atomic_read(&scn->link_suspended))) {
139 
140 			hif_write32_mb(scn->mem +
141 				(SOC_CORE_BASE_ADDRESS |
142 				PCIE_INTR_ENABLE_ADDRESS),
143 				HOST_GROUP0_MASK);
144 
145 			hif_read32_mb(scn->mem +
146 					(SOC_CORE_BASE_ADDRESS |
147 					PCIE_INTR_ENABLE_ADDRESS));
148 		}
149 		Q_TARGET_ACCESS_END(scn);
150 		return;
151 	}
152 	Q_TARGET_ACCESS_END(scn);
153 
154 	scn->ce_irq_summary = intr_summary;
155 	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
156 		if (intr_summary & (1 << id)) {
157 			intr_summary &= ~(1 << id);
158 			ce_dispatch_interrupt(id,  &hif_state->tasklets[id]);
159 		}
160 	}
161 }
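
/*
 * Example (illustrative): with an interrupt summary of 0x5, the loop
 * above schedules the tasklets for CE 0 and CE 2, clearing each bit
 * from its local copy as it is dispatched; CEs whose summary bit is
 * not set are skipped.
 */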
162 
163 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
164 {
165 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
166 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
167 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
168 
169 	volatile int tmp;
170 	uint16_t val = 0;
171 	uint32_t bar0 = 0;
172 	uint32_t fw_indicator_address, fw_indicator;
173 	bool ssr_irq = false;
174 	unsigned int host_cause, host_enable;
175 
176 	if (LEGACY_INTERRUPTS(sc)) {
177 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
178 			return IRQ_HANDLED;
179 
180 		if (ADRASTEA_BU) {
181 			host_enable = hif_read32_mb(sc->mem +
182 						    PCIE_INTR_ENABLE_ADDRESS);
183 			host_cause = hif_read32_mb(sc->mem +
184 						   PCIE_INTR_CAUSE_ADDRESS);
185 			if (!(host_enable & host_cause)) {
186 				hif_pci_route_adrastea_interrupt(sc);
187 				return IRQ_HANDLED;
188 			}
189 		}
190 
191 		/* Clear Legacy PCI line interrupts
192 		 * IMPORTANT: the INTR_CLR register has to be written
193 		 * after INTR_ENABLE is set to 0; otherwise the
194 		 * interrupt cannot actually be cleared.
195 		 */
196 		hif_write32_mb(sc->mem +
197 			      (SOC_CORE_BASE_ADDRESS |
198 			       PCIE_INTR_ENABLE_ADDRESS), 0);
199 
200 		hif_write32_mb(sc->mem +
201 			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
202 			       ADRASTEA_BU ?
203 			       (host_enable & host_cause) :
204 			      HOST_GROUP0_MASK);
205 
206 		if (ADRASTEA_BU)
207 			hif_write32_mb(sc->mem + 0x2f100c, (host_cause >> 1));
208 
209 		/* IMPORTANT: this extra read transaction is required to
210 		 * flush the posted write buffer
211 		 */
212 		if (!ADRASTEA_BU) {
213 			tmp =
214 				hif_read32_mb(sc->mem +
215 					     (SOC_CORE_BASE_ADDRESS |
216 					      PCIE_INTR_ENABLE_ADDRESS));
217 
218 			if (tmp == 0xdeadbeef) {
219 				HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
220 				       __func__);
221 
222 				pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
223 				HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
224 				       __func__, val);
225 
226 				pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
227 				HIF_ERROR("%s: PCI Device ID = 0x%04x",
228 				       __func__, val);
229 
230 				pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
231 				HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
232 				       val);
233 
234 				pci_read_config_word(sc->pdev, PCI_STATUS, &val);
235 				HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
236 				       val);
237 
238 				pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
239 						      &bar0);
240 				HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
241 				       bar0);
242 
243 				HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
244 					  __func__,
245 					  hif_read32_mb(sc->mem +
246 							PCIE_LOCAL_BASE_ADDRESS
247 							+ RTC_STATE_ADDRESS));
248 				HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
249 					  __func__,
250 					  hif_read32_mb(sc->mem +
251 							PCIE_LOCAL_BASE_ADDRESS
252 							+ PCIE_SOC_WAKE_ADDRESS));
253 				HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
254 					  __func__,
255 					  hif_read32_mb(sc->mem + 0x80008),
256 					  hif_read32_mb(sc->mem + 0x8000c));
257 				HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
258 					  __func__,
259 					  hif_read32_mb(sc->mem + 0x80010),
260 					  hif_read32_mb(sc->mem + 0x80014));
261 				HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
262 					  __func__,
263 					  hif_read32_mb(sc->mem + 0x80018),
264 					  hif_read32_mb(sc->mem + 0x8001c));
265 				QDF_BUG(0);
266 			}
267 
268 			PCI_CLR_CAUSE0_REGISTER(sc);
269 		}
270 
271 		if (HAS_FW_INDICATOR) {
272 			fw_indicator_address = hif_state->fw_indicator_address;
273 			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
274 			if ((fw_indicator != ~0) &&
275 			   (fw_indicator & FW_IND_EVENT_PENDING))
276 				ssr_irq = true;
277 		}
278 
279 		if (Q_TARGET_ACCESS_END(scn) < 0)
280 			return IRQ_HANDLED;
281 	}
282 	/* TBDXXX: Add support for WMAC */
283 
284 	if (ssr_irq) {
285 		sc->irq_event = irq;
286 		qdf_atomic_set(&scn->tasklet_from_intr, 1);
287 
288 		qdf_atomic_inc(&scn->active_tasklet_cnt);
289 		tasklet_schedule(&sc->intr_tq);
290 	} else {
291 		pci_dispatch_interrupt(scn);
292 	}
293 
294 	return IRQ_HANDLED;
295 }
296 
297 static irqreturn_t hif_pci_msi_fw_handler(int irq, void *arg)
298 {
299 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
300 
301 	(irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, arg);
302 
303 	return IRQ_HANDLED;
304 }
305 
306 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
307 {
308 	return 1;               /* FIX THIS */
309 }
310 
311 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
312 {
313 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
314 	int i = 0;
315 
316 	if (!irq || !size) {
317 		return -EINVAL;
318 	}
319 
320 	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
321 		irq[0] = sc->irq;
322 		return 1;
323 	}
324 
325 	if (sc->num_msi_intrs > size) {
326 		qdf_print("Not enough space in irq buffer to return irqs\n");
327 		return -EINVAL;
328 	}
329 
330 	for (i = 0; i < sc->num_msi_intrs; i++) {
331 		irq[i] = sc->irq +  i + MSI_ASSIGN_CE_INITIAL;
332 	}
333 
334 	return sc->num_msi_intrs;
335 }
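
/*
 * Example (illustrative, hypothetical caller): callers pass a buffer
 * sized for all MSI vectors; the buffer and handle names here are
 * assumptions, not actual driver code:
 *
 *	int irqs[8];
 *	int n = hif_get_irq_num(hif_hdl, irqs, ARRAY_SIZE(irqs));
 *
 *	if (n < 0)
 *		return n;	(-EINVAL on a bad or undersized buffer)
 */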
336 
337 
338 /**
339  * hif_pci_cancel_deferred_target_sleep() - cancel the deferred target sleep
340  * @scn: hif_softc
341  *
342  * Return: void
343  */
344 #if CONFIG_ATH_PCIE_MAX_PERF == 0
345 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
346 {
347 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
348 	A_target_id_t pci_addr = scn->mem;
349 
350 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
351 	/*
352 	 * If the deferred sleep timer is running, cancel it
353 	 * and put the SoC to sleep.
354 	 */
355 	if (hif_state->fake_sleep == true) {
356 		qdf_timer_stop(&hif_state->sleep_timer);
357 		if (hif_state->verified_awake == false) {
358 			hif_write32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
359 				      PCIE_SOC_WAKE_ADDRESS,
360 				      PCIE_SOC_WAKE_RESET);
361 		}
362 		hif_state->fake_sleep = false;
363 	}
364 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
365 }
366 #else
367 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
368 {
369 }
370 #endif
371 
372 #define A_PCIE_LOCAL_REG_READ(mem, addr) \
373 	hif_read32_mb((char *)(mem) + \
374 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
375 
376 #define A_PCIE_LOCAL_REG_WRITE(mem, addr, val) \
377 	hif_write32_mb(((char *)(mem) + \
378 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
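
/*
 * Example (illustrative): these helpers access the PCIe local register
 * space. The reset paths below use them to force the SoC awake before
 * touching SOC_GLOBAL_RESET_ADDRESS, roughly:
 *
 *	A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS,
 *			       PCIE_SOC_WAKE_V_MASK);
 *	while (!hif_targ_is_awake(scn, mem))
 *		qdf_mdelay(1);
 *
 * (The actual loops bound the wait with ATH_PCI_RESET_WAIT_MAX.)
 */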
379 
380 #ifdef QCA_WIFI_3_0
381 /**
382  * hif_targ_is_awake() - check to see if the target is awake
383  * @hif_ctx: hif context
 * @mem: mapped device memory
384  *
385  * Emulation never goes to sleep.
386  *
387  * Return: true if target is awake
388  */
389 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
390 {
391 	return true;
392 }
393 #else
394 /**
395  * hif_targ_is_awake() - check to see if the target is awake
396  * @scn: hif context
 * @mem: mapped device memory
397  *
398  * Return: true if the target's clocks are on
399  */
400 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
401 {
402 	uint32_t val;
403 
404 	if (scn->recovery)
405 		return false;
406 	val = hif_read32_mb(mem + PCIE_LOCAL_BASE_ADDRESS
407 		+ RTC_STATE_ADDRESS);
408 	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
409 }
410 #endif
411 
412 #define ATH_PCI_RESET_WAIT_MAX 10       /* ms */
413 static void hif_pci_device_reset(struct hif_pci_softc *sc)
414 {
415 	void __iomem *mem = sc->mem;
416 	int i;
417 	uint32_t val;
418 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
419 
420 	if (!scn->hostdef)
421 		return;
422 
423 	/* NB: Don't check resetok here.  This form of reset
424 	 * is integral to correct operation.
425 	 */
426 
427 	if (!SOC_GLOBAL_RESET_ADDRESS)
428 		return;
429 
430 	if (!mem)
431 		return;
432 
433 	HIF_ERROR("%s: Reset Device", __func__);
434 
435 	/*
436 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
437 	 * writing WAKE_V, the Target may scribble over Host memory!
438 	 */
439 	A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS,
440 			       PCIE_SOC_WAKE_V_MASK);
441 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
442 		if (hif_targ_is_awake(scn, mem))
443 			break;
444 
445 		qdf_mdelay(1);
446 	}
447 
448 	/* Put Target, including PCIe, into RESET. */
449 	val = A_PCIE_LOCAL_REG_READ(mem, SOC_GLOBAL_RESET_ADDRESS);
450 	val |= 1;
451 	A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val);
452 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
453 		if (A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) &
454 		    RTC_STATE_COLD_RESET_MASK)
455 			break;
456 
457 		qdf_mdelay(1);
458 	}
459 
460 	/* Pull Target, including PCIe, out of RESET. */
461 	val &= ~1;
462 	A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val);
463 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
464 		if (!(A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) &
465 		      RTC_STATE_COLD_RESET_MASK))
467 			break;
468 
469 		qdf_mdelay(1);
470 	}
471 
472 	A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
473 }
474 
475 /* CPU warm reset function
476  * Steps:
477  * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
478  * 2. Clear the FW_INDICATOR_ADDRESS - so the Target CPU initializes FW
479  *    correctly on WARM reset
480  * 3. Clear TARGET CPU LF timer interrupt
481  * 4. Reset all CEs to clear any pending CE transactions
482  * 5. Warm reset CPU
483  */
484 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
485 {
486 	void __iomem *mem = sc->mem;
487 	int i;
488 	uint32_t val;
489 	uint32_t fw_indicator;
490 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
491 
492 	/* NB: Don't check resetok here.  This form of reset is
493 	 * integral to correct operation.
494 	 */
495 
496 	if (!mem)
497 		return;
498 
499 	HIF_INFO_MED("%s: Target Warm Reset", __func__);
500 
501 	/*
502 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
503 	 * writing WAKE_V, the Target may scribble over Host memory!
504 	 */
505 	A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS,
506 			       PCIE_SOC_WAKE_V_MASK);
507 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
508 		if (hif_targ_is_awake(scn, mem))
509 			break;
510 		qdf_mdelay(1);
511 	}
512 
513 	/*
514 	 * Disable Pending interrupts
515 	 */
516 	val =
517 		hif_read32_mb(mem +
518 			     (SOC_CORE_BASE_ADDRESS |
519 			      PCIE_INTR_CAUSE_ADDRESS));
520 	HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
521 		    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
522 	/* Target CPU Intr Cause */
523 	val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
524 	HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);
525 
526 	val =
527 		hif_read32_mb(mem +
528 			     (SOC_CORE_BASE_ADDRESS |
529 			      PCIE_INTR_ENABLE_ADDRESS));
530 	hif_write32_mb((mem +
531 		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
532 	hif_write32_mb((mem + (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
533 		      HOST_GROUP0_MASK);
534 
535 	qdf_mdelay(100);
536 
537 	/* Clear FW_INDICATOR_ADDRESS */
538 	if (HAS_FW_INDICATOR) {
539 		fw_indicator = hif_read32_mb(mem + FW_INDICATOR_ADDRESS);
540 		hif_write32_mb(mem + FW_INDICATOR_ADDRESS, 0);
541 	}
542 
543 	/* Clear Target LF Timer interrupts */
544 	val =
545 		hif_read32_mb(mem +
546 			     (RTC_SOC_BASE_ADDRESS +
547 			      SOC_LF_TIMER_CONTROL0_ADDRESS));
548 	HIF_INFO_MED("%s: addr 0x%x :  0x%x", __func__,
549 	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
550 	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
551 	hif_write32_mb(mem +
552 		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
553 		      val);
554 
555 	/* Reset CE */
556 	val =
557 		hif_read32_mb(mem +
558 			     (RTC_SOC_BASE_ADDRESS |
559 			      SOC_RESET_CONTROL_ADDRESS));
560 	val |= SOC_RESET_CONTROL_CE_RST_MASK;
561 	hif_write32_mb((mem +
562 		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
563 		      val);
564 	val =
565 		hif_read32_mb(mem +
566 			     (RTC_SOC_BASE_ADDRESS |
567 			      SOC_RESET_CONTROL_ADDRESS));
568 	qdf_mdelay(10);
569 
570 	/* CE unreset */
571 	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
572 	hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS),
573 		      val);
574 	val =
575 		hif_read32_mb(mem +
576 			     (RTC_SOC_BASE_ADDRESS |
577 			      SOC_RESET_CONTROL_ADDRESS));
578 	qdf_mdelay(10);
579 
580 	/* Read Target CPU Intr Cause */
581 	val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
582 	HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
583 		    __func__, val);
584 
585 	/* CPU warm RESET */
586 	val =
587 		hif_read32_mb(mem +
588 			     (RTC_SOC_BASE_ADDRESS |
589 			      SOC_RESET_CONTROL_ADDRESS));
590 	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
591 	hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS),
592 		      val);
593 	val =
594 		hif_read32_mb(mem +
595 			     (RTC_SOC_BASE_ADDRESS |
596 			      SOC_RESET_CONTROL_ADDRESS));
597 	HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
598 		    __func__, val);
599 
600 	qdf_mdelay(100);
601 	HIF_INFO_MED("%s: Target Warm reset complete", __func__);
602 
603 }
604 
605 #ifndef QCA_WIFI_3_0
606 /* only applicable to legacy ce */
607 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
608 {
609 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
610 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
611 	void __iomem *mem = sc->mem;
612 	uint32_t val;
613 
614 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
615 		return ATH_ISR_NOSCHED;
616 	val = hif_read32_mb(mem + FW_INDICATOR_ADDRESS);
617 	if (Q_TARGET_ACCESS_END(scn) < 0)
618 		return ATH_ISR_SCHED;
619 
620 	HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);
621 
622 	if (val & FW_IND_HELPER)
623 		return 0;
624 
625 	return 1;
626 }
627 #endif
628 
629 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
630 {
631 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
632 	uint16_t device_id = 0;
633 	uint32_t val;
634 	uint16_t timeout_count = 0;
635 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
636 
637 	/* Check device ID from PCIe configuration space for link status */
638 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
639 	if (device_id != sc->devid) {
640 		HIF_ERROR("%s: device ID does match (read 0x%x, expect 0x%x)",
641 			  __func__, device_id, sc->devid);
642 		return -EACCES;
643 	}
644 
645 	/* Check PCIe local register for bar/memory access */
646 	val = hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
647 			   RTC_STATE_ADDRESS);
648 	HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);
649 
650 	/* Try to wake up the target if it sleeps */
651 	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
652 		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
653 	HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
654 		hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
655 		PCIE_SOC_WAKE_ADDRESS));
656 
657 	/* Check if the target can be woken up */
658 	while (!hif_targ_is_awake(scn, sc->mem)) {
659 		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
660 			HIF_ERROR("%s: wake up timeout, %08x, %08x",
661 				__func__,
662 				hif_read32_mb(sc->mem +
663 					     PCIE_LOCAL_BASE_ADDRESS +
664 					     RTC_STATE_ADDRESS),
665 				hif_read32_mb(sc->mem +
666 					     PCIE_LOCAL_BASE_ADDRESS +
667 					PCIE_SOC_WAKE_ADDRESS));
668 			return -EACCES;
669 		}
670 
671 		hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
672 			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
673 
674 		qdf_mdelay(100);
675 		timeout_count += 100;
676 	}
677 
678 	/* Check Power register for SoC internal bus issues */
679 	val =
680 		hif_read32_mb(sc->mem + RTC_SOC_BASE_ADDRESS +
681 			     SOC_POWER_REG_OFFSET);
682 	HIF_INFO_MED("%s: Power register is %08x", __func__, val);
683 
684 	return 0;
685 }
686 
687 /**
688  * __hif_pci_dump_registers(): dump other PCI debug registers
689  * @scn: struct hif_softc
690  *
691  * This function dumps pci debug registers.  The parent function
692  * dumps the copy engine registers before calling this function.
693  *
694  * Return: void
695  */
696 static void __hif_pci_dump_registers(struct hif_softc *scn)
697 {
698 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
699 	void __iomem *mem = sc->mem;
700 	uint32_t val, i, j;
701 	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
702 	uint32_t ce_base;
703 
704 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
705 		return;
706 
707 	/* DEBUG_INPUT_SEL_SRC = 0x6 */
708 	val =
709 		hif_read32_mb(mem + GPIO_BASE_ADDRESS +
710 			     WLAN_DEBUG_INPUT_SEL_OFFSET);
711 	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
712 	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
713 	hif_write32_mb(mem + GPIO_BASE_ADDRESS + WLAN_DEBUG_INPUT_SEL_OFFSET,
714 		      val);
715 
716 	/* DEBUG_CONTROL_ENABLE = 0x1 */
717 	val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
718 			   WLAN_DEBUG_CONTROL_OFFSET);
719 	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
720 	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
721 	hif_write32_mb(mem + GPIO_BASE_ADDRESS +
722 		      WLAN_DEBUG_CONTROL_OFFSET, val);
723 
724 	HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
725 	       hif_read32_mb(mem + GPIO_BASE_ADDRESS +
726 			    WLAN_DEBUG_INPUT_SEL_OFFSET),
727 	       hif_read32_mb(mem + GPIO_BASE_ADDRESS +
728 			    WLAN_DEBUG_CONTROL_OFFSET));
729 
730 	HIF_INFO_MED("%s: Debug CE", __func__);
731 	/* Loop CE debug output */
732 	/* AMBA_DEBUG_BUS_SEL = 0xc */
733 	val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET);
734 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
735 	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
736 	hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val);
737 
738 	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
739 		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
740 		val = hif_read32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
741 				   CE_WRAPPER_DEBUG_OFFSET);
742 		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
743 		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
744 		hif_write32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
745 			      CE_WRAPPER_DEBUG_OFFSET, val);
746 
747 		HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
748 			    __func__, wrapper_idx[i],
749 			    hif_read32_mb(mem + GPIO_BASE_ADDRESS +
750 				AMBA_DEBUG_BUS_OFFSET),
751 			    hif_read32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
752 				CE_WRAPPER_DEBUG_OFFSET));
753 
754 		if (wrapper_idx[i] <= 7) {
755 			for (j = 0; j <= 5; j++) {
756 				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
757 				/* For (j=0~5) write CE_DEBUG_SEL = j */
758 				val =
759 					hif_read32_mb(mem + ce_base +
760 						     CE_DEBUG_OFFSET);
761 				val &= ~CE_DEBUG_SEL_MASK;
762 				val |= CE_DEBUG_SEL_SET(j);
763 				hif_write32_mb(mem + ce_base + CE_DEBUG_OFFSET,
764 					      val);
765 
766 				/* read (@gpio_athr_wlan_reg)
767 				 * WLAN_DEBUG_OUT_DATA
768 				 */
769 				val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
770 						   WLAN_DEBUG_OUT_OFFSET);
771 				val = WLAN_DEBUG_OUT_DATA_GET(val);
772 
773 				HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
774 					    __func__, j,
775 					    hif_read32_mb(mem + ce_base +
776 						    CE_DEBUG_OFFSET), val);
777 			}
778 		} else {
779 			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
780 			val =
781 				hif_read32_mb(mem + GPIO_BASE_ADDRESS +
782 					     WLAN_DEBUG_OUT_OFFSET);
783 			val = WLAN_DEBUG_OUT_DATA_GET(val);
784 
785 			HIF_INFO_MED("%s: out: %x", __func__, val);
786 		}
787 	}
788 
789 	HIF_INFO_MED("%s: Debug PCIe:", __func__);
790 	/* Loop PCIe debug output */
791 	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
792 	val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET);
793 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
794 	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
795 	hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val);
796 
797 	for (i = 0; i <= 8; i++) {
798 		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
799 		val =
800 			hif_read32_mb(mem + GPIO_BASE_ADDRESS +
801 				     AMBA_DEBUG_BUS_OFFSET);
802 		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
803 		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
804 		hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
805 			      val);
806 
807 		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
808 		val =
809 			hif_read32_mb(mem + GPIO_BASE_ADDRESS +
810 				     WLAN_DEBUG_OUT_OFFSET);
811 		val = WLAN_DEBUG_OUT_DATA_GET(val);
812 
813 		HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
814 		       hif_read32_mb(mem + GPIO_BASE_ADDRESS +
815 				    WLAN_DEBUG_OUT_OFFSET), val,
816 		       hif_read32_mb(mem + GPIO_BASE_ADDRESS +
817 				    WLAN_DEBUG_OUT_OFFSET));
818 	}
819 
820 	Q_TARGET_ACCESS_END(scn);
821 }
822 
823 /**
824  * hif_pci_dump_registers(): dump bus debug registers
825  * @hif_ctx: struct hif_softc
826  *
827  * This function dumps hif bus debug registers
828  *
829  * Return: 0 for success or error code
830  */
831 int hif_pci_dump_registers(struct hif_softc *hif_ctx)
832 {
833 	int status;
834 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
835 
836 	status = hif_dump_ce_registers(scn);
837 
838 	if (status)
839 		HIF_ERROR("%s: Dump CE Registers Failed", __func__);
840 
841 	/* dump non copy engine pci registers */
842 	__hif_pci_dump_registers(scn);
843 
844 	return 0;
845 }
846 
847 /*
848  * Handler for a per-engine interrupt on a PARTICULAR CE.
849  * This is used in cases where each CE has a private
850  * MSI interrupt.
851  */
852 static irqreturn_t ce_per_engine_handler(int irq, void *arg)
853 {
854 	int CE_id = irq - MSI_ASSIGN_CE_INITIAL;
855 
856 	/*
857 	 * NOTE: We are able to derive CE_id from irq because we
858 	 * use a one-to-one mapping for CE's 0..5.
859 	 * CE's 6 & 7 do not use interrupts at all.
860 	 *
861 	 * This mapping must be kept in sync with the mapping
862 	 * used by firmware.
863 	 */
864 
865 	ce_per_engine_service(arg, CE_id);
866 
867 	return IRQ_HANDLED;
868 }
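
/*
 * Example (illustrative, assuming MSI_ASSIGN_CE_INITIAL == 1): a per-CE
 * MSI arriving as irq 3 would be serviced as CE_id = 3 - 1 = 2.
 */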
869 
870 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
871 
872 /* worker thread to schedule wlan_tasklet in SLUB debug build */
873 static void reschedule_tasklet_work_handler(void *arg)
874 {
875 	struct hif_pci_softc *sc = arg;
876 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
877 
878 	if (!scn) {
879 		HIF_ERROR("%s: hif_softc is NULL\n", __func__);
880 		return;
881 	}
882 
883 	if (scn->hif_init_done == false) {
884 		HIF_ERROR("%s: wlan driver is unloaded", __func__);
885 		return;
886 	}
887 
888 	tasklet_schedule(&sc->intr_tq);
889 }
890 
891 /**
892  * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
893  * work
894  * @sc: HIF PCI Context
895  *
896  * Return: void
897  */
898 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
899 {
900 	qdf_create_work(0, &sc->reschedule_tasklet_work,
901 				reschedule_tasklet_work_handler, NULL);
902 }
903 #else
904 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
905 #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
906 
907 void wlan_tasklet(unsigned long data)
908 {
909 	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
910 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
911 
912 	if (scn->hif_init_done == false)
913 		goto end;
914 
915 	if (qdf_atomic_read(&scn->link_suspended))
916 		goto end;
917 
918 	if (!ADRASTEA_BU) {
919 		(irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
920 		if (scn->target_status == TARGET_STATUS_RESET)
921 			goto end;
922 	}
923 
924 end:
925 	qdf_atomic_set(&scn->tasklet_from_intr, 0);
926 	qdf_atomic_dec(&scn->active_tasklet_cnt);
927 }
928 
929 #ifdef FEATURE_RUNTIME_PM
930 static const char *hif_pm_runtime_state_to_string(uint32_t state)
931 {
932 	switch (state) {
933 	case HIF_PM_RUNTIME_STATE_NONE:
934 		return "INIT_STATE";
935 	case HIF_PM_RUNTIME_STATE_ON:
936 		return "ON";
937 	case HIF_PM_RUNTIME_STATE_INPROGRESS:
938 		return "INPROGRESS";
939 	case HIF_PM_RUNTIME_STATE_SUSPENDED:
940 		return "SUSPENDED";
941 	default:
942 		return "INVALID STATE";
943 	}
944 }
945 
946 #define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
947 	seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
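
/*
 * Example (illustrative): HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended)
 * expands to:
 *
 *	seq_printf(s, "%30s: %u\n", "suspended", sc->pm_stats.suspended);
 */
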
948 /**
949  * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
950  * @sc: hif_pci_softc context
951  * @msg: log message
952  *
953  * log runtime pm stats when something seems off.
954  *
955  * Return: void
956  */
957 static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
958 {
959 	struct hif_pm_runtime_lock *ctx;
960 
961 	HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
962 			msg, atomic_read(&sc->dev->power.usage_count),
963 			hif_pm_runtime_state_to_string(
964 					atomic_read(&sc->pm_state)),
965 			sc->prevent_suspend_cnt);
966 
967 	HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
968 			sc->dev->power.runtime_status,
969 			sc->dev->power.runtime_error,
970 			sc->dev->power.disable_depth,
971 			sc->dev->power.autosuspend_delay);
972 
973 	HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
974 			sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
975 			sc->pm_stats.request_resume);
976 
977 	HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
978 			sc->pm_stats.allow_suspend,
979 			sc->pm_stats.prevent_suspend);
980 
981 	HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
982 			sc->pm_stats.prevent_suspend_timeout,
983 			sc->pm_stats.allow_suspend_timeout);
984 
985 	HIF_ERROR("Suspended: %u, resumed: %u count",
986 			sc->pm_stats.suspended,
987 			sc->pm_stats.resumed);
988 
989 	HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
990 			sc->pm_stats.suspend_err,
991 			sc->pm_stats.runtime_get_err);
992 
993 	HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");
994 
995 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
996 		HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
997 	}
998 
999 	WARN_ON(1);
1000 }
1001 
1002 /**
1003  * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
1004  * @s: file to print to
1005  * @data: unused
1006  *
1007  * debugging tool added to the debug fs for displaying runtimepm stats
1008  *
1009  * Return: 0
1010  */
1011 static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
1012 {
1013 	struct hif_pci_softc *sc = s->private;
1014 	static const char * const autopm_state[] = {"NONE", "ON", "INPROGRESS",
1015 		"SUSPENDED"};
1016 	unsigned int msecs_age;
1017 	int pm_state = atomic_read(&sc->pm_state);
1018 	unsigned long timer_expires;
1019 	struct hif_pm_runtime_lock *ctx;
1020 
1021 	seq_printf(s, "%30s: %s\n", "Runtime PM state",
1022 			autopm_state[pm_state]);
1023 	seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
1024 			sc->pm_stats.last_resume_caller);
1025 
1026 	if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
1027 		msecs_age = jiffies_to_msecs(
1028 				jiffies - sc->pm_stats.suspend_jiffies);
1029 		seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
1030 				msecs_age / 1000, msecs_age % 1000);
1031 	}
1032 
1033 	seq_printf(s, "%30s: %d\n", "PM Usage count",
1034 			atomic_read(&sc->dev->power.usage_count));
1035 
1036 	seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
1037 			sc->prevent_suspend_cnt);
1038 
1039 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
1040 	HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
1041 	HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
1042 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
1043 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
1044 	HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
1045 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
1046 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
1047 	HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
1048 	HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
1049 	HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);
1050 
1051 	timer_expires = sc->runtime_timer_expires;
1052 	if (timer_expires > 0) {
1053 		msecs_age = jiffies_to_msecs(timer_expires - jiffies);
1054 		seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
1055 				msecs_age / 1000, msecs_age % 1000);
1056 	}
1057 
1058 	spin_lock_bh(&sc->runtime_lock);
1059 	if (list_empty(&sc->prevent_suspend_list)) {
1060 		spin_unlock_bh(&sc->runtime_lock);
1061 		return 0;
1062 	}
1063 
1064 	seq_printf(s, "%30s: ", "Active Wakeup_Sources");
1065 	list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
1066 		seq_printf(s, "%s", ctx->name);
1067 		if (ctx->timeout)
1068 			seq_printf(s, "(%d ms)", ctx->timeout);
1069 		seq_puts(s, " ");
1070 	}
1071 	seq_puts(s, "\n");
1072 	spin_unlock_bh(&sc->runtime_lock);
1073 
1074 	return 0;
1075 }
1076 #undef HIF_PCI_RUNTIME_PM_STATS
1077 
1078 /**
1079  * hif_pci_runtime_pm_open() - open a debugfs file to access the runtime pm stats
1080  * @inode: inode of the debugfs file
1081  * @file: file handle for the debugfs file
1082  *
1083  * Return: linux error code of single_open.
1084  */
1085 static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
1086 {
1087 	return single_open(file, hif_pci_pm_runtime_debugfs_show,
1088 			inode->i_private);
1089 }
1090 
1091 static const struct file_operations hif_pci_runtime_pm_fops = {
1092 	.owner          = THIS_MODULE,
1093 	.open           = hif_pci_runtime_pm_open,
1094 	.release        = single_release,
1095 	.read           = seq_read,
1096 	.llseek         = seq_lseek,
1097 };
1098 
1099 /**
1100  * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
1101  * @sc: pci context
1102  *
1103  * creates a debugfs entry to debug the runtime pm feature.
1104  */
1105 static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
1106 {
1107 	sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
1108 					0400, NULL, sc,
1109 					&hif_pci_runtime_pm_fops);
1110 }
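
/*
 * Example (illustrative): the file is created at the debugfs root with
 * mode 0400, so assuming debugfs is mounted at the usual location, the
 * stats can be read from a root shell with:
 *
 *	cat /sys/kernel/debug/cnss_runtime_pm
 */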
1111 
1112 /**
1113  * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
1114  * @sc: pci context
1115  *
1116  * removes the debugfs entry to debug the runtime pm feature.
1117  */
1118 static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
1119 {
1120 	debugfs_remove(sc->pm_dentry);
1121 }
1122 
1123 static void hif_runtime_init(struct device *dev, int delay)
1124 {
1125 	pm_runtime_set_autosuspend_delay(dev, delay);
1126 	pm_runtime_use_autosuspend(dev);
1127 	pm_runtime_allow(dev);
1128 	pm_runtime_mark_last_busy(dev);
1129 	pm_runtime_put_noidle(dev);
1130 	pm_suspend_ignore_children(dev, true);
1131 }
1132 
1133 static void hif_runtime_exit(struct device *dev)
1134 {
1135 	pm_runtime_get_noresume(dev);
1136 	pm_runtime_set_active(dev);
1137 }
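
/*
 * Example (illustrative): pm_runtime_put_noidle() above drops a usage
 * count reference without forcing an idle check. A runtime-pm client
 * would then normally bracket bus activity with a get/put pair so the
 * autosuspend timer is re-armed when the device goes idle:
 *
 *	pm_runtime_get_sync(dev);
 *	(access hardware)
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */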
1138 
1139 static void hif_pm_runtime_lock_timeout_fn(unsigned long data);
1140 
1141 /**
1142  * hif_pm_runtime_start(): start the runtime pm
1143  * @sc: pci context
1144  *
1145  * After this call, runtime pm will be active.
1146  */
1147 static void hif_pm_runtime_start(struct hif_pci_softc *sc)
1148 {
1149 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1150 	uint32_t mode = hif_get_conparam(ol_sc);
1151 
1152 	if (!ol_sc->hif_config.enable_runtime_pm) {
1153 		HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
1154 		return;
1155 	}
1156 
1157 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode)) {
1158 		HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
1159 				__func__);
1160 		return;
1161 	}
1162 
1163 	setup_timer(&sc->runtime_timer, hif_pm_runtime_lock_timeout_fn,
1164 			(unsigned long)sc);
1165 
1166 	HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
1167 			ol_sc->hif_config.runtime_pm_delay);
1168 
1169 	hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
1170 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
1171 	hif_runtime_pm_debugfs_create(sc);
1172 }
1173 
1174 /**
1175  * hif_pm_runtime_stop(): stop runtime pm
1176  * @sc: pci context
1177  *
1178  * Turns off runtime pm and frees corresponding resources
1179  * that were acquired by hif_runtime_pm_start().
1180  */
1181 static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
1182 {
1183 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1184 	uint32_t mode = hif_get_conparam(ol_sc);
1185 
1186 	if (!ol_sc->hif_config.enable_runtime_pm)
1187 		return;
1188 
1189 	if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode))
1190 		return;
1191 
1192 	hif_runtime_exit(sc->dev);
1193 	hif_pm_runtime_resume(sc->dev);
1194 
1195 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1196 
1197 	hif_runtime_pm_debugfs_remove(sc);
1198 	del_timer_sync(&sc->runtime_timer);
1199 	/* doesn't wait for pending traffic, unlike cld-2.0 */
1200 }
1201 
1202 /**
1203  * hif_pm_runtime_open(): initialize runtime pm
1204  * @sc: pci data structure
1205  *
1206  * Early initialization
1207  */
1208 static void hif_pm_runtime_open(struct hif_pci_softc *sc)
1209 {
1210 	spin_lock_init(&sc->runtime_lock);
1211 
1212 	qdf_atomic_init(&sc->pm_state);
1213 	qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
1214 	qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
1215 	INIT_LIST_HEAD(&sc->prevent_suspend_list);
1216 }
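
/*
 * Illustrative lifecycle (as inferred from this file): hif_pci_open()
 * calls hif_pm_runtime_open() early, hif_pci_enable_power_management()
 * starts the feature via hif_pm_runtime_start(), and teardown goes
 * through hif_pm_runtime_stop()/hif_pm_runtime_close().
 */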
1217 
1218 /**
1219  * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
1220  * @sc: pci context
1221  *
1222  * Ensure we have only one vote against runtime suspend before closing
1223  * the runtime suspend feature.
1224  *
1225  * All gets by the wlan driver should have been returned;
1226  * one vote should remain as part of cnss_runtime_exit.
1227  *
1228  * Needs to be revisited if we share the root complex.
1229  */
1230 static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
1231 {
1232 	struct hif_pm_runtime_lock *ctx, *tmp;
1233 
1234 	if (atomic_read(&sc->dev->power.usage_count) != 1)
1235 		hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
1236 	else
1237 		return;
1238 
1239 	spin_lock_bh(&sc->runtime_lock);
1240 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1241 		spin_unlock_bh(&sc->runtime_lock);
1242 		hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
1243 		spin_lock_bh(&sc->runtime_lock);
1244 	}
1245 	spin_unlock_bh(&sc->runtime_lock);
1246 
1247 	/* Ensure one and only one usage count, so that when the wlan
1248 	 * driver is re-insmodded runtime pm won't be disabled; this
1249 	 * also ensures runtime pm doesn't get broken by the count
1250 	 * dropping below 1.
1251 	 */
1252 	if (atomic_read(&sc->dev->power.usage_count) <= 0)
1253 		atomic_set(&sc->dev->power.usage_count, 1);
1254 	while (atomic_read(&sc->dev->power.usage_count) > 1)
1255 		hif_pm_runtime_put_auto(sc->dev);
1256 }
1257 
1258 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
1259 					  struct hif_pm_runtime_lock *lock);
1260 
1261 /**
1262  * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
1263  * @sc: PCIe Context
1264  *
1265  * API is used to empty the runtime pm prevent suspend list.
1266  *
1267  * Return: void
1268  */
1269 static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
1270 {
1271 	struct hif_pm_runtime_lock *ctx, *tmp;
1272 
1273 	spin_lock_bh(&sc->runtime_lock);
1274 	list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
1275 		__hif_pm_runtime_allow_suspend(sc, ctx);
1276 	}
1277 	spin_unlock_bh(&sc->runtime_lock);
1278 }
1279 
1280 /**
1281  * hif_pm_runtime_close(): close runtime pm
1282  * @sc: pci bus handle
1283  *
1284  * ensure runtime_pm is stopped before closing the driver
1285  */
1286 static void hif_pm_runtime_close(struct hif_pci_softc *sc)
1287 {
1288 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1289 
1290 	qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
1291 	if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
1292 		return;
1293 
1294 	hif_pm_runtime_stop(sc);
1295 
1296 	hif_is_recovery_in_progress(scn) ?
1297 		hif_pm_runtime_sanitize_on_ssr_exit(sc) :
1298 		hif_pm_runtime_sanitize_on_exit(sc);
1299 }
1300 #else
1301 static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
1302 static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
1303 static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
1304 static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
1305 #endif
1306 
1307 /**
1308  * hif_disable_power_gating() - disable HW power gating
1309  * @hif_ctx: hif context
1310  *
1311  * disables pcie L1 power states
1312  */
1313 static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1314 {
1315 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1316 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1317 
1318 	if (NULL == scn) {
1319 		HIF_ERROR("%s: Could not disable ASPM, scn is null",
1320 		       __func__);
1321 		return;
1322 	}
1323 
1324 	/* Disable ASPM when pkt log is enabled */
1325 	pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1326 	pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1327 }
1328 
1329 /**
1330  * hif_enable_power_gating() - enable HW power gating
1331  * @sc: pci context
1332  *
1333  * enables pcie L1 power states
1334  */
1335 static void hif_enable_power_gating(struct hif_pci_softc *sc)
1336 {
1337 	if (NULL == sc) {
1338 		HIF_ERROR("%s: Could not re-enable ASPM, sc is null",
1339 		       __func__);
1340 		return;
1341 	}
1342 
1343 	/* Re-enable ASPM after firmware/OTP download is complete */
1344 	pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1345 }
1346 
1347 /**
1348  * hif_pci_enable_power_management() - enable power management
1349  * @hif_sc: hif context
 * @is_packet_log_enabled: true if packet log is enabled
1350  *
1351  * Enables runtime pm and ASPM (via hif_enable_power_gating), and re-enables
1352  * soc sleep after driver load (hif_pci_target_sleep_state_adjust).
1353  *
1354  * note: epping mode does not call this function as it does not
1355  *       care about saving power.
1356  */
1357 void hif_pci_enable_power_management(struct hif_softc *hif_sc,
1358 				 bool is_packet_log_enabled)
1359 {
1360 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
1361 
1362 	if (pci_ctx == NULL) {
1363 		HIF_ERROR("%s, hif_ctx null", __func__);
1364 		return;
1365 	}
1366 
1367 	hif_pm_runtime_start(pci_ctx);
1368 
1369 	if (!is_packet_log_enabled)
1370 		hif_enable_power_gating(pci_ctx);
1371 
1372 	if (!CONFIG_ATH_PCIE_MAX_PERF &&
1373 	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
1374 	    !ce_srng_based(hif_sc)) {
1375 		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
1376 		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
1377 			HIF_ERROR("%s, failed to set target to sleep",
1378 				  __func__);
1379 	}
1380 }
1381 
1382 /**
1383  * hif_pci_disable_power_management() - disable power management
1384  * @hif_ctx: hif context
1385  *
1386  * Currently disables runtime pm. Should be updated to behave gracefully
1387  * if runtime pm has not been started, and to take care of ASPM and
1388  * soc sleep for driver load.
1389  */
1390 void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
1391 {
1392 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1393 
1394 	if (pci_ctx == NULL) {
1395 		HIF_ERROR("%s, hif_ctx null", __func__);
1396 		return;
1397 	}
1398 
1399 	hif_pm_runtime_stop(pci_ctx);
1400 }
1401 
1402 void hif_pci_display_stats(struct hif_softc *hif_ctx)
1403 {
1404 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1405 
1406 	if (pci_ctx == NULL) {
1407 		HIF_ERROR("%s, hif_ctx null", __func__);
1408 		return;
1409 	}
1410 	hif_display_ce_stats(&pci_ctx->ce_sc);
1411 }
1412 
1413 void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1414 {
1415 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1416 
1417 	if (pci_ctx == NULL) {
1418 		HIF_ERROR("%s, hif_ctx null", __func__);
1419 		return;
1420 	}
1421 	hif_clear_ce_stats(&pci_ctx->ce_sc);
1422 }
1423 
1424 #define ATH_PCI_PROBE_RETRY_MAX 3
1425 /**
1426  * hif_pci_open(): open the PCI bus instance
1427  * @hif_ctx: hif context
1428  * @bus_type: bus type
1429  *
1430  * Return: QDF_STATUS
1431  */
1432 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
1433 {
1434 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1435 
1436 	hif_ctx->bus_type = bus_type;
1437 	hif_pm_runtime_open(sc);
1438 
1439 	qdf_spinlock_create(&sc->irq_lock);
1440 
1441 	return hif_ce_open(hif_ctx);
1442 }
1443 
1444 /**
1445  * hif_wake_target_cpu() - wake the target's cpu
1446  * @scn: hif context
1447  *
1448  * Send an interrupt to the device to wake up the Target CPU
1449  * so it has an opportunity to notice any changed state.
1450  */
1451 static void hif_wake_target_cpu(struct hif_softc *scn)
1452 {
1453 	QDF_STATUS rv;
1454 	uint32_t core_ctrl;
1455 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1456 
1457 	rv = hif_diag_read_access(hif_hdl,
1458 				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1459 				  &core_ctrl);
1460 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1461 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1462 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1463 
1464 	rv = hif_diag_write_access(hif_hdl,
1465 				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1466 				   core_ctrl);
1467 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1468 }
1469 
1470 /**
1471  * soc_wake_reset() - allow the target to go to sleep
1472  * @scn: hif_softc
1473  *
1474  * Clear the force wake register.  This is done from
1475  * hif_sleep_entry() and when canceling the deferred sleep timer.
1476  */
1477 static void soc_wake_reset(struct hif_softc *scn)
1478 {
1479 	hif_write32_mb(scn->mem +
1480 		PCIE_LOCAL_BASE_ADDRESS +
1481 		PCIE_SOC_WAKE_ADDRESS,
1482 		PCIE_SOC_WAKE_RESET);
1483 }
1484 
1485 /**
1486  * hif_sleep_entry() - gate target sleep
1487  * @arg: hif context
1488  *
1489  * This function is the callback for the sleep timer.
1490  * Check if the last force awake critical section ended at least
1491  * HIF_MIN_SLEEP_INACTIVITY_TIME_MS ago.  If it did,
1492  * allow the target to go to sleep and cancel the sleep timer;
1493  * otherwise reschedule the sleep timer.
1494  */
1495 static void hif_sleep_entry(void *arg)
1496 {
1497 	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1498 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1499 	uint32_t idle_ms;
1500 
1501 	if (scn->recovery)
1502 		return;
1503 
1504 	if (hif_is_driver_unloading(scn))
1505 		return;
1506 
1507 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1508 	if (hif_state->verified_awake == false) {
1509 		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1510 						    - hif_state->sleep_ticks);
1511 		if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1512 			if (!qdf_atomic_read(&scn->link_suspended)) {
1513 				soc_wake_reset(scn);
1514 				hif_state->fake_sleep = false;
1515 			}
1516 		} else {
1517 			qdf_timer_stop(&hif_state->sleep_timer);
1518 			qdf_timer_start(&hif_state->sleep_timer,
1519 				    HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1520 		}
1521 	} else {
1522 		qdf_timer_stop(&hif_state->sleep_timer);
1523 		qdf_timer_start(&hif_state->sleep_timer,
1524 					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1525 	}
1526 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1527 }
1528 
1529 #define HIF_HIA_MAX_POLL_LOOP    1000000
1530 #define HIF_HIA_POLLING_DELAY_MS 10
1531 
1532 #ifdef CONFIG_WIN
1533 static void hif_set_hia_extnd(struct hif_softc *scn)
1534 {
1535 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1536 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1537 	uint32_t target_type = tgt_info->target_type;
1538 
1539 	HIF_TRACE("%s: E", __func__);
1540 
1541 	if ((target_type == TARGET_TYPE_AR900B) ||
1542 			target_type == TARGET_TYPE_QCA9984 ||
1543 			target_type == TARGET_TYPE_QCA9888) {
1544 		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
1545 		 * in RTC space
1546 		 */
1547 		tgt_info->target_revision
1548 			= CHIP_ID_REVISION_GET(hif_read32_mb(scn->mem
1549 					+ CHIP_ID_ADDRESS));
1550 		qdf_print(KERN_INFO"chip_id 0x%x chip_revision 0x%x\n",
1551 			target_type, tgt_info->target_revision);
1552 	}
1553 
1554 	{
1555 		uint32_t flag2_value = 0;
1556 		uint32_t flag2_targ_addr =
1557 			host_interest_item_address(target_type,
1558 			offsetof(struct host_interest_s, hi_skip_clock_init));
1559 
1560 		if ((ar900b_20_targ_clk != -1) &&
1561 			(frac != -1) && (intval != -1)) {
1562 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1563 				&flag2_value);
1564 			qdf_print("\n Setting clk_override\n");
1565 			flag2_value |= CLOCK_OVERRIDE;
1566 
1567 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1568 					flag2_value);
1569 			qdf_print("\n CLOCK PLL val set %d\n", flag2_value);
1570 		} else {
1571 			qdf_print(KERN_INFO"\n CLOCK PLL skipped\n");
1572 		}
1573 	}
1574 
1575 	if (target_type == TARGET_TYPE_AR900B
1576 			|| target_type == TARGET_TYPE_QCA9984
1577 			|| target_type == TARGET_TYPE_QCA9888) {
1578 
1579 		/* For AR9980_2.0 a 300 MHz clock is used; right now we assume
1580 		 * it is supplied through module parameters. If it is not
1581 		 * supplied, assume the default or the same behavior as 1.0.
1582 		 * Assume the 1.0 clock can't be tuned; reset to defaults.
1583 		 */
1584 
1585 		qdf_print(KERN_INFO
1586 			  "%s: setting the target pll frac %x intval %x\n",
1587 			  __func__, frac, intval);
1588 
1589 		/* do not touch frac, and int val, let them be default -1,
1590 		 * if desired, host can supply these through module params
1591 		 */
1592 		if (frac != -1 || intval != -1) {
1593 			uint32_t flag2_value = 0;
1594 			uint32_t flag2_targ_addr;
1595 
1596 			flag2_targ_addr =
1597 				host_interest_item_address(target_type,
1598 				offsetof(struct host_interest_s,
1599 					hi_clock_info));
1600 			hif_diag_read_access(hif_hdl,
1601 				flag2_targ_addr, &flag2_value);
1602 			qdf_print("\n ====> FRAC Val %x Address %x\n", frac,
1603 				flag2_value);
1604 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1605 			qdf_print("\n INT Val %x  Address %x\n",
1606 				intval, flag2_value + 4);
1607 			hif_diag_write_access(hif_hdl,
1608 					flag2_value + 4, intval);
1609 		} else {
1610 			qdf_print(KERN_INFO
1611 				  "%s: no frac provided, skipping pre-configuring PLL\n",
1612 				  __func__);
1613 		}
1614 
1615 		/* for 2.0 write 300 MHz into hi_desired_cpu_speed_hz */
1616 		if ((target_type == TARGET_TYPE_AR900B)
1617 			&& (tgt_info->target_revision == AR900B_REV_2)
1618 			&& ar900b_20_targ_clk != -1) {
1619 			uint32_t flag2_value = 0;
1620 			uint32_t flag2_targ_addr;
1621 
1622 			flag2_targ_addr
1623 				= host_interest_item_address(target_type,
1624 					offsetof(struct host_interest_s,
1625 					hi_desired_cpu_speed_hz));
1626 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1627 							&flag2_value);
1628 			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x\n",
1629 				  flag2_value);
1630 			hif_diag_write_access(hif_hdl, flag2_value,
1631 				ar900b_20_targ_clk/*300000000u*/);
1632 		} else if (target_type == TARGET_TYPE_QCA9888) {
1633 			uint32_t flag2_targ_addr;
1634 
1635 			if (200000000u != qca9888_20_targ_clk) {
1636 				qca9888_20_targ_clk = 300000000u;
1637 				/* Setting the target clock speed to 300 MHz */
1638 			}
1639 
1640 			flag2_targ_addr
1641 				= host_interest_item_address(target_type,
1642 					offsetof(struct host_interest_s,
1643 					hi_desired_cpu_speed_hz));
1644 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1645 				qca9888_20_targ_clk);
1646 		} else {
1647 			qdf_print(KERN_INFO"%s: targ_clk is not provided, skipping pre-configuring PLL\n",
1648 				  __func__);
1649 		}
1650 	} else {
1651 		if (frac != -1 || intval != -1) {
1652 			uint32_t flag2_value = 0;
1653 			uint32_t flag2_targ_addr =
1654 				host_interest_item_address(target_type,
1655 					offsetof(struct host_interest_s,
1656 							hi_clock_info));
1657 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1658 						&flag2_value);
1659 			qdf_print("\n ====> FRAC Val %x Address %x\n", frac,
1660 							flag2_value);
1661 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1662 			qdf_print("\n INT Val %x  Address %x\n", intval,
1663 							flag2_value + 4);
1664 			hif_diag_write_access(hif_hdl, flag2_value + 4,
1665 					intval);
1666 		}
1667 	}
1668 }
1669 
1670 #else
1671 
1672 static void hif_set_hia_extnd(struct hif_softc *scn)
1673 {
1674 }
1675 
1676 #endif
1677 
1678 /**
1679  * hif_set_hia() - fill out the host interest area
1680  * @scn: hif context
1681  *
1682  * This is replaced by hif_wlan_enable for integrated targets.
1683  * This fills out the host interest area.  The firmware will
1684  * process these memory addresses when it is first brought out
1685  * of reset.
1686  *
1687  * Return: 0 for success.
1688  */
1689 static int hif_set_hia(struct hif_softc *scn)
1690 {
1691 	QDF_STATUS rv;
1692 	uint32_t interconnect_targ_addr = 0;
1693 	uint32_t pcie_state_targ_addr = 0;
1694 	uint32_t pipe_cfg_targ_addr = 0;
1695 	uint32_t svc_to_pipe_map = 0;
1696 	uint32_t pcie_config_flags = 0;
1697 	uint32_t flag2_value = 0;
1698 	uint32_t flag2_targ_addr = 0;
1699 #ifdef QCA_WIFI_3_0
1700 	uint32_t host_interest_area = 0;
1701 	uint8_t i;
1702 #else
1703 	uint32_t ealloc_value = 0;
1704 	uint32_t ealloc_targ_addr = 0;
1705 	uint8_t banks_switched = 1;
1706 	uint32_t chip_id;
1707 #endif
1708 	uint32_t pipe_cfg_addr;
1709 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1710 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1711 	uint32_t target_type = tgt_info->target_type;
1712 	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
1713 	static struct CE_pipe_config *target_ce_config;
1714 	struct service_to_pipe *target_service_to_ce_map;
1715 
1716 	HIF_TRACE("%s: E", __func__);
1717 
1718 	hif_get_target_ce_config(scn,
1719 				 &target_ce_config, &target_ce_config_sz,
1720 				 &target_service_to_ce_map,
1721 				 &target_service_to_ce_map_sz,
1722 				 NULL, NULL);
1723 
1724 	if (ADRASTEA_BU)
1725 		return QDF_STATUS_SUCCESS;
1726 
1727 #ifdef QCA_WIFI_3_0
1728 	i = 0;
1729 	while (i < HIF_HIA_MAX_POLL_LOOP) {
1730 		host_interest_area = hif_read32_mb(scn->mem +
1731 						A_SOC_CORE_SCRATCH_0_ADDRESS);
1732 		if ((host_interest_area & 0x01) == 0) {
1733 			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1734 			host_interest_area = 0;
1735 			i++;
1736 			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1737 				HIF_ERROR("%s: poll timeout(%d)", __func__, i);
1738 		} else {
1739 			host_interest_area &= (~0x01);
1740 			hif_write32_mb(scn->mem + 0x113014, 0);
1741 			break;
1742 		}
1743 	}
1744 
1745 	if (i >= HIF_HIA_MAX_POLL_LOOP) {
1746 		HIF_ERROR("%s: hia polling timeout", __func__);
1747 		return -EIO;
1748 	}
1749 
1750 	if (host_interest_area == 0) {
1751 		HIF_ERROR("%s: host_interest_area = 0", __func__);
1752 		return -EIO;
1753 	}
1754 
1755 	interconnect_targ_addr = host_interest_area +
1756 			offsetof(struct host_interest_area_t,
1757 			hi_interconnect_state);
1758 
1759 	flag2_targ_addr = host_interest_area +
1760 			offsetof(struct host_interest_area_t, hi_option_flag2);
1761 
1762 #else
1763 	interconnect_targ_addr = hif_hia_item_address(target_type,
1764 		offsetof(struct host_interest_s, hi_interconnect_state));
1765 	ealloc_targ_addr = hif_hia_item_address(target_type,
1766 		offsetof(struct host_interest_s, hi_early_alloc));
1767 	flag2_targ_addr = hif_hia_item_address(target_type,
1768 		offsetof(struct host_interest_s, hi_option_flag2));
1769 #endif
1770 	/* Supply Target-side CE configuration */
1771 	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1772 			  &pcie_state_targ_addr);
1773 	if (rv != QDF_STATUS_SUCCESS) {
1774 		HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
1775 			  __func__, interconnect_targ_addr, rv);
1776 		goto done;
1777 	}
1778 	if (pcie_state_targ_addr == 0) {
1779 		rv = QDF_STATUS_E_FAILURE;
1780 		HIF_ERROR("%s: pcie state addr is 0", __func__);
1781 		goto done;
1782 	}
1783 	pipe_cfg_addr = pcie_state_targ_addr +
1784 			  offsetof(struct pcie_state_s,
1785 			  pipe_cfg_addr);
1786 	rv = hif_diag_read_access(hif_hdl,
1787 			  pipe_cfg_addr,
1788 			  &pipe_cfg_targ_addr);
1789 	if (rv != QDF_STATUS_SUCCESS) {
1790 		HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
1791 			__func__, pipe_cfg_addr, rv);
1792 		goto done;
1793 	}
1794 	if (pipe_cfg_targ_addr == 0) {
1795 		rv = QDF_STATUS_E_FAILURE;
1796 		HIF_ERROR("%s: pipe cfg addr is 0", __func__);
1797 		goto done;
1798 	}
1799 
1800 	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1801 			(uint8_t *) target_ce_config,
1802 			target_ce_config_sz);
1803 
1804 	if (rv != QDF_STATUS_SUCCESS) {
1805 		HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
1806 		goto done;
1807 	}
1808 
1809 	rv = hif_diag_read_access(hif_hdl,
1810 			  pcie_state_targ_addr +
1811 			  offsetof(struct pcie_state_s,
1812 			   svc_to_pipe_map),
1813 			  &svc_to_pipe_map);
1814 	if (rv != QDF_STATUS_SUCCESS) {
1815 		HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
1816 		goto done;
1817 	}
1818 	if (svc_to_pipe_map == 0) {
1819 		rv = QDF_STATUS_E_FAILURE;
1820 		HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
1821 		goto done;
1822 	}
1823 
1824 	rv = hif_diag_write_mem(hif_hdl,
1825 			svc_to_pipe_map,
1826 			(uint8_t *) target_service_to_ce_map,
1827 			target_service_to_ce_map_sz);
1828 	if (rv != QDF_STATUS_SUCCESS) {
1829 		HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
1830 		goto done;
1831 	}
1832 
1833 	rv = hif_diag_read_access(hif_hdl,
1834 			pcie_state_targ_addr +
1835 			offsetof(struct pcie_state_s,
1836 			config_flags),
1837 			&pcie_config_flags);
1838 	if (rv != QDF_STATUS_SUCCESS) {
1839 		HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
1840 		goto done;
1841 	}
1842 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1843 	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1844 #else
1845 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1846 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1847 	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1848 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1849 	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1850 #endif
1851 	rv = hif_diag_write_mem(hif_hdl,
1852 			pcie_state_targ_addr +
1853 			offsetof(struct pcie_state_s,
1854 			config_flags),
1855 			(uint8_t *) &pcie_config_flags,
1856 			sizeof(pcie_config_flags));
1857 	if (rv != QDF_STATUS_SUCCESS) {
1858 		HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
1859 		goto done;
1860 	}
1861 
1862 #ifndef QCA_WIFI_3_0
1863 	/* configure early allocation */
1864 	ealloc_targ_addr = hif_hia_item_address(target_type,
1865 						offsetof(
1866 						struct host_interest_s,
1867 						hi_early_alloc));
1868 
1869 	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1870 			&ealloc_value);
1871 	if (rv != QDF_STATUS_SUCCESS) {
1872 		HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
1873 		goto done;
1874 	}
1875 
1876 	/* 1 bank is switched to IRAM, except ROME 1.0 */
1877 	ealloc_value |=
1878 		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1879 		 HI_EARLY_ALLOC_MAGIC_MASK);
1880 
1881 	rv = hif_diag_read_access(hif_hdl,
1882 			  CHIP_ID_ADDRESS |
1883 			  RTC_SOC_BASE_ADDRESS, &chip_id);
1884 	if (rv != QDF_STATUS_SUCCESS) {
1885 		HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
1886 		goto done;
1887 	}
1888 	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1889 		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1890 		switch (CHIP_ID_REVISION_GET(chip_id)) {
1891 		case 0x2:       /* ROME 1.3 */
1892 			/* 2 banks are switched to IRAM */
1893 			banks_switched = 2;
1894 			break;
1895 		case 0x4:       /* ROME 2.1 */
1896 		case 0x5:       /* ROME 2.2 */
1897 			banks_switched = 6;
1898 			break;
1899 		case 0x8:       /* ROME 3.0 */
1900 		case 0x9:       /* ROME 3.1 */
1901 		case 0xA:       /* ROME 3.2 */
1902 			banks_switched = 9;
1903 			break;
1904 		case 0x0:       /* ROME 1.0 */
1905 		case 0x1:       /* ROME 1.1 */
1906 		default:
1907 			/* 3 banks are switched to IRAM */
1908 			banks_switched = 3;
1909 			break;
1910 		}
1911 	}
1912 
1913 	ealloc_value |=
1914 		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1915 		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
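	/*
	 * Worked example (values from the switch above): for ROME 2.1
	 * (chip revision 0x4), banks_switched is 6, so hi_early_alloc
	 * carries (HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT)
	 * in the magic field and (6 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
	 * in the IRAM banks field, each masked by its field mask.
	 */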
1916 
1917 	rv = hif_diag_write_access(hif_hdl,
1918 				ealloc_targ_addr,
1919 				ealloc_value);
1920 	if (rv != QDF_STATUS_SUCCESS) {
1921 		HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
1922 		goto done;
1923 	}
1924 #endif
1925 	if ((target_type == TARGET_TYPE_AR900B)
1926 			|| (target_type == TARGET_TYPE_QCA9984)
1927 			|| (target_type == TARGET_TYPE_QCA9888)
1928 			|| (target_type == TARGET_TYPE_AR9888)) {
1929 		hif_set_hia_extnd(scn);
1930 	}
1931 
1932 	/* Tell Target to proceed with initialization */
1933 	flag2_targ_addr = hif_hia_item_address(target_type,
1934 						offsetof(
1935 						struct host_interest_s,
1936 						hi_option_flag2));
1937 
1938 	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1939 			  &flag2_value);
1940 	if (rv != QDF_STATUS_SUCCESS) {
1941 		HIF_ERROR("%s: get option val (%d)", __func__, rv);
1942 		goto done;
1943 	}
1944 
1945 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1946 	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1947 			   flag2_value);
1948 	if (rv != QDF_STATUS_SUCCESS) {
1949 		HIF_ERROR("%s: set option val (%d)", __func__, rv);
1950 		goto done;
1951 	}
1952 
1953 	hif_wake_target_cpu(scn);
1954 
1955 done:
1956 
1957 	return rv;
1958 }
1959 
1960 /**
 * hif_pci_bus_configure() - configure the pcie bus
 * @hif_sc: pointer to the hif context.
 *
 * Return: 0 for success. nonzero for failure.
1965  */
1966 int hif_pci_bus_configure(struct hif_softc *hif_sc)
1967 {
1968 	int status = 0;
1969 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1970 	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
1971 
1972 	hif_ce_prepare_config(hif_sc);
1973 
1974 	/* initialize sleep state adjust variables */
1975 	hif_state->sleep_timer_init = true;
1976 	hif_state->keep_awake_count = 0;
1977 	hif_state->fake_sleep = false;
1978 	hif_state->sleep_ticks = 0;
1979 
1980 	qdf_timer_init(NULL, &hif_state->sleep_timer,
1981 			       hif_sleep_entry, (void *)hif_state,
1982 			       QDF_TIMER_TYPE_WAKE_APPS);
1983 	hif_state->sleep_timer_init = true;
1984 
1985 	status = hif_wlan_enable(hif_sc);
1986 	if (status) {
1987 		HIF_ERROR("%s: hif_wlan_enable error = %d",
1988 			  __func__, status);
1989 		goto timer_free;
1990 	}
1991 
1992 	A_TARGET_ACCESS_LIKELY(hif_sc);
1993 
1994 	if ((CONFIG_ATH_PCIE_MAX_PERF ||
1995 	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
1996 	    !ce_srng_based(hif_sc)) {
1997 		/*
1998 		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
1999 		 * prevent sleep when we want to keep firmware always awake
2000 		 * note: when we want to keep firmware always awake,
2001 		 *       hif_target_sleep_state_adjust will point to a dummy
2002 		 *       function, and hif_pci_target_sleep_state_adjust must
2003 		 *       be called instead.
2004 		 * note: bus type check is here because AHB bus is reusing
2005 		 *       hif_pci_bus_configure code.
2006 		 */
2007 		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
2008 			if (hif_pci_target_sleep_state_adjust(hif_sc,
2009 					false, true) < 0) {
2010 				status = -EACCES;
2011 				goto disable_wlan;
2012 			}
2013 		}
2014 	}
2015 
2016 	/* todo: consider replacing this with an srng field */
2017 	if ((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) &&
2018 			(hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
2019 		hif_sc->per_ce_irq = true;
2020 	}
2021 
2022 	status = hif_config_ce(hif_sc);
2023 	if (status)
2024 		goto disable_wlan;
2025 
	/* QCA_WIFI_QCA8074_VP: Should not be executed on 8074 VP platform */
2027 	if (hif_needs_bmi(hif_osc)) {
2028 		status = hif_set_hia(hif_sc);
2029 		if (status)
2030 			goto unconfig_ce;
2031 
2032 		HIF_INFO_MED("%s: hif_set_hia done", __func__);
2033 
2034 		hif_register_bmi_callbacks(hif_sc);
2035 	}
2036 
2037 	if ((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) &&
2038 			(hif_sc->bus_type == QDF_BUS_TYPE_PCI))
2039 		HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
2040 						__func__);
2041 	else {
2042 		status = hif_configure_irq(hif_sc);
2043 		if (status < 0)
2044 			goto unconfig_ce;
2045 	}
2046 
2047 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2048 
2049 	return status;
2050 
2051 unconfig_ce:
2052 	hif_unconfig_ce(hif_sc);
2053 disable_wlan:
2054 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
2055 	hif_wlan_disable(hif_sc);
2056 
2057 timer_free:
2058 	qdf_timer_stop(&hif_state->sleep_timer);
2059 	qdf_timer_free(&hif_state->sleep_timer);
2060 	hif_state->sleep_timer_init = false;
2061 
2062 	HIF_ERROR("%s: failed, status = %d", __func__, status);
2063 	return status;
2064 }
2065 
2066 /**
 * hif_pci_close() - close the pcie bus
 * @hif_sc: hif context
2069  * Return: n/a
2070  */
2071 void hif_pci_close(struct hif_softc *hif_sc)
2072 {
2073 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
2074 
2075 	hif_pm_runtime_close(hif_pci_sc);
2076 	hif_ce_close(hif_sc);
2077 }
2078 
2079 #define BAR_NUM 0
2080 
2081 #ifndef CONFIG_PLD_PCIE_INIT
2082 static int hif_enable_pci(struct hif_pci_softc *sc,
2083 			  struct pci_dev *pdev,
2084 			  const struct pci_device_id *id)
2085 {
2086 	void __iomem *mem;
2087 	int ret = 0;
2088 	uint16_t device_id = 0;
2089 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2090 
2091 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
2092 	if (device_id != id->device)  {
2093 		HIF_ERROR(
2094 		   "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
2095 		   __func__, device_id, id->device);
		/* pci link is down, so returning with error code */
2097 		return -EIO;
2098 	}
2099 
2100 	/* FIXME: temp. commenting out assign_resource
2101 	 * call for dev_attach to work on 2.6.38 kernel
2102 	 */
2103 #if (!defined(__LINUX_ARM_ARCH__))
2104 	if (pci_assign_resource(pdev, BAR_NUM)) {
2105 		HIF_ERROR("%s: pci_assign_resource error", __func__);
2106 		return -EIO;
2107 	}
2108 #endif
2109 	if (pci_enable_device(pdev)) {
2110 		HIF_ERROR("%s: pci_enable_device error",
2111 			   __func__);
2112 		return -EIO;
2113 	}
2114 
2115 	/* Request MMIO resources */
2116 	ret = pci_request_region(pdev, BAR_NUM, "ath");
2117 	if (ret) {
2118 		HIF_ERROR("%s: PCI MMIO reservation error", __func__);
2119 		ret = -EIO;
2120 		goto err_region;
2121 	}
2122 
2123 #ifdef CONFIG_ARM_LPAE
	/* if CONFIG_ARM_LPAE is enabled, we must set the 64-bit DMA mask
	 * even for 32-bit devices.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2128 	if (ret) {
2129 		HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
2130 		goto err_dma;
2131 	}
2132 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2133 	if (ret) {
2134 		HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
2135 		goto err_dma;
2136 	}
2137 #else
2138 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2139 	if (ret) {
2140 		HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
2141 		goto err_dma;
2142 	}
2143 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2144 	if (ret) {
2145 		HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
2146 			   __func__);
2147 		goto err_dma;
2148 	}
2149 #endif
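
	/*
	 * Note: pci_set_dma_mask()/pci_set_consistent_dma_mask() are the
	 * older DMA API; on recent kernels the equivalent single call is
	 * dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(n)).
	 */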
2150 
2151 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2152 
2153 	/* Set bus master bit in PCI_COMMAND to enable DMA */
2154 	pci_set_master(pdev);
2155 
2156 	/* Arrange for access to Target SoC registers. */
2157 	mem = pci_iomap(pdev, BAR_NUM, 0);
2158 	if (!mem) {
2159 		HIF_ERROR("%s: PCI iomap error", __func__);
2160 		ret = -EIO;
2161 		goto err_iomap;
2162 	}
2163 
2164 	pr_err("*****BAR is %pK\n", mem);
2165 
2166 	sc->mem = mem;
2167 
2168 	HIF_INFO("%s, mem after pci_iomap:%pK\n",
2169 	       __func__, sc->mem);
2170 
2171 	/* Hawkeye emulation specific change */
2172 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
2173 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
2174 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
2175 		(device_id == RUMIM2M_DEVICE_ID_NODE3)) {
2176 		mem = mem + 0x0c000000;
2177 		sc->mem = mem;
2178 		HIF_INFO("%s: Changing PCI mem base to %pK\n",
2179 			__func__, sc->mem);
2180 	}
2181 
2182 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
2183 	ol_sc->mem = mem;
2184 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
2185 	sc->pci_enabled = true;
2186 	return ret;
2187 
2188 err_iomap:
2189 	pci_clear_master(pdev);
2190 err_dma:
2191 	pci_release_region(pdev, BAR_NUM);
2192 err_region:
2193 	pci_disable_device(pdev);
2194 	return ret;
2195 }
2196 #else
2197 static int hif_enable_pci(struct hif_pci_softc *sc,
2198 			  struct pci_dev *pdev,
2199 			  const struct pci_device_id *id)
2200 {
2201 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
2202 	sc->pci_enabled = true;
2203 	return 0;
2204 }
2205 #endif
2206 
2207 
2208 #ifndef CONFIG_PLD_PCIE_INIT
2209 static inline void hif_pci_deinit(struct hif_pci_softc *sc)
2210 {
2211 	pci_iounmap(sc->pdev, sc->mem);
2212 	pci_clear_master(sc->pdev);
2213 	pci_release_region(sc->pdev, BAR_NUM);
2214 	pci_disable_device(sc->pdev);
2215 }
2216 #else
2217 static inline void hif_pci_deinit(struct hif_pci_softc *sc) {}
2218 #endif
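
/*
 * When CONFIG_PLD_PCIE_INIT is set, the platform (pld) driver owns PCI
 * enumeration, BAR mapping and DMA setup, so the enable/deinit helpers
 * above reduce to stubs and hif only adjusts the L1ss configuration.
 */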
2219 
2220 static void hif_disable_pci(struct hif_pci_softc *sc)
2221 {
2222 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
2223 
2224 	if (ol_sc == NULL) {
2225 		HIF_ERROR("%s: ol_sc = NULL", __func__);
2226 		return;
2227 	}
2228 	hif_pci_device_reset(sc);
2229 
2230 	hif_pci_deinit(sc);
2231 
2232 	sc->mem = NULL;
2233 	ol_sc->mem = NULL;
2234 }
2235 
2236 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
2237 {
2238 	int ret = 0;
2239 	int targ_awake_limit = 500;
2240 #ifndef QCA_WIFI_3_0
2241 	uint32_t fw_indicator;
2242 #endif
2243 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2244 
2245 	/*
	 * Verify that the Target was started cleanly.
2247 	 * The case where this is most likely is with an AUX-powered
2248 	 * Target and a Host in WoW mode. If the Host crashes,
2249 	 * loses power, or is restarted (without unloading the driver)
2250 	 * then the Target is left (aux) powered and running.  On a
2251 	 * subsequent driver load, the Target is in an unexpected state.
2252 	 * We try to catch that here in order to reset the Target and
2253 	 * retry the probe.
2254 	 */
2255 	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2256 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2257 	while (!hif_targ_is_awake(scn, sc->mem)) {
2258 		if (0 == targ_awake_limit) {
2259 			HIF_ERROR("%s: target awake timeout", __func__);
2260 			ret = -EAGAIN;
2261 			goto end;
2262 		}
2263 		qdf_mdelay(1);
2264 		targ_awake_limit--;
2265 	}
2266 
2267 #if PCIE_BAR0_READY_CHECKING
2268 	{
2269 		int wait_limit = 200;
		/* Synchronization point: wait until BAR0 is configured */
2271 		while (wait_limit-- &&
2272 			   !(hif_read32_mb(sc->mem +
2273 					  PCIE_LOCAL_BASE_ADDRESS +
2274 					  PCIE_SOC_RDY_STATUS_ADDRESS)
2275 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
2276 			qdf_mdelay(10);
2277 		}
2278 		if (wait_limit < 0) {
2279 			/* AR6320v1 doesn't support checking of BAR0
			 * configuration; wait up to two sec for BAR0 ready
2281 			 */
2282 			HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
2283 				    __func__);
2284 		}
2285 	}
2286 #endif
2287 
2288 #ifndef QCA_WIFI_3_0
2289 	fw_indicator = hif_read32_mb(sc->mem + FW_INDICATOR_ADDRESS);
2290 	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2291 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2292 
2293 	if (fw_indicator & FW_IND_INITIALIZED) {
2294 		HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
2295 			   __func__);
2296 		ret = -EAGAIN;
2297 		goto end;
2298 	}
2299 #endif
2300 
2301 end:
2302 	return ret;
2303 }
2304 
2305 static void wlan_tasklet_msi(unsigned long data)
2306 {
2307 	struct hif_tasklet_entry *entry = (struct hif_tasklet_entry *)data;
2308 	struct hif_pci_softc *sc = (struct hif_pci_softc *) entry->hif_handler;
2309 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2310 
2311 	if (scn->hif_init_done == false)
2312 		goto irq_handled;
2313 
2314 	if (qdf_atomic_read(&scn->link_suspended))
2315 		goto irq_handled;
2316 
2317 	qdf_atomic_inc(&scn->active_tasklet_cnt);
2318 
2319 	if (entry->id == HIF_MAX_TASKLET_NUM) {
2320 		/* the last tasklet is for fw IRQ */
2321 		(irqreturn_t)hif_fw_interrupt_handler(sc->irq_event, scn);
2322 		if (scn->target_status == TARGET_STATUS_RESET)
2323 			goto irq_handled;
2324 	} else if (entry->id < scn->ce_count) {
2325 		ce_per_engine_service(scn, entry->id);
2326 	} else {
2327 		HIF_ERROR("%s: ERROR - invalid CE_id = %d",
2328 		       __func__, entry->id);
2329 	}
2330 	return;
2331 
2332 irq_handled:
2333 	qdf_atomic_dec(&scn->active_tasklet_cnt);
2334 
2335 }
2336 
2337 /* deprecated */
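/*
 * Fallback chain (a sketch of the flow below): request MSI_NUM_REQUEST
 * vectors; if the kernel grants fewer, fall back to a single MSI; if
 * MSI is unavailable, the caller retries with legacy INTx via
 * hif_pci_configure_legacy_irq().
 */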
2338 static int hif_configure_msi(struct hif_pci_softc *sc)
2339 {
2340 	int ret = 0;
2341 	int num_msi_desired;
2342 	int rv = -1;
2343 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2344 
2345 	HIF_TRACE("%s: E", __func__);
2346 
2347 	num_msi_desired = MSI_NUM_REQUEST; /* Multiple MSI */
2348 	if (num_msi_desired < 1) {
2349 		HIF_ERROR("%s: MSI is not configured", __func__);
2350 		return -EINVAL;
2351 	}
2352 
2353 	if (num_msi_desired > 1) {
2354 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
2355 		rv = pci_enable_msi_range(sc->pdev, num_msi_desired,
2356 						num_msi_desired);
2357 #else
2358 		rv = pci_enable_msi_block(sc->pdev, num_msi_desired);
2359 #endif
2360 	}
2361 	HIF_TRACE("%s: num_msi_desired = %d, available_msi = %d",
2362 		  __func__, num_msi_desired, rv);
2363 
2364 	if (rv == 0 || rv >= HIF_MAX_TASKLET_NUM) {
2365 		int i;
2366 
2367 		sc->num_msi_intrs = HIF_MAX_TASKLET_NUM;
2368 		sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].hif_handler =
2369 			(void *)sc;
2370 		sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].id =
2371 			HIF_MAX_TASKLET_NUM;
2372 		tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
2373 			 (unsigned long)&sc->tasklet_entries[
2374 			 HIF_MAX_TASKLET_NUM-1]);
2375 		ret = request_irq(sc->pdev->irq + MSI_ASSIGN_FW,
2376 				  hif_pci_msi_fw_handler,
2377 				  IRQF_SHARED, "wlan_pci", sc);
2378 		if (ret) {
2379 			HIF_ERROR("%s: request_irq failed", __func__);
2380 			goto err_intr;
2381 		}
2382 		for (i = 0; i <= scn->ce_count; i++) {
2383 			sc->tasklet_entries[i].hif_handler = (void *)sc;
2384 			sc->tasklet_entries[i].id = i;
2385 			tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
2386 				 (unsigned long)&sc->tasklet_entries[i]);
2387 			ret = request_irq((sc->pdev->irq +
2388 					   i + MSI_ASSIGN_CE_INITIAL),
2389 					  ce_per_engine_handler, IRQF_SHARED,
2390 					  "wlan_pci", sc);
2391 			if (ret) {
2392 				HIF_ERROR("%s: request_irq failed", __func__);
2393 				goto err_intr;
2394 			}
2395 		}
2396 	} else if (rv > 0) {
2397 		HIF_TRACE("%s: use single msi", __func__);
2398 
2399 		ret = pci_enable_msi(sc->pdev);
2400 		if (ret < 0) {
2401 			HIF_ERROR("%s: single MSI allocation failed",
2402 				  __func__);
2403 			/* Try for legacy PCI line interrupts */
2404 			sc->num_msi_intrs = 0;
2405 		} else {
2406 			sc->num_msi_intrs = 1;
2407 			tasklet_init(&sc->intr_tq,
2408 				wlan_tasklet, (unsigned long)sc);
2409 			ret = request_irq(sc->pdev->irq,
2410 					 hif_pci_legacy_ce_interrupt_handler,
2411 					  IRQF_SHARED, "wlan_pci", sc);
2412 			if (ret) {
2413 				HIF_ERROR("%s: request_irq failed", __func__);
2414 				goto err_intr;
2415 			}
2416 		}
2417 	} else {
2418 		sc->num_msi_intrs = 0;
2419 		ret = -EIO;
2420 		HIF_ERROR("%s: do not support MSI, rv = %d", __func__, rv);
2421 	}
2439 
2440 	if (ret == 0) {
2441 		hif_write32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
2442 			  PCIE_INTR_ENABLE_ADDRESS),
2443 			  HOST_GROUP0_MASK);
2444 		hif_write32_mb(sc->mem +
2445 			  PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
2446 			  PCIE_SOC_WAKE_RESET);
2447 	}
2448 	HIF_TRACE("%s: X, ret = %d", __func__, ret);
2449 
2450 	return ret;
2451 
2452 err_intr:
2453 	if (sc->num_msi_intrs >= 1)
2454 		pci_disable_msi(sc->pdev);
2455 	return ret;
2456 }
2457 
2458 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2459 {
2460 	int ret = 0;
2461 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2462 	uint32_t target_type = scn->target_info.target_type;
2463 
2464 	HIF_TRACE("%s: E", __func__);
2465 
	/* MSI not supported, or MSI IRQ configuration failed */
2467 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2468 	ret = request_irq(sc->pdev->irq,
2469 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2470 			  "wlan_pci", sc);
2471 	if (ret) {
2472 		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
2473 		goto end;
2474 	}
2475 	scn->wake_irq = sc->pdev->irq;
	/* Use sc->irq instead of sc->pdev->irq:
	 * a platform_device pdev doesn't have an irq field
2478 	 */
2479 	sc->irq = sc->pdev->irq;
2480 	/* Use Legacy PCI Interrupts */
2481 	hif_write32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
2482 		  PCIE_INTR_ENABLE_ADDRESS),
2483 		  HOST_GROUP0_MASK);
2484 	hif_read32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
2485 			       PCIE_INTR_ENABLE_ADDRESS));
2486 	hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2487 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2488 
2489 	if ((target_type == TARGET_TYPE_IPQ4019) ||
2490 			(target_type == TARGET_TYPE_AR900B)  ||
2491 			(target_type == TARGET_TYPE_QCA9984) ||
2492 			(target_type == TARGET_TYPE_AR9888) ||
2493 			(target_type == TARGET_TYPE_QCA9888) ||
2494 			(target_type == TARGET_TYPE_AR6320V1) ||
2495 			(target_type == TARGET_TYPE_AR6320V2) ||
2496 			(target_type == TARGET_TYPE_AR6320V3)) {
2497 		hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2498 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2499 	}
2500 end:
2501 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2502 			  "%s: X, ret = %d", __func__, ret);
2503 	return ret;
2504 }
2505 
2506 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
2507 {
2508 	int ret;
2509 	int ce_id, irq;
2510 	uint32_t msi_data_start;
2511 	uint32_t msi_data_count;
2512 	uint32_t msi_irq_start;
2513 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2514 
2515 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2516 					    &msi_data_count, &msi_data_start,
2517 					    &msi_irq_start);
2518 	if (ret)
2519 		return ret;
2520 
2521 	/* needs to match the ce_id -> irq data mapping
2522 	 * used in the srng parameter configuration
2523 	 */
2524 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2525 		unsigned int msi_data;
2526 
2527 		if (!ce_sc->tasklets[ce_id].inited)
2528 			continue;
2529 
2530 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
2531 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
2532 
2533 		HIF_INFO("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
2534 			  ce_id, msi_data, irq);
2535 
2536 		free_irq(irq, &ce_sc->tasklets[ce_id]);
2537 	}
2538 
2539 	return ret;
2540 }
2541 
2542 static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2543 {
2544 	int i, j, irq;
2545 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2546 	struct hif_exec_context *hif_ext_group;
2547 
2548 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2549 		hif_ext_group = hif_state->hif_ext_group[i];
2550 		if (hif_ext_group->irq_requested) {
2551 			hif_ext_group->irq_requested = false;
2552 			for (j = 0; j < hif_ext_group->numirq; j++) {
2553 				irq = hif_ext_group->os_irq[j];
2554 				free_irq(irq, hif_ext_group);
2555 			}
2556 			hif_ext_group->numirq = 0;
2557 		}
2558 	}
2559 }
2560 
2561 /**
 * hif_pci_nointrs(): disable IRQ
2563  *
2564  * This function stops interrupt(s)
2565  *
2566  * @scn: struct hif_softc
2567  *
2568  * Return: none
2569  */
2570 void hif_pci_nointrs(struct hif_softc *scn)
2571 {
2572 	int i, ret;
2573 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2574 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2575 
2576 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2577 
2578 	if (scn->request_irq_done == false)
2579 		return;
2580 
2581 	hif_pci_deconfigure_grp_irq(scn);
2582 
2583 	ret = hif_ce_srng_msi_free_irq(scn);
2584 	if (ret != -EINVAL) {
2585 		/* ce irqs freed in hif_ce_srng_msi_free_irq */
2586 
2587 		if (scn->wake_irq)
2588 			free_irq(scn->wake_irq, scn);
2589 		scn->wake_irq = 0;
2590 	} else if (sc->num_msi_intrs > 0) {
2591 		/* MSI interrupt(s) */
2592 		for (i = 0; i < sc->num_msi_intrs; i++)
2593 			free_irq(sc->irq + i, sc);
2594 		sc->num_msi_intrs = 0;
2595 	} else {
2596 		/* Legacy PCI line interrupt
		 * Use sc->irq instead of sc->pdev->irq:
		 * a platform_device pdev doesn't have an irq field
2599 		 */
2600 		free_irq(sc->irq, sc);
2601 	}
2602 	scn->request_irq_done = false;
2603 }
2604 
2605 /**
 * hif_pci_disable_bus(): disable the bus
 *
 * This function disables the bus
 *
 * @scn: hif context
2611  *
2612  * Return: none
2613  */
2614 void hif_pci_disable_bus(struct hif_softc *scn)
2615 {
2616 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2617 	struct pci_dev *pdev;
2618 	void __iomem *mem;
2619 	struct hif_target_info *tgt_info = &scn->target_info;
2620 
2621 	/* Attach did not succeed, all resources have been
2622 	 * freed in error handler
2623 	 */
2624 	if (!sc)
2625 		return;
2626 
2627 	pdev = sc->pdev;
2628 	if (ADRASTEA_BU) {
2629 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2630 
2631 		hif_write32_mb(sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2632 		hif_write32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS,
2633 			       HOST_GROUP0_MASK);
2634 	}
2635 
2636 #if defined(CPU_WARM_RESET_WAR)
2637 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
2638 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
2639 	 * verified for AR9888_REV1
2640 	 */
2641 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2642 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2643 		hif_pci_device_warm_reset(sc);
2644 	else
2645 		hif_pci_device_reset(sc);
2646 #else
2647 	hif_pci_device_reset(sc);
2648 #endif
2649 	mem = (void __iomem *)sc->mem;
2650 	if (mem) {
2651 #ifndef CONFIG_PLD_PCIE_INIT
2652 		pci_disable_msi(pdev);
2653 #endif
2654 		hif_dump_pipe_debug_count(scn);
2655 		if (scn->athdiag_procfs_inited) {
2656 			athdiag_procfs_remove();
2657 			scn->athdiag_procfs_inited = false;
2658 		}
2659 		hif_pci_deinit(sc);
2660 		scn->mem = NULL;
2661 	}
2662 	HIF_INFO("%s: X", __func__);
2663 }
2664 
2665 #define OL_ATH_PCI_PM_CONTROL 0x44
2666 
2667 #ifdef FEATURE_RUNTIME_PM
2668 /**
 * hif_runtime_prevent_linkdown() - prevent or allow runtime suspend from occurring
2670  * @scn: hif context
2671  * @flag: prevent linkdown if true otherwise allow
2672  *
2673  * this api should only be called as part of bus prevent linkdown
2674  */
2675 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2676 {
2677 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2678 
2679 	if (flag)
2680 		qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
2681 	else
2682 		qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
2683 }
2684 #else
2685 static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
2686 {
2687 }
2688 #endif
2689 
2690 #if defined(CONFIG_PCI_MSM)
2691 /**
 * hif_pci_prevent_linkdown(): prevent or allow linkdown
 * @flag: true prevents linkdown, false allows
2694  *
2695  * Calls into the platform driver to vote against taking down the
2696  * pcie link.
2697  *
2698  * Return: n/a
2699  */
2700 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2701 {
2702 	int errno;
2703 
2704 	HIF_DBG("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2705 	hif_runtime_prevent_linkdown(scn, flag);
2706 
2707 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2708 	if (errno)
2709 		HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
2710 			  __func__, errno);
2711 }
2712 #else
2713 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2714 {
2715 	HIF_DBG("wlan: %s pcie power collapse",
2716 			(flag ? "disable" : "enable"));
2717 	hif_runtime_prevent_linkdown(scn, flag);
2718 }
2719 #endif
2720 
2721 static int hif_mark_wake_irq_wakeable(struct hif_softc *scn)
2722 {
2723 	int errno;
2724 
2725 	errno = enable_irq_wake(scn->wake_irq);
2726 	if (errno) {
2727 		HIF_ERROR("%s: Failed to mark wake IRQ: %d", __func__, errno);
2728 		return errno;
2729 	}
2730 
2731 	return 0;
2732 }
2733 
2734 /**
2735  * hif_pci_bus_suspend(): prepare hif for suspend
2736  *
2737  * Enables pci bus wake irq based on link suspend voting.
2738  *
2739  * Return: 0 for success and non-zero error code for failure
2740  */
2741 int hif_pci_bus_suspend(struct hif_softc *scn)
2742 {
2743 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2744 		return 0;
2745 
2746 	/* pci link is staying up; enable wake irq */
2747 	return hif_mark_wake_irq_wakeable(scn);
2748 }
2749 
2750 /**
 * __hif_check_link_status() - API to check whether the PCIe link is active
2752  * @scn: HIF Context
2753  *
2754  * API reads the PCIe config space to verify if PCIe link training is
2755  * successful or not.
2756  *
2757  * Return: Success/Failure
2758  */
2759 static int __hif_check_link_status(struct hif_softc *scn)
2760 {
2761 	uint16_t dev_id = 0;
2762 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2763 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2764 
2765 	if (!sc) {
2766 		HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
2767 		return -EINVAL;
2768 	}
2769 
2770 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2771 
2772 	if (dev_id == sc->devid)
2773 		return 0;
2774 
2775 	HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2776 	       __func__, dev_id);
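	/* a fully downed link typically reads back as all 1s (0xffff) */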
2777 
2778 	scn->recovery = true;
2779 
2780 	if (cbk && cbk->set_recovery_in_progress)
2781 		cbk->set_recovery_in_progress(cbk->context, true);
2782 	else
2783 		HIF_ERROR("%s: Driver Global Recovery is not set", __func__);
2784 
2785 	pld_is_pci_link_down(sc->dev);
2786 	return -EACCES;
2787 }
2788 
2789 static int hif_unmark_wake_irq_wakeable(struct hif_softc *scn)
2790 {
2791 	int errno;
2792 
2793 	errno = disable_irq_wake(scn->wake_irq);
2794 	if (errno) {
2795 		HIF_ERROR("%s: Failed to unmark wake IRQ: %d", __func__, errno);
2796 		return errno;
2797 	}
2798 
2799 	return 0;
2800 }
2801 
2802 /**
2803  * hif_pci_bus_resume(): prepare hif for resume
2804  *
2805  * Disables pci bus wake irq based on link suspend voting.
2806  *
2807  * Return: 0 for success and non-zero error code for failure
2808  */
2809 int hif_pci_bus_resume(struct hif_softc *scn)
2810 {
2811 	int ret;
2812 
2813 	ret = __hif_check_link_status(scn);
2814 	if (ret)
2815 		return ret;
2816 
2817 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2818 		return 0;
2819 
2820 	/* pci link is up; disable wake irq */
2821 	return hif_unmark_wake_irq_wakeable(scn);
2822 }
2823 
2824 /**
2825  * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2826  * @scn: hif context
2827  *
 * Ensure that if we received the wakeup message before the irq
 * was disabled that the message is processed before suspending.
2830  *
2831  * Return: -EBUSY if we fail to flush the tasklets.
2832  */
2833 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2834 {
2835 	if (hif_drain_tasklets(scn) != 0)
2836 		return -EBUSY;
2837 
2838 	/* Stop the HIF Sleep Timer */
2839 	hif_cancel_deferred_target_sleep(scn);
2840 
2841 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2842 		qdf_atomic_set(&scn->link_suspended, 1);
2843 
2844 	return 0;
2845 }
2846 
2847 /**
 * hif_pci_bus_resume_noirq() - bookkeeping done while irqs are disabled
 * @scn: hif context
 *
 * Clear the link suspended flag so that register access is permitted
 * again before device irqs are re-enabled.
 *
 * Return: 0
2855  */
2856 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2857 {
2858 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2859 		qdf_atomic_set(&scn->link_suspended, 0);
2860 
2861 	return 0;
2862 }
2863 
2864 #ifdef FEATURE_RUNTIME_PM
2865 /**
2866  * __hif_runtime_pm_set_state(): utility function
2867  * @state: state to set
2868  *
 * Atomically set the hif runtime pm state.
2870  */
2871 static void __hif_runtime_pm_set_state(struct hif_softc *scn,
2872 				enum hif_pm_runtime_state state)
2873 {
2874 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2875 
2876 	if (NULL == sc) {
2877 		HIF_ERROR("%s: HIF_CTX not initialized",
2878 		       __func__);
2879 		return;
2880 	}
2881 
2882 	qdf_atomic_set(&sc->pm_state, state);
2883 }
2884 
2885 /**
2886  * hif_runtime_pm_set_state_inprogress(): adjust runtime pm state
2887  *
 * Notify hif that a runtime pm operation has started
2889  */
2890 static void hif_runtime_pm_set_state_inprogress(struct hif_softc *scn)
2891 {
2892 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_INPROGRESS);
2893 }
2894 
2895 /**
2896  * hif_runtime_pm_set_state_on():  adjust runtime pm state
2897  *
 * Notify hif that the runtime pm state should be on
2899  */
2900 static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
2901 {
2902 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
2903 }
2904 
2905 /**
2906  * hif_runtime_pm_set_state_suspended():  adjust runtime pm state
2907  *
2908  * Notify hif that a runtime suspend attempt has been completed successfully
2909  */
2910 static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
2911 {
2912 	__hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
2913 }
2914 
2915 /**
2916  * hif_log_runtime_suspend_success() - log a successful runtime suspend
2917  */
2918 static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
2919 {
2920 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2921 
2922 	if (sc == NULL)
2923 		return;
2924 
2925 	sc->pm_stats.suspended++;
2926 	sc->pm_stats.suspend_jiffies = jiffies;
2927 }
2928 
2929 /**
2930  * hif_log_runtime_suspend_failure() - log a failed runtime suspend
2931  *
2932  * log a failed runtime suspend
2933  * mark last busy to prevent immediate runtime suspend
2934  */
2935 static void hif_log_runtime_suspend_failure(void *hif_ctx)
2936 {
2937 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2938 
2939 	if (sc == NULL)
2940 		return;
2941 
2942 	sc->pm_stats.suspend_err++;
2943 }
2944 
2945 /**
2946  * hif_log_runtime_resume_success() - log a successful runtime resume
2947  *
 * log a successful runtime resume
2949  * mark last busy to prevent immediate runtime suspend
2950  */
2951 static void hif_log_runtime_resume_success(void *hif_ctx)
2952 {
2953 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
2954 
2955 	if (sc == NULL)
2956 		return;
2957 
2958 	sc->pm_stats.resumed++;
2959 }
2960 
2961 /**
2962  * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
2963  *
2964  * Record the failure.
2965  * mark last busy to delay a retry.
2966  * adjust the runtime_pm state.
2967  */
2968 void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
2969 {
2970 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
2971 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2972 
2973 	hif_log_runtime_suspend_failure(hif_ctx);
2974 	if (hif_pci_sc != NULL)
2975 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
2976 	hif_runtime_pm_set_state_on(scn);
2977 }
2978 
2979 /**
2980  * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
2981  *
 * Makes sure that the pci link will be taken down by the suspend operation.
2983  * If the hif layer is configured to leave the bus on, runtime suspend will
2984  * not save any power.
2985  *
2986  * Set the runtime suspend state to in progress.
2987  *
 * Return: -EINVAL if the bus won't go down; otherwise 0
2989  */
2990 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
2991 {
2992 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2993 
2994 	if (!hif_can_suspend_link(hif_ctx)) {
2995 		HIF_ERROR("Runtime PM not supported for link up suspend");
2996 		return -EINVAL;
2997 	}
2998 
2999 	hif_runtime_pm_set_state_inprogress(scn);
3000 	return 0;
3001 }
3002 
3003 /**
3004  * hif_process_runtime_suspend_success() - bookkeeping of suspend success
3005  *
3006  * Record the success.
3007  * adjust the runtime_pm state
3008  */
3009 void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
3010 {
3011 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3012 
3013 	hif_runtime_pm_set_state_suspended(scn);
3014 	hif_log_runtime_suspend_success(scn);
3015 }
3016 
3017 /**
3018  * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
3019  *
3020  * update the runtime pm state.
3021  */
3022 void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
3023 {
3024 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3025 
3026 	hif_runtime_pm_set_state_inprogress(scn);
3027 }
3028 
3029 /**
3030  * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
3031  *
3032  * record the success.
3033  * adjust the runtime_pm state
3034  */
3035 void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
3036 {
3037 	struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
3038 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3039 
3040 	hif_log_runtime_resume_success(hif_ctx);
3041 	if (hif_pci_sc != NULL)
3042 		hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
3043 	hif_runtime_pm_set_state_on(scn);
3044 }
3045 
3046 /**
3047  * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
3048  *
3049  * Return: 0 for success and non-zero error code for failure
3050  */
3051 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
3052 {
3053 	int errno;
3054 
3055 	errno = hif_bus_suspend(hif_ctx);
3056 	if (errno) {
3057 		HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
3058 		return errno;
3059 	}
3060 
3061 	errno = hif_apps_irqs_disable(hif_ctx);
3062 	if (errno) {
3063 		HIF_ERROR("%s: failed disable irqs: %d", __func__, errno);
3064 		goto bus_resume;
3065 	}
3066 
3067 	errno = hif_bus_suspend_noirq(hif_ctx);
3068 	if (errno) {
3069 		HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
3070 		goto irqs_enable;
3071 	}
3072 
3073 	/* link should always be down; skip enable wake irq */
3074 
3075 	return 0;
3076 
3077 irqs_enable:
3078 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
3079 
3080 bus_resume:
3081 	QDF_BUG(!hif_bus_resume(hif_ctx));
3082 
3083 	return errno;
3084 }
3085 
3086 /**
3087  * hif_fastpath_resume() - resume fastpath for runtimepm
3088  *
3089  * ensure that the fastpath write index register is up to date
3090  * since runtime pm may cause ce_send_fast to skip the register
3091  * write.
3092  *
3093  * fastpath only applicable to legacy copy engine
3094  */
3095 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
3096 {
3097 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3098 	struct CE_state *ce_state;
3099 
3100 	if (!scn)
3101 		return;
3102 
3103 	if (scn->fastpath_mode_on) {
3104 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
3105 			return;
3106 
3107 		ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
3108 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
3109 
3110 		/*war_ce_src_ring_write_idx_set */
3111 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
3112 				ce_state->src_ring->write_index);
3113 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
3114 		Q_TARGET_ACCESS_END(scn);
3115 	}
3116 }
3117 
3118 /**
3119  * hif_runtime_resume() - do the bus resume part of a runtime resume
3120  *
3121  *  Return: 0 for success and non-zero error code for failure
3122  */
3123 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
3124 {
3125 	/* link should always be down; skip disable wake irq */
3126 
3127 	QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
3128 	QDF_BUG(!hif_apps_irqs_enable(hif_ctx));
3129 	QDF_BUG(!hif_bus_resume(hif_ctx));
3130 	return 0;
3131 }
3132 #endif /* #ifdef FEATURE_RUNTIME_PM */
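
/*
 * Runtime PM state transitions driven by the helpers above (a sketch):
 *
 *   ON --hif_pre_runtime_suspend()--> INPROGRESS
 *   INPROGRESS --suspend success--> SUSPENDED
 *   INPROGRESS --suspend failure--> ON
 *   SUSPENDED --hif_pre_runtime_resume()--> INPROGRESS
 *   INPROGRESS --resume success--> ON
 */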
3133 
3134 #if CONFIG_PCIE_64BIT_MSI
3135 static void hif_free_msi_ctx(struct hif_softc *scn)
3136 {
3137 	struct hif_pci_softc *sc = scn->hif_sc;
3138 	struct hif_msi_info *info = &sc->msi_info;
3139 	struct device *dev = scn->qdf_dev->dev;
3140 
3141 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
3142 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
3143 	info->magic = NULL;
3144 	info->magic_dma = 0;
3145 }
3146 #else
3147 static void hif_free_msi_ctx(struct hif_softc *scn)
3148 {
3149 }
3150 #endif
3151 
3152 void hif_pci_disable_isr(struct hif_softc *scn)
3153 {
3154 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3155 
3156 	hif_exec_kill(&scn->osc);
3157 	hif_nointrs(scn);
3158 	hif_free_msi_ctx(scn);
3159 	/* Cancel the pending tasklet */
3160 	ce_tasklet_kill(scn);
3161 	tasklet_kill(&sc->intr_tq);
3162 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
3163 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
3164 }
3165 
3166 /* Function to reset SoC */
3167 void hif_pci_reset_soc(struct hif_softc *hif_sc)
3168 {
3169 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
3170 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
3171 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
3172 
3173 #if defined(CPU_WARM_RESET_WAR)
3174 	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
3175 	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
3176 	 * verified for AR9888_REV1
3177 	 */
3178 	if (tgt_info->target_version == AR9888_REV2_VERSION)
3179 		hif_pci_device_warm_reset(sc);
3180 	else
3181 		hif_pci_device_reset(sc);
3182 #else
3183 	hif_pci_device_reset(sc);
3184 #endif
3185 }
3186 
3187 #ifdef CONFIG_PCI_MSM
3188 static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
3189 {
3190 	msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
3191 	msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
3192 }
3193 #else
static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}
3195 #endif
3196 
3197 /**
3198  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
3199  * @sc: HIF PCIe Context
3200  *
3201  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
3202  *
3203  * Return: Failure to caller
3204  */
3205 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
3206 {
3207 	uint16_t val = 0;
3208 	uint32_t bar = 0;
3209 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
3210 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
3211 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
3212 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
3213 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
3214 	A_target_id_t pci_addr = scn->mem;
3215 
3216 	HIF_ERROR("%s: keep_awake_count = %d",
3217 			__func__, hif_state->keep_awake_count);
3218 
3219 	pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
3220 
3221 	HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
3222 
3223 	pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
3224 
3225 	HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
3226 
3227 	pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
3228 
3229 	HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
3230 
3231 	pci_read_config_word(sc->pdev, PCI_STATUS, &val);
3232 
3233 	HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
3234 
3235 	pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
3236 
3237 	HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
3238 
	HIF_ERROR("%s: SOC_WAKE_ADDR 0x%08x", __func__,
3240 			hif_read32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3241 						PCIE_SOC_WAKE_ADDRESS));
3242 
3243 	HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
3244 			hif_read32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
3245 							RTC_STATE_ADDRESS));
3246 
	HIF_ERROR("%s: error, wakeup target", __func__);
3248 	hif_msm_pcie_debug_info(sc);
3249 
3250 	if (!cfg->enable_self_recovery)
3251 		QDF_BUG(0);
3252 
3253 	scn->recovery = true;
3254 
3255 	if (cbk->set_recovery_in_progress)
3256 		cbk->set_recovery_in_progress(cbk->context, true);
3257 
3258 	pld_is_pci_link_down(sc->dev);
3259 	return -EACCES;
3260 }
3261 
3262 /*
3263  * For now, we use simple on-demand sleep/wake.
3264  * Some possible improvements:
3265  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
3266  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
3267  *   Careful, though, these functions may be used by
3268  *  interrupt handlers ("atomic")
3269  *  -Don't use host_reg_table for this code; instead use values directly
3270  *  -Use a separate timer to track activity and allow Target to sleep only
3271  *   if it hasn't done anything for a while; may even want to delay some
3272  *   processing for a short while in order to "batch" (e.g.) transmit
3273  *   requests with completion processing into "windows of up time".  Costs
3274  *   some performance, but improves power utilization.
3275  *  -On some platforms, it might be possible to eliminate explicit
3276  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
3277  *   recover from the failure by forcing the Target awake.
3278  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
3279  *   overhead in some cases. Perhaps this makes more sense when
3280  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
3281  *   disabled.
3282  *  -It is possible to compile this code out and simply force the Target
3283  *   to remain awake.  That would yield optimal performance at the cost of
3284  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
3285  *
3286  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
3287  */
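/*
 * Typical usage (a sketch; Q_TARGET_ACCESS_BEGIN/END wrap this pattern
 * for most callers):
 *
 *   hif_pci_target_sleep_state_adjust(scn, false, true);   wake + wait
 *   ... access Target registers ...
 *   hif_pci_target_sleep_state_adjust(scn, true, false);   allow sleep
 */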
3288 /**
 * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
 * @scn: hif_softc pointer.
 * @sleep_ok: allow the target to sleep when true; force it awake when false
 * @wait_for_it: when waking, wait until the target is verified awake
 *
 * Track keep_awake_count and drive the SOC_WAKE register accordingly.
 *
 * Return: 0 on success, -EACCES on failure
3297  */
3298 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
3299 			      bool sleep_ok, bool wait_for_it)
3300 {
3301 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3302 	A_target_id_t pci_addr = scn->mem;
3303 	static int max_delay;
3304 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	static int debug;

	if (scn->recovery)
3307 		return -EACCES;
3308 
3309 	if (qdf_atomic_read(&scn->link_suspended)) {
3310 		HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
3311 		debug = true;
3312 		QDF_ASSERT(0);
3313 		return -EACCES;
3314 	}
3315 
3316 	if (debug) {
3317 		wait_for_it = true;
3318 		HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
3319 				__func__);
3320 		QDF_ASSERT(0);
3321 	}
3322 
3323 	if (sleep_ok) {
3324 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3325 		hif_state->keep_awake_count--;
3326 		if (hif_state->keep_awake_count == 0) {
3327 			/* Allow sleep */
3328 			hif_state->verified_awake = false;
3329 			hif_state->sleep_ticks = qdf_system_ticks();
3330 		}
3331 		if (hif_state->fake_sleep == false) {
3332 			/* Set the Fake Sleep */
3333 			hif_state->fake_sleep = true;
3334 
3335 			/* Start the Sleep Timer */
3336 			qdf_timer_stop(&hif_state->sleep_timer);
3337 			qdf_timer_start(&hif_state->sleep_timer,
3338 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
3339 		}
3340 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3341 	} else {
3342 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
3343 
3344 		if (hif_state->fake_sleep) {
3345 			hif_state->verified_awake = true;
3346 		} else {
3347 			if (hif_state->keep_awake_count == 0) {
3348 				/* Force AWAKE */
3349 				hif_write32_mb(pci_addr +
3350 					      PCIE_LOCAL_BASE_ADDRESS +
3351 					      PCIE_SOC_WAKE_ADDRESS,
3352 					      PCIE_SOC_WAKE_V_MASK);
3353 			}
3354 		}
3355 		hif_state->keep_awake_count++;
3356 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
3357 
3358 		if (wait_for_it && !hif_state->verified_awake) {
#define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
			int tot_delay = 0;
			int curr_delay = 5;

			for (;;) {
3364 				if (hif_targ_is_awake(scn, pci_addr)) {
3365 					hif_state->verified_awake = true;
3366 					break;
3367 				}
3368 				if (!hif_pci_targ_is_present(scn, pci_addr))
3369 					break;
3370 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
3371 					return hif_log_soc_wakeup_timeout(sc);
3372 
3373 				OS_DELAY(curr_delay);
3374 				tot_delay += curr_delay;
3375 
3376 				if (curr_delay < 50)
3377 					curr_delay += 5;
3378 			}
3379 
3380 			/*
3381 			 * NB: If Target has to come out of Deep Sleep,
			 * this may take a few msec. Typically, though,
			 * this delay should be <30us.
3384 			 */
3385 			if (tot_delay > max_delay)
3386 				max_delay = tot_delay;
3387 		}
3388 	}
3389 
3390 	if (debug && hif_state->verified_awake) {
3391 		debug = 0;
3392 		HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
3393 			__func__,
3394 			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
3395 				PCIE_INTR_ENABLE_ADDRESS),
3396 			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
3397 				PCIE_INTR_CAUSE_ADDRESS),
3398 			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
3399 				CPU_INTR_ADDRESS),
3400 			hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
3401 				PCIE_INTR_CLR_ADDRESS),
3402 			hif_read32_mb(sc->mem + CE_WRAPPER_BASE_ADDRESS +
3403 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
3404 	}
3405 
3406 	return 0;
3407 }
3408 
3409 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
3410 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
3411 {
3412 	uint32_t value;
3413 	void *addr;
3414 
3415 	addr = scn->mem + offset;
3416 	value = hif_read32_mb(addr);
3417 
3418 	{
3419 		unsigned long irq_flags;
3420 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3421 
3422 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3423 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3424 		pcie_access_log[idx].is_write = false;
3425 		pcie_access_log[idx].addr = addr;
3426 		pcie_access_log[idx].value = value;
3427 		pcie_access_log_seqnum++;
3428 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3429 	}
3430 
3431 	return value;
3432 }
3433 
3434 void
3435 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
3436 {
3437 	void *addr;
3438 
3439 	addr = scn->mem + (offset);
3440 	hif_write32_mb(addr, value);
3441 
3442 	{
3443 		unsigned long irq_flags;
3444 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3445 
3446 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3447 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
3448 		pcie_access_log[idx].is_write = true;
3449 		pcie_access_log[idx].addr = addr;
3450 		pcie_access_log[idx].value = value;
3451 		pcie_access_log_seqnum++;
3452 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3453 	}
3454 }
3455 
3456 /**
3457  * hif_target_dump_access_log() - dump access log
3458  *
3459  * dump access log
3460  *
3461  * Return: n/a
3462  */
3463 void hif_target_dump_access_log(void)
3464 {
3465 	int idx, len, start_idx, cur_idx;
3466 	unsigned long irq_flags;
3467 
3468 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
3469 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
3470 		len = PCIE_ACCESS_LOG_NUM;
3471 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
3472 	} else {
3473 		len = pcie_access_log_seqnum;
3474 		start_idx = 0;
3475 	}
3476 
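	/* replay the circular buffer oldest-first; once the log has
	 * wrapped, start_idx points at the oldest surviving entry
	 */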
3477 	for (idx = 0; idx < len; idx++) {
3478 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
3479 		HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
3480 		       __func__, idx,
3481 		       pcie_access_log[cur_idx].seqnum,
3482 		       pcie_access_log[cur_idx].is_write,
3483 		       pcie_access_log[cur_idx].addr,
3484 		       pcie_access_log[cur_idx].value);
3485 	}
3486 
3487 	pcie_access_log_seqnum = 0;
3488 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
3489 }
3490 #endif
3491 
3492 #ifndef HIF_AHB
3493 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
3494 {
3495 	QDF_BUG(0);
3496 	return -EINVAL;
3497 }
3498 
3499 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
3500 {
3501 	QDF_BUG(0);
3502 	return -EINVAL;
3503 }
3504 #endif
3505 
3506 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
3507 {
	struct ce_tasklet_entry *tasklet_entry = context;

	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}

extern const char *ce_name[];
3512 
3513 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3514 {
3515 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3516 
3517 	return pci_scn->ce_msi_irq_num[ce_id];
3518 }
3519 
/* hif_ce_srng_msi_irq_disable() - disable the irq for msi
3521  * @hif_sc: hif context
3522  * @ce_id: which ce to disable copy complete interrupts for
3523  *
3524  * since MSI interrupts are not level based, the system can function
3525  * without disabling these interrupts.  Interrupt mitigation can be
3526  * added here for better system performance.
3527  */
3528 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3529 {
3530 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3531 }
3532 
3533 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3534 {
3535 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
3536 }
3537 
3538 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
3539 {}
3540 
3541 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
3542 {}
3543 
3544 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
3545 {
3546 	int ret;
3547 	int ce_id, irq;
3548 	uint32_t msi_data_start;
3549 	uint32_t msi_data_count;
3550 	uint32_t msi_irq_start;
3551 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3552 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3553 
3554 	/* do wake irq assignment */
3555 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
3556 					  &msi_data_count, &msi_data_start,
3557 					  &msi_irq_start);
3558 	if (ret)
3559 		return ret;
3560 
3561 	scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_irq_start);
3562 	ret = request_irq(scn->wake_irq, hif_wake_interrupt_handler, 0,
3563 			  "wlan_wake_irq", scn);
3564 	if (ret)
3565 		return ret;
3566 
3567 	/* do ce irq assignments */
3568 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3569 					    &msi_data_count, &msi_data_start,
3570 					    &msi_irq_start);
3571 	if (ret)
3572 		goto free_wake_irq;
3573 
3574 	if (ce_srng_based(scn)) {
3575 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
3576 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
3577 	} else {
3578 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
3579 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
3580 	}
3581 
3582 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
3583 
3584 	/* needs to match the ce_id -> irq data mapping
3585 	 * used in the srng parameter configuration
3586 	 */
3587 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
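	/*
	 * Example with hypothetical values: msi_data_count = 8 and
	 * msi_irq_start = 1 map CE ids 0..7 to MSI data 1..8, with CE 8
	 * wrapping back to 1. hif_ce_srng_msi_free_irq() must apply the
	 * same (ce_id % msi_data_count) + msi_irq_start formula.
	 */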
3588 		unsigned int msi_data = (ce_id % msi_data_count) +
3589 			msi_irq_start;
3590 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3591 		HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)",
3592 			 __func__, ce_id, msi_data, irq,
3593 			 &ce_sc->tasklets[ce_id]);
3594 
3595 		/* implies the ce is also initialized */
3596 		if (!ce_sc->tasklets[ce_id].inited)
3597 			continue;
3598 
3599 		pci_sc->ce_msi_irq_num[ce_id] = irq;
3600 		ret = request_irq(irq, hif_ce_interrupt_handler,
3601 				  IRQF_SHARED,
3602 				  ce_name[ce_id],
3603 				  &ce_sc->tasklets[ce_id]);
3604 		if (ret)
3605 			goto free_irq;
3606 	}
3607 
3608 	return ret;
3609 
3610 free_irq:
3611 	/* the request_irq for the last ce_id failed so skip it. */
3612 	while (ce_id > 0 && ce_id < scn->ce_count) {
3613 		unsigned int msi_data;
3614 
3615 		ce_id--;
		/* must mirror the msi_data computation used by request_irq */
		msi_data = (ce_id % msi_data_count) + msi_irq_start;
3617 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3618 		free_irq(irq, &ce_sc->tasklets[ce_id]);
3619 	}
3620 
3621 free_wake_irq:
	/* dev_id must match the one passed to request_irq() above */
	free_irq(scn->wake_irq, scn);
3623 	scn->wake_irq = 0;
3624 
3625 	return ret;
3626 }
3627 
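/* enable/disable every os irq owned by an ext interrupt group */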
3628 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3629 {
3630 	int i;
3631 
3632 	for (i = 0; i < hif_ext_group->numirq; i++)
3633 		disable_irq_nosync(hif_ext_group->os_irq[i]);
3634 }
3635 
3636 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3637 {
3638 	int i;
3639 
3640 	for (i = 0; i < hif_ext_group->numirq; i++)
3641 		enable_irq(hif_ext_group->os_irq[i]);
3642 }
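/**
 * hif_pci_configure_grp_irq() - request all irqs of an ext interrupt group
 * @scn: hif context
 * @hif_ext_group: the group whose irq list is to be requested
 *
 * Return: 0 on success, -EFAULT if any request_irq() fails
 */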
3645 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3646 			      struct hif_exec_context *hif_ext_group)
3647 {
3648 	int ret = 0;
3649 	int irq = 0;
3650 	int j;
3651 
3652 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3653 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3654 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3655 
3656 	for (j = 0; j < hif_ext_group->numirq; j++) {
3657 		irq = hif_ext_group->irq[j];
3658 
3659 		HIF_DBG("%s: request_irq = %d for grp %d",
3660 			  __func__, irq, hif_ext_group->grp_id);
3661 		ret = request_irq(irq,
3662 				  hif_ext_group_interrupt_handler,
3663 				  IRQF_SHARED, "wlan_EXT_GRP",
3664 				  hif_ext_group);
3665 		if (ret) {
3666 			HIF_ERROR("%s: request_irq failed ret = %d",
3667 				  __func__, ret);
3668 			return -EFAULT;
3669 		}
3670 		hif_ext_group->os_irq[j] = irq;
3671 	}
3672 	hif_ext_group->irq_requested = true;
3673 	return 0;
3674 }
3675 
3676 /**
 * hif_configure_irq() - configure interrupt
 * @scn: hif context
 *
 * This function configures interrupt delivery: MSI first, falling
 * back to legacy line interrupts if MSI setup fails.
 *
 * Return: 0 - for success
3685  */
3686 int hif_configure_irq(struct hif_softc *scn)
3687 {
3688 	int ret = 0;
3689 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3690 
3691 	HIF_TRACE("%s: E", __func__);
3692 	if (scn->polled_mode_on) {
3693 		scn->request_irq_done = false;
3694 		return 0;
3695 	}
3696 
3697 	hif_init_reschedule_tasklet_work(sc);
3698 
3699 	ret = hif_ce_msi_configure_irq(scn);
	if (ret == 0)
		goto end;
3703 
3704 	if (ENABLE_MSI) {
3705 		ret = hif_configure_msi(sc);
3706 		if (ret == 0)
3707 			goto end;
3708 	}
3709 	/* MSI failed. Try legacy irq */
3710 	switch (scn->target_info.target_type) {
3711 	case TARGET_TYPE_IPQ4019:
3712 		ret = hif_ahb_configure_legacy_irq(sc);
3713 		break;
3714 	case TARGET_TYPE_QCA8074:
3715 		ret = hif_ahb_configure_irq(sc);
3716 		break;
3717 	default:
3718 		ret = hif_pci_configure_legacy_irq(sc);
3719 		break;
3720 	}
3721 	if (ret < 0) {
3722 		HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
3723 			__func__, ret);
3724 		return ret;
3725 	}
3726 end:
3727 	scn->request_irq_done = true;
3728 	return 0;
3729 }
3730 
3731 /**
 * hif_target_sync() - ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interrupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the target's other registers for the first time.
3739  *
3740  * Return: none
3741  */
3742 static void hif_target_sync(struct hif_softc *scn)
3743 {
	hif_write32_mb(scn->mem + (SOC_CORE_BASE_ADDRESS |
3745 				PCIE_INTR_ENABLE_ADDRESS),
3746 				PCIE_INTR_FIRMWARE_MASK);
3747 
3748 	hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3749 			PCIE_SOC_WAKE_ADDRESS,
3750 			PCIE_SOC_WAKE_V_MASK);
3751 	while (!hif_targ_is_awake(scn, scn->mem))
3752 		;
3753 
3754 	if (HAS_FW_INDICATOR) {
3755 		int wait_limit = 500;
3756 		int fw_ind = 0;
3757 
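		/* Poll the FW indicator every 10 ms (up to ~5 seconds),
		 * re-asserting the firmware interrupt enable on each pass,
		 * until the target reports FW_IND_INITIALIZED.
		 */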
3758 		HIF_TRACE("%s: Loop checking FW signal", __func__);
3759 		while (1) {
3760 			fw_ind = hif_read32_mb(scn->mem +
3761 					FW_INDICATOR_ADDRESS);
3762 			if (fw_ind & FW_IND_INITIALIZED)
3763 				break;
3764 			if (wait_limit-- < 0)
3765 				break;
			hif_write32_mb(scn->mem + (SOC_CORE_BASE_ADDRESS |
3767 				PCIE_INTR_ENABLE_ADDRESS),
3768 				PCIE_INTR_FIRMWARE_MASK);
3769 
3770 			qdf_mdelay(10);
3771 		}
3772 		if (wait_limit < 0)
3773 			HIF_TRACE("%s: FW signal timed out",
3774 					__func__);
3775 		else
3776 			HIF_TRACE("%s: Got FW signal, retries = %x",
					__func__, 500 - wait_limit);
3778 	}
3779 	hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3780 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3781 }
3782 
3783 #ifdef CONFIG_PLD_PCIE_INIT
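/**
 * hif_pci_get_soc_info() - populate BAR info from the platform driver
 * @sc: pci hif context
 * @dev: device whose soc info the platform driver holds
 *
 * When the platform driver owns PCIe init, it has already mapped the
 * device memory; copy the virtual and physical addresses into the hif
 * context.
 */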
3784 static void hif_pci_get_soc_info(struct hif_pci_softc *sc, struct device *dev)
3785 {
3786 	struct pld_soc_info info;
3787 
3788 	pld_get_soc_info(dev, &info);
3789 	sc->mem = info.v_addr;
3790 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3791 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3792 }
3793 #else
3794 static void hif_pci_get_soc_info(struct hif_pci_softc *sc, struct device *dev)
3795 {}
3796 #endif
3797 
3798 /**
 * hif_pci_enable_bus(): enable bus
 * @ol_sc: soft_sc struct
 * @dev: device pointer
 * @bdev: bus dev pointer
 * @bid: bus id pointer
 * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
 *
 * This function enables the bus
 *
 * Return: QDF_STATUS
3809  */
3810 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3811 			  struct device *dev, void *bdev,
3812 			  const struct hif_bus_id *bid,
3813 			  enum hif_enable_type type)
3814 {
3815 	int ret = 0;
3816 	uint32_t hif_type, target_type;
3817 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3818 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3819 	uint16_t revision_id;
3820 	int probe_again = 0;
3821 	struct pci_dev *pdev = bdev;
3822 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3823 	struct hif_target_info *tgt_info;
3824 
3825 	if (!ol_sc) {
3826 		HIF_ERROR("%s: hif_ctx is NULL", __func__);
3827 		return QDF_STATUS_E_NOMEM;
3828 	}
3829 
3830 	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
3831 		  __func__, hif_get_conparam(ol_sc), id->device);
3832 
3833 	sc->pdev = pdev;
3834 	sc->dev = &pdev->dev;
3835 	sc->devid = id->device;
3836 	sc->cacheline_sz = dma_get_cache_alignment();
3837 	tgt_info = hif_get_target_info_handle(hif_hdl);
3838 	hif_pci_get_soc_info(sc, dev);
3839 again:
3840 	ret = hif_enable_pci(sc, pdev, id);
3841 	if (ret < 0) {
3842 		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
3843 		       __func__, ret);
3844 		goto err_enable_pci;
3845 	}
3846 	HIF_TRACE("%s: hif_enable_pci done", __func__);
3847 
3848 	/* Temporary FIX: disable ASPM on peregrine.
3849 	 * Will be removed after the OTP is programmed
3850 	 */
3851 	hif_disable_power_gating(hif_hdl);
3852 
3853 	device_disable_async_suspend(&pdev->dev);
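	/* config space offset 0x08 is PCI_REVISION_ID; the revision is in
	 * the low byte of this word read
	 */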
3854 	pci_read_config_word(pdev, 0x08, &revision_id);
3855 
	ret = hif_get_device_type(id->device, revision_id,
				  &hif_type, &target_type);
3858 	if (ret < 0) {
3859 		HIF_ERROR("%s: invalid device id/revision_id", __func__);
3860 		goto err_tgtstate;
3861 	}
3862 	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
3863 		  __func__, hif_type, target_type);
3864 
3865 	hif_register_tbl_attach(ol_sc, hif_type);
3866 	hif_target_register_tbl_attach(ol_sc, target_type);
3867 
3868 	tgt_info->target_type = target_type;
3869 
3870 	if (ce_srng_based(ol_sc)) {
3871 		HIF_TRACE("%s:Skip tgt_wake up for srng devices\n", __func__);
3872 	} else {
3873 		ret = hif_pci_probe_tgt_wakeup(sc);
3874 		if (ret < 0) {
3875 			HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d",
3876 					__func__, ret);
3877 			if (ret == -EAGAIN)
3878 				probe_again++;
3879 			goto err_tgtstate;
3880 		}
3881 		HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
3882 	}
3883 
3884 	if (!ol_sc->mem_pa) {
3885 		HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
3886 		ret = -EIO;
3887 		goto err_tgtstate;
3888 	}
3889 
3890 	if (!ce_srng_based(ol_sc)) {
3891 		hif_target_sync(ol_sc);
3892 
3893 		if (ADRASTEA_BU)
3894 			hif_vote_link_up(hif_hdl);
3895 	}
3896 
3897 	return 0;
3898 
3899 err_tgtstate:
3900 	hif_disable_pci(sc);
3901 	sc->pci_enabled = false;
3902 	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
3903 	return QDF_STATUS_E_ABORTED;
3904 
3905 err_enable_pci:
3906 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3907 		int delay_time;
3908 
3909 		HIF_INFO("%s: pci reprobe", __func__);
3910 		/* 10, 40, 90, 100, 100, ... */
3911 		delay_time = max(100, 10 * (probe_again * probe_again));
3912 		qdf_mdelay(delay_time);
3913 		goto again;
3914 	}
3915 	return ret;
3916 }
3917 
3918 /**
 * hif_pci_irq_enable() - re-enable copy engine interrupts
 * @scn: hif_softc
 * @ce_id: copy engine id that was serviced
 *
 * Clears @ce_id from the pending summary and re-enables the legacy
 * PCI line interrupts once all copy engines have been serviced.
 *
3923  * Return: void
3924  */
3925 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3926 {
3927 	uint32_t tmp = 1 << ce_id;
3928 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3929 
3930 	qdf_spin_lock_irqsave(&sc->irq_lock);
3931 	scn->ce_irq_summary &= ~tmp;
3932 	if (scn->ce_irq_summary == 0) {
3933 		/* Enable Legacy PCI line interrupts */
3934 		if (LEGACY_INTERRUPTS(sc) &&
3935 			(scn->target_status != TARGET_STATUS_RESET) &&
3936 			(!qdf_atomic_read(&scn->link_suspended))) {
3937 
3938 			hif_write32_mb(scn->mem +
3939 				(SOC_CORE_BASE_ADDRESS |
3940 				PCIE_INTR_ENABLE_ADDRESS),
3941 				HOST_GROUP0_MASK);
3942 
3943 			hif_read32_mb(scn->mem +
3944 					(SOC_CORE_BASE_ADDRESS |
3945 					PCIE_INTR_ENABLE_ADDRESS));
3946 		}
3947 	}
	if (scn->hif_init_done)
3949 		Q_TARGET_ACCESS_END(scn);
3950 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
3951 
3952 	/* check for missed firmware crash */
3953 	hif_fw_interrupt_handler(0, scn);
3954 }
3955 
3956 /**
 * hif_pci_irq_disable() - disable copy engine interrupts
 * @scn: hif_softc
 * @ce_id: copy engine id
 *
 * Only applicable to the legacy copy engine.
3962  *
3963  * Return: void
3964  */
3965 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3966 {
	/* For Rome, only need to wake up the target */
	/* target access is maintained until interrupts are re-enabled */
3969 	Q_TARGET_ACCESS_BEGIN(scn);
3970 }
3971 
3972 #ifdef FEATURE_RUNTIME_PM
3973 
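/**
 * hif_pm_runtime_get_noresume() - take a runtime pm reference without resume
 * @hif_ctx: opaque hif context
 *
 * Bumps the runtime pm usage count so that a runtime suspend cannot
 * start, without waking the device if it is already suspended.
 */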
3974 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
3975 {
3976 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
3977 
	if (!sc)
		return;
3980 
3981 	sc->pm_stats.runtime_get++;
3982 	pm_runtime_get_noresume(sc->dev);
3983 }
3984 
3985 /**
3986  * hif_pm_runtime_get() - do a get opperation on the device
3987  *
3988  * A get opperation will prevent a runtime suspend untill a
3989  * corresponding put is done.  This api should be used when sending
3990  * data.
3991  *
3992  * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
3993  * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
3994  *
3995  * return: success if the bus is up and a get has been issued
3996  *   otherwise an error code.
3997  */
3998 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
3999 {
4000 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4001 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4002 	int ret;
4003 	int pm_state;
4004 
	if (!scn) {
4006 		HIF_ERROR("%s: Could not do runtime get, scn is null",
4007 				__func__);
4008 		return -EFAULT;
4009 	}
4010 
4011 	pm_state = qdf_atomic_read(&sc->pm_state);
4012 
	if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
4014 			pm_state == HIF_PM_RUNTIME_STATE_NONE) {
4015 		sc->pm_stats.runtime_get++;
4016 		ret = __hif_pm_runtime_get(sc->dev);
4017 
		/* A get can return 1 if the device is already active;
		 * just return success in that case.
		 */
4021 		if (ret > 0)
4022 			ret = 0;
4023 
4024 		if (ret)
4025 			hif_pm_runtime_put(hif_ctx);
4026 
4027 		if (ret && ret != -EINPROGRESS) {
4028 			sc->pm_stats.runtime_get_err++;
4029 			HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d",
4030 				__func__, qdf_atomic_read(&sc->pm_state), ret);
4031 		}
4032 
4033 		return ret;
4034 	}
4035 
4036 	sc->pm_stats.request_resume++;
4037 	sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
	hif_pm_request_resume(sc->dev);
4039 
4040 	return -EAGAIN;
4041 }
4042 
4043 /**
4044  * hif_pm_runtime_put() - do a put opperation on the device
4045  *
4046  * A put opperation will allow a runtime suspend after a corresponding
4047  * get was done.  This api should be used when sending data.
4048  *
4049  * This api will return a failure if runtime pm is stopped
4050  * This api will return failure if it would decrement the usage count below 0.
4051  *
4052  * return: QDF_STATUS_SUCCESS if the put is performed
4053  */
4054 int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
4055 {
4056 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
4057 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4058 	int pm_state, usage_count;
4059 	char *error = NULL;
4060 
	if (!scn) {
4062 		HIF_ERROR("%s: Could not do runtime put, scn is null",
4063 				__func__);
4064 		return -EFAULT;
4065 	}
4066 	usage_count = atomic_read(&sc->dev->power.usage_count);
4067 
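	/* When runtime pm is disabled (HIF_PM_RUNTIME_STATE_NONE) the
	 * platform driver holds one usage count reference, so a count of
	 * one there, or zero anywhere, indicates a put without a get.
	 */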
4068 	if (usage_count == 1) {
4069 		pm_state = qdf_atomic_read(&sc->pm_state);
4070 
4071 		if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
4072 			error = "Ignoring unexpected put when runtime pm is disabled";
4073 
4074 	} else if (usage_count == 0) {
4075 		error = "PUT Without a Get Operation";
4076 	}
4077 
4078 	if (error) {
4079 		hif_pci_runtime_pm_warn(sc, error);
4080 		return -EINVAL;
4081 	}
4082 
4083 	sc->pm_stats.runtime_put++;
4084 
4085 	hif_pm_runtime_mark_last_busy(sc->dev);
4086 	hif_pm_runtime_put_auto(sc->dev);
4087 
4088 	return 0;
4089 }
4090 
4091 
4092 /**
4093  * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
4094  *                                      reason
4095  * @hif_sc: pci context
4096  * @lock: runtime_pm lock being acquired
4097  *
 * Return: 0 if successful.
4099  */
static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc *hif_sc,
					    struct hif_pm_runtime_lock *lock)
4102 {
4103 	int ret = 0;
4104 
4105 	/*
4106 	 * We shouldn't be setting context->timeout to zero here when
4107 	 * context is active as we will have a case where Timeout API's
4108 	 * for the same context called back to back.
4109 	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
4110 	 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
4111 	 * API to ensure the timeout version is no more active and
4112 	 * list entry of this context will be deleted during allow suspend.
4113 	 */
4114 	if (lock->active)
4115 		return 0;
4116 
4117 	ret = __hif_pm_runtime_get(hif_sc->dev);
4118 
4119 	/**
4120 	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
4121 	 * RPM_SUSPENDING. Any other negative value is an error.
4122 	 * We shouldn't be do runtime_put here as in later point allow
4123 	 * suspend gets called with the the context and there the usage count
4124 	 * is decremented, so suspend will be prevented.
4125 	 */
4126 
4127 	if (ret < 0 && ret != -EINPROGRESS) {
4128 		hif_sc->pm_stats.runtime_get_err++;
4129 		hif_pci_runtime_pm_warn(hif_sc,
4130 				"Prevent Suspend Runtime PM Error");
4131 	}
4132 
4133 	hif_sc->prevent_suspend_cnt++;
4134 
4135 	lock->active = true;
4136 
4137 	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);
4138 
4139 	hif_sc->pm_stats.prevent_suspend++;
4140 
4141 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4142 		hif_pm_runtime_state_to_string(
4143 			qdf_atomic_read(&hif_sc->pm_state)),
4144 					ret);
4145 
4146 	return ret;
4147 }
4148 
4149 static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
4150 		struct hif_pm_runtime_lock *lock)
4151 {
4152 	int ret = 0;
4153 	int usage_count;
4154 
4155 	if (hif_sc->prevent_suspend_cnt == 0)
4156 		return ret;
4157 
4158 	if (!lock->active)
4159 		return ret;
4160 
4161 	usage_count = atomic_read(&hif_sc->dev->power.usage_count);
4162 
4163 	/*
	 * During driver unload, the platform driver increments the usage
	 * count to prevent any runtime suspend from being triggered.
	 * So during driver load, in the HIF_PM_RUNTIME_STATE_NONE state,
	 * the usage_count should be one. Ideally this shouldn't happen, as
	 * context->active should be set for allow suspend to proceed.
	 * Handle this case here to prevent any failures.
4170 	 */
4171 	if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
4172 				&& usage_count == 1) || usage_count == 0) {
4173 		hif_pci_runtime_pm_warn(hif_sc,
4174 				"Allow without a prevent suspend");
4175 		return -EINVAL;
4176 	}
4177 
4178 	list_del(&lock->list);
4179 
4180 	hif_sc->prevent_suspend_cnt--;
4181 
4182 	lock->active = false;
4183 	lock->timeout = 0;
4184 
4185 	hif_pm_runtime_mark_last_busy(hif_sc->dev);
4186 	ret = hif_pm_runtime_put_auto(hif_sc->dev);
4187 
4188 	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
4189 		hif_pm_runtime_state_to_string(
4190 			qdf_atomic_read(&hif_sc->pm_state)),
4191 					ret);
4192 
4193 	hif_sc->pm_stats.allow_suspend++;
4194 	return ret;
4195 }
4196 
4197 /**
 * hif_pm_runtime_lock_timeout_fn() - runtime lock timeout callback
 * @data: callback data that is the pci context
 *
 * If runtime locks are acquired with a timeout, this function releases
 * the locks when the last runtime lock expires.
4205  */
4206 static void hif_pm_runtime_lock_timeout_fn(unsigned long data)
4207 {
4208 	struct hif_pci_softc *hif_sc = (struct hif_pci_softc *)data;
4209 	unsigned long timer_expires;
4210 	struct hif_pm_runtime_lock *context, *temp;
4211 
4212 	spin_lock_bh(&hif_sc->runtime_lock);
4213 
4214 	timer_expires = hif_sc->runtime_timer_expires;
4215 
	/* Make sure we are not called too early; this should take care of
	 * the following case:
4218 	 *
4219 	 * CPU0                         CPU1 (timeout function)
4220 	 * ----                         ----------------------
4221 	 * spin_lock_irq
4222 	 *                              timeout function called
4223 	 *
4224 	 * mod_timer()
4225 	 *
4226 	 * spin_unlock_irq
4227 	 *                              spin_lock_irq
4228 	 */
4229 	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
4230 		hif_sc->runtime_timer_expires = 0;
4231 		list_for_each_entry_safe(context, temp,
4232 				&hif_sc->prevent_suspend_list, list) {
4233 			if (context->timeout) {
4234 				__hif_pm_runtime_allow_suspend(hif_sc, context);
4235 				hif_sc->pm_stats.allow_suspend_timeout++;
4236 			}
4237 		}
4238 	}
4239 
4240 	spin_unlock_bh(&hif_sc->runtime_lock);
4241 }
4242 
4243 int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
4244 		struct hif_pm_runtime_lock *data)
4245 {
4246 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4247 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4248 	struct hif_pm_runtime_lock *context = data;
4249 
4250 	if (!sc->hif_config.enable_runtime_pm)
4251 		return 0;
4252 
4253 	if (!context)
4254 		return -EINVAL;
4255 
	WARN_ON(in_irq());
4258 
4259 	spin_lock_bh(&hif_sc->runtime_lock);
4260 	context->timeout = 0;
4261 	__hif_pm_runtime_prevent_suspend(hif_sc, context);
4262 	spin_unlock_bh(&hif_sc->runtime_lock);
4263 
4264 	return 0;
4265 }
4266 
4267 int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
4268 				struct hif_pm_runtime_lock *data)
4269 {
4270 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4271 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
4272 	struct hif_pm_runtime_lock *context = data;
4273 
4274 	if (!sc->hif_config.enable_runtime_pm)
4275 		return 0;
4276 
4277 	if (!context)
4278 		return -EINVAL;
4279 
	WARN_ON(in_irq());
4282 
4283 	spin_lock_bh(&hif_sc->runtime_lock);
4284 
4285 	__hif_pm_runtime_allow_suspend(hif_sc, context);
4286 
	/* The list can also be empty, e.g. when there is one context in
	 * the list and allow suspend arrived before the timer expired,
	 * so the context was deleted from the list above.
	 * When the list is empty, the prevent_suspend count is zero.
4292 	 */
4293 	if (hif_sc->prevent_suspend_cnt == 0 &&
4294 			hif_sc->runtime_timer_expires > 0) {
4295 		del_timer(&hif_sc->runtime_timer);
4296 		hif_sc->runtime_timer_expires = 0;
4297 	}
4298 
4299 	spin_unlock_bh(&hif_sc->runtime_lock);
4300 
4301 	return 0;
4302 }
4303 
4304 /**
4305  * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
4306  * @ol_sc: HIF context
4307  * @lock: which lock is being acquired
4308  * @delay: Timeout in milliseconds
4309  *
 * Prevent runtime suspend, with a timeout after which runtime suspend is
 * allowed again. This API uses a single timer to allow the suspend; the
 * timer is modified if the timeout is changed before it fires.
 * If the timeout is less than the autosuspend delay, mark_last_busy is
 * used instead of starting the timer.
 *
 * Prefer not to use this API; correct the design instead, if possible.
4317  *
4318  * Return: 0 on success and negative error code on failure
4319  */
4320 int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
4321 		struct hif_pm_runtime_lock *lock, unsigned int delay)
4322 {
4323 	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
4324 	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);
4325 
4326 	int ret = 0;
4327 	unsigned long expires;
4328 	struct hif_pm_runtime_lock *context = lock;
4329 
4330 	if (hif_is_load_or_unload_in_progress(sc)) {
4331 		HIF_ERROR("%s: Load/unload in progress, ignore!",
4332 				__func__);
4333 		return -EINVAL;
4334 	}
4335 
4336 	if (hif_is_recovery_in_progress(sc)) {
4337 		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
4338 		return -EINVAL;
4339 	}
4340 
4341 	if (!sc->hif_config.enable_runtime_pm)
4342 		return 0;
4343 
4344 	if (!context)
4345 		return -EINVAL;
4346 
	WARN_ON(in_irq());
4349 
4350 	/*
4351 	 * Don't use internal timer if the timeout is less than auto suspend
4352 	 * delay.
4353 	 */
4354 	if (delay <= hif_sc->dev->power.autosuspend_delay) {
4355 		hif_pm_request_resume(hif_sc->dev);
4356 		hif_pm_runtime_mark_last_busy(hif_sc->dev);
4357 		return ret;
4358 	}
4359 
4360 	expires = jiffies + msecs_to_jiffies(delay);
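	/* runtime_timer_expires == 0 means no timer pending, so make sure
	 * a wrapped value of zero is never used
	 */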
4361 	expires += !expires;
4362 
4363 	spin_lock_bh(&hif_sc->runtime_lock);
4364 
4365 	context->timeout = delay;
4366 	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
4367 	hif_sc->pm_stats.prevent_suspend_timeout++;
4368 
4369 	/* Modify the timer only if new timeout is after already configured
4370 	 * timeout
4371 	 */
4372 	if (time_after(expires, hif_sc->runtime_timer_expires)) {
4373 		mod_timer(&hif_sc->runtime_timer, expires);
4374 		hif_sc->runtime_timer_expires = expires;
4375 	}
4376 
4377 	spin_unlock_bh(&hif_sc->runtime_lock);
4378 
4379 	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
4380 		hif_pm_runtime_state_to_string(
4381 			qdf_atomic_read(&hif_sc->pm_state)),
4382 					delay, ret);
4383 
4384 	return ret;
4385 }
4386 
4387 /**
4388  * hif_runtime_lock_init() - API to initialize Runtime PM context
4389  * @name: Context name
4390  *
4391  * This API initalizes the Runtime PM context of the caller and
4392  * return the pointer.
4393  *
4394  * Return: None
4395  */
4396 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
4397 {
4398 	struct hif_pm_runtime_lock *context;
4399 
4400 	HIF_INFO("Initializing Runtime PM wakelock %s", name);
4401 
4402 	context = qdf_mem_malloc(sizeof(*context));
4403 	if (!context) {
4404 		HIF_ERROR("%s: No memory for Runtime PM wakelock context",
4405 			  __func__);
4406 		return -ENOMEM;
4407 	}
4408 
4409 	context->name = name ? name : "Default";
4410 	lock->lock = context;
4411 
4412 	return 0;
4413 }
4414 
4415 /**
4416  * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
4417  * @data: Runtime PM context
4418  *
4419  * Return: void
4420  */
4421 void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
4422 			     struct hif_pm_runtime_lock *data)
4423 {
4424 	struct hif_pm_runtime_lock *context = data;
4425 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
4426 
4427 	if (!context) {
4428 		HIF_ERROR("Runtime PM wakelock context is NULL");
4429 		return;
4430 	}
4431 
4432 	HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);
4433 
4434 	/*
4435 	 * Ensure to delete the context list entry and reduce the usage count
4436 	 * before freeing the context if context is active.
4437 	 */
4438 	if (sc) {
4439 		spin_lock_bh(&sc->runtime_lock);
4440 		__hif_pm_runtime_allow_suspend(sc, context);
4441 		spin_unlock_bh(&sc->runtime_lock);
4442 	}
4443 
4444 	qdf_mem_free(context);
4445 }
4446 #endif /* FEATURE_RUNTIME_PM */
4447 
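/**
 * hif_pci_legacy_map_ce_to_irq() - map a copy engine to the legacy irq
 * @scn: hif context
 * @ce_id: copy engine id (unused; all CEs share the single line interrupt)
 *
 * Return: the one legacy PCI irq number
 */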
4448 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
4449 {
4450 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4451 
4452 	/* legacy case only has one irq */
4453 	return pci_scn->irq;
4454 }
4455 
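/**
 * hif_pci_addr_in_boundary() - check that a target offset is readable
 * @scn: hif context
 * @offset: offset from the start of the mapped BAR
 *
 * Return: 0 if a 4 byte read at @offset stays inside the target DRAM
 *	window or the mapped BAR, -EINVAL otherwise
 */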
4456 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
4457 {
4458 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
4459 	struct hif_target_info *tgt_info;
4460 
4461 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
4462 
4463 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
4464 	    tgt_info->target_type == TARGET_TYPE_QCA8074) {
4465 		/*
4466 		 * Need to consider offset's memtype for QCA6290/QCA8074,
4467 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
4468 		 * well initialized/defined.
4469 		 */
4470 		return 0;
4471 	}
4472 
	if ((offset >= DRAM_BASE_ADDRESS &&
	     offset <= DRAM_BASE_ADDRESS + DRAM_SIZE) ||
	    (offset + sizeof(unsigned int) <= sc->mem_len)) {
		return 0;
	}

	HIF_TRACE("Refusing to read memory at 0x%x - 0x%zx (max 0x%zx)",
		 offset, offset + sizeof(unsigned int), sc->mem_len);
4480 
4481 	return -EINVAL;
4482 }
4483