/*
 * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: if_ahb.c
 *
 * C file for the AHB bus-specific HIF implementation.
 */

#include "hif.h"
#include "target_type.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_tasklet.h"
#include "if_ahb.h"
#include "if_pci.h"
#include "ahb_api.h"
#include "pci_api.h"
#include "hif_napi.h"
#include "qal_vbus_dev.h"

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
/*
 * IRQF_DISABLED was removed from the kernel in 4.1; keep the legacy flag
 * value so the request_irq() call sites in this file still compile.
 */
#define IRQF_DISABLED 0x00000020
#endif

#define HIF_IC_CE0_IRQ_OFFSET 4
#define HIF_IC_MAX_IRQ 52

static uint16_t ic_irqnum[HIF_IC_MAX_IRQ];
/* integrated chip irq names */
const char *ic_irqname[HIF_IC_MAX_IRQ] = {
"misc-pulse1",
"misc-latch",
"sw-exception",
"watchdog",
"ce0",
"ce1",
"ce2",
"ce3",
"ce4",
"ce5",
"ce6",
"ce7",
"ce8",
"ce9",
"ce10",
"ce11",
"host2wbm-desc-feed",
"host2reo-re-injection",
"host2reo-command",
"host2rxdma-monitor-ring3",
"host2rxdma-monitor-ring2",
"host2rxdma-monitor-ring1",
"reo2ost-exception",
"wbm2host-rx-release",
"reo2host-status",
"reo2host-destination-ring4",
"reo2host-destination-ring3",
"reo2host-destination-ring2",
"reo2host-destination-ring1",
"rxdma2host-monitor-destination-mac3",
"rxdma2host-monitor-destination-mac2",
"rxdma2host-monitor-destination-mac1",
"ppdu-end-interrupts-mac3",
"ppdu-end-interrupts-mac2",
"ppdu-end-interrupts-mac1",
"rxdma2host-monitor-status-ring-mac3",
"rxdma2host-monitor-status-ring-mac2",
"rxdma2host-monitor-status-ring-mac1",
"host2rxdma-host-buf-ring-mac3",
"host2rxdma-host-buf-ring-mac2",
"host2rxdma-host-buf-ring-mac1",
"rxdma2host-destination-ring-mac3",
"rxdma2host-destination-ring-mac2",
"rxdma2host-destination-ring-mac1",
"host2tcl-input-ring4",
"host2tcl-input-ring3",
"host2tcl-input-ring2",
"host2tcl-input-ring1",
"wbm2host-tx-completions-ring3",
"wbm2host-tx-completions-ring2",
"wbm2host-tx-completions-ring1",
"tcl2host-status-ring",
};

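/*
 * Note: ic_irqname[] lists the platform interrupt resource names in the
 * order used by the integrated-chip interrupt map. CE interrupts start at
 * HIF_IC_CE0_IRQ_OFFSET, so, for example, CE0 is resolved through
 * ic_irqname[HIF_IC_CE0_IRQ_OFFSET + 0], i.e. "ce0". The Linux irq numbers
 * returned by pfrm_get_irq() are cached in ic_irqnum[] at the same index
 * when the irqs are requested below.
 */
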
/**
 * hif_ahb_get_irq_name() - get irqname
 * @irq_no: irq number
 *
 * This function returns the irq name for a given
 * irq number.
 *
 * Return: irq name
 */
const char *hif_ahb_get_irq_name(int irq_no)
{
	return ic_irqname[irq_no];
}

/**
 * hif_ahb_disable_isr() - disable isr
 * @scn: struct hif_softc
 *
 * This function disables interrupts and kills the tasklets.
 *
 * Return: void
 */
void hif_ahb_disable_isr(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	hif_exec_kill(&scn->osc);
	hif_nointrs(scn);
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}

/**
 * hif_ahb_dump_registers() - dump bus debug registers
 * @hif_ctx: struct hif_softc
 *
 * This function dumps hif bus debug registers
 *
 * Return: 0 for success or error code
 */
int hif_ahb_dump_registers(struct hif_softc *hif_ctx)
{
	int status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	status = hif_dump_ce_registers(scn);
	if (status)
		HIF_ERROR("%s: Dump CE Registers Failed status %d", __func__,
			  status);

	return 0;
}

/**
 * hif_ahb_close() - hif_bus_close
 * @scn: pointer to the hif context.
 *
 * This is a callback function for hif_bus_close.
 *
 * Return: n/a
 */
void hif_ahb_close(struct hif_softc *scn)
{
	hif_ce_close(scn);
}

/**
 * hif_ahb_open() - hif_bus_open for the ahb bus
 * @hif_ctx: hif context
 * @bus_type: bus type
 *
 * This is a callback function for hif_bus_open.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	qdf_spinlock_create(&sc->irq_lock);
	return hif_ce_open(hif_ctx);
}

/**
 * hif_ahb_bus_configure() - configure the bus
 * @scn: pointer to the hif context.
 *
 * This function configures the ahb bus
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_ahb_bus_configure(struct hif_softc *scn)
{
	return hif_pci_bus_configure(scn);
}

/**
 * hif_configure_msi_ahb() - configure MSI interrupts
 * @sc: pointer to the hif context
 *
 * MSI is not used on the ahb bus; this is a stub.
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_configure_msi_ahb(struct hif_pci_softc *sc)
{
	return 0;
}

/**
 * hif_ahb_configure_legacy_irq() - Configure Legacy IRQ
 * @sc: pointer to the hif context.
 *
 * This function registers the irq handler and enables legacy interrupts
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
{
	int ret = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	int irq = 0;

	/* MSI is not supported or MSI setup failed; use legacy interrupts */
	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
	qal_vbus_get_irq((struct qdf_pfm_hndl *)pdev, "legacy", &irq);
	if (irq < 0) {
		dev_err(&pdev->dev, "Unable to get irq\n");
		ret = -EFAULT;
		goto end;
	}
	ret = request_irq(irq, hif_pci_legacy_ce_interrupt_handler,
			  IRQF_DISABLED, "wlan_ahb", sc);
	if (ret) {
		dev_err(&pdev->dev, "ath_request_irq failed\n");
		ret = -EFAULT;
		goto end;
	}
	sc->irq = irq;

	/* Use Legacy PCI Interrupts */
	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
		       PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	/* read once to flush the posted write */
	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS));

end:
	return ret;
}

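/**
 * hif_ahb_configure_irq() - configure per-CE interrupts
 * @sc: pointer to the hif context.
 *
 * This function requests one interrupt per copy engine (skipping CEs
 * marked CE_ATTR_DISABLE_INTR) and enables it.
 *
 * Return: 0 for success. nonzero for failure.
 */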
int hif_ahb_configure_irq(struct hif_pci_softc *sc)
{
	int ret = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *host_ce_conf = hif_state->host_ce_config;
	int irq = 0;
	int i;

	/* configure per CE interrupts */
	for (i = 0; i < scn->ce_count; i++) {
		if (host_ce_conf[i].flags & CE_ATTR_DISABLE_INTR)
			continue;
		ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
				   ic_irqname[HIF_IC_CE0_IRQ_OFFSET + i],
				   HIF_IC_CE0_IRQ_OFFSET + i, &irq);
		if (ret) {
			dev_err(&pdev->dev, "get irq failed\n");
			ret = -EFAULT;
			goto end;
		}

		ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i] = irq;
		ret = pfrm_request_irq(&pdev->dev, irq,
				       hif_ahb_interrupt_handler,
				       IRQF_TRIGGER_RISING,
				       ic_irqname[HIF_IC_CE0_IRQ_OFFSET + i],
				       &hif_state->tasklets[i]);
		if (ret) {
			dev_err(&pdev->dev, "ath_request_irq failed\n");
			ret = -EFAULT;
			goto end;
		}
		hif_ahb_irq_enable(scn, i);
	}

end:
	return ret;
}

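/**
 * hif_ahb_configure_grp_irq() - configure external (group) interrupts
 * @scn: pointer to the hif context.
 * @hif_ext_group: hif exec context for the interrupt group
 *
 * This function installs the group irq callbacks, resolves the os irq
 * numbers for the group and requests them.
 *
 * Return: 0 for success. nonzero for failure.
 */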
int hif_ahb_configure_grp_irq(struct hif_softc *scn,
			      struct hif_exec_context *hif_ext_group)
{
	int ret = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	int irq = 0;
	int j;

	/* configure external interrupts */
	hif_ext_group->irq_enable = &hif_ahb_exec_grp_irq_enable;
	hif_ext_group->irq_disable = &hif_ahb_exec_grp_irq_disable;
	hif_ext_group->irq_name = &hif_ahb_get_irq_name;
	hif_ext_group->work_complete = &hif_dummy_grp_done;

	for (j = 0; j < hif_ext_group->numirq; j++) {
		ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
				   ic_irqname[hif_ext_group->irq[j]],
				   hif_ext_group->irq[j], &irq);
		if (ret) {
			dev_err(&pdev->dev, "get irq failed\n");
			return -EFAULT;
		}
		ic_irqnum[hif_ext_group->irq[j]] = irq;
		hif_ext_group->os_irq[j] = irq;
	}

	/* Request the irqs without holding irq_lock: request_irq() can
	 * sleep, and the irq handler takes the same lock (see the note in
	 * hif_ahb_deconfigure_grp_irq()).
	 */
	for (j = 0; j < hif_ext_group->numirq; j++) {
		irq = hif_ext_group->os_irq[j];
		irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
		ret = pfrm_request_irq(scn->qdf_dev->dev,
				       irq, hif_ext_group_interrupt_handler,
				       IRQF_TRIGGER_RISING,
				       ic_irqname[hif_ext_group->irq[j]],
				       hif_ext_group);
		if (ret) {
			dev_err(&pdev->dev, "ath_request_irq failed\n");
			return -EFAULT;
		}
	}

	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
	hif_ext_group->irq_requested = true;
	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);

	return ret;
}

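/**
 * hif_ahb_deconfigure_grp_irq() - deconfigure external (group) interrupts
 * @scn: pointer to the hif context.
 *
 * This function clears the unlazy-disable status flag and frees the irqs
 * previously requested for each external interrupt group.
 *
 * Return: none
 */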
void hif_ahb_deconfigure_grp_irq(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i, j;
	int irq = 0;

	/* deconfigure external interrupts */
	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		if (hif_ext_group->irq_requested == true) {
			qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
			hif_ext_group->irq_requested = false;
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				hif_ext_group->irq_enabled = false;
				irq_clear_status_flags(irq,
						       IRQ_DISABLE_UNLAZY);
			}
			qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);

			/* Avoid holding the irq_lock while freeing the irq
			 * as the same lock is being held by the irq handler
			 * while disabling the irq. This causes a deadlock
			 * between free_irq and irq_handler.
			 */
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				pfrm_free_irq(scn->qdf_dev->dev,
					      irq, hif_ext_group);
			}
		}
	}
}

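/**
 * hif_ahb_interrupt_handler() - per-CE interrupt handler
 * @irq: irq number
 * @context: the ce_tasklet_entry registered for this irq
 *
 * Return: irqreturn_t as reported by ce_dispatch_interrupt()
 */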
irqreturn_t hif_ahb_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;

	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}

/**
 * hif_target_sync_ahb() - ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interrupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the target's other registers for the first time.
 *
 * Return: 0 on success, -EIO if the fw signal times out
 */
int hif_target_sync_ahb(struct hif_softc *scn)
{
	int val = 0;
	int limit = 0;

	while (limit < 50) {
		hif_write32_mb(scn, scn->mem +
			(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS),
			PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
		qdf_mdelay(10);
		val = hif_read32_mb(scn, scn->mem +
			(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
		if (val == 0)
			break;
		limit++;
	}
	hif_write32_mb(scn, scn->mem +
		(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS),
		PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	hif_write32_mb(scn, scn->mem + FW_INDICATOR_ADDRESS, FW_IND_HOST_READY);
	if (HAS_FW_INDICATOR) {
		int wait_limit = 500;
		int fw_ind = 0;

		while (1) {
			fw_ind = hif_read32_mb(scn, scn->mem +
					FW_INDICATOR_ADDRESS);
			if (fw_ind & FW_IND_INITIALIZED)
				break;
			if (wait_limit-- < 0)
				break;
			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				PCIE_INTR_FIRMWARE_MASK);
			qdf_mdelay(10);
		}
		if (wait_limit < 0) {
			HIF_TRACE("%s: FW signal timed out", __func__);
			return -EIO;
		}
		HIF_TRACE("%s: Got FW signal, retries = %x", __func__,
			  500 - wait_limit);
	}

	return 0;
}

/**
 * hif_ahb_disable_bus() - Disable the bus
 * @scn: pointer to the hif context
 *
 * This function disables the bus and holds the target in reset state
 *
 * Return: none
 */
void hif_ahb_disable_bus(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem;
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	struct resource *memres = NULL;
	int mem_pa_size = 0;
	struct hif_target_info *tgt_info = NULL;
	struct qdf_vbus_resource *vmres = NULL;
	QDF_STATUS status;

	tgt_info = &scn->target_info;
	/* Disable WIFI clock input */
	if (sc->mem) {
		status = pfrm_platform_get_resource(
				scn->qdf_dev->dev,
				(struct qdf_pfm_hndl *)pdev, &vmres,
				IORESOURCE_MEM, 0);
		if (QDF_IS_STATUS_ERROR(status)) {
			HIF_INFO("%s: Failed to get IORESOURCE_MEM\n",
				 __func__);
			return;
		}
		memres = (struct resource *)vmres;
		if (memres)
			mem_pa_size = memres->end - memres->start + 1;

		/* Should not be executed on 8074 platform */
		if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA8074V2) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA5018) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA6018)) {
			hif_ahb_clk_enable_disable(&pdev->dev, 0);

			hif_ahb_device_reset(scn);
		}
		if (tgt_info->target_type == TARGET_TYPE_QCA5018) {
			iounmap(sc->mem_ce);
			sc->mem_ce = NULL;
			scn->mem_ce = NULL;
		}
		mem = (void __iomem *)sc->mem;
		if (mem) {
			pfrm_devm_iounmap(&pdev->dev, mem);
			pfrm_devm_release_mem_region(&pdev->dev, scn->mem_pa,
						     mem_pa_size);
			sc->mem = NULL;
		}
	}
	scn->mem = NULL;
}

/**
 * hif_ahb_enable_bus() - Enable the bus
 * @ol_sc: hif context
 * @dev: dev
 * @bdev: bus dev
 * @bid: bus id
 * @type: bus type
 *
 * This function enables the radio bus by enabling necessary
 * clocks and waits for the target to get ready to proceed further
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_ahb_enable_bus(struct hif_softc *ol_sc,
		struct device *dev, void *bdev,
		const struct hif_bus_id *bid,
		enum hif_enable_type type)
{
	int ret = 0;
	int hif_type;
	int target_type;
	const struct platform_device_id *id = (struct platform_device_id *)bid;
	struct platform_device *pdev = bdev;
	struct hif_target_info *tgt_info = NULL;
	struct resource *memres = NULL;
	void __iomem *mem = NULL;
	uint32_t revision_id = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
	QDF_STATUS status;
	struct qdf_vbus_resource *vmres = NULL;

	sc->pdev = (struct pci_dev *)pdev;
	sc->dev = &pdev->dev;
	sc->devid = id->driver_data;

	ret = hif_get_device_type(id->driver_data, revision_id,
			&hif_type, &target_type);
	if (ret < 0) {
		HIF_ERROR("%s: invalid device ret %d id %d revision_id %d",
			__func__, ret, (int)id->driver_data, revision_id);
		return QDF_STATUS_E_FAILURE;
	}

	status = pfrm_platform_get_resource(&pdev->dev,
					    (struct qdf_pfm_hndl *)pdev,
					    &vmres,
					    IORESOURCE_MEM, 0);
	if (QDF_IS_STATUS_ERROR(status)) {
		HIF_INFO("%s: Failed to get IORESOURCE_MEM\n", __func__);
		return status;
	}
	memres = (struct resource *)vmres;
	if (!memres) {
		HIF_INFO("%s: Failed to get IORESOURCE_MEM\n", __func__);
		return QDF_STATUS_E_IO;
	}

	ret = pfrm_dma_set_mask(dev, 32);
	if (ret) {
		HIF_INFO("ath: 32-bit DMA not available\n");
		status = QDF_STATUS_E_IO;
		goto err_cleanup1;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
	ret = pfrm_dma_set_mask_and_coherent(dev, 32);
#else
	ret = pfrm_dma_set_coherent_mask(dev, 32);
#endif
	if (ret) {
		HIF_ERROR("%s: failed to set dma mask error = %d",
				__func__, ret);
		return QDF_STATUS_E_IO;
	}

	/* Arrange for access to Target SoC registers. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
	status = pfrm_devm_ioremap_resource(dev,
					    (struct qdf_vbus_resource *)memres,
					    &mem);
#else
	status = pfrm_devm_request_and_ioremap(
					dev,
					(struct qdf_vbus_resource *)memres,
					&mem);
#endif
	if (QDF_IS_STATUS_ERROR(status)) {
		HIF_INFO("ath: ioremap error\n");
		status = QDF_STATUS_E_IO;
		goto err_cleanup1;
	}

	sc->mem = mem;
	ol_sc->mem = mem;
	ol_sc->mem_pa = memres->start;

	tgt_info = hif_get_target_info_handle((struct hif_opaque_softc *)ol_sc);

	tgt_info->target_type = target_type;
	hif_register_tbl_attach(ol_sc, hif_type);
	hif_target_register_tbl_attach(ol_sc, target_type);
	/*
	 * In QCA5018 the CE region moved to the SoC outside the WCSS block.
	 * Allocate a separate I/O remap to access the CE registers.
	 */
	if (tgt_info->target_type == TARGET_TYPE_QCA5018) {
		struct hif_softc *scn = HIF_GET_SOFTC(sc);

		sc->mem_ce = ioremap_nocache(HOST_CE_ADDRESS, HOST_CE_SIZE);
		if (IS_ERR(sc->mem_ce)) {
			HIF_INFO("CE: ioremap failed\n");
			return QDF_STATUS_E_IO;
		}
		ol_sc->mem_ce = sc->mem_ce;
	}

	if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
			(tgt_info->target_type != TARGET_TYPE_QCA8074V2) &&
			(tgt_info->target_type != TARGET_TYPE_QCA5018) &&
			(tgt_info->target_type != TARGET_TYPE_QCA6018)) {
		if (hif_ahb_enable_radio(sc, pdev, id) != 0) {
			HIF_INFO("error in enabling soc\n");
			return QDF_STATUS_E_IO;
		}

		if (hif_target_sync_ahb(ol_sc) < 0) {
			status = QDF_STATUS_E_IO;
			goto err_target_sync;
		}
	}
	HIF_TRACE("%s: X - hif_type = 0x%x, target_type = 0x%x",
			__func__, hif_type, target_type);

	return QDF_STATUS_SUCCESS;
err_target_sync:
	if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA8074V2) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA5018) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA6018)) {
		HIF_INFO("Error: Disabling target\n");
		hif_ahb_disable_bus(ol_sc);
	}
err_cleanup1:
	return status;
}

/**
 * hif_ahb_reset_soc() - reset soc
 * @hif_ctx: HIF context
 *
 * This function resets the soc and holds the
 * target in reset state
 *
 * Return: void
 */
void hif_ahb_reset_soc(struct hif_softc *hif_ctx)
{
	hif_ahb_device_reset(hif_ctx);
}

/**
 * hif_ahb_nointrs() - disable IRQ
 * @scn: struct hif_softc
 *
 * This function stops interrupt(s)
 *
 * Return: none
 */
void hif_ahb_nointrs(struct hif_softc *scn)
{
	int i;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *host_ce_conf = hif_state->host_ce_config;

	ce_unregister_irq(hif_state, CE_ALL_BITMAP);

	if (scn->request_irq_done == false)
		return;

	if (sc->num_msi_intrs > 0) {
		/* MSI interrupt(s) */
		for (i = 0; i < sc->num_msi_intrs; i++) {
			pfrm_free_irq(scn->qdf_dev->dev, sc->irq + i, sc);
		}
		sc->num_msi_intrs = 0;
	} else {
		if (!scn->per_ce_irq) {
			pfrm_free_irq(scn->qdf_dev->dev, sc->irq, sc);
		} else {
			for (i = 0; i < scn->ce_count; i++) {
				if (host_ce_conf[i].flags
						& CE_ATTR_DISABLE_INTR)
					continue;

				pfrm_free_irq(
					scn->qdf_dev->dev,
					ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i],
					&hif_state->tasklets[i]);
			}
			hif_ahb_deconfigure_grp_irq(scn);
		}
	}
	scn->request_irq_done = false;
}

/**
 * hif_ahb_irq_enable() - enable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * This function enables the interrupt for the radio.
 *
 * Return: N/A
 */
void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval |= HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval |= HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Enable destination ring interrupts for
				 * 8074, 8074V2, 5018 and 6018
				 */
				regval = hif_read32_mb(scn, mem +
					HOST_IE_ADDRESS_3);
				regval |= HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	} else {
		hif_pci_irq_enable(scn, ce_id);
	}
}

/**
 * hif_ahb_irq_disable() - disable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * Return: N/A
 */
void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval &= ~HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval &= ~HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Disable destination ring interrupts for
				 * 8074, 8074V2, 5018 and 6018
				 */
				regval = hif_read32_mb(scn, mem +
					HOST_IE_ADDRESS_3);
				regval &= ~HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	}
}

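/**
 * hif_ahb_exec_grp_irq_disable() - disable the irqs of an exec group
 * @hif_ext_group: hif exec context
 *
 * Return: none
 */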
void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
{
	int i;

	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
	if (hif_ext_group->irq_enabled) {
		for (i = 0; i < hif_ext_group->numirq; i++) {
			disable_irq_nosync(hif_ext_group->os_irq[i]);
		}
		hif_ext_group->irq_enabled = false;
	}
	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
}

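/**
 * hif_ahb_exec_grp_irq_enable() - enable the irqs of an exec group
 * @hif_ext_group: hif exec context
 *
 * Return: none
 */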
void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
{
	int i;

	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
	if (hif_ext_group->irq_requested && !hif_ext_group->irq_enabled) {
		for (i = 0; i < hif_ext_group->numirq; i++) {
			enable_irq(hif_ext_group->os_irq[i]);
		}
		hif_ext_group->irq_enabled = true;
	}
	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
}

/**
 * hif_ahb_needs_bmi() - return true if the soc needs bmi through the driver
 * @scn: hif context
 *
 * Return: true if soc needs driver bmi otherwise false
 */
bool hif_ahb_needs_bmi(struct hif_softc *scn)
{
	return !ce_srng_based(scn);
}

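/**
 * hif_ahb_display_stats() - display copy engine stats
 * @scn: struct hif_softc
 *
 * Return: none
 */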
void hif_ahb_display_stats(struct hif_softc *scn)
{
	if (!scn) {
		HIF_ERROR("%s, hif_scn null", __func__);
		return;
	}
	hif_display_ce_stats(scn);
}

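/**
 * hif_ahb_clear_stats() - clear copy engine stats
 * @scn: struct hif_softc
 *
 * Return: none
 */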
void hif_ahb_clear_stats(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (!hif_state) {
		HIF_ERROR("%s, hif_state null", __func__);
		return;
	}
	hif_clear_ce_stats(hif_state);
}
887