xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/snoc/if_ahb.c (revision 61847a9ba603a01db266ed3fc7b701042e266d14)
1 /*
2  * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: if_ahb.c
21  *
22  * c file for ahb specific implementations.
23  */
24 
25 #include "hif.h"
26 #include "target_type.h"
27 #include "hif_main.h"
28 #include "hif_debug.h"
29 #include "hif_io32.h"
30 #include "ce_main.h"
31 #include "ce_api.h"
32 #include "ce_tasklet.h"
33 #include "if_ahb.h"
34 #include "if_pci.h"
35 #include "ahb_api.h"
36 #include "pci_api.h"
37 #include "hif_napi.h"
38 #include "qal_vbus_dev.h"
39 
40 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
41 #define IRQF_DISABLED 0x00000020
42 #endif
43 
44 #define HIF_IC_CE0_IRQ_OFFSET 4
45 #define HIF_IC_MAX_IRQ 52
46 
/* OS irq numbers, indexed the same way as ic_irqname[]; entries are filled
 * in when the irqs are requested (hif_ahb_configure_irq() /
 * hif_ahb_configure_grp_irq()).
 */
static uint8_t ic_irqnum[HIF_IC_MAX_IRQ];
/* integrated chip irq names
 * These strings are looked up in the platform device (devicetree), so they
 * must match the DT interrupt-names exactly.  Index 4
 * (HIF_IC_CE0_IRQ_OFFSET) onward maps to copy engines 0..11.
 */
const char *ic_irqname[HIF_IC_MAX_IRQ] = {
"misc-pulse1",
"misc-latch",
"sw-exception",
"watchdog",
"ce0",
"ce1",
"ce2",
"ce3",
"ce4",
"ce5",
"ce6",
"ce7",
"ce8",
"ce9",
"ce10",
"ce11",
"host2wbm-desc-feed",
"host2reo-re-injection",
"host2reo-command",
"host2rxdma-monitor-ring3",
"host2rxdma-monitor-ring2",
"host2rxdma-monitor-ring1",
"reo2ost-exception", /* NOTE(review): looks like a typo of "reo2host-…" but
		      * must stay byte-identical to the DT entry — verify
		      * against the devicetree before renaming
		      */
"wbm2host-rx-release",
"reo2host-status",
"reo2host-destination-ring4",
"reo2host-destination-ring3",
"reo2host-destination-ring2",
"reo2host-destination-ring1",
"rxdma2host-monitor-destination-mac3",
"rxdma2host-monitor-destination-mac2",
"rxdma2host-monitor-destination-mac1",
"ppdu-end-interrupts-mac3",
"ppdu-end-interrupts-mac2",
"ppdu-end-interrupts-mac1",
"rxdma2host-monitor-status-ring-mac3",
"rxdma2host-monitor-status-ring-mac2",
"rxdma2host-monitor-status-ring-mac1",
"host2rxdma-host-buf-ring-mac3",
"host2rxdma-host-buf-ring-mac2",
"host2rxdma-host-buf-ring-mac1",
"rxdma2host-destination-ring-mac3",
"rxdma2host-destination-ring-mac2",
"rxdma2host-destination-ring-mac1",
"host2tcl-input-ring4",
"host2tcl-input-ring3",
"host2tcl-input-ring2",
"host2tcl-input-ring1",
"wbm2host-tx-completions-ring3",
"wbm2host-tx-completions-ring2",
"wbm2host-tx-completions-ring1",
"tcl2host-status-ring",
};
103 
/**
 * hif_ahb_disable_isr() - disable isr
 * @scn: struct hif_softc
 *
 * This function disables isr and kills tasklets
 *
 * Return: void
 */
void hif_ahb_disable_isr(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	hif_exec_kill(&scn->osc);
	hif_nointrs(scn);
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	/* no tasklet can run past this point; reset the pending counters */
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}
124 
/**
 * hif_ahb_dump_registers() - dump bus debug registers
 * @hif_ctx: struct hif_softc
 *
 * This function dumps the hif copy-engine debug registers.  Dump
 * failures are logged but not propagated.
 *
 * Return: 0 always
 */
int hif_ahb_dump_registers(struct hif_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	int status = hif_dump_ce_registers(scn);

	if (status)
		HIF_ERROR("%s: Dump CE Registers Failed status %d", __func__,
			  status);

	return 0;
}
145 
/**
 * hif_ahb_close() - hif_bus_close
 * @scn: pointer to the hif context.
 *
 * This is a callback function for hif_bus_close; it delegates the
 * teardown to hif_ce_close() (the CE state was set up by hif_ahb_open()).
 *
 * Return: n/a
 */
void hif_ahb_close(struct hif_softc *scn)
{
	hif_ce_close(scn);
}
159 
160 /**
161  * hif_bus_open() - hif_ahb open
162  * @hif_ctx: hif context
163  * @bus_type: bus type
164  *
165  * This is a callback function for hif_bus_open.
166  *
167  * Return: n/a
168  */
169 QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
170 {
171 
172 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
173 
174 	qdf_spinlock_create(&sc->irq_lock);
175 	return hif_ce_open(hif_ctx);
176 }
177 
/**
 * hif_ahb_bus_configure() - Configure the bus
 * @scn: pointer to the hif context.
 *
 * This function configures the ahb bus; the configuration is shared
 * with the PCI path, so it delegates to hif_pci_bus_configure().
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_ahb_bus_configure(struct hif_softc *scn)
{
	return hif_pci_bus_configure(scn);
}
190 
/**
 * hif_configure_msi_ahb() - Configure MSI interrupts
 * @sc: pointer to the hif context
 *
 * Stub: MSI configuration is a no-op on the AHB bus; always reports
 * success.
 *
 * Return: 0 for success. nonzero for failure.
 */

int hif_configure_msi_ahb(struct hif_pci_softc *sc)
{
	return 0;
}
202 
203 /**
204  * hif_ahb_configure_legacy_irq() - Configure Legacy IRQ
205  * @sc: pointer to the hif context.
206  *
207  * This function registers the irq handler and enables legacy interrupts
208  *
209  * return: 0 for success. nonzero for failure.
210  */
211 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
212 {
213 	int ret = 0;
214 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
215 	struct platform_device *pdev = (struct platform_device *)sc->pdev;
216 	int irq = 0;
217 
218 	/* do not support MSI or MSI IRQ failed */
219 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
220 	qal_vbus_get_irq((struct qdf_pfm_hndl *)pdev, "legacy", &irq);
221 	if (irq < 0) {
222 		dev_err(&pdev->dev, "Unable to get irq\n");
223 		ret = -1;
224 		goto end;
225 	}
226 	ret = request_irq(irq, hif_pci_legacy_ce_interrupt_handler,
227 				IRQF_DISABLED, "wlan_ahb", sc);
228 	if (ret) {
229 		dev_err(&pdev->dev, "ath_request_irq failed\n");
230 		ret = -1;
231 		goto end;
232 	}
233 	sc->irq = irq;
234 
235 	/* Use Legacy PCI Interrupts */
236 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
237 				PCIE_INTR_ENABLE_ADDRESS),
238 			PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
239 	/* read once to flush */
240 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
241 				PCIE_INTR_ENABLE_ADDRESS));
242 
243 end:
244 	return ret;
245 }
246 
247 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
248 {
249 	int ret = 0;
250 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
251 	struct platform_device *pdev = (struct platform_device *)sc->pdev;
252 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
253 	struct CE_attr *host_ce_conf = hif_state->host_ce_config;
254 	int irq = 0;
255 	int i;
256 
257 	/* configure per CE interrupts */
258 	for (i = 0; i < scn->ce_count; i++) {
259 		if (host_ce_conf[i].flags & CE_ATTR_DISABLE_INTR)
260 			continue;
261 		qal_vbus_get_irq((struct qdf_pfm_hndl *)pdev,
262 				 ic_irqname[HIF_IC_CE0_IRQ_OFFSET + i], &irq);
263 		ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i] = irq;
264 		ret = request_irq(irq ,
265 				hif_ahb_interrupt_handler,
266 				IRQF_TRIGGER_RISING, ic_irqname[HIF_IC_CE0_IRQ_OFFSET + i],
267 				&hif_state->tasklets[i]);
268 		if (ret) {
269 			dev_err(&pdev->dev, "ath_request_irq failed\n");
270 			ret = -1;
271 			goto end;
272 		}
273 		hif_ahb_irq_enable(scn, i);
274 	}
275 
276 end:
277 	return ret;
278 }
279 
280 int hif_ahb_configure_grp_irq(struct hif_softc *scn,
281 			      struct hif_exec_context *hif_ext_group)
282 {
283 	int ret = 0;
284 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
285 	struct platform_device *pdev = (struct platform_device *)sc->pdev;
286 	int irq = 0;
287 	int j;
288 
289 	/* configure external interrupts */
290 	hif_ext_group->irq_enable = &hif_ahb_exec_grp_irq_enable;
291 	hif_ext_group->irq_disable = &hif_ahb_exec_grp_irq_disable;
292 	hif_ext_group->work_complete = &hif_dummy_grp_done;
293 
294 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
295 
296 	for (j = 0; j < hif_ext_group->numirq; j++) {
297 		qal_vbus_get_irq((struct qdf_pfm_hndl *)pdev,
298 				 ic_irqname[hif_ext_group->irq[j]], &irq);
299 
300 		ic_irqnum[hif_ext_group->irq[j]] = irq;
301 		irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
302 		ret = request_irq(irq, hif_ext_group_interrupt_handler,
303 				  IRQF_TRIGGER_RISING,
304 				  ic_irqname[hif_ext_group->irq[j]],
305 				  hif_ext_group);
306 		if (ret) {
307 			dev_err(&pdev->dev,
308 				"ath_request_irq failed\n");
309 			ret = -1;
310 			goto end;
311 		}
312 		hif_ext_group->os_irq[j] = irq;
313 	}
314 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
315 
316 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
317 	hif_ext_group->irq_requested = true;
318 
319 end:
320 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
321 	return ret;
322 }
323 
324 void hif_ahb_deconfigure_grp_irq(struct hif_softc *scn)
325 {
326 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
327 	struct hif_exec_context *hif_ext_group;
328 	int i, j;
329 	int irq = 0;
330 
331 	/* configure external interrupts */
332 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
333 		hif_ext_group = hif_state->hif_ext_group[i];
334 		if (hif_ext_group->irq_requested == true) {
335 			qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
336 			hif_ext_group->irq_requested = false;
337 			for (j = 0; j < hif_ext_group->numirq; j++) {
338 				irq = hif_ext_group->os_irq[j];
339 				irq_clear_status_flags(irq,
340 						       IRQ_DISABLE_UNLAZY);
341 				free_irq(irq, hif_ext_group);
342 			}
343 			qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
344 		}
345 	}
346 }
347 
348 irqreturn_t hif_ahb_interrupt_handler(int irq, void *context)
349 {
350 	struct ce_tasklet_entry *tasklet_entry = context;
351 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
352 }
353 
/**
 * hif_target_sync_ahb() - ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interrupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the targets other registers for the first time.
 *
 * Return: 0 on success, -EIO if the fw-initialized indication times out
 */
int hif_target_sync_ahb(struct hif_softc *scn)
{
	int val = 0;
	int limit = 0;

	/* Write the enable mask and re-read it, up to 50 times (10ms apart),
	 * until the register reads back 0.
	 */
	while (limit < 50) {
		hif_write32_mb(scn, scn->mem +
			(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS),
			PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
		qdf_mdelay(10);
		val = hif_read32_mb(scn, scn->mem +
			(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
		if (val == 0)
			break;
		limit++;
	}
	hif_write32_mb(scn, scn->mem +
		(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS),
		PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	/* signal host readiness; fw is expected to answer with
	 * FW_IND_INITIALIZED in the same register
	 */
	hif_write32_mb(scn, scn->mem + FW_INDICATOR_ADDRESS, FW_IND_HOST_READY);
	if (HAS_FW_INDICATOR) {
		int wait_limit = 500;
		int fw_ind = 0;

		/* poll up to ~500 x 10ms for FW_IND_INITIALIZED */
		while (1) {
			fw_ind = hif_read32_mb(scn, scn->mem +
					FW_INDICATOR_ADDRESS);
			if (fw_ind & FW_IND_INITIALIZED)
				break;
			if (wait_limit-- < 0)
				break;
			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				PCIE_INTR_FIRMWARE_MASK);
			qdf_mdelay(10);
		}
		if (wait_limit < 0) {
			HIF_TRACE("%s: FW signal timed out", __func__);
			return -EIO;
		}
		HIF_TRACE("%s: Got FW signal, retries = %x", __func__,
							500-wait_limit);
	}

	return 0;
}
411 
/**
 * hif_ahb_disable_bus() - Disable the bus
 * @scn: pointer to the hif context
 *
 * This function disables the bus and holds the target in reset state.
 * It disables the wifi clock, resets the device (except on 8074/8074V2/
 * 6018) and unmaps/releases the register region.
 *
 * Return: none
 */
void hif_ahb_disable_bus(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem;
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	struct resource *memres = NULL;
	int mem_pa_size = 0;
	struct hif_target_info *tgt_info = NULL;
	struct qdf_vbus_resource *vmres = NULL;

	tgt_info = &scn->target_info;
	/*Disable WIFI clock input*/
	if (sc->mem) {
		qal_vbus_get_resource((struct qdf_pfm_hndl *)pdev, &vmres,
				      IORESOURCE_MEM, 0);
		memres = (struct resource *)vmres;
		if (!memres) {
			HIF_INFO("%s: Failed to get IORESOURCE_MEM\n",
								__func__);
			return;
		}
		/* size of the mapped register region, for release below */
		mem_pa_size = memres->end - memres->start + 1;

		/* Should not be executed on 8074 platform */
		if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA8074V2) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA6018)) {
			hif_ahb_clk_enable_disable(&pdev->dev, 0);

			hif_ahb_device_reset(scn);
		}
		mem = (void __iomem *)sc->mem;
		if (mem) {
			devm_iounmap(&pdev->dev, mem);
			devm_release_mem_region(&pdev->dev, scn->mem_pa,
								mem_pa_size);
			sc->mem = NULL;
		}
	}
	scn->mem = NULL;
}
461 
/**
 * hif_ahb_enable_bus() - Enable the bus
 * @ol_sc: HIF context
 * @dev: dev
 * @bdev: bus dev (platform device)
 * @bid: bus id (platform_device_id)
 * @type: bus type
 *
 * This function enables the radio bus by enabling necessary
 * clocks and waits for the target to get ready to proceed further
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_ahb_enable_bus(struct hif_softc *ol_sc,
		struct device *dev, void *bdev,
		const struct hif_bus_id *bid,
		enum hif_enable_type type)
{
	int ret = 0;
	int hif_type;
	int target_type;
	const struct platform_device_id *id = (struct platform_device_id *)bid;
	struct platform_device *pdev = bdev;
	struct hif_target_info *tgt_info = NULL;
	struct resource *memres = NULL;
	void __iomem *mem = NULL;
	uint32_t revision_id = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);

	sc->pdev = (struct pci_dev *)pdev;
	sc->dev = &pdev->dev;
	sc->devid = id->driver_data;

	ret = hif_get_device_type(id->driver_data, revision_id,
			&hif_type, &target_type);
	if (ret < 0) {
		HIF_ERROR("%s: invalid device  ret %d id %d revision_id %d",
			__func__, ret, (int)id->driver_data, revision_id);
		return QDF_STATUS_E_FAILURE;
	}

	memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!memres) {
		HIF_INFO("%s: Failed to get IORESOURCE_MEM\n", __func__);
		/* NOTE(review): -EIO is not a QDF_STATUS value; callers
		 * comparing against QDF_STATUS_SUCCESS still see failure,
		 * but the code is inconsistent — confirm before changing.
		 */
		return -EIO;
	}

	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (ret) {
		HIF_INFO("ath: 32-bit DMA not available\n");
		goto err_cleanup1;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
#else
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
#endif
	if (ret) {
		HIF_ERROR("%s: failed to set dma mask error = %d",
				__func__, ret);
		return ret;
	}

	/* Arrange for access to Target SoC registers. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
	mem = devm_ioremap_resource(&pdev->dev, memres);
#else
	mem = devm_request_and_ioremap(&pdev->dev, memres);
#endif
	if (IS_ERR(mem)) {
		HIF_INFO("ath: ioremap error\n");
		ret = PTR_ERR(mem);
		goto err_cleanup1;
	}

	sc->mem = mem;
	ol_sc->mem = mem;
	ol_sc->mem_pa = memres->start;

	tgt_info = hif_get_target_info_handle((struct hif_opaque_softc *)ol_sc);

	tgt_info->target_type = target_type;
	hif_register_tbl_attach(ol_sc, hif_type);
	hif_target_register_tbl_attach(ol_sc, target_type);

	/* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
	if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA8074V2) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA6018)) {
		if (hif_ahb_enable_radio(sc, pdev, id) != 0) {
			HIF_INFO("error in enabling soc\n");
			return -EIO;
		}

		if (hif_target_sync_ahb(ol_sc) < 0) {
			ret = -EIO;
			goto err_target_sync;
		}
	}
	HIF_TRACE("%s: X - hif_type = 0x%x, target_type = 0x%x",
			__func__, hif_type, target_type);

	return QDF_STATUS_SUCCESS;
err_target_sync:
	/* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
	if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA8074V2) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA6018)) {
		HIF_INFO("Error: Disabling target\n");
		hif_ahb_disable_bus(ol_sc);
	}
err_cleanup1:
	return ret;
}
576 
577 
/**
 * hif_ahb_reset_soc() - reset soc
 * @hif_ctx: HIF context
 *
 * This function resets the soc and holds the
 * target in reset state
 *
 * Return: void
 */
void hif_ahb_reset_soc(struct hif_softc *hif_ctx)
{
	hif_ahb_device_reset(hif_ctx);
}
594 
/**
 * hif_ahb_nointrs() - disable IRQ
 * @scn: struct hif_softc
 *
 * This function stops interrupt(s) and frees the registered irq lines
 * (MSI, shared legacy, or per-CE plus ext groups, as configured).
 *
 * Return: none
 */
void hif_ahb_nointrs(struct hif_softc *scn)
{
	int i;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *host_ce_conf = hif_state->host_ce_config;

	ce_unregister_irq(hif_state, CE_ALL_BITMAP);

	/* nothing to free if irqs were never requested */
	if (scn->request_irq_done == false)
		return;

	if (sc->num_msi_intrs > 0) {
		/* MSI interrupt(s) */
		for (i = 0; i < sc->num_msi_intrs; i++) {
			free_irq(sc->irq + i, sc);
		}
		sc->num_msi_intrs = 0;
	} else {
		if (!scn->per_ce_irq) {
			/* single shared legacy line */
			free_irq(sc->irq, sc);
		} else {
			/* one line per enabled copy engine + ext groups */
			for (i = 0; i < scn->ce_count; i++) {
				if (host_ce_conf[i].flags
						& CE_ATTR_DISABLE_INTR)
					continue;

				free_irq(ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i],
						&hif_state->tasklets[i]);
			}
			hif_ahb_deconfigure_grp_irq(scn);
		}
	}
	scn->request_irq_done = false;

}
640 
/**
 * hif_ahb_irq_enable() - enable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * This function enables the interrupt for the radio.  In per-CE irq
 * mode it sets the CE's bit in the HOST_IE registers (OUT pipes via
 * HOST_IE_ADDRESS, IN pipes via HOST_IE_ADDRESS_2); otherwise it
 * delegates to hif_pci_irq_enable().
 *
 * Return: N/A
 */
void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			/* read-modify-write under irq_reg_lock */
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, scn->mem + reg_offset);
			regval |= HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, scn->mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, scn->mem + reg_offset);
			regval |= HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, scn->mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Enable destination ring interrupts for
				 * 8074, 8074V2 and 6018
				 */
				regval = hif_read32_mb(scn, scn->mem +
					HOST_IE_ADDRESS_3);
				regval |= HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, scn->mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	} else {
		hif_pci_irq_enable(scn, ce_id);
	}
}
692 
/**
 * hif_ahb_irq_disable() - disable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * Mirror of hif_ahb_irq_enable(): clears the CE's bits in the HOST_IE
 * registers when running in per-CE irq mode.
 *
 * NOTE(review): unlike hif_ahb_irq_enable() there is no else branch for
 * the !per_ce_irq case (no hif_pci_irq_disable call) — confirm whether
 * that is intentional.
 *
 * Return: N/A
 */
void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			/* read-modify-write under irq_reg_lock */
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, scn->mem + reg_offset);
			regval &= ~HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, scn->mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, scn->mem + reg_offset);
			regval &= ~HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, scn->mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Disable destination ring interrupts for
				 * 8074, 8074V2 and 6018
				 */
				regval = hif_read32_mb(scn, scn->mem +
					HOST_IE_ADDRESS_3);
				regval &= ~HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, scn->mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	}
}
740 
741 void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
742 {
743 	int i;
744 
745 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
746 	if (hif_ext_group->irq_enabled) {
747 		for (i = 0; i < hif_ext_group->numirq; i++) {
748 			disable_irq_nosync(hif_ext_group->os_irq[i]);
749 		}
750 		hif_ext_group->irq_enabled = false;
751 	}
752 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
753 }
754 
755 void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
756 {
757 	int i;
758 
759 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
760 	if (!hif_ext_group->irq_enabled) {
761 		for (i = 0; i < hif_ext_group->numirq; i++) {
762 			enable_irq(hif_ext_group->os_irq[i]);
763 		}
764 		hif_ext_group->irq_enabled = true;
765 	}
766 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
767 }
768 
769 /**
770  * hif_ahb_needs_bmi() - return true if the soc needs bmi through the driver
771  * @scn: hif context
772  *
773  * Return: true if soc needs driver bmi otherwise false
774  */
775 bool hif_ahb_needs_bmi(struct hif_softc *scn)
776 {
777 	return !ce_srng_based(scn);
778 }
779