xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/snoc/if_ahb.c (revision 901120c066e139c7f8a2c8e4820561fdd83c67ef)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: if_ahb.c
22  *
23  * c file for ahb specific implementations.
24  */
25 
26 #include "hif.h"
27 #include "target_type.h"
28 #include "hif_main.h"
29 #include "hif_debug.h"
30 #include "hif_io32.h"
31 #include "ce_main.h"
32 #include "ce_api.h"
33 #include "ce_tasklet.h"
34 #include "if_ahb.h"
35 #include "if_pci.h"
36 #include "ahb_api.h"
37 #include "pci_api.h"
38 #include "hif_napi.h"
39 #include "qal_vbus_dev.h"
40 #include "qdf_irq.h"
41 
42 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
43 #define IRQF_DISABLED 0x00000020
44 #endif
45 
46 #define HIF_IC_CE0_IRQ_OFFSET 4
47 #define HIF_IC_MAX_IRQ 53
48 
/* OS irq numbers resolved at request time, indexed by logical irq id;
 * consumed by hif_ahb_nointrs() when freeing the per-CE irqs.
 */
static uint16_t ic_irqnum[HIF_IC_MAX_IRQ];
/* integrated chip irq names
 *
 * NOTE(review): these strings are passed to pfrm_get_irq() /
 * pfrm_request_irq() as the irq resource name, so each entry must match
 * the platform (device tree) interrupt name exactly — including the
 * "reo2ost-exception" spelling. Do not "correct" spellings without
 * confirming the corresponding device tree entries.
 */
const char *ic_irqname[HIF_IC_MAX_IRQ] = {
"misc-pulse1",
"misc-latch",
"sw-exception",
"watchdog",
"ce0",
"ce1",
"ce2",
"ce3",
"ce4",
"ce5",
"ce6",
"ce7",
"ce8",
"ce9",
"ce10",
"ce11",
"host2wbm-desc-feed",
"host2reo-re-injection",
"host2reo-command",
"host2rxdma-monitor-ring3",
"host2rxdma-monitor-ring2",
"host2rxdma-monitor-ring1",
"reo2ost-exception",
"wbm2host-rx-release",
"reo2host-status",
"reo2host-destination-ring4",
"reo2host-destination-ring3",
"reo2host-destination-ring2",
"reo2host-destination-ring1",
"rxdma2host-monitor-destination-mac3",
"rxdma2host-monitor-destination-mac2",
"rxdma2host-monitor-destination-mac1",
"ppdu-end-interrupts-mac3",
"ppdu-end-interrupts-mac2",
"ppdu-end-interrupts-mac1",
"rxdma2host-monitor-status-ring-mac3",
"rxdma2host-monitor-status-ring-mac2",
"rxdma2host-monitor-status-ring-mac1",
"host2rxdma-host-buf-ring-mac3",
"host2rxdma-host-buf-ring-mac2",
"host2rxdma-host-buf-ring-mac1",
"rxdma2host-destination-ring-mac3",
"rxdma2host-destination-ring-mac2",
"rxdma2host-destination-ring-mac1",
"host2tcl-input-ring4",
"host2tcl-input-ring3",
"host2tcl-input-ring2",
"host2tcl-input-ring1",
"wbm2host-tx-completions-ring4",
"wbm2host-tx-completions-ring3",
"wbm2host-tx-completions-ring2",
"wbm2host-tx-completions-ring1",
"tcl2host-status-ring",
};
106 
107 /** hif_ahb_get_irq_name() - get irqname
108  * This function gives irqnumber to irqname
109  * mapping.
110  *
111  * @irq_no: irq number
112  *
113  * Return: irq name
114  */
115 const char *hif_ahb_get_irq_name(int irq_no)
116 {
117 	return ic_irqname[irq_no];
118 }
119 
/**
 * hif_ahb_disable_isr() - disable isr
 * @scn: struct hif_softc
 *
 * This function disables isr and kills tasklets
 *
 * Return: void
 */
void hif_ahb_disable_isr(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	/* Stop exec-context (group) processing first */
	hif_exec_kill(&scn->osc);
	/* Free/disable all bus interrupts */
	hif_nointrs(scn);
	/* Kill the per-CE tasklets and the legacy shared tasklet */
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	/* Reset pending-tasklet accounting so nothing waits on them */
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}
139 
/**
 * hif_ahb_dump_registers() - dump bus debug registers
 * @hif_ctx: struct hif_softc
 *
 * This function dumps hif bus debug registers
 *
 * Return: 0 for success or error code
 */
int hif_ahb_dump_registers(struct hif_softc *hif_ctx)
{
	int status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	status = hif_dump_ce_registers(scn);
	if (status)
		hif_err("Dump CE Registers Failed status %d", status);

	/* Propagate the failure instead of unconditionally returning 0,
	 * matching the documented "0 for success or error code" contract.
	 */
	return status;
}
159 
/**
 * hif_ahb_close() - hif_bus_close
 * @scn: pointer to the hif context.
 *
 * This is a callback function for hif_bus_close.
 *
 * Return: n/a
 */
void hif_ahb_close(struct hif_softc *scn)
{
	/* No AHB-private state to tear down beyond the CE layer */
	hif_ce_close(scn);
}
173 
/**
 * hif_ahb_open() - hif_bus_open for the ahb bus
 * @hif_ctx: hif context
 * @bus_type: bus type
 *
 * This is a callback function for hif_bus_open.
 *
 * Return: QDF_STATUS of hif_ce_open()
 */
QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
{

	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	/* Lock protecting irq configuration paths */
	qdf_spinlock_create(&sc->irq_lock);
	return hif_ce_open(hif_ctx);
}
191 
/**
 * hif_ahb_bus_configure() - Configure the bus
 * @scn: pointer to the hif context.
 *
 * This function configures the ahb bus; AHB targets reuse the PCI
 * bus-configure sequence.
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_ahb_bus_configure(struct hif_softc *scn)
{
	return hif_pci_bus_configure(scn);
}
204 
205 static void hif_ahb_get_soc_info_pld(struct hif_pci_softc *sc,
206 				     struct device *dev)
207 {
208 	struct pld_soc_info info;
209 	int ret = 0;
210 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
211 
212 	ret = pld_get_soc_info(dev, &info);
213 	sc->mem = info.v_addr;
214 	sc->ce_sc.ol_sc.mem    = info.v_addr;
215 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
216 	/* dev_mem_info[0] is for CMEM */
217 	scn->cmem_start = info.dev_mem_info[0].start;
218 	scn->cmem_size = info.dev_mem_info[0].size;
219 }
220 
/**
 * hif_ahb_configure_irq_by_ceid() - request and enable the irq of one CE
 * @scn: hif context
 * @ce_id: copy engine id whose interrupt is being configured
 *
 * Resolves the platform irq by name ("ce0".."ce11"), requests it with the
 * per-CE tasklet entry as handler context, and unmasks the CE interrupt
 * in the target.
 *
 * Return: 0 on success, -EINVAL for a bad ce_id, -EFAULT on failure
 */
int hif_ahb_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
{
	int ret = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int irq = 0;

	if (ce_id >= CE_COUNT_MAX)
		return -EINVAL;

	/* Translate logical irq index (CE0 offset + ce_id) to the OS irq */
	ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
			   ic_irqname[HIF_IC_CE0_IRQ_OFFSET + ce_id],
			   HIF_IC_CE0_IRQ_OFFSET + ce_id, &irq);
	if (ret) {
		dev_err(&pdev->dev, "get irq failed\n");
		/* NOTE(review): the original errno is replaced by -EFAULT;
		 * callers only test for non-zero.
		 */
		ret = -EFAULT;
		goto end;
	}

	/* Remember the OS irq so hif_ahb_nointrs() can free it later */
	ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + ce_id] = irq;
	ret = pfrm_request_irq(&pdev->dev, irq,
			       hif_ahb_interrupt_handler,
			       IRQF_TRIGGER_RISING,
			       ic_irqname[HIF_IC_CE0_IRQ_OFFSET + ce_id],
			       &hif_state->tasklets[ce_id]);
	if (ret) {
		dev_err(&pdev->dev, "ath_request_irq failed\n");
		ret = -EFAULT;
		goto end;
	}
	/* Unmask the CE interrupt in the target IE register(s) */
	hif_ahb_irq_enable(scn, ce_id);

end:
	return ret;
}
257 
258 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
259 {
260 	int ret = 0;
261 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
262 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
263 	struct CE_attr *host_ce_conf = hif_state->host_ce_config;
264 	int i;
265 
266 	/* configure per CE interrupts */
267 	for (i = 0; i < scn->ce_count; i++) {
268 		if (host_ce_conf[i].flags & CE_ATTR_DISABLE_INTR)
269 			continue;
270 
271 		if (host_ce_conf[i].flags & CE_ATTR_INIT_ON_DEMAND)
272 			continue;
273 
274 		ret = hif_ahb_configure_irq_by_ceid(scn, i);
275 		if (ret)
276 			goto end;
277 	}
278 
279 end:
280 	return ret;
281 }
282 
/**
 * hif_ahb_configure_grp_irq() - request the irqs of an exec group
 * @scn: hif context
 * @hif_ext_group: hif exec context to configure
 *
 * Resolves each of the group's logical irqs to an OS irq, flags it
 * QDF_IRQ_DISABLE_UNLAZY, and requests it (shared, rising-edge) with the
 * group as handler context. On success the group is marked
 * irq_requested.
 *
 * Return: 0 on success, -EFAULT on failure
 */
int hif_ahb_configure_grp_irq(struct hif_softc *scn,
			      struct hif_exec_context *hif_ext_group)
{
	int ret = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	int irq = 0;
	int j;

	/* configure external interrupts */
	hif_ext_group->irq_enable = &hif_ahb_exec_grp_irq_enable;
	hif_ext_group->irq_disable = &hif_ahb_exec_grp_irq_disable;
	hif_ext_group->irq_name = &hif_ahb_get_irq_name;
	hif_ext_group->work_complete = &hif_dummy_grp_done;

	/* First pass: translate logical irq ids to OS irq numbers */
	for (j = 0; j < hif_ext_group->numirq; j++) {
		ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
				   ic_irqname[hif_ext_group->irq[j]],
				   hif_ext_group->irq[j], &irq);
		if (ret) {
			dev_err(&pdev->dev, "get irq failed\n");
			ret = -EFAULT;
			goto end;
		}
		ic_irqnum[hif_ext_group->irq[j]] = irq;
		hif_ext_group->os_irq[j] = irq;
	}

	/* Second pass: request each OS irq */
	for (j = 0; j < hif_ext_group->numirq; j++) {
		irq = hif_ext_group->os_irq[j];

		/* UNLAZY: per genirq IRQ_DISABLE_UNLAZY semantics, a later
		 * disable_irq_nosync() masks at the chip immediately rather
		 * than lazily on the next interrupt.
		 */
		qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
		qdf_dev_set_irq_status_flags(irq, QDF_IRQ_DISABLE_UNLAZY);
		qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);

		ret = pfrm_request_irq(scn->qdf_dev->dev,
				       irq, hif_ext_group_interrupt_handler,
				       IRQF_TRIGGER_RISING | IRQF_SHARED,
				       ic_irqname[hif_ext_group->irq[j]],
				       hif_ext_group);
		if (ret) {
			dev_err(&pdev->dev, "ath_request_irq failed\n");
			ret = -EFAULT;
			goto end;
		}
	}

	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
	hif_ext_group->irq_requested = true;
	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
end:
	return ret;
}
336 
/**
 * hif_ahb_deconfigure_grp_irq() - free the irqs of all exec groups
 * @scn: hif context
 *
 * Clears the group irq bookkeeping under irq_lock, then frees the OS
 * irqs outside the lock (see the deadlock note below).
 *
 * Return: none
 */
void hif_ahb_deconfigure_grp_irq(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i, j;
	int irq = 0;

	/* deconfigure external interrupts */
	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		if (hif_ext_group->irq_requested == true) {
			/* Phase 1: clear state with the lock held */
			qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
			hif_ext_group->irq_requested = false;
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				hif_ext_group->irq_enabled = false;
				qdf_dev_clear_irq_status_flags(
							irq,
							QDF_IRQ_DISABLE_UNLAZY);
			}
			qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);

			/* Avoid holding the irq_lock while freeing the irq
			 * as the same lock is being held by the irq handler
			 * while disabling the irq. This causes a deadlock
			 * between free_irq and irq_handler.
			 */
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				pfrm_free_irq(scn->qdf_dev->dev,
					      irq, hif_ext_group);
			}
		}
	}
}
372 
/**
 * hif_ahb_interrupt_handler() - irq handler for a per-CE interrupt line
 * @irq: OS irq number (unused; context identifies the CE)
 * @context: the struct ce_tasklet_entry registered at request_irq time
 *
 * Return: result of ce_dispatch_interrupt() for the CE
 */
irqreturn_t hif_ahb_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}
378 
379 /**
380  * hif_disable_bus() - Disable the bus
381  * @scn : pointer to the hif context
382  *
383  * This function disables the bus and helds the target in reset state
384  *
385  * Return: none
386  */
387 void hif_ahb_disable_bus(struct hif_softc *scn)
388 {
389 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
390 	void __iomem *mem;
391 	struct platform_device *pdev = (struct platform_device *)sc->pdev;
392 	struct resource *memres = NULL;
393 	int mem_pa_size = 0;
394 	struct hif_target_info *tgt_info = NULL;
395 	struct qdf_vbus_resource *vmres = NULL;
396 	QDF_STATUS status;
397 
398 	tgt_info = &scn->target_info;
399 	/*Disable WIFI clock input*/
400 	if (sc->mem) {
401 		status = pfrm_platform_get_resource(
402 				scn->qdf_dev->dev,
403 				(struct qdf_pfm_hndl *)pdev, &vmres,
404 				IORESOURCE_MEM, 0);
405 		if (QDF_IS_STATUS_ERROR(status)) {
406 			hif_info("Failed to get IORESOURCE_MEM");
407 			return;
408 		}
409 		memres = (struct resource *)vmres;
410 		if (memres)
411 			mem_pa_size = memres->end - memres->start + 1;
412 
413 		if (tgt_info->target_type == TARGET_TYPE_QCA5018 ||
414 		    tgt_info->target_type == TARGET_TYPE_QCA5332) {
415 			iounmap(sc->mem_ce);
416 			sc->mem_ce = NULL;
417 			scn->mem_ce = NULL;
418 		}
419 		if (sc->mem_cmem) {
420 			iounmap(sc->mem_cmem);
421 			sc->mem_cmem = NULL;
422 		}
423 		mem = (void __iomem *)sc->mem;
424 		if (mem) {
425 			pfrm_devm_iounmap(&pdev->dev, mem);
426 			pfrm_devm_release_mem_region(&pdev->dev, scn->mem_pa,
427 						     mem_pa_size);
428 			sc->mem = NULL;
429 			pld_set_bar_addr(&pdev->dev, NULL);
430 		}
431 	}
432 	scn->mem = NULL;
433 }
434 
435 /**
436  * hif_enable_bus() - Enable the bus
437  * @dev: dev
438  * @bdev: bus dev
439  * @bid: bus id
440  * @type: bus type
441  *
442  * This function enables the radio bus by enabling necessary
443  * clocks and waits for the target to get ready to proceed further
444  *
445  * Return: QDF_STATUS
446  */
447 QDF_STATUS hif_ahb_enable_bus(struct hif_softc *ol_sc,
448 		struct device *dev, void *bdev,
449 		const struct hif_bus_id *bid,
450 		enum hif_enable_type type)
451 {
452 	int ret = 0;
453 	int hif_type;
454 	int target_type;
455 	const struct platform_device_id *id = (struct platform_device_id *)bid;
456 	struct platform_device *pdev = bdev;
457 	struct hif_target_info *tgt_info = NULL;
458 	struct resource *memres = NULL;
459 	void __iomem *mem = NULL;
460 	uint32_t revision_id = 0;
461 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
462 	QDF_STATUS status;
463 	struct qdf_vbus_resource *vmres = NULL;
464 
465 	sc->pdev = (struct pci_dev *)pdev;
466 	sc->dev = &pdev->dev;
467 	sc->devid = id->driver_data;
468 
469 	ret = hif_get_device_type(id->driver_data, revision_id,
470 			&hif_type, &target_type);
471 	if (ret < 0) {
472 		hif_err("Invalid device ret %d id %d revision_id %d",
473 			ret, (int)id->driver_data, revision_id);
474 		return QDF_STATUS_E_FAILURE;
475 	}
476 
477 	if (target_type == TARGET_TYPE_QCN6122 ||
478 	    target_type == TARGET_TYPE_QCN9160) {
479 		hif_ahb_get_soc_info_pld(sc, dev);
480 	}
481 
482 	/* 11BE SoC chipsets Need to call this function to get cmem addr */
483 	if (target_type == TARGET_TYPE_QCA5332)
484 		hif_ahb_get_soc_info_pld(sc, dev);
485 
486 	if (target_type == TARGET_TYPE_QCN6122 ||
487 	    target_type == TARGET_TYPE_QCN9160) {
488 		hif_update_irq_ops_with_pci(ol_sc);
489 	} else {
490 		status = pfrm_platform_get_resource(&pdev->dev,
491 						    (struct qdf_pfm_hndl *)pdev,
492 						    &vmres,
493 						    IORESOURCE_MEM, 0);
494 		if (QDF_IS_STATUS_ERROR(status)) {
495 			hif_err("Failed to get IORESOURCE_MEM");
496 			return status;
497 		}
498 		memres = (struct resource *)vmres;
499 		if (!memres) {
500 			hif_err("Failed to get IORESOURCE_MEM");
501 			return QDF_STATUS_E_IO;
502 		}
503 
504 		/* Arrange for access to Target SoC registers. */
505 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
506 		status = pfrm_devm_ioremap_resource(
507 					dev,
508 					(struct qdf_vbus_resource *)memres,
509 					&mem);
510 #else
511 		status = pfrm_devm_request_and_ioremap(
512 					dev,
513 					(struct qdf_vbus_resource *)memres,
514 					&mem);
515 #endif
516 		if (QDF_IS_STATUS_ERROR(status)) {
517 			hif_err("ath: ioremap error");
518 			ret = PTR_ERR(mem);
519 			goto err_cleanup1;
520 		}
521 
522 		sc->mem = mem;
523 		pld_set_bar_addr(dev, mem);
524 		ol_sc->mem = mem;
525 		ol_sc->mem_pa = memres->start;
526 	}
527 
528 	ret = pfrm_dma_set_mask(dev, 32);
529 	if (ret) {
530 		hif_err("ath: 32-bit DMA not available");
531 		status = QDF_STATUS_E_IO;
532 		goto err_cleanup1;
533 	}
534 
535 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
536 	ret = pfrm_dma_set_mask_and_coherent(dev, 32);
537 #else
538 	ret = pfrm_dma_set_coherent_mask(dev, 32);
539 #endif
540 	if (ret) {
541 		hif_err("Failed to set dma mask error = %d", ret);
542 		return QDF_STATUS_E_IO;
543 	}
544 
545 	tgt_info = hif_get_target_info_handle((struct hif_opaque_softc *)ol_sc);
546 
547 	tgt_info->target_type = target_type;
548 	hif_register_tbl_attach(ol_sc, hif_type);
549 	hif_target_register_tbl_attach(ol_sc, target_type);
550 	/*
551 	 * In QCA5018 CE region moved to SOC outside WCSS block.
552 	 * Allocate separate I/O remap to access CE registers.
553 	 */
554 	if (tgt_info->target_type == TARGET_TYPE_QCA5018 ||
555 	    tgt_info->target_type == TARGET_TYPE_QCA5332) {
556 		struct hif_softc *scn = HIF_GET_SOFTC(sc);
557 
558 		sc->mem_ce = ioremap_nocache(HOST_CE_ADDRESS, HOST_CE_SIZE);
559 		if (IS_ERR(sc->mem_ce)) {
560 			hif_err("CE: ioremap failed");
561 			return QDF_STATUS_E_IO;
562 		}
563 		ol_sc->mem_ce = sc->mem_ce;
564 	}
565 
566 	/*
567 	 * In QCA5332 CMEM region is outside WCSS block.
568 	 * Allocate separate I/O remap to access CMEM address.
569 	 */
570 	if (tgt_info->target_type == TARGET_TYPE_QCA5332) {
571 		struct hif_softc *scn = HIF_GET_SOFTC(sc);
572 
573 		sc->mem_cmem = ioremap_nocache(HOST_CMEM_ADDRESS,
574 					       HOST_CMEM_SIZE);
575 		if (IS_ERR(sc->mem_cmem)) {
576 			hif_err("CE: ioremap failed");
577 			return QDF_STATUS_E_IO;
578 		}
579 		ol_sc->mem_cmem = sc->mem_cmem;
580 	}
581 
582 	hif_info("X - hif_type = 0x%x, target_type = 0x%x",
583 		hif_type, target_type);
584 
585 	return QDF_STATUS_SUCCESS;
586 err_cleanup1:
587 	return status;
588 }
589 
/**
 * hif_ahb_nointrs() - disable IRQ
 * @scn: struct hif_softc
 *
 * This function stops interrupt(s)
 *
 * Return: none
 */
void hif_ahb_nointrs(struct hif_softc *scn)
{
	int i;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *host_ce_conf = hif_state->host_ce_config;

	scn->free_irq_done = true;
	ce_unregister_irq(hif_state, CE_ALL_BITMAP);

	/* Nothing to free if irqs were never requested */
	if (scn->request_irq_done == false)
		return;

	if (sc->num_msi_intrs > 0) {
		/* MSI interrupt(s) */
		for (i = 0; i < sc->num_msi_intrs; i++) {
			pfrm_free_irq(scn->qdf_dev->dev, sc->irq + i, sc);
		}
		sc->num_msi_intrs = 0;
	} else {
		if (!scn->per_ce_irq) {
			/* Single legacy interrupt line shared by all CEs */
			pfrm_free_irq(scn->qdf_dev->dev, sc->irq, sc);
		} else {
			/* Per-CE lines: free only those actually requested
			 * (interrupts enabled and tasklet initialized)
			 */
			for (i = 0; i < scn->ce_count; i++) {
				if (host_ce_conf[i].flags
						& CE_ATTR_DISABLE_INTR)
					continue;
				if (!hif_state->tasklets[i].inited)
					continue;
				pfrm_free_irq(
					scn->qdf_dev->dev,
					ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i],
					&hif_state->tasklets[i]);
			}
			hif_ahb_deconfigure_grp_irq(scn);
		}
	}
	scn->request_irq_done = false;

}
639 
/**
 * hif_ahb_irq_enable() - enable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * This function enables the interrupt for the radio by setting the CE's
 * bit in the relevant host interrupt-enable (IE) register(s).
 *
 * Return: N/A
 */
void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;
	/* CE registers may live in a separately mapped window (mem_ce) */
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			/* Read-modify-write of IE reg 1 under irq_reg_lock */
			reg_offset = HOST_IE_ADDRESS;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval |= HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval |= HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5332 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Enable destination ring interrupts for
				 * 8074, 8074V2, 9574, 6018 and 50xx
				 */
				regval = hif_read32_mb(scn, mem +
					HOST_IE_ADDRESS_3);
				regval |= HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	} else {
		/* Legacy single-interrupt mode: defer to the PCI path */
		hif_pci_irq_enable(scn, ce_id);
	}
}
695 
/**
 * hif_ahb_irq_disable() - disable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * Clears the CE's bit in the relevant host interrupt-enable (IE)
 * register(s); mirror of hif_ahb_irq_enable().
 *
 * Return: N/A
 */
void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;
	/* CE registers may live in a separately mapped window (mem_ce) */
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			/* Read-modify-write of IE reg 1 under irq_reg_lock */
			reg_offset = HOST_IE_ADDRESS;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval &= ~HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval &= ~HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5332 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Disable destination ring interrupts for
				 * 8074, 8074V2, 9574, 6018 and 50xx
				 */
				regval = hif_read32_mb(scn, mem +
					HOST_IE_ADDRESS_3);
				regval &= ~HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	}
}
747 
748 void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
749 {
750 	int i;
751 
752 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
753 	if (hif_ext_group->irq_enabled) {
754 		for (i = 0; i < hif_ext_group->numirq; i++) {
755 			disable_irq_nosync(hif_ext_group->os_irq[i]);
756 		}
757 		hif_ext_group->irq_enabled = false;
758 	}
759 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
760 }
761 
762 void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
763 {
764 	int i;
765 
766 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
767 	if (hif_ext_group->irq_requested && !hif_ext_group->irq_enabled) {
768 		for (i = 0; i < hif_ext_group->numirq; i++) {
769 			enable_irq(hif_ext_group->os_irq[i]);
770 		}
771 		hif_ext_group->irq_enabled = true;
772 	}
773 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
774 }
775 
/**
 * hif_ahb_needs_bmi() - return true if the soc needs bmi through the driver
 * @scn: hif context
 *
 * Return: true if soc needs driver bmi otherwise false
 */
bool hif_ahb_needs_bmi(struct hif_softc *scn)
{
	/* Driver BMI is needed only for non-srng-based targets */
	return !ce_srng_based(scn);
}
786 
/**
 * hif_display_ahb_irq_regs() - prints the host interrupt enable (IE) regs
 * @scn: hif context
 *
 * Return: None
 */
void hif_display_ahb_irq_regs(struct hif_softc *scn)
{
	uint32_t regval;
	/* CE registers may live in a separately mapped window (mem_ce) */
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;
	struct hif_target_info *tgt_info = &scn->target_info;

	/* NOTE(review): skipped for QCN6122/QCN9160 (pld-managed targets);
	 * presumably the IE registers are not directly mapped there -
	 * confirm against the pld layer.
	 */
	if (tgt_info->target_type == TARGET_TYPE_QCN6122 ||
	    tgt_info->target_type == TARGET_TYPE_QCN9160) {
		return;
	}
	if (scn->per_ce_irq) {
		regval = hif_read32_mb(scn, mem + HOST_IE_ADDRESS);
		hif_nofl_err("IRQ enable register value 0x%08x", regval);

		regval = hif_read32_mb(scn, mem + HOST_IE_ADDRESS_2);
		hif_nofl_err("IRQ enable register 2 value 0x%08x", regval);

		/* IE reg 3 (destination rings) exists only on these targets */
		if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
		    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
		    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
		    tgt_info->target_type == TARGET_TYPE_QCA5332 ||
		    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
		    tgt_info->target_type == TARGET_TYPE_QCA6018) {
			regval = hif_read32_mb(scn, mem +
					       HOST_IE_ADDRESS_3);
			hif_nofl_err("IRQ enable register 3 value 0x%08x",
				     regval);
		}
	}
}
824 
/**
 * hif_ahb_display_stats() - dump IE registers and CE statistics
 * @scn: hif context (logged and ignored when NULL)
 *
 * Return: none
 */
void hif_ahb_display_stats(struct hif_softc *scn)
{
	if (scn) {
		hif_display_ahb_irq_regs(scn);
		hif_display_ce_stats(scn);
	} else {
		hif_err("hif_scn null");
	}
}
834 
/**
 * hif_ahb_clear_stats() - reset the CE statistics
 * @scn: hif context
 *
 * Return: none
 */
void hif_ahb_clear_stats(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (hif_state)
		hif_clear_ce_stats(hif_state);
	else
		hif_err("hif_state null");
}
845