xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/snoc/if_ahb.c (revision 1b9674e21e24478fba4530f5ae7396b9555e9c6a)
/*
 * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: if_ahb.c
 *
 * C file for AHB bus specific implementations.
 */

#include "hif.h"
#include "target_type.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_tasklet.h"
#include "if_ahb.h"
#include "if_pci.h"
#include "ahb_api.h"
#include "pci_api.h"
#include "hif_napi.h"

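/*
 * IRQF_DISABLED was removed from the kernel in 4.1; keep its legacy value
 * here so the request_irq() calls below continue to build on newer kernels.
 */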
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
#define IRQF_DISABLED 0x00000020
#endif

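/* Index of the "ce0" entry in ic_irqname[]; per-CE interrupts start there */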
#define HIF_IC_CE0_IRQ_OFFSET 4
#define HIF_IC_MAX_IRQ 54

static uint8_t ic_irqnum[HIF_IC_MAX_IRQ];
/* integrated chip irq names */
const char *ic_irqname[HIF_IC_MAX_IRQ] = {
	"misc-pulse1",
	"misc-latch",
	"sw-exception",
	"watchdog",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"ce12",
	"ce13",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};

/**
 * hif_ahb_disable_isr() - disable isr
 * @scn: struct hif_softc
 *
 * This function disables the ISR and kills the tasklets
 *
 * Return: void
 */
void hif_ahb_disable_isr(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	hif_exec_kill(&scn->osc);
	hif_nointrs(scn);
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}

/**
 * hif_ahb_dump_registers() - dump bus debug registers
 * @hif_ctx: struct hif_softc
 *
 * This function dumps the hif bus debug registers
 *
 * Return: 0 for success or error code
 */
int hif_ahb_dump_registers(struct hif_softc *hif_ctx)
{
	int status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	status = hif_dump_ce_registers(scn);
	if (status)
		HIF_ERROR("%s: Dump CE Registers Failed status %d", __func__,
			  status);

	return 0;
}

/**
 * hif_ahb_close() - hif_bus_close
 * @scn: pointer to the hif context.
 *
 * This is a callback function for hif_bus_close.
 *
 * Return: n/a
 */
void hif_ahb_close(struct hif_softc *scn)
{
	hif_ce_close(scn);
}

/**
 * hif_ahb_open() - hif_bus_open
 * @hif_ctx: hif context
 * @bus_type: bus type
 *
 * This is a callback function for hif_bus_open.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

	qdf_spinlock_create(&sc->irq_lock);
	return hif_ce_open(hif_ctx);
}

/**
 * hif_ahb_bus_configure() - Configure the bus
 * @scn: pointer to the hif context.
 *
 * This function configures the AHB bus
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_ahb_bus_configure(struct hif_softc *scn)
{
	return hif_pci_bus_configure(scn);
}

/**
 * hif_configure_msi_ahb() - Configure MSI interrupts
 * @sc: pointer to the hif context
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_configure_msi_ahb(struct hif_pci_softc *sc)
{
	return 0;
}

/**
 * hif_ahb_configure_legacy_irq() - Configure Legacy IRQ
 * @sc: pointer to the hif context.
 *
 * This function registers the irq handler and enables legacy interrupts
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
{
	int ret = 0;
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	int irq = 0;

	/* MSI is not supported or MSI setup failed; use the legacy irq */
	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
	irq = platform_get_irq_byname(pdev, "legacy");
	if (irq < 0) {
		dev_err(&pdev->dev, "Unable to get irq\n");
		ret = -1;
		goto end;
	}
	ret = request_irq(irq, hif_pci_legacy_ce_interrupt_handler,
			  IRQF_DISABLED, "wlan_ahb", sc);
	if (ret) {
		dev_err(&pdev->dev, "ath_request_irq failed\n");
		ret = -1;
		goto end;
	}
	sc->irq = irq;

	/* Use Legacy PCI Interrupts */
	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
			PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	/* read once to flush */
	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS));

end:
	return ret;
}

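/**
 * hif_ahb_configure_irq() - register per copy engine interrupts
 * @sc: pointer to the hif context.
 *
 * Requests one interrupt line per copy engine, skipping CEs whose
 * interrupts are disabled by configuration, and enables each irq.
 *
 * Return: 0 for success. nonzero for failure.
 */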
int hif_ahb_configure_irq(struct hif_pci_softc *sc)
{
	int ret = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *host_ce_conf = hif_state->host_ce_config;
	int irq = 0;
	int i;

	/* configure per CE interrupts */
	for (i = 0; i < scn->ce_count; i++) {
		if (host_ce_conf[i].flags & CE_ATTR_DISABLE_INTR)
			continue;
		irq = platform_get_irq_byname(pdev,
				ic_irqname[HIF_IC_CE0_IRQ_OFFSET + i]);
		if (irq < 0) {
			dev_err(&pdev->dev, "Unable to get irq %s\n",
				ic_irqname[HIF_IC_CE0_IRQ_OFFSET + i]);
			ret = -1;
			goto end;
		}
		ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i] = irq;
		ret = request_irq(irq, hif_ahb_interrupt_handler,
				  IRQF_TRIGGER_RISING,
				  ic_irqname[HIF_IC_CE0_IRQ_OFFSET + i],
				  &hif_state->tasklets[i]);
		if (ret) {
			dev_err(&pdev->dev, "ath_request_irq failed\n");
			ret = -1;
			goto end;
		}
		hif_ahb_irq_enable(scn, i);
	}

end:
	return ret;
}

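/**
 * hif_ahb_configure_grp_irq() - register external group interrupts
 * @scn: struct hif_softc
 * @hif_ext_group: hif external (group) execution context
 *
 * Requests the platform irq for every ring in the group, hooks up the
 * group enable/disable callbacks and records the OS irq numbers.
 *
 * Return: 0 for success. nonzero for failure.
 */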
int hif_ahb_configure_grp_irq(struct hif_softc *scn,
			      struct hif_exec_context *hif_ext_group)
{
	int ret = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	int irq = 0;
	const char *irq_name;
	int j;

	/* configure external interrupts */
	hif_ext_group->irq_enable = &hif_ahb_exec_grp_irq_enable;
	hif_ext_group->irq_disable = &hif_ahb_exec_grp_irq_disable;
	hif_ext_group->work_complete = &hif_dummy_grp_done;

	/*
	 * request_irq() can sleep, so register the group irqs before
	 * taking the spinlock that protects the group state.
	 */
	for (j = 0; j < hif_ext_group->numirq; j++) {
		irq_name = ic_irqname[hif_ext_group->irq[j]];
		irq = platform_get_irq_byname(pdev, irq_name);
		if (irq < 0) {
			dev_err(&pdev->dev, "Unable to get irq %s\n",
				irq_name);
			return -1;
		}

		ic_irqnum[hif_ext_group->irq[j]] = irq;
		irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
		ret = request_irq(irq, hif_ext_group_interrupt_handler,
				  IRQF_TRIGGER_RISING, irq_name,
				  hif_ext_group);
		if (ret) {
			dev_err(&pdev->dev, "ath_request_irq failed\n");
			return -1;
		}
		hif_ext_group->os_irq[j] = irq;
	}

	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
	hif_ext_group->irq_requested = true;
	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);

	return ret;
}

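/**
 * hif_ahb_deconfigure_grp_irq() - free the external group interrupts
 * @scn: struct hif_softc
 *
 * Releases every irq previously requested by hif_ahb_configure_grp_irq()
 * and clears the irq_requested state for each group.
 *
 * Return: void
 */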
void hif_ahb_deconfigure_grp_irq(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i, j;
	int irq = 0;

	/* free external (group) interrupts */
	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		if (!hif_ext_group->irq_requested)
			continue;

		qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
		hif_ext_group->irq_requested = false;
		qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);

		/* free_irq() can sleep, so release it outside the spinlock */
		for (j = 0; j < hif_ext_group->numirq; j++) {
			irq = hif_ext_group->os_irq[j];
			irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
			free_irq(irq, hif_ext_group);
		}
	}
}

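/**
 * hif_ahb_interrupt_handler() - interrupt handler for a copy engine irq
 * @irq: irq number
 * @context: the ce_tasklet_entry registered with request_irq()
 *
 * Return: irqreturn_t as reported by ce_dispatch_interrupt()
 */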
irqreturn_t hif_ahb_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;

	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}

/**
 * hif_target_sync_ahb() - ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interrupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the targets other registers for the first time.
 *
 * Return: 0 on success, -EIO if the fw never signals readiness
 */
int hif_target_sync_ahb(struct hif_softc *scn)
{
	int val = 0;
	int limit = 0;

	while (limit < 50) {
		hif_write32_mb(scn, scn->mem +
			(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS),
			PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
		qdf_mdelay(10);
		val = hif_read32_mb(scn, scn->mem +
			(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
		if (val == 0)
			break;
		limit++;
	}
	hif_write32_mb(scn, scn->mem +
		(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS),
		PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	hif_write32_mb(scn, scn->mem + FW_INDICATOR_ADDRESS, FW_IND_HOST_READY);
	if (HAS_FW_INDICATOR) {
		int wait_limit = 500;
		int fw_ind = 0;

		while (1) {
			fw_ind = hif_read32_mb(scn, scn->mem +
					FW_INDICATOR_ADDRESS);
			if (fw_ind & FW_IND_INITIALIZED)
				break;
			if (wait_limit-- < 0)
				break;
			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				PCIE_INTR_FIRMWARE_MASK);
			qdf_mdelay(10);
		}
		if (wait_limit < 0) {
			HIF_TRACE("%s: FW signal timed out", __func__);
			return -EIO;
		}
		HIF_TRACE("%s: Got FW signal, retries = %x", __func__,
			  500 - wait_limit);
	}

	return 0;
}

/**
 * hif_ahb_disable_bus() - Disable the bus
 * @scn: pointer to the hif context
 *
 * This function disables the bus and holds the target in reset state
 *
 * Return: none
 */
void hif_ahb_disable_bus(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem;
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	struct resource *memres = NULL;
	int mem_pa_size = 0;
	struct hif_target_info *tgt_info = NULL;

	tgt_info = &scn->target_info;
	/* Disable WIFI clock input */
	if (sc->mem) {
		memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!memres) {
			HIF_INFO("%s: Failed to get IORESOURCE_MEM\n",
				 __func__);
			return;
		}
		mem_pa_size = memres->end - memres->start + 1;

		/* Should not be executed on 8074 platform */
		if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA8074V2)) {
			hif_ahb_clk_enable_disable(&pdev->dev, 0);

			hif_ahb_device_reset(scn);
		}
		mem = (void __iomem *)sc->mem;
		if (mem) {
			devm_iounmap(&pdev->dev, mem);
			devm_release_mem_region(&pdev->dev, scn->mem_pa,
						mem_pa_size);
			sc->mem = NULL;
		}
	}
	scn->mem = NULL;
}

/**
 * hif_ahb_enable_bus() - Enable the bus
 * @ol_sc: pointer to the hif context
 * @dev: dev
 * @bdev: bus dev
 * @bid: bus id
 * @type: bus type
 *
 * This function enables the radio bus by enabling necessary
 * clocks and waits for the target to get ready to proceed further
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_ahb_enable_bus(struct hif_softc *ol_sc,
		struct device *dev, void *bdev,
		const struct hif_bus_id *bid,
		enum hif_enable_type type)
{
	int ret = 0;
	int hif_type;
	int target_type;
	const struct platform_device_id *id = (struct platform_device_id *)bid;
	struct platform_device *pdev = bdev;
	struct hif_target_info *tgt_info = NULL;
	struct resource *memres = NULL;
	void __iomem *mem = NULL;
	uint32_t revision_id = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);

	sc->pdev = (struct pci_dev *)pdev;
	sc->dev = &pdev->dev;
	sc->devid = id->driver_data;

	ret = hif_get_device_type(id->driver_data, revision_id,
			&hif_type, &target_type);
	if (ret < 0) {
		HIF_ERROR("%s: invalid device ret %d id %d revision_id %d",
			__func__, ret, (int)id->driver_data, revision_id);
		return QDF_STATUS_E_FAILURE;
	}

	memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!memres) {
		HIF_INFO("%s: Failed to get IORESOURCE_MEM\n", __func__);
		return -EIO;
	}

	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (ret) {
		HIF_INFO("ath: 32-bit DMA not available\n");
		goto err_cleanup1;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
#else
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
#endif
	if (ret) {
		HIF_ERROR("%s: failed to set dma mask error = %d",
				__func__, ret);
		return ret;
	}

	/* Arrange for access to Target SoC registers. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
	mem = devm_ioremap_resource(&pdev->dev, memres);
#else
	mem = devm_request_and_ioremap(&pdev->dev, memres);
#endif
	if (IS_ERR(mem)) {
		HIF_INFO("ath: ioremap error\n");
		ret = PTR_ERR(mem);
		goto err_cleanup1;
	}

	sc->mem = mem;
	ol_sc->mem = mem;
	ol_sc->mem_pa = memres->start;

	tgt_info = hif_get_target_info_handle((struct hif_opaque_softc *)ol_sc);

	tgt_info->target_type = target_type;
	hif_register_tbl_attach(ol_sc, hif_type);
	hif_target_register_tbl_attach(ol_sc, target_type);

	/* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
	if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA8074V2)) {
		if (hif_ahb_enable_radio(sc, pdev, id) != 0) {
			HIF_INFO("error in enabling soc\n");
			return -EIO;
		}

		if (hif_target_sync_ahb(ol_sc) < 0) {
			ret = -EIO;
			goto err_target_sync;
		}
	}
	HIF_TRACE("%s: X - hif_type = 0x%x, target_type = 0x%x",
			__func__, hif_type, target_type);

	return QDF_STATUS_SUCCESS;
err_target_sync:
	/* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
	if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA8074V2)) {
		HIF_INFO("Error: Disabling target\n");
		hif_ahb_disable_bus(ol_sc);
	}
err_cleanup1:
	return ret;
}

/**
 * hif_ahb_reset_soc() - reset soc
 * @hif_ctx: HIF context
 *
 * This function resets the SoC and holds the
 * target in reset state
 *
 * Return: void
 */
void hif_ahb_reset_soc(struct hif_softc *hif_ctx)
{
	hif_ahb_device_reset(hif_ctx);
}

/**
 * hif_ahb_nointrs() - disable IRQ
 * @scn: struct hif_softc
 *
 * This function stops interrupt(s)
 *
 * Return: none
 */
void hif_ahb_nointrs(struct hif_softc *scn)
{
	int i;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *host_ce_conf = hif_state->host_ce_config;

	ce_unregister_irq(hif_state, CE_ALL_BITMAP);

	if (scn->request_irq_done == false)
		return;

	if (sc->num_msi_intrs > 0) {
		/* MSI interrupt(s) */
		for (i = 0; i < sc->num_msi_intrs; i++)
			free_irq(sc->irq + i, sc);
		sc->num_msi_intrs = 0;
	} else {
		if (!scn->per_ce_irq) {
			free_irq(sc->irq, sc);
		} else {
			for (i = 0; i < scn->ce_count; i++) {
				if (host_ce_conf[i].flags
						& CE_ATTR_DISABLE_INTR)
					continue;

				free_irq(ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i],
					 &hif_state->tasklets[i]);
			}
			hif_ahb_deconfigure_grp_irq(scn);
		}
	}
	scn->request_irq_done = false;
}

/**
 * hif_ahb_irq_enable() - enable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * This function enables the interrupt for the radio.
 *
 * Return: N/A
 */
void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf =
					&hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, scn->mem + reg_offset);
			regval |= HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, scn->mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, scn->mem + reg_offset);
			regval |= HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, scn->mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074) {
				/* Enable destination ring interrupts for
				 * 8074. TODO: To be removed in 2.0 HW
				 */
				regval = hif_read32_mb(scn, scn->mem +
						       HOST_IE_ADDRESS_3);
				regval |= HOST_IE_REG3_CE_BIT(ce_id);
				hif_write32_mb(scn,
					       scn->mem + HOST_IE_ADDRESS_3,
					       regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	} else {
		hif_pci_irq_enable(scn, ce_id);
	}
}

/**
 * hif_ahb_irq_disable() - disable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * Return: N/A
 */
void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf =
					&hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, scn->mem + reg_offset);
			regval &= ~HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, scn->mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, scn->mem + reg_offset);
			regval &= ~HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, scn->mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074) {
				/* Disable destination ring interrupts for
				 * 8074. TODO: To be removed in 2.0 HW
				 */
				regval = hif_read32_mb(scn, scn->mem +
						       HOST_IE_ADDRESS_3);
				regval &= ~HOST_IE_REG3_CE_BIT(ce_id);
				hif_write32_mb(scn,
					       scn->mem + HOST_IE_ADDRESS_3,
					       regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	}
}

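/**
 * hif_ahb_exec_grp_irq_disable() - disable all irqs of an ext group
 * @hif_ext_group: hif external (group) execution context
 *
 * Return: void
 */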
void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
{
	int i;

	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
	if (hif_ext_group->irq_enabled) {
		for (i = 0; i < hif_ext_group->numirq; i++)
			disable_irq_nosync(hif_ext_group->os_irq[i]);
		hif_ext_group->irq_enabled = false;
	}
	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
}

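/**
 * hif_ahb_exec_grp_irq_enable() - enable all irqs of an ext group
 * @hif_ext_group: hif external (group) execution context
 *
 * Return: void
 */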
void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
{
	int i;

	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
	if (!hif_ext_group->irq_enabled) {
		for (i = 0; i < hif_ext_group->numirq; i++)
			enable_irq(hif_ext_group->os_irq[i]);
		hif_ext_group->irq_enabled = true;
	}
	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
}

/**
 * hif_ahb_needs_bmi() - return true if the SoC needs BMI through the driver
 * @scn: hif context
 *
 * Return: true if the SoC needs driver BMI otherwise false
 */
bool hif_ahb_needs_bmi(struct hif_softc *scn)
{
	return !ce_srng_based(scn);
}