xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/snoc/if_ahb.c (revision 70a19e16789e308182f63b15c75decec7bf0b342)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: if_ahb.c
22  *
23  * c file for ahb specific implementations.
24  */
25 
26 #include "hif.h"
27 #include "target_type.h"
28 #include "hif_main.h"
29 #include "hif_debug.h"
30 #include "hif_io32.h"
31 #include "ce_main.h"
32 #include "ce_api.h"
33 #include "ce_tasklet.h"
34 #include "if_ahb.h"
35 #include "if_pci.h"
36 #include "ahb_api.h"
37 #include "pci_api.h"
38 #include "hif_napi.h"
39 #include "qal_vbus_dev.h"
40 #include "qdf_irq.h"
41 
42 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
43 #define IRQF_DISABLED 0x00000020
44 #endif
45 
#define HIF_IC_CE0_IRQ_OFFSET 4
#define HIF_IC_MAX_IRQ 58

/* OS-assigned IRQ numbers, filled in at irq-configure time and indexed
 * by the logical integrated-chip irq id (same index as ic_irqname).
 */
static uint16_t ic_irqnum[HIF_IC_MAX_IRQ];
/* integrated chip irq names; the strings are looked up by pfrm_get_irq()
 * and therefore must stay exactly as registered by the platform
 * (including the historical "reo2ost-exception" spelling).
 */
const char *ic_irqname[HIF_IC_MAX_IRQ] = {
	"misc-pulse1",
	"misc-latch",
	"sw-exception",
	"watchdog",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring4",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
	"txmon2host-monitor-destination-mac3",
	"txmon2host-monitor-destination-mac2",
	"txmon2host-monitor-destination-mac1",
	"host2tx-monitor-ring1",
	"umac_reset"
};

/**
 * hif_ahb_get_irq_name() - get irqname
 * @irq_no: logical irq number (0 .. HIF_IC_MAX_IRQ - 1)
 *
 * This function gives irqnumber to irqname
 * mapping.
 *
 * Return: irq name, or "unknown" when @irq_no is out of range
 */
const char *hif_ahb_get_irq_name(int irq_no)
{
	/* Guard against out-of-bounds reads for bogus irq numbers */
	if (irq_no < 0 || irq_no >= HIF_IC_MAX_IRQ)
		return "unknown";

	return ic_irqname[irq_no];
}
125 
126 /**
127  * hif_ahb_disable_isr() - disable isr
128  * @scn: struct hif_softc
129  *
130  * This function disables isr and kills tasklets
131  *
132  * Return: void
133  */
134 void hif_ahb_disable_isr(struct hif_softc *scn)
135 {
136 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
137 	hif_exec_kill(&scn->osc);
138 	hif_nointrs(scn);
139 	ce_tasklet_kill(scn);
140 	tasklet_kill(&sc->intr_tq);
141 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
142 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
143 }
144 
/**
 * hif_ahb_dump_registers() - dump bus debug registers
 * @hif_ctx: struct hif_softc
 *
 * This function dumps hif bus debug registers
 *
 * Return: 0 for success or error code
 */
int hif_ahb_dump_registers(struct hif_softc *hif_ctx)
{
	int status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	status = hif_dump_ce_registers(scn);
	if (status)
		hif_err("Dump CE Registers Failed status %d", status);

	/* Propagate the CE dump status instead of unconditionally
	 * reporting success, to match the documented contract.
	 */
	return status;
}
164 
/**
 * hif_ahb_close() - hif_bus_close
 * @scn: pointer to the hif context.
 *
 * Callback for hif_bus_close(); tears down the copy-engine state
 * created by hif_ahb_open().
 *
 * Return: n/a
 */
void hif_ahb_close(struct hif_softc *scn)
{
	hif_ce_close(scn);
}
178 
179 /**
180  * hif_ahb_open() - hif_ahb open
181  * @hif_ctx: hif context
182  * @bus_type: bus type
183  *
184  * This is a callback function for hif_bus_open.
185  *
186  * Return: QDF_STATUS
187  */
188 QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
189 {
190 
191 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
192 
193 	qdf_spinlock_create(&sc->irq_lock);
194 	return hif_ce_open(hif_ctx);
195 }
196 
/**
 * hif_ahb_bus_configure() - Configure the bus
 * @scn: pointer to the hif context.
 *
 * AHB shares the PCI bus-configure path, so simply delegate.
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_ahb_bus_configure(struct hif_softc *scn)
{
	return hif_pci_bus_configure(scn);
}
209 
210 static void hif_ahb_get_soc_info_pld(struct hif_pci_softc *sc,
211 				     struct device *dev)
212 {
213 	struct pld_soc_info info;
214 	int ret = 0;
215 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
216 
217 	ret = pld_get_soc_info(dev, &info);
218 	sc->mem = info.v_addr;
219 	sc->ce_sc.ol_sc.mem    = info.v_addr;
220 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
221 	/* dev_mem_info[0] is for CMEM */
222 	scn->cmem_start = info.dev_mem_info[0].start;
223 	scn->cmem_size = info.dev_mem_info[0].size;
224 }
225 
226 int hif_ahb_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
227 {
228 	int ret = 0;
229 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
230 	struct platform_device *pdev = (struct platform_device *)sc->pdev;
231 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
232 	int irq = 0;
233 
234 	if (ce_id >= CE_COUNT_MAX)
235 		return -EINVAL;
236 
237 	ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
238 			   ic_irqname[HIF_IC_CE0_IRQ_OFFSET + ce_id],
239 			   HIF_IC_CE0_IRQ_OFFSET + ce_id, &irq);
240 	if (ret) {
241 		dev_err(&pdev->dev, "get irq failed\n");
242 		ret = -EFAULT;
243 		goto end;
244 	}
245 
246 	ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + ce_id] = irq;
247 	ret = pfrm_request_irq(&pdev->dev, irq,
248 			       hif_ahb_interrupt_handler,
249 			       IRQF_TRIGGER_RISING,
250 			       ic_irqname[HIF_IC_CE0_IRQ_OFFSET + ce_id],
251 			       &hif_state->tasklets[ce_id]);
252 	if (ret) {
253 		dev_err(&pdev->dev, "ath_request_irq failed\n");
254 		ret = -EFAULT;
255 		goto end;
256 	}
257 	hif_ahb_irq_enable(scn, ce_id);
258 
259 end:
260 	return ret;
261 }
262 
263 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
264 {
265 	int ret = 0;
266 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
267 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
268 	struct CE_attr *host_ce_conf = hif_state->host_ce_config;
269 	int i;
270 
271 	/* configure per CE interrupts */
272 	for (i = 0; i < scn->ce_count; i++) {
273 		if (host_ce_conf[i].flags & CE_ATTR_DISABLE_INTR)
274 			continue;
275 
276 		if (host_ce_conf[i].flags & CE_ATTR_INIT_ON_DEMAND)
277 			continue;
278 
279 		ret = hif_ahb_configure_irq_by_ceid(scn, i);
280 		if (ret)
281 			goto end;
282 	}
283 
284 end:
285 	return ret;
286 }
287 
288 int hif_ahb_configure_grp_irq(struct hif_softc *scn,
289 			      struct hif_exec_context *hif_ext_group)
290 {
291 	int ret = 0;
292 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
293 	struct platform_device *pdev = (struct platform_device *)sc->pdev;
294 	int irq = 0;
295 	int j;
296 
297 	/* configure external interrupts */
298 	hif_ext_group->irq_enable = &hif_ahb_exec_grp_irq_enable;
299 	hif_ext_group->irq_disable = &hif_ahb_exec_grp_irq_disable;
300 	hif_ext_group->irq_name = &hif_ahb_get_irq_name;
301 	hif_ext_group->work_complete = &hif_dummy_grp_done;
302 
303 	for (j = 0; j < hif_ext_group->numirq; j++) {
304 		ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
305 				   ic_irqname[hif_ext_group->irq[j]],
306 				   hif_ext_group->irq[j], &irq);
307 		if (ret) {
308 			dev_err(&pdev->dev, "get irq failed\n");
309 			ret = -EFAULT;
310 			goto end;
311 		}
312 		ic_irqnum[hif_ext_group->irq[j]] = irq;
313 		hif_ext_group->os_irq[j] = irq;
314 	}
315 
316 	for (j = 0; j < hif_ext_group->numirq; j++) {
317 		irq = hif_ext_group->os_irq[j];
318 
319 		qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
320 		qdf_dev_set_irq_status_flags(irq, QDF_IRQ_DISABLE_UNLAZY);
321 		qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
322 
323 		ret = pfrm_request_irq(scn->qdf_dev->dev,
324 				       irq, hif_ext_group_interrupt_handler,
325 				       IRQF_TRIGGER_RISING | IRQF_SHARED,
326 				       ic_irqname[hif_ext_group->irq[j]],
327 				       hif_ext_group);
328 		if (ret) {
329 			dev_err(&pdev->dev, "ath_request_irq failed\n");
330 			ret = -EFAULT;
331 			goto end;
332 		}
333 	}
334 
335 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
336 	hif_ext_group->irq_requested = true;
337 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
338 end:
339 	return ret;
340 }
341 
342 void hif_ahb_deconfigure_grp_irq(struct hif_softc *scn)
343 {
344 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
345 	struct hif_exec_context *hif_ext_group;
346 	int i, j;
347 	int irq = 0;
348 
349 	/* configure external interrupts */
350 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
351 		hif_ext_group = hif_state->hif_ext_group[i];
352 		if (hif_ext_group->irq_requested == true) {
353 			qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
354 			hif_ext_group->irq_requested = false;
355 			for (j = 0; j < hif_ext_group->numirq; j++) {
356 				irq = hif_ext_group->os_irq[j];
357 				hif_ext_group->irq_enabled = false;
358 				qdf_dev_clear_irq_status_flags(
359 							irq,
360 							QDF_IRQ_DISABLE_UNLAZY);
361 			}
362 			qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
363 
364 			/* Avoid holding the irq_lock while freeing the irq
365 			 * as the same lock is being held by the irq handler
366 			 * while disabling the irq. This causes a deadlock
367 			 * between free_irq and irq_handler.
368 			 */
369 			for (j = 0; j < hif_ext_group->numirq; j++) {
370 				irq = hif_ext_group->os_irq[j];
371 				pfrm_free_irq(scn->qdf_dev->dev,
372 					      irq, hif_ext_group);
373 			}
374 		}
375 	}
376 }
377 
378 irqreturn_t hif_ahb_interrupt_handler(int irq, void *context)
379 {
380 	struct ce_tasklet_entry *tasklet_entry = context;
381 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
382 }
383 
384 /**
385  * hif_ahb_disable_bus() - Disable the bus
386  * @scn : pointer to the hif context
387  *
388  * This function disables the bus and holds the target in reset state.
389  * It releases every I/O mapping taken in hif_ahb_enable_bus().
390  *
391  * Return: none
392  */
392 void hif_ahb_disable_bus(struct hif_softc *scn)
393 {
394 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
395 	void __iomem *mem;
396 	struct platform_device *pdev = (struct platform_device *)sc->pdev;
397 	struct resource *memres = NULL;
398 	int mem_pa_size = 0;
399 	struct hif_target_info *tgt_info = NULL;
400 	struct qdf_vbus_resource *vmres = NULL;
401 	QDF_STATUS status;
402 
403 	tgt_info = &scn->target_info;
404 	/*Disable WIFI clock input*/
405 	if (sc->mem) {
		/* Re-query the register resource so the region size is
		 * known for the release below.
		 */
406 		status = pfrm_platform_get_resource(
407 				scn->qdf_dev->dev,
408 				(struct qdf_pfm_hndl *)pdev, &vmres,
409 				IORESOURCE_MEM, 0);
410 		if (QDF_IS_STATUS_ERROR(status)) {
411 			hif_info("Failed to get IORESOURCE_MEM");
412 			return;
413 		}
414 		memres = (struct resource *)vmres;
415 		if (memres)
416 			mem_pa_size = memres->end - memres->start + 1;
417 
		/* NOTE(review): mem_ce is unmapped without a NULL/IS_ERR
		 * check; assumes enable_bus always mapped it for these
		 * targets - confirm.
		 */
418 		if (tgt_info->target_type == TARGET_TYPE_QCA5018 ||
419 		    tgt_info->target_type == TARGET_TYPE_QCA5332) {
420 			iounmap(sc->mem_ce);
421 			sc->mem_ce = NULL;
422 			scn->mem_ce = NULL;
423 		}
424 		if (sc->mem_pmm_base) {
425 			iounmap(sc->mem_pmm_base);
426 			sc->mem_pmm_base = NULL;
427 			scn->mem_pmm_base = NULL;
428 		}
429 		if (sc->mem_cmem) {
430 			iounmap(sc->mem_cmem);
431 			sc->mem_cmem = NULL;
432 			scn->mem_cmem = NULL;
433 		}
		/* Finally drop the devm mapping of the main register space
		 * and the associated mem region, and clear the cached BAR.
		 */
434 		mem = (void __iomem *)sc->mem;
435 		if (mem) {
436 			pfrm_devm_iounmap(&pdev->dev, mem);
437 			pfrm_devm_release_mem_region(&pdev->dev, scn->mem_pa,
438 						     mem_pa_size);
439 			sc->mem = NULL;
440 			pld_set_bar_addr(&pdev->dev, NULL);
441 		}
442 	}
443 	scn->mem = NULL;
444 }
445 
446 /**
447  * hif_ahb_enable_bus() - Enable the bus
448  * @ol_sc: HIF context
449  * @dev: dev
450  * @bdev: bus dev
451  * @bid: bus id
452  * @type: bus type
453  *
454  * This function enables the radio bus by enabling necessary
455  * clocks and waits for the target to get ready to proceed further
456  *
457  * Return: QDF_STATUS
458  */
459 QDF_STATUS hif_ahb_enable_bus(struct hif_softc *ol_sc,
460 		struct device *dev, void *bdev,
461 		const struct hif_bus_id *bid,
462 		enum hif_enable_type type)
463 {
464 	int ret = 0;
465 	int hif_type;
466 	int target_type;
467 	const struct platform_device_id *id = (struct platform_device_id *)bid;
468 	struct platform_device *pdev = bdev;
469 	struct hif_target_info *tgt_info = NULL;
470 	struct resource *memres = NULL;
471 	void __iomem *mem = NULL;
472 	uint32_t revision_id = 0;
473 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
474 	QDF_STATUS status;
475 	struct qdf_vbus_resource *vmres = NULL;
476 
477 	sc->pdev = (struct pci_dev *)pdev;
478 	sc->dev = &pdev->dev;
479 	sc->devid = id->driver_data;
480 
	/* Derive hif/target types from the platform device id */
481 	ret = hif_get_device_type(id->driver_data, revision_id,
482 			&hif_type, &target_type);
483 	if (ret < 0) {
484 		hif_err("Invalid device ret %d id %d revision_id %d",
485 			ret, (int)id->driver_data, revision_id);
486 		return QDF_STATUS_E_FAILURE;
487 	}
488 
	/* These targets get their register base from pld, not a
	 * platform MEM resource.
	 */
489 	if (target_type == TARGET_TYPE_QCN6122 ||
490 	    target_type == TARGET_TYPE_QCN9160) {
491 		hif_ahb_get_soc_info_pld(sc, dev);
492 	}
493 
494 	/* 11BE SoC chipsets Need to call this function to get cmem addr */
495 	if (target_type == TARGET_TYPE_QCA5332)
496 		hif_ahb_get_soc_info_pld(sc, dev);
497 
498 	if (target_type == TARGET_TYPE_QCN6122 ||
499 	    target_type == TARGET_TYPE_QCN9160) {
500 		hif_update_irq_ops_with_pci(ol_sc);
501 	} else {
502 		status = pfrm_platform_get_resource(&pdev->dev,
503 						    (struct qdf_pfm_hndl *)pdev,
504 						    &vmres,
505 						    IORESOURCE_MEM, 0);
506 		if (QDF_IS_STATUS_ERROR(status)) {
507 			hif_err("Failed to get IORESOURCE_MEM");
508 			return status;
509 		}
510 		memres = (struct resource *)vmres;
511 		if (!memres) {
512 			hif_err("Failed to get IORESOURCE_MEM");
513 			return QDF_STATUS_E_IO;
514 		}
515 
516 		/* Arrange for access to Target SoC registers. */
517 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
518 		status = pfrm_devm_ioremap_resource(
519 					dev,
520 					(struct qdf_vbus_resource *)memres,
521 					&mem);
522 #else
523 		status = pfrm_devm_request_and_ioremap(
524 					dev,
525 					(struct qdf_vbus_resource *)memres,
526 					&mem);
527 #endif
528 		if (QDF_IS_STATUS_ERROR(status)) {
529 			hif_err("ath: ioremap error");
			/* NOTE(review): 'ret' is dead after this store;
			 * err_cleanup1 returns 'status' (already set above).
			 * Candidate for removal - confirm.
			 */
530 			ret = PTR_ERR(mem);
531 			goto err_cleanup1;
532 		}
533 
534 		sc->mem = mem;
535 		pld_set_bar_addr(dev, mem);
536 		ol_sc->mem = mem;
537 		ol_sc->mem_pa = memres->start;
538 	}
539 
540 	ret = pfrm_dma_set_mask(dev, 32);
541 	if (ret) {
542 		hif_err("ath: 32-bit DMA not available");
543 		status = QDF_STATUS_E_IO;
544 		goto err_cleanup1;
545 	}
546 
547 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
548 	ret = pfrm_dma_set_mask_and_coherent(dev, 32);
549 #else
550 	ret = pfrm_dma_set_coherent_mask(dev, 32);
551 #endif
552 	if (ret) {
553 		hif_err("Failed to set dma mask error = %d", ret);
554 		return QDF_STATUS_E_IO;
555 	}
556 
557 	tgt_info = hif_get_target_info_handle((struct hif_opaque_softc *)ol_sc);
558 
559 	tgt_info->target_type = target_type;
560 	hif_register_tbl_attach(ol_sc, hif_type);
561 	hif_target_register_tbl_attach(ol_sc, target_type);
562 	/*
563 	 * In QCA5018 CE region moved to SOC outside WCSS block.
564 	 * Allocate separate I/O remap to access CE registers.
565 	 */
566 	if (tgt_info->target_type == TARGET_TYPE_QCA5018 ||
567 	    tgt_info->target_type == TARGET_TYPE_QCA5332) {
		/* NOTE(review): 'scn' is unused in this scope (and in the
		 * QCA5332 block below) - candidates for removal.
		 */
568 		struct hif_softc *scn = HIF_GET_SOFTC(sc);
569 
		/* NOTE(review): ioremap_nocache() returns NULL on failure
		 * on Linux; IS_ERR() does not catch NULL, so a failed map
		 * would pass this check - confirm intended (applies to the
		 * cmem/pmm maps below as well).
		 */
570 		sc->mem_ce = ioremap_nocache(HOST_CE_ADDRESS, HOST_CE_SIZE);
571 		if (IS_ERR(sc->mem_ce)) {
572 			hif_err("CE: ioremap failed");
573 			return QDF_STATUS_E_IO;
574 		}
575 		ol_sc->mem_ce = sc->mem_ce;
576 	}
577 
578 	if (tgt_info->target_type == TARGET_TYPE_QCA5332) {
579 		struct hif_softc *scn = HIF_GET_SOFTC(sc);
580 
581 		/*
582 		 * In QCA5332 CMEM region is outside WCSS block.
583 		 * Allocate separate I/O remap to access CMEM address.
584 		 */
585 		sc->mem_cmem = ioremap_nocache(HOST_CMEM_ADDRESS,
586 					       HOST_CMEM_SIZE);
587 		if (IS_ERR(sc->mem_cmem)) {
588 			hif_err("CE: ioremap failed");
589 			return QDF_STATUS_E_IO;
590 		}
591 		ol_sc->mem_cmem = sc->mem_cmem;
592 
593 		/*
594 		 * PMM SCRATCH Register for QCA5332
595 		 */
596 		sc->mem_pmm_base = ioremap_nocache(PMM_SCRATCH_BASE,
597 						   PMM_SCRATCH_SIZE);
598 		if (IS_ERR(sc->mem_pmm_base)) {
599 			hif_err("CE: ioremap failed");
600 			return QDF_STATUS_E_IO;
601 		}
602 		ol_sc->mem_pmm_base = sc->mem_pmm_base;
603 	}
604 
605 	hif_info("X - hif_type = 0x%x, target_type = 0x%x",
606 		hif_type, target_type);
607 
608 	return QDF_STATUS_SUCCESS;
609 err_cleanup1:
610 	return status;
611 }
612 
613 /**
614  * hif_ahb_nointrs() - disable IRQ
615  *
616  * @scn: struct hif_softc
617  *
618  * This function stops interrupt(s) by unregistering the CE irqs and
619  * freeing whichever irq style (MSI, single legacy, or per-CE + group)
620  * was previously requested.
621  *
622  * Return: none
623  */
622 void hif_ahb_nointrs(struct hif_softc *scn)
623 {
624 	int i;
625 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
626 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
627 	struct CE_attr *host_ce_conf = hif_state->host_ce_config;
628 
	/* NOTE(review): presumably consumed by later teardown to skip a
	 * double free - confirm against callers.
	 */
629 	scn->free_irq_done = true;
630 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
631 
	/* Nothing to free if the irqs were never requested */
632 	if (scn->request_irq_done == false)
633 		return;
634 
635 	if (sc->num_msi_intrs > 0) {
636 		/* MSI interrupt(s) */
637 		for (i = 0; i < sc->num_msi_intrs; i++) {
638 			pfrm_free_irq(scn->qdf_dev->dev, sc->irq + i, sc);
639 		}
640 		sc->num_msi_intrs = 0;
641 	} else {
642 		if (!scn->per_ce_irq) {
			/* Single shared legacy irq */
643 			pfrm_free_irq(scn->qdf_dev->dev, sc->irq, sc);
644 		} else {
			/* Free each per-CE irq that was actually requested
			 * (irq numbers were cached in ic_irqnum at
			 * configure time), then the group irqs.
			 */
645 			for (i = 0; i < scn->ce_count; i++) {
646 				if (host_ce_conf[i].flags
647 						& CE_ATTR_DISABLE_INTR)
648 					continue;
649 				if (!hif_state->tasklets[i].inited)
650 					continue;
651 				pfrm_free_irq(
652 					scn->qdf_dev->dev,
653 					ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i],
654 					&hif_state->tasklets[i]);
655 			}
656 			hif_ahb_deconfigure_grp_irq(scn);
657 		}
658 	}
659 	scn->request_irq_done = false;
660 
661 }
662 
663 /**
664  * hif_ahb_irq_enable() - enable copy engine IRQ
665  * @scn: struct hif_softc
666  * @ce_id: ce_id
667  *
668  * This function enables the interrupt for the radio.
669  *
670  * Return: N/A
671  */
672 void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id)
673 {
674 	uint32_t regval;
675 	uint32_t reg_offset = 0;
676 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
677 	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
678 	struct hif_target_info *tgt_info = &scn->target_info;
	/* CE registers may live in a separately mapped region (mem_ce) */
679 	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;
680 
681 	if (scn->per_ce_irq) {
		/* Read-modify-write of the IE registers is serialized by
		 * irq_reg_lock so concurrent enables/disables of other
		 * CE bits are not lost.
		 */
682 		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
683 			reg_offset = HOST_IE_ADDRESS;
684 			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
685 			regval = hif_read32_mb(scn, mem + reg_offset);
686 			regval |= HOST_IE_REG1_CE_BIT(ce_id);
687 			hif_write32_mb(scn, mem + reg_offset, regval);
688 			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
689 		}
690 		if (target_ce_conf->pipedir & PIPEDIR_IN) {
691 			reg_offset = HOST_IE_ADDRESS_2;
692 			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
693 			regval = hif_read32_mb(scn, mem + reg_offset);
694 			regval |= HOST_IE_REG2_CE_BIT(ce_id);
695 			hif_write32_mb(scn, mem + reg_offset, regval);
696 			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
697 			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
698 			    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
699 			    tgt_info->target_type == TARGET_TYPE_QCA5332 ||
700 			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
701 			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
702 				/* Enable destination ring interrupts for
703 				 * 8074, 8074V2, 9574, 5018, 5332 and 6018
704 				 */
705 				regval = hif_read32_mb(scn, mem +
706 					HOST_IE_ADDRESS_3);
707 				regval |= HOST_IE_REG3_CE_BIT(ce_id);
708 
709 				hif_write32_mb(scn, mem +
710 					       HOST_IE_ADDRESS_3, regval);
711 			}
712 			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
713 		}
714 	} else {
		/* Legacy single-irq mode shares the PCI enable path */
715 		hif_pci_irq_enable(scn, ce_id);
716 	}
717 }
718 
719 /**
720  * hif_ahb_irq_disable() - disable copy engine IRQ
721  * @scn: struct hif_softc
722  * @ce_id: ce_id
723  *
724  * Clears this CE's bits in the host interrupt-enable registers
725  * (mirror of hif_ahb_irq_enable()).
726  *
727  * Return: N/A
728  */
726 void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id)
727 {
728 	uint32_t regval;
729 	uint32_t reg_offset = 0;
730 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
731 	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
732 	struct hif_target_info *tgt_info = &scn->target_info;
	/* CE registers may live in a separately mapped region (mem_ce) */
733 	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;
734 
	/* NOTE(review): unlike hif_ahb_irq_enable(), there is no
	 * !per_ce_irq fallback (no hif_pci_irq_disable() call) -
	 * confirm the asymmetry is intentional.
	 */
735 	if (scn->per_ce_irq) {
736 		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
737 			reg_offset = HOST_IE_ADDRESS;
738 			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
739 			regval = hif_read32_mb(scn, mem + reg_offset);
740 			regval &= ~HOST_IE_REG1_CE_BIT(ce_id);
741 			hif_write32_mb(scn, mem + reg_offset, regval);
742 			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
743 		}
744 		if (target_ce_conf->pipedir & PIPEDIR_IN) {
745 			reg_offset = HOST_IE_ADDRESS_2;
746 			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
747 			regval = hif_read32_mb(scn, mem + reg_offset);
748 			regval &= ~HOST_IE_REG2_CE_BIT(ce_id);
749 			hif_write32_mb(scn, mem + reg_offset, regval);
750 			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
751 			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
752 			    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
753 			    tgt_info->target_type == TARGET_TYPE_QCA5332 ||
754 			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
755 			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
756 				/* Disable destination ring interrupts for
757 				 * 8074, 8074V2, 9574, 5018, 5332 and 6018
758 				 */
759 				regval = hif_read32_mb(scn, mem +
760 					HOST_IE_ADDRESS_3);
761 				regval &= ~HOST_IE_REG3_CE_BIT(ce_id);
762 
763 				hif_write32_mb(scn, mem +
764 					       HOST_IE_ADDRESS_3, regval);
765 			}
766 			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
767 		}
768 	}
769 }
770 
771 void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
772 {
773 	int i;
774 
775 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
776 	if (hif_ext_group->irq_enabled) {
777 		for (i = 0; i < hif_ext_group->numirq; i++) {
778 			disable_irq_nosync(hif_ext_group->os_irq[i]);
779 		}
780 		hif_ext_group->irq_enabled = false;
781 	}
782 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
783 }
784 
785 void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
786 {
787 	int i;
788 
789 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
790 	if (hif_ext_group->irq_requested && !hif_ext_group->irq_enabled) {
791 		for (i = 0; i < hif_ext_group->numirq; i++) {
792 			enable_irq(hif_ext_group->os_irq[i]);
793 		}
794 		hif_ext_group->irq_enabled = true;
795 	}
796 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
797 }
798 
799 /**
800  * hif_ahb_needs_bmi() - return true if the soc needs bmi through the driver
801  * @scn: hif context
802  *
803  * Return: true if soc needs driver bmi otherwise false
804  */
805 bool hif_ahb_needs_bmi(struct hif_softc *scn)
806 {
807 	return !ce_srng_based(scn);
808 }
809 
810 /**
811  * hif_display_ahb_irq_regs() - prints the host interrupt enable (IE) regs
812  * @scn: hif context
813  *
814  * Return: None
815  */
816 
817 void hif_display_ahb_irq_regs(struct hif_softc *scn)
818 {
819 	uint32_t regval;
820 	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;
821 	struct hif_target_info *tgt_info = &scn->target_info;
822 
823 	if (tgt_info->target_type == TARGET_TYPE_QCN6122 ||
824 	    tgt_info->target_type == TARGET_TYPE_QCN9160) {
825 		return;
826 	}
827 	if (scn->per_ce_irq) {
828 		regval = hif_read32_mb(scn, mem + HOST_IE_ADDRESS);
829 		hif_nofl_err("IRQ enable register value 0x%08x", regval);
830 
831 		regval = hif_read32_mb(scn, mem + HOST_IE_ADDRESS_2);
832 		hif_nofl_err("IRQ enable register 2 value 0x%08x", regval);
833 
834 		if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
835 		    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
836 		    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
837 		    tgt_info->target_type == TARGET_TYPE_QCA5332 ||
838 		    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
839 		    tgt_info->target_type == TARGET_TYPE_QCA6018) {
840 			regval = hif_read32_mb(scn, mem +
841 					       HOST_IE_ADDRESS_3);
842 			hif_nofl_err("IRQ enable register 3 value 0x%08x",
843 				     regval);
844 		}
845 	}
846 }
847 
/**
 * hif_ahb_display_stats() - dump the IE registers and CE statistics
 * @scn: hif context (may be NULL; logged and ignored)
 */
void hif_ahb_display_stats(struct hif_softc *scn)
{
	if (!scn) {
		hif_err("hif_scn null");
		return;
	}

	hif_display_ahb_irq_regs(scn);
	hif_display_ce_stats(scn);
}
857 
/**
 * hif_ahb_clear_stats() - reset the CE statistics
 * @scn: hif context
 */
void hif_ahb_clear_stats(struct hif_softc *scn)
{
	struct HIF_CE_state *ce_state = HIF_GET_CE_STATE(scn);

	if (!ce_state) {
		hif_err("hif_state null");
		return;
	}

	hif_clear_ce_stats(ce_state);
}
868