xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/snoc/if_ahb.c (revision b62151f8dd0743da724a4533988c78d2c7385d4f)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: if_ahb.c
22  *
23  * c file for ahb specific implementations.
24  */
25 
26 #include "hif.h"
27 #include "target_type.h"
28 #include "hif_main.h"
29 #include "hif_debug.h"
30 #include "hif_io32.h"
31 #include "ce_main.h"
32 #include "ce_api.h"
33 #include "ce_tasklet.h"
34 #include "if_ahb.h"
35 #include "if_pci.h"
36 #include "ahb_api.h"
37 #include "pci_api.h"
38 #include "hif_napi.h"
39 #include "qal_vbus_dev.h"
40 #include "qdf_irq.h"
41 
42 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
43 #define IRQF_DISABLED 0x00000020
44 #endif
45 
46 #define HIF_IC_CE0_IRQ_OFFSET 4
47 #define HIF_IC_MAX_IRQ 58
48 
/*
 * OS IRQ numbers recorded at request time, indexed by the logical
 * integrated-chip interrupt id (same index space as ic_irqname below).
 */
static uint16_t ic_irqnum[HIF_IC_MAX_IRQ];
/* integrated chip irq names
 *
 * These strings are lookup keys passed to pfrm_get_irq() to resolve the
 * OS irq number for each logical interrupt; they must match the names
 * the platform (e.g. device tree) publishes. Do not edit them.
 */
const char *ic_irqname[HIF_IC_MAX_IRQ] = {
"misc-pulse1",
"misc-latch",
"sw-exception",
"watchdog",
"ce0",
"ce1",
"ce2",
"ce3",
"ce4",
"ce5",
"ce6",
"ce7",
"ce8",
"ce9",
"ce10",
"ce11",
"host2wbm-desc-feed",
"host2reo-re-injection",
"host2reo-command",
"host2rxdma-monitor-ring3",
"host2rxdma-monitor-ring2",
"host2rxdma-monitor-ring1",
/* NOTE(review): looks like a typo for "reo2host-exception", but this is
 * an external lookup key — confirm against the platform irq names
 * before ever changing it.
 */
"reo2ost-exception",
"wbm2host-rx-release",
"reo2host-status",
"reo2host-destination-ring4",
"reo2host-destination-ring3",
"reo2host-destination-ring2",
"reo2host-destination-ring1",
"rxdma2host-monitor-destination-mac3",
"rxdma2host-monitor-destination-mac2",
"rxdma2host-monitor-destination-mac1",
"ppdu-end-interrupts-mac3",
"ppdu-end-interrupts-mac2",
"ppdu-end-interrupts-mac1",
"rxdma2host-monitor-status-ring-mac3",
"rxdma2host-monitor-status-ring-mac2",
"rxdma2host-monitor-status-ring-mac1",
"host2rxdma-host-buf-ring-mac3",
"host2rxdma-host-buf-ring-mac2",
"host2rxdma-host-buf-ring-mac1",
"rxdma2host-destination-ring-mac3",
"rxdma2host-destination-ring-mac2",
"rxdma2host-destination-ring-mac1",
"host2tcl-input-ring4",
"host2tcl-input-ring3",
"host2tcl-input-ring2",
"host2tcl-input-ring1",
"wbm2host-tx-completions-ring4",
"wbm2host-tx-completions-ring3",
"wbm2host-tx-completions-ring2",
"wbm2host-tx-completions-ring1",
"tcl2host-status-ring",
"txmon2host-monitor-destination-mac3",
"txmon2host-monitor-destination-mac2",
"txmon2host-monitor-destination-mac1",
"host2tx-monitor-ring1",
};
110 
111 /**
112  * hif_ahb_get_irq_name() - get irqname
113  * @irq_no: irq number
114  *
115  * This function gives irqnumber to irqname
116  * mapping.
117  *
118  * Return: irq name
119  */
120 const char *hif_ahb_get_irq_name(int irq_no)
121 {
122 	return ic_irqname[irq_no];
123 }
124 
/**
 * hif_ahb_disable_isr() - disable isr
 * @scn: struct hif_softc
 *
 * Kills the external-group execution contexts, detaches the bus
 * interrupts (hif_nointrs), then kills the CE tasklets and the legacy
 * interrupt tasklet so no deferred work runs after this returns.
 *
 * Return: void
 */
void hif_ahb_disable_isr(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	hif_exec_kill(&scn->osc);
	hif_nointrs(scn);
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	/* nothing can be pending past this point; reset the counters */
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}
143 
/**
 * hif_ahb_dump_registers() - dump bus debug registers
 * @hif_ctx: hif context (struct hif_softc)
 *
 * This function dumps hif bus debug registers
 *
 * Return: 0 always; a CE register dump failure is only logged, not
 *         propagated to the caller.
 */
int hif_ahb_dump_registers(struct hif_softc *hif_ctx)
{
	int status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	status = hif_dump_ce_registers(scn);
	if (status)
		hif_err("Dump CE Registers Failed status %d", status);

	return 0;
}
163 
/**
 * hif_ahb_close() - hif_bus_close
 * @scn: pointer to the hif context.
 *
 * This is a callback function for hif_bus_close. For AHB the only
 * bus-level teardown needed is releasing the copy-engine state.
 *
 * Return: n/a
 */
void hif_ahb_close(struct hif_softc *scn)
{
	hif_ce_close(scn);
}
177 
178 /**
179  * hif_ahb_open() - hif_ahb open
180  * @hif_ctx: hif context
181  * @bus_type: bus type
182  *
183  * This is a callback function for hif_bus_open.
184  *
185  * Return: QDF_STATUS
186  */
187 QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
188 {
189 
190 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
191 
192 	qdf_spinlock_create(&sc->irq_lock);
193 	return hif_ce_open(hif_ctx);
194 }
195 
/**
 * hif_ahb_bus_configure() - Configure the bus
 * @scn: pointer to the hif context.
 *
 * This function configures the ahb bus; the configuration path is
 * shared with PCI, so it simply delegates.
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_ahb_bus_configure(struct hif_softc *scn)
{
	return hif_pci_bus_configure(scn);
}
208 
209 static void hif_ahb_get_bar_addr_pld(struct hif_pci_softc *sc,
210 				     struct device *dev)
211 {
212 	struct pld_soc_info info;
213 	int ret = 0;
214 
215 	ret = pld_get_soc_info(dev, &info);
216 	sc->mem = info.v_addr;
217 	sc->ce_sc.ol_sc.mem    = info.v_addr;
218 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
219 }
220 
221 static void hif_ahb_get_soc_cmem_info_pld(struct hif_pci_softc *sc,
222 					  struct device *dev)
223 {
224 	struct pld_soc_info info;
225 	int ret = 0;
226 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
227 
228 	ret = pld_get_soc_info(dev, &info);
229 	/* dev_mem_info[0] is for CMEM */
230 	scn->cmem_start = info.dev_mem_info[0].start;
231 	scn->cmem_size = info.dev_mem_info[0].size;
232 }
233 
234 int hif_ahb_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
235 {
236 	int ret = 0;
237 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
238 	struct platform_device *pdev = (struct platform_device *)sc->pdev;
239 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
240 	int irq = 0;
241 
242 	if (ce_id >= CE_COUNT_MAX)
243 		return -EINVAL;
244 
245 	ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
246 			   ic_irqname[HIF_IC_CE0_IRQ_OFFSET + ce_id],
247 			   HIF_IC_CE0_IRQ_OFFSET + ce_id, &irq);
248 	if (ret) {
249 		dev_err(&pdev->dev, "get irq failed\n");
250 		ret = -EFAULT;
251 		goto end;
252 	}
253 
254 	ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + ce_id] = irq;
255 	ret = pfrm_request_irq(&pdev->dev, irq,
256 			       hif_ahb_interrupt_handler,
257 			       IRQF_TRIGGER_RISING,
258 			       ic_irqname[HIF_IC_CE0_IRQ_OFFSET + ce_id],
259 			       &hif_state->tasklets[ce_id]);
260 	if (ret) {
261 		dev_err(&pdev->dev, "ath_request_irq failed\n");
262 		ret = -EFAULT;
263 		goto end;
264 	}
265 	hif_ahb_irq_enable(scn, ce_id);
266 
267 end:
268 	return ret;
269 }
270 
271 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
272 {
273 	int ret = 0;
274 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
275 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
276 	struct CE_attr *host_ce_conf = hif_state->host_ce_config;
277 	int i;
278 
279 	/* configure per CE interrupts */
280 	for (i = 0; i < scn->ce_count; i++) {
281 		if (host_ce_conf[i].flags & CE_ATTR_DISABLE_INTR)
282 			continue;
283 
284 		if (host_ce_conf[i].flags & CE_ATTR_INIT_ON_DEMAND)
285 			continue;
286 
287 		ret = hif_ahb_configure_irq_by_ceid(scn, i);
288 		if (ret)
289 			goto end;
290 	}
291 
292 end:
293 	return ret;
294 }
295 
/**
 * hif_ahb_configure_grp_irq() - request the OS irqs of an ext interrupt group
 * @scn: hif context
 * @hif_ext_group: execution context whose irqs should be wired up
 *
 * Installs the group enable/disable/name callbacks, resolves every
 * logical irq in the group to an OS irq number, marks each irq for
 * unlazy disable, and requests it with the shared group handler.
 *
 * Return: 0 on success, -EFAULT on irq lookup/request failure
 */
int hif_ahb_configure_grp_irq(struct hif_softc *scn,
			      struct hif_exec_context *hif_ext_group)
{
	int ret = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	int irq = 0;
	int j;

	/* configure external interrupts */
	hif_ext_group->irq_enable = &hif_ahb_exec_grp_irq_enable;
	hif_ext_group->irq_disable = &hif_ahb_exec_grp_irq_disable;
	hif_ext_group->irq_name = &hif_ahb_get_irq_name;
	hif_ext_group->work_complete = &hif_dummy_grp_done;

	/* first pass: resolve all logical ids to OS irq numbers */
	for (j = 0; j < hif_ext_group->numirq; j++) {
		ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
				   ic_irqname[hif_ext_group->irq[j]],
				   hif_ext_group->irq[j], &irq);
		if (ret) {
			dev_err(&pdev->dev, "get irq failed\n");
			ret = -EFAULT;
			goto end;
		}
		ic_irqnum[hif_ext_group->irq[j]] = irq;
		hif_ext_group->os_irq[j] = irq;
	}

	/* second pass: flag and request each resolved irq */
	for (j = 0; j < hif_ext_group->numirq; j++) {
		irq = hif_ext_group->os_irq[j];

		qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
		qdf_dev_set_irq_status_flags(irq, QDF_IRQ_DISABLE_UNLAZY);
		qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);

		ret = pfrm_request_irq(scn->qdf_dev->dev,
				       irq, hif_ext_group_interrupt_handler,
				       IRQF_TRIGGER_RISING | IRQF_SHARED,
				       ic_irqname[hif_ext_group->irq[j]],
				       hif_ext_group);
		if (ret) {
			dev_err(&pdev->dev, "ath_request_irq failed\n");
			ret = -EFAULT;
			goto end;
		}
	}

	/* publish irq_requested only after every request succeeded */
	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
	hif_ext_group->irq_requested = true;
	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
end:
	return ret;
}
349 
/**
 * hif_ahb_deconfigure_grp_irq() - free the OS irqs of all ext interrupt groups
 * @scn: hif context
 *
 * For every group that has irqs requested: clear irq_requested and the
 * unlazy-disable flags under the group's irq_lock, then free the irqs
 * outside the lock (see the deadlock note below).
 *
 * Return: void
 */
void hif_ahb_deconfigure_grp_irq(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i, j;
	int irq = 0;

	/* configure external interrupts */
	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		if (hif_ext_group->irq_requested == true) {
			qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
			hif_ext_group->irq_requested = false;
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				hif_ext_group->irq_enabled = false;
				qdf_dev_clear_irq_status_flags(
							irq,
							QDF_IRQ_DISABLE_UNLAZY);
			}
			qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);

			/* Avoid holding the irq_lock while freeing the irq
			 * as the same lock is being held by the irq handler
			 * while disabling the irq. This causes a deadlock
			 * between free_irq and irq_handler.
			 */
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				pfrm_free_irq(scn->qdf_dev->dev,
					      irq, hif_ext_group);
			}
		}
	}
}
385 
/**
 * hif_ahb_interrupt_handler() - per-CE interrupt handler
 * @irq: OS irq number (unused)
 * @context: the ce_tasklet_entry registered for this irq
 *
 * Return: result of ce_dispatch_interrupt() for the entry's CE id
 */
irqreturn_t hif_ahb_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}
391 
/**
 * hif_ahb_disable_bus() - Disable the bus
 * @scn: pointer to the hif context
 *
 * This function disables the bus and holds the target in reset state.
 * It unmaps the extra CE/CMEM/PMM windows (where mapped), then the main
 * register window, and releases the memory region.
 *
 * Return: none
 */
void hif_ahb_disable_bus(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem;
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	struct resource *memres = NULL;
	int mem_pa_size = 0;
	struct hif_target_info *tgt_info = NULL;
	struct qdf_vbus_resource *vmres = NULL;
	QDF_STATUS status;

	tgt_info = &scn->target_info;
	/*Disable WIFI clock input*/
	if (sc->mem) {
		/* re-query the resource to learn the region size to release */
		status = pfrm_platform_get_resource(
				scn->qdf_dev->dev,
				(struct qdf_pfm_hndl *)pdev, &vmres,
				IORESOURCE_MEM, 0);
		if (QDF_IS_STATUS_ERROR(status)) {
			hif_info("Failed to get IORESOURCE_MEM");
			return;
		}
		memres = (struct resource *)vmres;
		if (memres)
			mem_pa_size = memres->end - memres->start + 1;

		/* separately-mapped CE window exists only on these targets */
		if (tgt_info->target_type == TARGET_TYPE_QCA5018 ||
		    tgt_info->target_type == TARGET_TYPE_QCA5332) {
			iounmap(sc->mem_ce);
			sc->mem_ce = NULL;
			scn->mem_ce = NULL;
		}
		if (sc->mem_pmm_base) {
			iounmap(sc->mem_pmm_base);
			sc->mem_pmm_base = NULL;
			scn->mem_pmm_base = NULL;
		}
		if (sc->mem_cmem) {
			iounmap(sc->mem_cmem);
			sc->mem_cmem = NULL;
			scn->mem_cmem = NULL;
		}
		mem = (void __iomem *)sc->mem;
		if (mem) {
			pfrm_devm_iounmap(&pdev->dev, mem);
			pfrm_devm_release_mem_region(&pdev->dev, scn->mem_pa,
						     mem_pa_size);
			sc->mem = NULL;
			pld_set_bar_addr(&pdev->dev, NULL);
		}
	}
	scn->mem = NULL;
}
453 
454 /**
455  * hif_ahb_enable_bus() - Enable the bus
456  * @ol_sc: HIF context
457  * @dev: dev
458  * @bdev: bus dev
459  * @bid: bus id
460  * @type: bus type
461  *
462  * This function enables the radio bus by enabling necessary
463  * clocks and waits for the target to get ready to proceed further
464  *
465  * Return: QDF_STATUS
466  */
467 QDF_STATUS hif_ahb_enable_bus(struct hif_softc *ol_sc,
468 		struct device *dev, void *bdev,
469 		const struct hif_bus_id *bid,
470 		enum hif_enable_type type)
471 {
472 	int ret = 0;
473 	int hif_type;
474 	int target_type;
475 	const struct platform_device_id *id = (struct platform_device_id *)bid;
476 	struct platform_device *pdev = bdev;
477 	struct hif_target_info *tgt_info = NULL;
478 	struct resource *memres = NULL;
479 	void __iomem *mem = NULL;
480 	uint32_t revision_id = 0;
481 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
482 	QDF_STATUS status;
483 	struct qdf_vbus_resource *vmres = NULL;
484 
485 	sc->pdev = (struct pci_dev *)pdev;
486 	sc->dev = &pdev->dev;
487 	sc->devid = id->driver_data;
488 
489 	ret = hif_get_device_type(id->driver_data, revision_id,
490 			&hif_type, &target_type);
491 	if (ret < 0) {
492 		hif_err("Invalid device ret %d id %d revision_id %d",
493 			ret, (int)id->driver_data, revision_id);
494 		return QDF_STATUS_E_FAILURE;
495 	}
496 
497 	if (target_type == TARGET_TYPE_QCN6122 ||
498 	    target_type == TARGET_TYPE_QCN9160 ||
499 	    target_type == TARGET_TYPE_QCN6432) {
500 		hif_ahb_get_bar_addr_pld(sc, dev);
501 	}
502 
503 	/* 11BE SoC chipsets Need to call this function to get cmem addr */
504 	if (target_type == TARGET_TYPE_QCA5332)
505 		hif_ahb_get_soc_cmem_info_pld(sc, dev);
506 
507 	if (target_type == TARGET_TYPE_QCN6122 ||
508 	    target_type == TARGET_TYPE_QCN9160 ||
509 	    target_type == TARGET_TYPE_QCN6432) {
510 		hif_update_irq_ops_with_pci(ol_sc);
511 	} else {
512 		status = pfrm_platform_get_resource(&pdev->dev,
513 						    (struct qdf_pfm_hndl *)pdev,
514 						    &vmres,
515 						    IORESOURCE_MEM, 0);
516 		if (QDF_IS_STATUS_ERROR(status)) {
517 			hif_err("Failed to get IORESOURCE_MEM");
518 			return status;
519 		}
520 		memres = (struct resource *)vmres;
521 		if (!memres) {
522 			hif_err("Failed to get IORESOURCE_MEM");
523 			return QDF_STATUS_E_IO;
524 		}
525 
526 		/* Arrange for access to Target SoC registers. */
527 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
528 		status = pfrm_devm_ioremap_resource(
529 					dev,
530 					(struct qdf_vbus_resource *)memres,
531 					&mem);
532 #else
533 		status = pfrm_devm_request_and_ioremap(
534 					dev,
535 					(struct qdf_vbus_resource *)memres,
536 					&mem);
537 #endif
538 		if (QDF_IS_STATUS_ERROR(status)) {
539 			hif_err("ath: ioremap error");
540 			ret = PTR_ERR(mem);
541 			goto err_cleanup1;
542 		}
543 
544 		sc->mem = mem;
545 		pld_set_bar_addr(dev, mem);
546 		ol_sc->mem = mem;
547 		ol_sc->mem_pa = memres->start;
548 	}
549 
550 	ret = pfrm_dma_set_mask(dev, 32);
551 	if (ret) {
552 		hif_err("ath: 32-bit DMA not available");
553 		status = QDF_STATUS_E_IO;
554 		goto err_cleanup1;
555 	}
556 
557 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
558 	ret = pfrm_dma_set_mask_and_coherent(dev, 32);
559 #else
560 	ret = pfrm_dma_set_coherent_mask(dev, 32);
561 #endif
562 	if (ret) {
563 		hif_err("Failed to set dma mask error = %d", ret);
564 		return QDF_STATUS_E_IO;
565 	}
566 
567 	tgt_info = hif_get_target_info_handle((struct hif_opaque_softc *)ol_sc);
568 
569 	tgt_info->target_type = target_type;
570 	hif_register_tbl_attach(ol_sc, hif_type);
571 	hif_target_register_tbl_attach(ol_sc, target_type);
572 	/*
573 	 * In QCA5018 CE region moved to SOC outside WCSS block.
574 	 * Allocate separate I/O remap to access CE registers.
575 	 */
576 	if (tgt_info->target_type == TARGET_TYPE_QCA5018 ||
577 	    tgt_info->target_type == TARGET_TYPE_QCA5332) {
578 		struct hif_softc *scn = HIF_GET_SOFTC(sc);
579 
580 		sc->mem_ce = qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE);
581 		if (IS_ERR(sc->mem_ce)) {
582 			hif_err("CE: ioremap failed");
583 			return QDF_STATUS_E_IO;
584 		}
585 		ol_sc->mem_ce = sc->mem_ce;
586 	}
587 
588 	if (tgt_info->target_type == TARGET_TYPE_QCA5332) {
589 		struct hif_softc *scn = HIF_GET_SOFTC(sc);
590 
591 		/*
592 		 * In QCA5332 CMEM region is outside WCSS block.
593 		 * Allocate separate I/O remap to access CMEM address.
594 		 */
595 		sc->mem_cmem = qdf_ioremap(HOST_CMEM_ADDRESS, HOST_CMEM_SIZE);
596 		if (IS_ERR(sc->mem_cmem)) {
597 			hif_err("CE: ioremap failed");
598 			return QDF_STATUS_E_IO;
599 		}
600 		ol_sc->mem_cmem = sc->mem_cmem;
601 
602 		/*
603 		 * PMM SCRATCH Register for QCA5332
604 		 */
605 		sc->mem_pmm_base = qdf_ioremap(PMM_SCRATCH_BASE,
606 						   PMM_SCRATCH_SIZE);
607 		if (IS_ERR(sc->mem_pmm_base)) {
608 			hif_err("CE: ioremap failed");
609 			return QDF_STATUS_E_IO;
610 		}
611 		ol_sc->mem_pmm_base = sc->mem_pmm_base;
612 	}
613 
614 	hif_info("X - hif_type = 0x%x, target_type = 0x%x",
615 		hif_type, target_type);
616 
617 	return QDF_STATUS_SUCCESS;
618 err_cleanup1:
619 	return status;
620 }
621 
/**
 * hif_ahb_nointrs() - disable IRQ
 *
 * @scn: struct hif_softc
 *
 * This function stops interrupt(s): it unregisters the CE irqs, then
 * frees either the MSI vectors, the single legacy irq, or (in per-CE
 * mode) each CE irq plus the ext-group irqs, depending on how the
 * interrupts were set up.
 *
 * Return: none
 */
void hif_ahb_nointrs(struct hif_softc *scn)
{
	int i;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *host_ce_conf = hif_state->host_ce_config;

	scn->free_irq_done = true;
	ce_unregister_irq(hif_state, CE_ALL_BITMAP);

	/* nothing to free if the irqs were never requested */
	if (scn->request_irq_done == false)
		return;

	if (sc->num_msi_intrs > 0) {
		/* MSI interrupt(s) */
		for (i = 0; i < sc->num_msi_intrs; i++) {
			pfrm_free_irq(scn->qdf_dev->dev, sc->irq + i, sc);
		}
		sc->num_msi_intrs = 0;
	} else {
		if (!scn->per_ce_irq) {
			/* single shared legacy interrupt */
			pfrm_free_irq(scn->qdf_dev->dev, sc->irq, sc);
		} else {
			/* one irq per CE, as recorded in ic_irqnum[] */
			for (i = 0; i < scn->ce_count; i++) {
				if (host_ce_conf[i].flags
						& CE_ATTR_DISABLE_INTR)
					continue;
				if (!hif_state->tasklets[i].inited)
					continue;
				pfrm_free_irq(
					scn->qdf_dev->dev,
					ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i],
					&hif_state->tasklets[i]);
			}
			hif_ahb_deconfigure_grp_irq(scn);
		}
	}
	scn->request_irq_done = false;

}
671 
/**
 * hif_ahb_irq_enable() - enable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * This function enables the interrupt for the radio. In per-CE irq
 * mode it sets the CE's bit in the relevant host interrupt-enable
 * registers (source and/or destination side, per the pipe direction)
 * with a read-modify-write under irq_reg_lock; otherwise it defers to
 * the PCI path.
 *
 * Return: N/A
 */
void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;
	/* CE registers may live in a separately mapped window */
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval |= HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval |= HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5332 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Enable destination ring interrupts for
				 * 8074, 8074V2, 6018 and 50xx
				 */
				regval = hif_read32_mb(scn, mem +
					HOST_IE_ADDRESS_3);
				regval |= HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	} else {
		hif_pci_irq_enable(scn, ce_id);
	}
}
727 
/**
 * hif_ahb_irq_disable() - disable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * Mirror of hif_ahb_irq_enable(): in per-CE irq mode, clears the CE's
 * bit in the relevant host interrupt-enable registers under
 * irq_reg_lock. Note there is no else-branch PCI fallback here.
 *
 * Return: N/A
 */
void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;
	/* CE registers may live in a separately mapped window */
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval &= ~HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval &= ~HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5332 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Disable destination ring interrupts for
				 * 8074, 8074V2, 6018 and 50xx
				 */
				regval = hif_read32_mb(scn, mem +
					HOST_IE_ADDRESS_3);
				regval &= ~HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	}
}
779 
780 void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
781 {
782 	int i;
783 
784 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
785 	if (hif_ext_group->irq_enabled) {
786 		for (i = 0; i < hif_ext_group->numirq; i++) {
787 			disable_irq_nosync(hif_ext_group->os_irq[i]);
788 		}
789 		hif_ext_group->irq_enabled = false;
790 	}
791 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
792 }
793 
794 void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
795 {
796 	int i;
797 
798 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
799 	if (hif_ext_group->irq_requested && !hif_ext_group->irq_enabled) {
800 		for (i = 0; i < hif_ext_group->numirq; i++) {
801 			enable_irq(hif_ext_group->os_irq[i]);
802 		}
803 		hif_ext_group->irq_enabled = true;
804 	}
805 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
806 }
807 
/**
 * hif_ahb_needs_bmi() - return true if the soc needs bmi through the driver
 * @scn: hif context
 *
 * A target needs driver BMI exactly when its copy engines are not
 * SRNG based.
 *
 * Return: true if soc needs driver bmi otherwise false
 */
bool hif_ahb_needs_bmi(struct hif_softc *scn)
{
	return !ce_srng_based(scn);
}
818 
/**
 * hif_display_ahb_irq_regs() - prints the host interrupt enable (IE) regs
 * @scn: hif context
 *
 * Skipped entirely for the PLD-managed targets (QCN6122/9160/6432) and
 * when not in per-CE irq mode. IE register 3 exists only on the listed
 * 8074/9574/50xx/6018-class targets.
 *
 * Return: None
 */

void hif_display_ahb_irq_regs(struct hif_softc *scn)
{
	uint32_t regval;
	/* CE registers may live in a separately mapped window */
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;
	struct hif_target_info *tgt_info = &scn->target_info;

	if (tgt_info->target_type == TARGET_TYPE_QCN6122 ||
	    tgt_info->target_type == TARGET_TYPE_QCN9160 ||
	    tgt_info->target_type == TARGET_TYPE_QCN6432) {
		return;
	}
	if (scn->per_ce_irq) {
		regval = hif_read32_mb(scn, mem + HOST_IE_ADDRESS);
		hif_nofl_err("IRQ enable register value 0x%08x", regval);

		regval = hif_read32_mb(scn, mem + HOST_IE_ADDRESS_2);
		hif_nofl_err("IRQ enable register 2 value 0x%08x", regval);

		if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
		    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
		    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
		    tgt_info->target_type == TARGET_TYPE_QCA5332 ||
		    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
		    tgt_info->target_type == TARGET_TYPE_QCA6018) {
			regval = hif_read32_mb(scn, mem +
					       HOST_IE_ADDRESS_3);
			hif_nofl_err("IRQ enable register 3 value 0x%08x",
				     regval);
		}
	}
}
857 
/**
 * hif_ahb_display_stats() - dump the AHB IE registers and CE statistics
 * @scn: hif context (logged and ignored if NULL)
 *
 * Return: void
 */
void hif_ahb_display_stats(struct hif_softc *scn)
{
	if (!scn) {
		hif_err("hif_scn null");
		return;
	}
	hif_display_ahb_irq_regs(scn);
	hif_display_ce_stats(scn);
}
867 
/**
 * hif_ahb_clear_stats() - reset the accumulated CE statistics
 * @scn: hif context
 *
 * Return: void
 */
void hif_ahb_clear_stats(struct hif_softc *scn)
{
	struct HIF_CE_state *ce_state = HIF_GET_CE_STATE(scn);

	if (!ce_state) {
		hif_err("hif_state null");
		return;
	}

	hif_clear_ce_stats(ce_state);
}
878