xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/snoc/if_ahb.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: if_ahb.c
21  *
22  * c file for ahb specific implementations.
23  */
24 
25 #include "hif.h"
26 #include "target_type.h"
27 #include "hif_main.h"
28 #include "hif_debug.h"
29 #include "hif_io32.h"
30 #include "ce_main.h"
31 #include "ce_api.h"
32 #include "ce_tasklet.h"
33 #include "if_ahb.h"
34 #include "if_pci.h"
35 #include "ahb_api.h"
36 #include "pci_api.h"
37 #include "hif_napi.h"
38 #include "qal_vbus_dev.h"
39 #include "qdf_irq.h"
40 
41 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
42 #define IRQF_DISABLED 0x00000020
43 #endif
44 
45 #define HIF_IC_CE0_IRQ_OFFSET 4
46 #define HIF_IC_MAX_IRQ 52
47 
/* OS irq numbers resolved for each logical integrated-chip interrupt.
 * Populated when irqs are requested (hif_ahb_configure_irq_by_ceid() /
 * hif_ahb_configure_grp_irq()) and read back in hif_ahb_nointrs() to
 * free them.
 */
static uint16_t ic_irqnum[HIF_IC_MAX_IRQ];
/* integrated chip irq names, indexed by logical irq id; the first
 * HIF_IC_CE0_IRQ_OFFSET entries are misc/watchdog lines, then the
 * per-CE lines, then the ext-group (DP ring) lines.
 */
const char *ic_irqname[HIF_IC_MAX_IRQ] = {
"misc-pulse1",
"misc-latch",
"sw-exception",
"watchdog",
"ce0",
"ce1",
"ce2",
"ce3",
"ce4",
"ce5",
"ce6",
"ce7",
"ce8",
"ce9",
"ce10",
"ce11",
"host2wbm-desc-feed",
"host2reo-re-injection",
"host2reo-command",
"host2rxdma-monitor-ring3",
"host2rxdma-monitor-ring2",
"host2rxdma-monitor-ring1",
"reo2ost-exception",
"wbm2host-rx-release",
"reo2host-status",
"reo2host-destination-ring4",
"reo2host-destination-ring3",
"reo2host-destination-ring2",
"reo2host-destination-ring1",
"rxdma2host-monitor-destination-mac3",
"rxdma2host-monitor-destination-mac2",
"rxdma2host-monitor-destination-mac1",
"ppdu-end-interrupts-mac3",
"ppdu-end-interrupts-mac2",
"ppdu-end-interrupts-mac1",
"rxdma2host-monitor-status-ring-mac3",
"rxdma2host-monitor-status-ring-mac2",
"rxdma2host-monitor-status-ring-mac1",
"host2rxdma-host-buf-ring-mac3",
"host2rxdma-host-buf-ring-mac2",
"host2rxdma-host-buf-ring-mac1",
"rxdma2host-destination-ring-mac3",
"rxdma2host-destination-ring-mac2",
"rxdma2host-destination-ring-mac1",
"host2tcl-input-ring4",
"host2tcl-input-ring3",
"host2tcl-input-ring2",
"host2tcl-input-ring1",
"wbm2host-tx-completions-ring3",
"wbm2host-tx-completions-ring2",
"wbm2host-tx-completions-ring1",
"tcl2host-status-ring",
};
104 
105 /** hif_ahb_get_irq_name() - get irqname
106  * This function gives irqnumber to irqname
107  * mapping.
108  *
109  * @irq_no: irq number
110  *
111  * Return: irq name
112  */
113 const char *hif_ahb_get_irq_name(int irq_no)
114 {
115 	return ic_irqname[irq_no];
116 }
117 
/**
 * hif_ahb_disable_isr() - disable isr
 * @scn: struct hif_softc
 *
 * Tears down interrupt processing: kills the exec-context (ext group)
 * work, releases the irqs via hif_nointrs(), then kills the per-CE
 * and legacy tasklets, and finally resets the pending-tasklet
 * counters.
 *
 * Return: void
 */
void hif_ahb_disable_isr(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	hif_exec_kill(&scn->osc);
	hif_nointrs(scn);
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	/* all bottom halves are gone; clear the in-flight counters */
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}
137 
/**
 * hif_ahb_dump_registers() - dump bus debug registers
 * @hif_ctx: hif context
 *
 * Dumps the copy-engine registers for debugging. A dump failure is
 * only logged; it is not propagated to the caller.
 *
 * Return: 0 always
 */
int hif_ahb_dump_registers(struct hif_softc *hif_ctx)
{
	int ce_status = hif_dump_ce_registers(HIF_GET_SOFTC(hif_ctx));

	if (ce_status)
		hif_err("Dump CE Registers Failed status %d", ce_status);

	return 0;
}
157 
/**
 * hif_ahb_close() - hif_bus_close
 * @scn: pointer to the hif context.
 *
 * This is a callback function for hif_bus_close. It releases the
 * copy-engine state set up by hif_ahb_open().
 *
 * Return: n/a
 */
void hif_ahb_close(struct hif_softc *scn)
{
	hif_ce_close(scn);
}
171 
172 /**
173  * hif_bus_open() - hif_ahb open
174  * @hif_ctx: hif context
175  * @bus_type: bus type
176  *
177  * This is a callback function for hif_bus_open.
178  *
179  * Return: n/a
180  */
181 QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
182 {
183 
184 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
185 
186 	qdf_spinlock_create(&sc->irq_lock);
187 	return hif_ce_open(hif_ctx);
188 }
189 
/**
 * hif_ahb_bus_configure() - Configure the bus
 * @scn: pointer to the hif context.
 *
 * This function configures the ahb bus. The configuration is common
 * with pci, so it delegates to hif_pci_bus_configure().
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_ahb_bus_configure(struct hif_softc *scn)
{
	return hif_pci_bus_configure(scn);
}
202 
/**
 * hif_configure_msi_ahb() - Configure MSI interrupts
 * @sc: pointer to the hif context
 *
 * MSI configuration is a no-op on the ahb bus; this stub always
 * reports success.
 *
 * Return: 0 (success).
 */

int hif_configure_msi_ahb(struct hif_pci_softc *sc)
{
	return 0;
}
214 
/**
 * hif_ahb_configure_legacy_irq() - Configure Legacy IRQ
 * @sc: pointer to the hif context.
 *
 * This function registers the single shared "legacy" irq handler,
 * enables the firmware and CE interrupt sources in the target's
 * interrupt-enable register, and reads the register back to flush
 * the write.
 *
 * return: 0 for success. nonzero for failure.
 */
int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
{
	int ret = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	int irq = 0;

	/* do not support MSI or MSI IRQ failed */
	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
	/* NOTE(review): the status returned by qal_vbus_get_irq() is not
	 * checked here; only the resolved irq number is validated below.
	 */
	qal_vbus_get_irq((struct qdf_pfm_hndl *)pdev, "legacy", &irq);
	if (irq < 0) {
		dev_err(&pdev->dev, "Unable to get irq\n");
		ret = -EFAULT;
		goto end;
	}
	/* IRQF_DISABLED is re-defined at the top of this file for
	 * kernels >= 4.1 where the flag was removed.
	 */
	ret = request_irq(irq, hif_pci_legacy_ce_interrupt_handler,
				IRQF_DISABLED, "wlan_ahb", sc);
	if (ret) {
		dev_err(&pdev->dev, "ath_request_irq failed\n");
		ret = -EFAULT;
		goto end;
	}
	sc->irq = irq;

	/* Use Legacy PCI Interrupts */
	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
			PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	/* read once to flush */
	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS));

end:
	return ret;
}
258 
259 static void hif_ahb_get_soc_info_pld(struct hif_pci_softc *sc,
260 				     struct device *dev)
261 {
262 	struct pld_soc_info info;
263 	int ret = 0;
264 
265 	ret = pld_get_soc_info(dev, &info);
266 	sc->mem = info.v_addr;
267 	sc->ce_sc.ol_sc.mem    = info.v_addr;
268 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
269 }
270 
/**
 * hif_ahb_configure_irq_by_ceid() - register the irq for one copy engine
 * @scn: hif context
 * @ce_id: copy engine index (must be < CE_COUNT_MAX)
 *
 * Resolves the OS irq number for this CE via pfrm_get_irq() using the
 * logical id HIF_IC_CE0_IRQ_OFFSET + ce_id, records it in ic_irqnum[],
 * requests the irq (rising-edge) with the per-CE tasklet as context,
 * and enables the CE interrupt.
 *
 * Return: 0 on success, -EINVAL for a bad ce_id, -EFAULT on failure.
 */
int hif_ahb_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
{
	int ret = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int irq = 0;

	if (ce_id >= CE_COUNT_MAX)
		return -EINVAL;

	ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
			   ic_irqname[HIF_IC_CE0_IRQ_OFFSET + ce_id],
			   HIF_IC_CE0_IRQ_OFFSET + ce_id, &irq);
	if (ret) {
		dev_err(&pdev->dev, "get irq failed\n");
		ret = -EFAULT;
		goto end;
	}

	/* remember the OS irq number so hif_ahb_nointrs() can free it */
	ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + ce_id] = irq;
	ret = pfrm_request_irq(&pdev->dev, irq,
			       hif_ahb_interrupt_handler,
			       IRQF_TRIGGER_RISING,
			       ic_irqname[HIF_IC_CE0_IRQ_OFFSET + ce_id],
			       &hif_state->tasklets[ce_id]);
	if (ret) {
		dev_err(&pdev->dev, "ath_request_irq failed\n");
		ret = -EFAULT;
		goto end;
	}
	hif_ahb_irq_enable(scn, ce_id);

end:
	return ret;
}
307 
308 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
309 {
310 	int ret = 0;
311 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
312 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
313 	struct CE_attr *host_ce_conf = hif_state->host_ce_config;
314 	int i;
315 
316 	/* configure per CE interrupts */
317 	for (i = 0; i < scn->ce_count; i++) {
318 		if (host_ce_conf[i].flags & CE_ATTR_DISABLE_INTR)
319 			continue;
320 
321 		if (host_ce_conf[i].flags & CE_ATTR_INIT_ON_DEMAND)
322 			continue;
323 
324 		ret = hif_ahb_configure_irq_by_ceid(scn, i);
325 		if (ret)
326 			goto end;
327 	}
328 
329 end:
330 	return ret;
331 }
332 
/**
 * hif_ahb_configure_grp_irq() - request the irqs of one ext interrupt group
 * @scn: hif context
 * @hif_ext_group: interrupt group to configure
 *
 * Hooks the group's enable/disable/name/completion callbacks, resolves
 * each logical irq in the group to an OS irq number, then requests each
 * irq (rising-edge, with the unlazy-disable flag set) using the group as
 * the handler context, and finally marks the group as requested.
 *
 * Return: 0 for success, -EFAULT on failure.
 */
int hif_ahb_configure_grp_irq(struct hif_softc *scn,
			      struct hif_exec_context *hif_ext_group)
{
	int ret = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	int irq = 0;
	int j;

	/* configure external interrupts */
	hif_ext_group->irq_enable = &hif_ahb_exec_grp_irq_enable;
	hif_ext_group->irq_disable = &hif_ahb_exec_grp_irq_disable;
	hif_ext_group->irq_name = &hif_ahb_get_irq_name;
	hif_ext_group->work_complete = &hif_dummy_grp_done;

	/* first pass: map logical irq ids to OS irq numbers */
	for (j = 0; j < hif_ext_group->numirq; j++) {
		ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
				   ic_irqname[hif_ext_group->irq[j]],
				   hif_ext_group->irq[j], &irq);
		if (ret) {
			dev_err(&pdev->dev, "get irq failed\n");
			ret = -EFAULT;
			goto end;
		}
		ic_irqnum[hif_ext_group->irq[j]] = irq;
		hif_ext_group->os_irq[j] = irq;
	}

	/* second pass: request each irq */
	for (j = 0; j < hif_ext_group->numirq; j++) {
		irq = hif_ext_group->os_irq[j];

		qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
		qdf_dev_set_irq_status_flags(irq, QDF_IRQ_DISABLE_UNLAZY);
		qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);

		ret = pfrm_request_irq(scn->qdf_dev->dev,
				       irq, hif_ext_group_interrupt_handler,
				       IRQF_TRIGGER_RISING,
				       ic_irqname[hif_ext_group->irq[j]],
				       hif_ext_group);
		if (ret) {
			dev_err(&pdev->dev, "ath_request_irq failed\n");
			ret = -EFAULT;
			goto end;
		}
	}

	/* only mark requested once every irq in the group is hooked */
	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
	hif_ext_group->irq_requested = true;
	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
end:
	return ret;
}
386 
/**
 * hif_ahb_deconfigure_grp_irq() - release all ext-group irqs
 * @scn: hif context
 *
 * For each requested interrupt group: under irq_lock, mark the group
 * un-requested/disabled and clear the unlazy-disable flag on every
 * irq, then free the irqs outside the lock (see comment below for
 * why the lock must be dropped first).
 *
 * Return: void
 */
void hif_ahb_deconfigure_grp_irq(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i, j;
	int irq = 0;

	/* tear down external (ext group) interrupts */
	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		if (hif_ext_group->irq_requested == true) {
			qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
			hif_ext_group->irq_requested = false;
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				hif_ext_group->irq_enabled = false;
				qdf_dev_clear_irq_status_flags(
							irq,
							QDF_IRQ_DISABLE_UNLAZY);
			}
			qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);

			/* Avoid holding the irq_lock while freeing the irq
			 * as the same lock is being held by the irq handler
			 * while disabling the irq. This causes a deadlock
			 * between free_irq and irq_handler.
			 */
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				pfrm_free_irq(scn->qdf_dev->dev,
					      irq, hif_ext_group);
			}
		}
	}
}
422 
423 irqreturn_t hif_ahb_interrupt_handler(int irq, void *context)
424 {
425 	struct ce_tasklet_entry *tasklet_entry = context;
426 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
427 }
428 
/**
 * hif_target_sync_ahb() - ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interrupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the targets other registers for the first time.
 *
 * Return: 0 on success, -EIO if the fw-initialized indication times out
 */
int hif_target_sync_ahb(struct hif_softc *scn)
{
	int val = 0;
	int limit = 0;

	/* Poll (up to 50 x 10 ms): write the interrupt-enable register
	 * and wait until it reads back as 0.
	 */
	while (limit < 50) {
		hif_write32_mb(scn, scn->mem +
			(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS),
			PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
		qdf_mdelay(10);
		val = hif_read32_mb(scn, scn->mem +
			(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
		if (val == 0)
			break;
		limit++;
	}
	hif_write32_mb(scn, scn->mem +
		(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS),
		PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	/* announce host readiness to the firmware */
	hif_write32_mb(scn, scn->mem + FW_INDICATOR_ADDRESS, FW_IND_HOST_READY);
	if (HAS_FW_INDICATOR) {
		int wait_limit = 500;
		int fw_ind = 0;

		/* wait (up to 500 x 10 ms) for FW_IND_INITIALIZED,
		 * re-enabling the firmware interrupt on each iteration
		 */
		while (1) {
			fw_ind = hif_read32_mb(scn, scn->mem +
					FW_INDICATOR_ADDRESS);
			if (fw_ind & FW_IND_INITIALIZED)
				break;
			if (wait_limit-- < 0)
				break;
			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				PCIE_INTR_FIRMWARE_MASK);
			qdf_mdelay(10);
		}
		if (wait_limit < 0) {
			hif_info("FW signal timed out");
			return -EIO;
		}
		hif_info("Got FW signal, retries = %x", 500-wait_limit);
	}

	return 0;
}
485 
/**
 * hif_ahb_disable_bus() - Disable the bus
 * @scn: pointer to the hif context
 *
 * This function disables the bus and holds the target in reset state.
 * It unmaps the CE register window (QCA5018 only), disables the WIFI
 * clock and resets the device on targets that need it, and releases
 * the main register mapping.
 *
 * Return: none
 */
void hif_ahb_disable_bus(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem;
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	struct resource *memres = NULL;
	int mem_pa_size = 0;
	struct hif_target_info *tgt_info = NULL;
	struct qdf_vbus_resource *vmres = NULL;
	QDF_STATUS status;

	tgt_info = &scn->target_info;
	/*Disable WIFI clock input*/
	if (sc->mem) {
		status = pfrm_platform_get_resource(
				scn->qdf_dev->dev,
				(struct qdf_pfm_hndl *)pdev, &vmres,
				IORESOURCE_MEM, 0);
		if (QDF_IS_STATUS_ERROR(status)) {
			hif_info("Failed to get IORESOURCE_MEM");
			return;
		}
		memres = (struct resource *)vmres;
		if (memres)
			mem_pa_size = memres->end - memres->start + 1;

		/* Should not be executed on 8074 platform */
		if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA8074V2) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA9574) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA5018) &&
		    (tgt_info->target_type != TARGET_TYPE_QCN6122) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA6018)) {
			hif_ahb_clk_enable_disable(&pdev->dev, 0);

			hif_ahb_device_reset(scn);
		}
		/* QCA5018 has a dedicated CE register mapping set up in
		 * hif_ahb_enable_bus(); drop it here.
		 */
		if (tgt_info->target_type == TARGET_TYPE_QCA5018) {
			iounmap(sc->mem_ce);
			sc->mem_ce = NULL;
			scn->mem_ce = NULL;
		}
		mem = (void __iomem *)sc->mem;
		if (mem) {
			pfrm_devm_iounmap(&pdev->dev, mem);
			pfrm_devm_release_mem_region(&pdev->dev, scn->mem_pa,
						     mem_pa_size);
			sc->mem = NULL;
		}
	}
	scn->mem = NULL;
}
546 
547 /**
548  * hif_enable_bus() - Enable the bus
549  * @dev: dev
550  * @bdev: bus dev
551  * @bid: bus id
552  * @type: bus type
553  *
554  * This function enables the radio bus by enabling necessary
555  * clocks and waits for the target to get ready to proceed futher
556  *
557  * Return: QDF_STATUS
558  */
559 QDF_STATUS hif_ahb_enable_bus(struct hif_softc *ol_sc,
560 		struct device *dev, void *bdev,
561 		const struct hif_bus_id *bid,
562 		enum hif_enable_type type)
563 {
564 	int ret = 0;
565 	int hif_type;
566 	int target_type;
567 	const struct platform_device_id *id = (struct platform_device_id *)bid;
568 	struct platform_device *pdev = bdev;
569 	struct hif_target_info *tgt_info = NULL;
570 	struct resource *memres = NULL;
571 	void __iomem *mem = NULL;
572 	uint32_t revision_id = 0;
573 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
574 	QDF_STATUS status;
575 	struct qdf_vbus_resource *vmres = NULL;
576 
577 	sc->pdev = (struct pci_dev *)pdev;
578 	sc->dev = &pdev->dev;
579 	sc->devid = id->driver_data;
580 
581 	ret = hif_get_device_type(id->driver_data, revision_id,
582 			&hif_type, &target_type);
583 	if (ret < 0) {
584 		hif_err("Invalid device ret %d id %d revision_id %d",
585 			ret, (int)id->driver_data, revision_id);
586 		return QDF_STATUS_E_FAILURE;
587 	}
588 
589 	if (target_type == TARGET_TYPE_QCN6122) {
590 		hif_ahb_get_soc_info_pld(sc, dev);
591 		hif_update_irq_ops_with_pci(ol_sc);
592 	} else {
593 		status = pfrm_platform_get_resource(&pdev->dev,
594 						    (struct qdf_pfm_hndl *)pdev,
595 						    &vmres,
596 						    IORESOURCE_MEM, 0);
597 		if (QDF_IS_STATUS_ERROR(status)) {
598 			hif_err("Failed to get IORESOURCE_MEM");
599 			return status;
600 		}
601 		memres = (struct resource *)vmres;
602 		if (!memres) {
603 			hif_err("Failed to get IORESOURCE_MEM");
604 			return QDF_STATUS_E_IO;
605 		}
606 
607 		/* Arrange for access to Target SoC registers. */
608 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
609 		status = pfrm_devm_ioremap_resource(
610 					dev,
611 					(struct qdf_vbus_resource *)memres,
612 					&mem);
613 #else
614 		status = pfrm_devm_request_and_ioremap(
615 					dev,
616 					(struct qdf_vbus_resource *)memres,
617 					&mem);
618 #endif
619 		if (QDF_IS_STATUS_ERROR(status)) {
620 			hif_err("ath: ioremap error");
621 			ret = PTR_ERR(mem);
622 			goto err_cleanup1;
623 		}
624 
625 		sc->mem = mem;
626 		ol_sc->mem = mem;
627 		ol_sc->mem_pa = memres->start;
628 	}
629 
630 	ret = pfrm_dma_set_mask(dev, 32);
631 	if (ret) {
632 		hif_err("ath: 32-bit DMA not available");
633 		status = QDF_STATUS_E_IO;
634 		goto err_cleanup1;
635 	}
636 
637 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
638 	ret = pfrm_dma_set_mask_and_coherent(dev, 32);
639 #else
640 	ret = pfrm_dma_set_coherent_mask(dev, 32);
641 #endif
642 	if (ret) {
643 		hif_err("Failed to set dma mask error = %d", ret);
644 		return QDF_STATUS_E_IO;
645 	}
646 
647 	tgt_info = hif_get_target_info_handle((struct hif_opaque_softc *)ol_sc);
648 
649 	tgt_info->target_type = target_type;
650 	hif_register_tbl_attach(ol_sc, hif_type);
651 	hif_target_register_tbl_attach(ol_sc, target_type);
652 	/*
653 	 * In QCA5018 CE region moved to SOC outside WCSS block.
654 	 * Allocate separate I/O remap to access CE registers.
655 	 */
656 	if (tgt_info->target_type == TARGET_TYPE_QCA5018) {
657 		struct hif_softc *scn = HIF_GET_SOFTC(sc);
658 
659 		sc->mem_ce = ioremap_nocache(HOST_CE_ADDRESS, HOST_CE_SIZE);
660 		if (IS_ERR(sc->mem_ce)) {
661 			hif_err("CE: ioremap failed");
662 			return QDF_STATUS_E_IO;
663 		}
664 		ol_sc->mem_ce = sc->mem_ce;
665 	}
666 
667 	if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
668 			(tgt_info->target_type != TARGET_TYPE_QCA8074V2) &&
669 			(tgt_info->target_type != TARGET_TYPE_QCA9574) &&
670 			(tgt_info->target_type != TARGET_TYPE_QCA5018) &&
671 			(tgt_info->target_type != TARGET_TYPE_QCN6122) &&
672 			(tgt_info->target_type != TARGET_TYPE_QCA6018)) {
673 		if (hif_ahb_enable_radio(sc, pdev, id) != 0) {
674 			hif_err("error in enabling soc");
675 			return QDF_STATUS_E_IO;
676 		}
677 
678 		if (hif_target_sync_ahb(ol_sc) < 0) {
679 			status = QDF_STATUS_E_IO;
680 			goto err_target_sync;
681 		}
682 	}
683 	hif_info("X - hif_type = 0x%x, target_type = 0x%x",
684 		hif_type, target_type);
685 
686 	return QDF_STATUS_SUCCESS;
687 err_target_sync:
688 	if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
689 	    (tgt_info->target_type != TARGET_TYPE_QCA8074V2) &&
690 	    (tgt_info->target_type != TARGET_TYPE_QCA9574) &&
691 	    (tgt_info->target_type != TARGET_TYPE_QCN6122) &&
692 	    (tgt_info->target_type != TARGET_TYPE_QCA5018) &&
693 	    (tgt_info->target_type != TARGET_TYPE_QCA6018)) {
694 		hif_err("Disabling target");
695 		hif_ahb_disable_bus(ol_sc);
696 	}
697 err_cleanup1:
698 	return status;
699 }
700 
701 
/**
 * hif_ahb_reset_soc() - reset soc
 * @hif_ctx: HIF context
 *
 * This function resets the soc and holds the target in reset state,
 * delegating to hif_ahb_device_reset().
 *
 * Return: void
 */
void hif_ahb_reset_soc(struct hif_softc *hif_ctx)
{
	hif_ahb_device_reset(hif_ctx);
}
717 
718 
/**
 * hif_ahb_nointrs() - disable IRQ
 * @scn: struct hif_softc
 *
 * This function stops interrupt(s): it unregisters the CE irq
 * callbacks, then frees whichever irqs were requested — MSI vector(s),
 * the single shared legacy irq, or the per-CE irqs plus the ext-group
 * irqs.
 *
 * Return: none
 */
void hif_ahb_nointrs(struct hif_softc *scn)
{
	int i;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *host_ce_conf = hif_state->host_ce_config;

	scn->free_irq_done = true;
	ce_unregister_irq(hif_state, CE_ALL_BITMAP);

	/* nothing to free if the irqs were never requested */
	if (scn->request_irq_done == false)
		return;

	if (sc->num_msi_intrs > 0) {
		/* MSI interrupt(s) */
		for (i = 0; i < sc->num_msi_intrs; i++) {
			pfrm_free_irq(scn->qdf_dev->dev, sc->irq + i, sc);
		}
		sc->num_msi_intrs = 0;
	} else {
		if (!scn->per_ce_irq) {
			/* single shared legacy irq */
			pfrm_free_irq(scn->qdf_dev->dev, sc->irq, sc);
		} else {
			/* per-CE irqs, looked up via ic_irqnum[]; skip CEs
			 * that never had an irq requested
			 */
			for (i = 0; i < scn->ce_count; i++) {
				if (host_ce_conf[i].flags
						& CE_ATTR_DISABLE_INTR)
					continue;
				if (!hif_state->tasklets[i].inited)
					continue;
				pfrm_free_irq(
					scn->qdf_dev->dev,
					ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i],
					&hif_state->tasklets[i]);
			}
			hif_ahb_deconfigure_grp_irq(scn);
		}
	}
	scn->request_irq_done = false;

}
768 
/**
 * hif_ahb_irq_enable() - enable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * This function enables the interrupt for the radio. With per-CE
 * irqs it sets this CE's bit in the HOST_IE register(s) matching the
 * pipe direction, under irq_reg_lock; otherwise it falls back to the
 * shared pci enable path.
 *
 * Return: N/A
 */
void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;
	/* mem_ce is the separate CE mapping when present (see
	 * hif_ahb_enable_bus()); fall back to the main register space
	 */
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval |= HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval |= HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Enable destination ring interrupts for
				 * 8074, 8074V2, 6018 and 50xx
				 */
				regval = hif_read32_mb(scn, mem +
					HOST_IE_ADDRESS_3);
				regval |= HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	} else {
		hif_pci_irq_enable(scn, ce_id);
	}
}
823 
/**
 * hif_ahb_irq_disable() - disable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * Mirror of hif_ahb_irq_enable(): with per-CE irqs it clears this
 * CE's bit in the HOST_IE register(s) matching the pipe direction,
 * under irq_reg_lock. No-op when per_ce_irq is not set.
 *
 * Return: N/A
 */
void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;
	/* mem_ce is the separate CE mapping when present; otherwise use
	 * the main register space
	 */
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval &= ~HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval &= ~HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Disable destination ring interrupts for
				 * 8074, 8074V2, 6018 and 50xx
				 */
				regval = hif_read32_mb(scn, mem +
					HOST_IE_ADDRESS_3);
				regval &= ~HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	}
}
874 
875 void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
876 {
877 	int i;
878 
879 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
880 	if (hif_ext_group->irq_enabled) {
881 		for (i = 0; i < hif_ext_group->numirq; i++) {
882 			disable_irq_nosync(hif_ext_group->os_irq[i]);
883 		}
884 		hif_ext_group->irq_enabled = false;
885 	}
886 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
887 }
888 
889 void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
890 {
891 	int i;
892 
893 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
894 	if (hif_ext_group->irq_requested && !hif_ext_group->irq_enabled) {
895 		for (i = 0; i < hif_ext_group->numirq; i++) {
896 			enable_irq(hif_ext_group->os_irq[i]);
897 		}
898 		hif_ext_group->irq_enabled = true;
899 	}
900 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
901 }
902 
/**
 * hif_ahb_needs_bmi() - return true if the soc needs bmi through the driver
 * @scn: hif context
 *
 * A target needs driver BMI exactly when it is not srng based.
 *
 * Return: true if soc needs driver bmi otherwise false
 */
bool hif_ahb_needs_bmi(struct hif_softc *scn)
{
	return !ce_srng_based(scn);
}
913 
/**
 * hif_ahb_display_stats() - print the copy-engine statistics
 * @scn: hif context (may be NULL; logged and ignored)
 *
 * Return: void
 */
void hif_ahb_display_stats(struct hif_softc *scn)
{
	if (scn)
		hif_display_ce_stats(scn);
	else
		hif_err("hif_scn null");
}
922 
/**
 * hif_ahb_clear_stats() - reset the copy-engine statistics
 * @scn: hif context
 *
 * Return: void
 */
void hif_ahb_clear_stats(struct hif_softc *scn)
{
	struct HIF_CE_state *ce_state = HIF_GET_CE_STATE(scn);

	if (!ce_state) {
		hif_err("hif_state null");
		return;
	}

	hif_clear_ce_stats(ce_state);
}
933