xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/snoc/if_ahb.c (revision 97f44cd39e4ff816eaa1710279d28cf6b9e65ad9)
1 /*
2  * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /**
20  * DOC: if_ahb.c
21  *
22  * c file for ahb specific implementations.
23  */
24 
25 #include "hif.h"
26 #include "target_type.h"
27 #include "hif_main.h"
28 #include "hif_debug.h"
29 #include "hif_io32.h"
30 #include "ce_main.h"
31 #include "ce_api.h"
32 #include "ce_tasklet.h"
33 #include "if_ahb.h"
34 #include "if_pci.h"
35 #include "ahb_api.h"
36 #include "pci_api.h"
37 #include "hif_napi.h"
38 #include "qal_vbus_dev.h"
39 
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
/* IRQF_DISABLED was removed from the kernel in 4.1; redefine it to its
 * historical value so the legacy-interrupt request_irq() call below still
 * compiles. NOTE(review): modern kernels ignore this flag entirely —
 * confirm the value does not collide with a current IRQF_* flag before
 * reusing it for anything else.
 */
#define IRQF_DISABLED 0x00000020
#endif
43 
/* index of the first copy-engine irq inside ic_irqname[] */
#define HIF_IC_CE0_IRQ_OFFSET 4
#define HIF_IC_MAX_IRQ 52

/* OS irq numbers, recorded as each integrated-chip irq is requested */
static uint16_t ic_irqnum[HIF_IC_MAX_IRQ];
/* integrated chip irq names, indexed by fixed logical irq number */
const char *ic_irqname[HIF_IC_MAX_IRQ] = {
"misc-pulse1",
"misc-latch",
"sw-exception",
"watchdog",
"ce0",
"ce1",
"ce2",
"ce3",
"ce4",
"ce5",
"ce6",
"ce7",
"ce8",
"ce9",
"ce10",
"ce11",
"host2wbm-desc-feed",
"host2reo-re-injection",
"host2reo-command",
"host2rxdma-monitor-ring3",
"host2rxdma-monitor-ring2",
"host2rxdma-monitor-ring1",
"reo2ost-exception",
"wbm2host-rx-release",
"reo2host-status",
"reo2host-destination-ring4",
"reo2host-destination-ring3",
"reo2host-destination-ring2",
"reo2host-destination-ring1",
"rxdma2host-monitor-destination-mac3",
"rxdma2host-monitor-destination-mac2",
"rxdma2host-monitor-destination-mac1",
"ppdu-end-interrupts-mac3",
"ppdu-end-interrupts-mac2",
"ppdu-end-interrupts-mac1",
"rxdma2host-monitor-status-ring-mac3",
"rxdma2host-monitor-status-ring-mac2",
"rxdma2host-monitor-status-ring-mac1",
"host2rxdma-host-buf-ring-mac3",
"host2rxdma-host-buf-ring-mac2",
"host2rxdma-host-buf-ring-mac1",
"rxdma2host-destination-ring-mac3",
"rxdma2host-destination-ring-mac2",
"rxdma2host-destination-ring-mac1",
"host2tcl-input-ring4",
"host2tcl-input-ring3",
"host2tcl-input-ring2",
"host2tcl-input-ring1",
"wbm2host-tx-completions-ring3",
"wbm2host-tx-completions-ring2",
"wbm2host-tx-completions-ring1",
"tcl2host-status-ring",
};

/**
 * hif_ahb_get_irq_name() - get irqname
 * @irq_no: logical irq number (index into ic_irqname)
 *
 * This function gives irqnumber to irqname mapping.
 *
 * Return: irq name, or "unknown" when irq_no is out of range
 */
const char *hif_ahb_get_irq_name(int irq_no)
{
	/* guard the table lookup; an out-of-range index would read
	 * beyond ic_irqname[] (undefined behavior)
	 */
	if (irq_no < 0 || irq_no >= HIF_IC_MAX_IRQ)
		return "unknown";

	return ic_irqname[irq_no];
}
116 
117 /**
118  * hif_disable_isr() - disable isr
119  *
120  * This function disables isr and kills tasklets
121  *
122  * @hif_ctx: struct hif_softc
123  *
124  * Return: void
125  */
126 void hif_ahb_disable_isr(struct hif_softc *scn)
127 {
128 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
129 	hif_exec_kill(&scn->osc);
130 	hif_nointrs(scn);
131 	ce_tasklet_kill(scn);
132 	tasklet_kill(&sc->intr_tq);
133 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
134 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
135 }
136 
/**
 * hif_ahb_dump_registers() - dump bus debug registers
 * @hif_ctx: hif context
 *
 * Dumps the copy-engine debug registers. A dump failure is logged
 * but never propagated to the caller.
 *
 * Return: 0 always (failures are only logged)
 */
int hif_ahb_dump_registers(struct hif_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	int ret = hif_dump_ce_registers(scn);

	if (ret)
		hif_err("Dump CE Registers Failed status %d", ret);

	return 0;
}
156 
/**
 * hif_ahb_close() - hif_bus_close callback for the ahb bus
 * @scn: pointer to the hif context
 *
 * Tears down the copy-engine state created by hif_ahb_open().
 *
 * Return: n/a
 */
void hif_ahb_close(struct hif_softc *scn)
{
	hif_ce_close(scn);
}
170 
171 /**
172  * hif_bus_open() - hif_ahb open
173  * @hif_ctx: hif context
174  * @bus_type: bus type
175  *
176  * This is a callback function for hif_bus_open.
177  *
178  * Return: n/a
179  */
180 QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
181 {
182 
183 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
184 
185 	qdf_spinlock_create(&sc->irq_lock);
186 	return hif_ce_open(hif_ctx);
187 }
188 
/**
 * hif_ahb_bus_configure() - configure the ahb bus
 * @scn: pointer to the hif context
 *
 * Thin wrapper: the ahb bus reuses the pci bus-configure path.
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_ahb_bus_configure(struct hif_softc *scn)
{
	return hif_pci_bus_configure(scn);
}
201 
/**
 * hif_configure_msi_ahb() - Configure MSI interrupts
 * @sc: pointer to the hif context
 *
 * MSI is not used on the ahb bus; this stub reports success so the
 * caller falls through to the legacy/per-CE interrupt paths.
 *
 * Return: 0 always
 */
int hif_configure_msi_ahb(struct hif_pci_softc *sc)
{
	return 0;
}
213 
214 /**
215  * hif_ahb_configure_legacy_irq() - Configure Legacy IRQ
216  * @sc: pointer to the hif context.
217  *
218  * This function registers the irq handler and enables legacy interrupts
219  *
220  * return: 0 for success. nonzero for failure.
221  */
222 int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
223 {
224 	int ret = 0;
225 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
226 	struct platform_device *pdev = (struct platform_device *)sc->pdev;
227 	int irq = 0;
228 
229 	/* do not support MSI or MSI IRQ failed */
230 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
231 	qal_vbus_get_irq((struct qdf_pfm_hndl *)pdev, "legacy", &irq);
232 	if (irq < 0) {
233 		dev_err(&pdev->dev, "Unable to get irq\n");
234 		ret = -EFAULT;
235 		goto end;
236 	}
237 	ret = request_irq(irq, hif_pci_legacy_ce_interrupt_handler,
238 				IRQF_DISABLED, "wlan_ahb", sc);
239 	if (ret) {
240 		dev_err(&pdev->dev, "ath_request_irq failed\n");
241 		ret = -EFAULT;
242 		goto end;
243 	}
244 	sc->irq = irq;
245 
246 	/* Use Legacy PCI Interrupts */
247 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
248 				PCIE_INTR_ENABLE_ADDRESS),
249 			PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
250 	/* read once to flush */
251 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
252 				PCIE_INTR_ENABLE_ADDRESS));
253 
254 end:
255 	return ret;
256 }
257 
258 static void hif_ahb_get_soc_info_pld(struct hif_pci_softc *sc,
259 				     struct device *dev)
260 {
261 	struct pld_soc_info info;
262 	int ret = 0;
263 
264 	ret = pld_get_soc_info(dev, &info);
265 	sc->mem = info.v_addr;
266 	sc->ce_sc.ol_sc.mem    = info.v_addr;
267 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
268 }
269 
270 int hif_ahb_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
271 {
272 	int ret = 0;
273 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
274 	struct platform_device *pdev = (struct platform_device *)sc->pdev;
275 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
276 	int irq = 0;
277 
278 	if (ce_id >= CE_COUNT_MAX)
279 		return -EINVAL;
280 
281 	ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
282 			   ic_irqname[HIF_IC_CE0_IRQ_OFFSET + ce_id],
283 			   HIF_IC_CE0_IRQ_OFFSET + ce_id, &irq);
284 	if (ret) {
285 		dev_err(&pdev->dev, "get irq failed\n");
286 		ret = -EFAULT;
287 		goto end;
288 	}
289 
290 	ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + ce_id] = irq;
291 	ret = pfrm_request_irq(&pdev->dev, irq,
292 			       hif_ahb_interrupt_handler,
293 			       IRQF_TRIGGER_RISING,
294 			       ic_irqname[HIF_IC_CE0_IRQ_OFFSET + ce_id],
295 			       &hif_state->tasklets[ce_id]);
296 	if (ret) {
297 		dev_err(&pdev->dev, "ath_request_irq failed\n");
298 		ret = -EFAULT;
299 		goto end;
300 	}
301 	hif_ahb_irq_enable(scn, ce_id);
302 
303 end:
304 	return ret;
305 }
306 
307 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
308 {
309 	int ret = 0;
310 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
311 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
312 	struct CE_attr *host_ce_conf = hif_state->host_ce_config;
313 	int i;
314 
315 	/* configure per CE interrupts */
316 	for (i = 0; i < scn->ce_count; i++) {
317 		if (host_ce_conf[i].flags & CE_ATTR_DISABLE_INTR)
318 			continue;
319 
320 		if (host_ce_conf[i].flags & CE_ATTR_INIT_ON_DEMAND)
321 			continue;
322 
323 		ret = hif_ahb_configure_irq_by_ceid(scn, i);
324 		if (ret)
325 			goto end;
326 	}
327 
328 end:
329 	return ret;
330 }
331 
332 int hif_ahb_configure_grp_irq(struct hif_softc *scn,
333 			      struct hif_exec_context *hif_ext_group)
334 {
335 	int ret = 0;
336 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
337 	struct platform_device *pdev = (struct platform_device *)sc->pdev;
338 	int irq = 0;
339 	int j;
340 
341 	/* configure external interrupts */
342 	hif_ext_group->irq_enable = &hif_ahb_exec_grp_irq_enable;
343 	hif_ext_group->irq_disable = &hif_ahb_exec_grp_irq_disable;
344 	hif_ext_group->irq_name = &hif_ahb_get_irq_name;
345 	hif_ext_group->work_complete = &hif_dummy_grp_done;
346 
347 	for (j = 0; j < hif_ext_group->numirq; j++) {
348 		ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
349 				   ic_irqname[hif_ext_group->irq[j]],
350 				   hif_ext_group->irq[j], &irq);
351 		if (ret) {
352 			dev_err(&pdev->dev, "get irq failed\n");
353 			ret = -EFAULT;
354 			goto end;
355 		}
356 		ic_irqnum[hif_ext_group->irq[j]] = irq;
357 		hif_ext_group->os_irq[j] = irq;
358 	}
359 
360 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
361 
362 	for (j = 0; j < hif_ext_group->numirq; j++) {
363 		irq = hif_ext_group->os_irq[j];
364 		irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
365 		ret = pfrm_request_irq(scn->qdf_dev->dev,
366 				       irq, hif_ext_group_interrupt_handler,
367 				       IRQF_TRIGGER_RISING,
368 				       ic_irqname[hif_ext_group->irq[j]],
369 				       hif_ext_group);
370 		if (ret) {
371 			dev_err(&pdev->dev, "ath_request_irq failed\n");
372 			ret = -EFAULT;
373 			goto end;
374 		}
375 	}
376 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
377 
378 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
379 	hif_ext_group->irq_requested = true;
380 
381 end:
382 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
383 	return ret;
384 }
385 
/**
 * hif_ahb_deconfigure_grp_irq() - release all group external interrupts
 * @scn: hif context
 *
 * For every exec group that has requested irqs: under the group lock,
 * mark the group unrequested/disabled and clear the unlazy-disable flag
 * on each irq; then, with the lock dropped, free each irq.
 *
 * Return: void
 */
void hif_ahb_deconfigure_grp_irq(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i, j;
	int irq = 0;

	/* deconfigure external interrupts for each requested group */
	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		if (hif_ext_group->irq_requested == true) {
			qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
			hif_ext_group->irq_requested = false;
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				hif_ext_group->irq_enabled = false;
				irq_clear_status_flags(irq,
						       IRQ_DISABLE_UNLAZY);
			}
			qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);

			/* Avoid holding the irq_lock while freeing the irq
			 * as the same lock is being held by the irq handler
			 * while disabling the irq. This causes a deadlock
			 * between free_irq and irq_handler.
			 */
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				pfrm_free_irq(scn->qdf_dev->dev,
					      irq, hif_ext_group);
			}
		}
	}
}
420 
421 irqreturn_t hif_ahb_interrupt_handler(int irq, void *context)
422 {
423 	struct ce_tasklet_entry *tasklet_entry = context;
424 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
425 }
426 
/**
 * hif_target_sync_ahb() - ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interrupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the targets other registers for the first time.
 *
 * Return: 0 on success, -EIO if the firmware never signals
 *         FW_IND_INITIALIZED within the polling budget
 */
int hif_target_sync_ahb(struct hif_softc *scn)
{
	int val = 0;
	int limit = 0;

	/* keep writing the interrupt-enable mask until the target
	 * acknowledges by clearing it (reads back 0), up to 50 tries
	 * of 10 ms each
	 */
	while (limit < 50) {
		hif_write32_mb(scn, scn->mem +
			(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS),
			PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
		qdf_mdelay(10);
		val = hif_read32_mb(scn, scn->mem +
			(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
		if (val == 0)
			break;
		limit++;
	}
	/* re-arm fw + CE interrupts and tell fw the host is ready */
	hif_write32_mb(scn, scn->mem +
		(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS),
		PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	hif_write32_mb(scn, scn->mem + FW_INDICATOR_ADDRESS, FW_IND_HOST_READY);
	if (HAS_FW_INDICATOR) {
		int wait_limit = 500;
		int fw_ind = 0;

		/* poll the fw indicator until INITIALIZED is set,
		 * nudging the fw interrupt each 10 ms iteration
		 */
		while (1) {
			fw_ind = hif_read32_mb(scn, scn->mem +
					FW_INDICATOR_ADDRESS);
			if (fw_ind & FW_IND_INITIALIZED)
				break;
			if (wait_limit-- < 0)
				break;
			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				PCIE_INTR_FIRMWARE_MASK);
			qdf_mdelay(10);
		}
		if (wait_limit < 0) {
			hif_info("FW signal timed out");
			return -EIO;
		}
		/* 500 must match the wait_limit initializer above */
		hif_info("Got FW signal, retries = %x", 500-wait_limit);
	}

	return 0;
}
483 
/**
 * hif_ahb_disable_bus() - Disable the bus
 * @scn: pointer to the hif context
 *
 * This function disables the bus and holds the target in reset state:
 * disables the WIFI clock, resets the device (on targets with an
 * external clock/reset path), and unmaps/releases the register region.
 *
 * Return: none
 */
void hif_ahb_disable_bus(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem;
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	struct resource *memres = NULL;
	int mem_pa_size = 0;
	struct hif_target_info *tgt_info = NULL;
	struct qdf_vbus_resource *vmres = NULL;
	QDF_STATUS status;

	tgt_info = &scn->target_info;
	/*Disable WIFI clock input*/
	if (sc->mem) {
		/* re-query the MEM resource to learn the region size
		 * that must be released below
		 */
		status = pfrm_platform_get_resource(
				scn->qdf_dev->dev,
				(struct qdf_pfm_hndl *)pdev, &vmres,
				IORESOURCE_MEM, 0);
		if (QDF_IS_STATUS_ERROR(status)) {
			hif_info("Failed to get IORESOURCE_MEM");
			return;
		}
		memres = (struct resource *)vmres;
		if (memres)
			mem_pa_size = memres->end - memres->start + 1;

		/* Should not be executed on 8074 platform */
		if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA8074V2) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA5018) &&
		    (tgt_info->target_type != TARGET_TYPE_QCN9100) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA6018)) {
			hif_ahb_clk_enable_disable(&pdev->dev, 0);

			hif_ahb_device_reset(scn);
		}
		/* QCA5018 has a separately ioremapped CE region (see
		 * hif_ahb_enable_bus); undo that mapping here
		 */
		if (tgt_info->target_type == TARGET_TYPE_QCA5018) {
			iounmap(sc->mem_ce);
			sc->mem_ce = NULL;
			scn->mem_ce = NULL;
		}
		mem = (void __iomem *)sc->mem;
		if (mem) {
			pfrm_devm_iounmap(&pdev->dev, mem);
			pfrm_devm_release_mem_region(&pdev->dev, scn->mem_pa,
						     mem_pa_size);
			sc->mem = NULL;
		}
	}
	scn->mem = NULL;
}
543 
/**
 * hif_ahb_enable_bus() - Enable the bus
 * @ol_sc: hif context
 * @dev: dev
 * @bdev: bus dev (platform_device for ahb)
 * @bid: bus id (platform_device_id for ahb)
 * @type: bus type
 *
 * This function enables the radio bus by mapping the register space,
 * setting the DMA masks, attaching the register tables, enabling
 * necessary clocks and waiting for the target to get ready to proceed
 * further.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_ahb_enable_bus(struct hif_softc *ol_sc,
		struct device *dev, void *bdev,
		const struct hif_bus_id *bid,
		enum hif_enable_type type)
{
	int ret = 0;
	int hif_type;
	int target_type;
	const struct platform_device_id *id = (struct platform_device_id *)bid;
	struct platform_device *pdev = bdev;
	struct hif_target_info *tgt_info = NULL;
	struct resource *memres = NULL;
	void __iomem *mem = NULL;
	uint32_t revision_id = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
	QDF_STATUS status;
	struct qdf_vbus_resource *vmres = NULL;

	sc->pdev = (struct pci_dev *)pdev;
	sc->dev = &pdev->dev;
	sc->devid = id->driver_data;

	/* map the platform device id to hif/target types; revision_id
	 * is always 0 here (ahb targets are not revision-differentiated)
	 */
	ret = hif_get_device_type(id->driver_data, revision_id,
			&hif_type, &target_type);
	if (ret < 0) {
		hif_err("Invalid device ret %d id %d revision_id %d",
			ret, (int)id->driver_data, revision_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (target_type == TARGET_TYPE_QCN9100) {
		/* QCN9100 gets its register mapping from pld and uses
		 * pci-style irq ops instead of an IORESOURCE_MEM remap
		 */
		hif_ahb_get_soc_info_pld(sc, dev);
		hif_update_irq_ops_with_pci(ol_sc);
	} else {
		status = pfrm_platform_get_resource(&pdev->dev,
						    (struct qdf_pfm_hndl *)pdev,
						    &vmres,
						    IORESOURCE_MEM, 0);
		if (QDF_IS_STATUS_ERROR(status)) {
			hif_err("Failed to get IORESOURCE_MEM");
			return status;
		}
		memres = (struct resource *)vmres;
		if (!memres) {
			hif_err("Failed to get IORESOURCE_MEM");
			return QDF_STATUS_E_IO;
		}

		/* Arrange for access to Target SoC registers. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
		status = pfrm_devm_ioremap_resource(
					dev,
					(struct qdf_vbus_resource *)memres,
					&mem);
#else
		status = pfrm_devm_request_and_ioremap(
					dev,
					(struct qdf_vbus_resource *)memres,
					&mem);
#endif
		if (QDF_IS_STATUS_ERROR(status)) {
			hif_err("ath: ioremap error");
			/* NOTE(review): ret is dead here - err_cleanup1
			 * returns status, so PTR_ERR(mem) is discarded
			 */
			ret = PTR_ERR(mem);
			goto err_cleanup1;
		}

		sc->mem = mem;
		ol_sc->mem = mem;
		ol_sc->mem_pa = memres->start;
	}

	ret = pfrm_dma_set_mask(dev, 32);
	if (ret) {
		hif_err("ath: 32-bit DMA not available");
		status = QDF_STATUS_E_IO;
		goto err_cleanup1;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
	ret = pfrm_dma_set_mask_and_coherent(dev, 32);
#else
	ret = pfrm_dma_set_coherent_mask(dev, 32);
#endif
	if (ret) {
		hif_err("Failed to set dma mask error = %d", ret);
		return QDF_STATUS_E_IO;
	}

	tgt_info = hif_get_target_info_handle((struct hif_opaque_softc *)ol_sc);

	tgt_info->target_type = target_type;
	hif_register_tbl_attach(ol_sc, hif_type);
	hif_target_register_tbl_attach(ol_sc, target_type);
	/*
	 * In QCA5018 CE region moved to SOC outside WCSS block.
	 * Allocate separate I/O remap to access CE registers.
	 */
	if (tgt_info->target_type == TARGET_TYPE_QCA5018) {
		/* NOTE(review): scn is unused in this branch */
		struct hif_softc *scn = HIF_GET_SOFTC(sc);

		sc->mem_ce = ioremap_nocache(HOST_CE_ADDRESS, HOST_CE_SIZE);
		if (IS_ERR(sc->mem_ce)) {
			hif_err("CE: ioremap failed");
			return QDF_STATUS_E_IO;
		}
		ol_sc->mem_ce = sc->mem_ce;
	}

	/* targets without an external clock/reset path need the radio
	 * enabled and a fw handshake before proceeding
	 */
	if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
			(tgt_info->target_type != TARGET_TYPE_QCA8074V2) &&
			(tgt_info->target_type != TARGET_TYPE_QCA5018) &&
			(tgt_info->target_type != TARGET_TYPE_QCN9100) &&
			(tgt_info->target_type != TARGET_TYPE_QCA6018)) {
		if (hif_ahb_enable_radio(sc, pdev, id) != 0) {
			hif_err("error in enabling soc");
			return QDF_STATUS_E_IO;
		}

		if (hif_target_sync_ahb(ol_sc) < 0) {
			status = QDF_STATUS_E_IO;
			goto err_target_sync;
		}
	}
	hif_info("X - hif_type = 0x%x, target_type = 0x%x",
		hif_type, target_type);

	return QDF_STATUS_SUCCESS;
err_target_sync:
	if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA8074V2) &&
	    (tgt_info->target_type != TARGET_TYPE_QCN9100) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA5018) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA6018)) {
		hif_err("Disabling target");
		hif_ahb_disable_bus(ol_sc);
	}
err_cleanup1:
	return status;
}
695 
/**
 * hif_ahb_reset_soc() - reset soc
 * @hif_ctx: HIF context
 *
 * Resets the SoC via the device-reset path, leaving the target
 * held in reset state.
 *
 * Return: void
 */
void hif_ahb_reset_soc(struct hif_softc *hif_ctx)
{
	hif_ahb_device_reset(hif_ctx);
}
712 
/**
 * hif_ahb_nointrs() - disable IRQ
 * @scn: struct hif_softc
 *
 * This function stops interrupt(s): unregisters CE irq handling and
 * frees whichever of MSI / legacy / per-CE irqs was requested.
 *
 * Return: none
 */
void hif_ahb_nointrs(struct hif_softc *scn)
{
	int i;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *host_ce_conf = hif_state->host_ce_config;

	ce_unregister_irq(hif_state, CE_ALL_BITMAP);

	/* nothing to free unless irqs were actually requested */
	if (scn->request_irq_done == false)
		return;

	if (sc->num_msi_intrs > 0) {
		/* MSI interrupt(s) */
		for (i = 0; i < sc->num_msi_intrs; i++) {
			pfrm_free_irq(scn->qdf_dev->dev, sc->irq + i, sc);
		}
		sc->num_msi_intrs = 0;
	} else {
		if (!scn->per_ce_irq) {
			/* single shared legacy irq */
			pfrm_free_irq(scn->qdf_dev->dev, sc->irq, sc);
		} else {
			/* one irq per CE, skipping CEs that never had
			 * interrupts or whose tasklet was never set up
			 */
			for (i = 0; i < scn->ce_count; i++) {
				if (host_ce_conf[i].flags
						& CE_ATTR_DISABLE_INTR)
					continue;
				if (!hif_state->tasklets[i].inited)
					continue;
				pfrm_free_irq(
					scn->qdf_dev->dev,
					ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i],
					&hif_state->tasklets[i]);
			}
			hif_ahb_deconfigure_grp_irq(scn);
		}
	}
	scn->request_irq_done = false;

}
762 
/**
 * hif_ahb_irq_enable() - enable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * This function enables the interrupt for the radio by setting the
 * CE's bit in the source (REG1) and/or destination (REG2/REG3)
 * interrupt-enable registers, according to the pipe direction.
 *
 * Return: N/A
 */
void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;
	/* CE registers live in a separate mapping when present (QCA5018) */
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			/* read-modify-write under lock: register is
			 * shared by all CEs
			 */
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval |= HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval |= HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Enable destination ring interrupts for
				 * 8074, 8074V2, 6018 and 50xx
				 */
				regval = hif_read32_mb(scn, mem +
					HOST_IE_ADDRESS_3);
				regval |= HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	} else {
		/* shared legacy interrupt: defer to the pci path */
		hif_pci_irq_enable(scn, ce_id);
	}
}
816 
/**
 * hif_ahb_irq_disable() - disable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * Clears the CE's bit in the source (REG1) and/or destination
 * (REG2/REG3) interrupt-enable registers, mirroring
 * hif_ahb_irq_enable(). No-op when not in per-CE irq mode.
 *
 * Return: N/A
 */
void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;
	/* CE registers live in a separate mapping when present (QCA5018) */
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			/* read-modify-write under lock: register is
			 * shared by all CEs
			 */
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval &= ~HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval &= ~HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Disable destination ring interrupts for
				 * 8074, 8074V2, 6018 and 50xx
				 */
				regval = hif_read32_mb(scn, mem +
					HOST_IE_ADDRESS_3);
				regval &= ~HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	}
}
866 
867 void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
868 {
869 	int i;
870 
871 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
872 	if (hif_ext_group->irq_enabled) {
873 		for (i = 0; i < hif_ext_group->numirq; i++) {
874 			disable_irq_nosync(hif_ext_group->os_irq[i]);
875 		}
876 		hif_ext_group->irq_enabled = false;
877 	}
878 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
879 }
880 
881 void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
882 {
883 	int i;
884 
885 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
886 	if (hif_ext_group->irq_requested && !hif_ext_group->irq_enabled) {
887 		for (i = 0; i < hif_ext_group->numirq; i++) {
888 			enable_irq(hif_ext_group->os_irq[i]);
889 		}
890 		hif_ext_group->irq_enabled = true;
891 	}
892 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
893 }
894 
895 /**
896  * hif_ahb_needs_bmi() - return true if the soc needs bmi through the driver
897  * @scn: hif context
898  *
899  * Return: true if soc needs driver bmi otherwise false
900  */
901 bool hif_ahb_needs_bmi(struct hif_softc *scn)
902 {
903 	return !ce_srng_based(scn);
904 }
905 
/**
 * hif_ahb_display_stats() - display copy-engine stats
 * @scn: hif context; a NULL context is only logged
 */
void hif_ahb_display_stats(struct hif_softc *scn)
{
	if (scn)
		hif_display_ce_stats(scn);
	else
		hif_err("hif_scn null");
}
914 
/**
 * hif_ahb_clear_stats() - reset copy-engine stats counters
 * @scn: hif context; a NULL CE state is only logged
 */
void hif_ahb_clear_stats(struct hif_softc *scn)
{
	struct HIF_CE_state *ce_state = HIF_GET_CE_STATE(scn);

	if (!ce_state) {
		hif_err("hif_state null");
		return;
	}

	hif_clear_ce_stats(ce_state);
}
925