xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/snoc/if_ahb.c (revision 8c3c4172fbd442a68f7b879958acb6794236aee0)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  * DOC: if_ahb.c
22  *
23  * c file for ahb specific implementations.
24  */
25 
26 #include "hif.h"
27 #include "target_type.h"
28 #include "hif_main.h"
29 #include "hif_debug.h"
30 #include "hif_io32.h"
31 #include "ce_main.h"
32 #include "ce_api.h"
33 #include "ce_tasklet.h"
34 #include "if_ahb.h"
35 #include "if_pci.h"
36 #include "ahb_api.h"
37 #include "pci_api.h"
38 #include "hif_napi.h"
39 #include "qal_vbus_dev.h"
40 #include "qdf_irq.h"
41 
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
/*
 * IRQF_DISABLED was removed from the kernel in 4.1; re-define it locally so
 * the legacy request_irq() call below still compiles on newer kernels.
 * NOTE(review): 0x00000020 is the historical IRQF_DISABLED bit value; it is
 * no longer interpreted by modern kernels -- confirm passing it is harmless.
 */
#define IRQF_DISABLED 0x00000020
#endif

/* Index of the first copy-engine entry in ic_irqname[]/ic_irqnum[] */
#define HIF_IC_CE0_IRQ_OFFSET 4
/* Number of integrated-chip interrupt lines tracked in the tables below */
#define HIF_IC_MAX_IRQ 53

/* OS irq numbers resolved at request time, indexed by ic irq id; filled by
 * hif_ahb_configure_irq_by_ceid()/hif_ahb_configure_grp_irq()
 */
static uint16_t ic_irqnum[HIF_IC_MAX_IRQ];
/* integrated chip irq names, indexed by ic irq id; entries at
 * HIF_IC_CE0_IRQ_OFFSET onward name the per-CE interrupts, so the order
 * here must stay in sync with that offset
 */
const char *ic_irqname[HIF_IC_MAX_IRQ] = {
"misc-pulse1",
"misc-latch",
"sw-exception",
"watchdog",
"ce0",
"ce1",
"ce2",
"ce3",
"ce4",
"ce5",
"ce6",
"ce7",
"ce8",
"ce9",
"ce10",
"ce11",
"host2wbm-desc-feed",
"host2reo-re-injection",
"host2reo-command",
"host2rxdma-monitor-ring3",
"host2rxdma-monitor-ring2",
"host2rxdma-monitor-ring1",
"reo2ost-exception",
"wbm2host-rx-release",
"reo2host-status",
"reo2host-destination-ring4",
"reo2host-destination-ring3",
"reo2host-destination-ring2",
"reo2host-destination-ring1",
"rxdma2host-monitor-destination-mac3",
"rxdma2host-monitor-destination-mac2",
"rxdma2host-monitor-destination-mac1",
"ppdu-end-interrupts-mac3",
"ppdu-end-interrupts-mac2",
"ppdu-end-interrupts-mac1",
"rxdma2host-monitor-status-ring-mac3",
"rxdma2host-monitor-status-ring-mac2",
"rxdma2host-monitor-status-ring-mac1",
"host2rxdma-host-buf-ring-mac3",
"host2rxdma-host-buf-ring-mac2",
"host2rxdma-host-buf-ring-mac1",
"rxdma2host-destination-ring-mac3",
"rxdma2host-destination-ring-mac2",
"rxdma2host-destination-ring-mac1",
"host2tcl-input-ring4",
"host2tcl-input-ring3",
"host2tcl-input-ring2",
"host2tcl-input-ring1",
"wbm2host-tx-completions-ring4",
"wbm2host-tx-completions-ring3",
"wbm2host-tx-completions-ring2",
"wbm2host-tx-completions-ring1",
"tcl2host-status-ring",
};
106 
107 /** hif_ahb_get_irq_name() - get irqname
108  * This function gives irqnumber to irqname
109  * mapping.
110  *
111  * @irq_no: irq number
112  *
113  * Return: irq name
114  */
115 const char *hif_ahb_get_irq_name(int irq_no)
116 {
117 	return ic_irqname[irq_no];
118 }
119 
/**
 * hif_ahb_disable_isr() - disable isr
 *
 * This function disables isr and kills tasklets
 *
 * @scn: struct hif_softc
 *
 * Return: void
 */
void hif_ahb_disable_isr(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	/* Stop the exec-group (ext interrupt) contexts first */
	hif_exec_kill(&scn->osc);
	/* Free/disable the bus interrupts before killing deferred work */
	hif_nointrs(scn);
	/* Kill per-CE tasklets, then the legacy combined tasklet */
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	/* Nothing can be pending any more; reset the bookkeeping counters */
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}
139 
/**
 * hif_ahb_dump_registers() - dump bus debug registers
 * @hif_ctx: struct hif_softc
 *
 * This function dumps hif copy-engine debug registers
 *
 * Return: 0 for success or error code
 */
int hif_ahb_dump_registers(struct hif_softc *hif_ctx)
{
	int status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	status = hif_dump_ce_registers(scn);
	if (status)
		hif_err("Dump CE Registers Failed status %d", status);

	/* Propagate the failure instead of unconditionally returning 0,
	 * matching the documented "0 for success or error code" contract.
	 */
	return status;
}
159 
/**
 * hif_ahb_close() - hif_bus_close
 * @scn: pointer to the hif context.
 *
 * Bus-close callback for the AHB bus; tears down the copy-engine state
 * that was set up by hif_ahb_open().
 *
 * Return: n/a
 */
void hif_ahb_close(struct hif_softc *scn)
{
	hif_ce_close(scn);
}
173 
174 /**
175  * hif_bus_open() - hif_ahb open
176  * @hif_ctx: hif context
177  * @bus_type: bus type
178  *
179  * This is a callback function for hif_bus_open.
180  *
181  * Return: n/a
182  */
183 QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
184 {
185 
186 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
187 
188 	qdf_spinlock_create(&sc->irq_lock);
189 	return hif_ce_open(hif_ctx);
190 }
191 
/**
 * hif_ahb_bus_configure() - Configure the bus
 * @scn: pointer to the hif context.
 *
 * AHB shares the PCI bus-configuration path, so this simply delegates.
 *
 * return: 0 for success. nonzero for failure.
 */
int hif_ahb_bus_configure(struct hif_softc *scn)
{
	return hif_pci_bus_configure(scn);
}
204 
/**
 * hif_configure_msi_ahb - Configure MSI interrupts
 * @sc : pointer to the hif context
 *
 * MSI is not supported on the AHB bus; this stub always succeeds so the
 * caller falls back to legacy interrupt configuration.
 *
 * return: 0 for success. nonzero for failure.
 */

int hif_configure_msi_ahb(struct hif_pci_softc *sc)
{
	return 0;
}
216 
/**
 * hif_ahb_configure_legacy_irq() - Configure Legacy IRQ
 * @sc: pointer to the hif context.
 *
 * This function registers the irq handler and enables legacy interrupts
 *
 * return: 0 for success. nonzero for failure.
 */
int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
{
	int ret = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	int irq = 0;

	/* do not support MSI or MSI IRQ failed */
	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
	/* NOTE(review): the status returned by qal_vbus_get_irq() is
	 * ignored; if it fails without writing *irq, irq stays 0 and the
	 * check below will not catch it -- confirm against the QAL API.
	 */
	qal_vbus_get_irq((struct qdf_pfm_hndl *)pdev, "legacy", &irq);
	if (irq < 0) {
		dev_err(&pdev->dev, "Unable to get irq\n");
		ret = -EFAULT;
		goto end;
	}
	ret = request_irq(irq, hif_pci_legacy_ce_interrupt_handler,
				IRQF_DISABLED, "wlan_ahb", sc);
	if (ret) {
		dev_err(&pdev->dev, "ath_request_irq failed\n");
		/* the original errno from request_irq is discarded here */
		ret = -EFAULT;
		goto end;
	}
	/* remember the OS irq so hif_ahb_nointrs() can free it later */
	sc->irq = irq;

	/* Use Legacy PCI Interrupts */
	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
			PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	/* read once to flush */
	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS));

end:
	return ret;
}
260 
261 static void hif_ahb_get_soc_info_pld(struct hif_pci_softc *sc,
262 				     struct device *dev)
263 {
264 	struct pld_soc_info info;
265 	int ret = 0;
266 
267 	ret = pld_get_soc_info(dev, &info);
268 	sc->mem = info.v_addr;
269 	sc->ce_sc.ol_sc.mem    = info.v_addr;
270 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
271 }
272 
/**
 * hif_ahb_configure_irq_by_ceid() - request and enable one CE's irq
 * @scn: hif context
 * @ce_id: copy engine id whose interrupt should be hooked up
 *
 * Resolves the platform irq for the CE (CE irqs start at
 * HIF_IC_CE0_IRQ_OFFSET in the ic irq tables), requests it with the
 * per-CE tasklet entry as handler context, and enables it.
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_ahb_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
{
	int ret = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int irq = 0;

	/* reject ids that would index past the tasklet/irq tables */
	if (ce_id >= CE_COUNT_MAX)
		return -EINVAL;

	ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
			   ic_irqname[HIF_IC_CE0_IRQ_OFFSET + ce_id],
			   HIF_IC_CE0_IRQ_OFFSET + ce_id, &irq);
	if (ret) {
		dev_err(&pdev->dev, "get irq failed\n");
		/* original error code from pfrm_get_irq is replaced here */
		ret = -EFAULT;
		goto end;
	}

	/* record the OS irq so hif_ahb_nointrs() can free it later */
	ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + ce_id] = irq;
	ret = pfrm_request_irq(&pdev->dev, irq,
			       hif_ahb_interrupt_handler,
			       IRQF_TRIGGER_RISING,
			       ic_irqname[HIF_IC_CE0_IRQ_OFFSET + ce_id],
			       &hif_state->tasklets[ce_id]);
	if (ret) {
		dev_err(&pdev->dev, "ath_request_irq failed\n");
		ret = -EFAULT;
		goto end;
	}
	/* unmask the CE interrupt now that a handler is installed */
	hif_ahb_irq_enable(scn, ce_id);

end:
	return ret;
}
309 
310 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
311 {
312 	int ret = 0;
313 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
314 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
315 	struct CE_attr *host_ce_conf = hif_state->host_ce_config;
316 	int i;
317 
318 	/* configure per CE interrupts */
319 	for (i = 0; i < scn->ce_count; i++) {
320 		if (host_ce_conf[i].flags & CE_ATTR_DISABLE_INTR)
321 			continue;
322 
323 		if (host_ce_conf[i].flags & CE_ATTR_INIT_ON_DEMAND)
324 			continue;
325 
326 		ret = hif_ahb_configure_irq_by_ceid(scn, i);
327 		if (ret)
328 			goto end;
329 	}
330 
331 end:
332 	return ret;
333 }
334 
/**
 * hif_ahb_configure_grp_irq() - hook up an exec group's interrupts
 * @scn: hif context
 * @hif_ext_group: the exec context whose irqs should be requested
 *
 * Installs the group's irq ops, resolves each of the group's ic irq
 * ids to an OS irq, requests the handlers, and marks the group as
 * irq_requested. On any failure, already-requested irqs of the group
 * are left installed for hif_ahb_deconfigure_grp_irq() to clean up.
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_ahb_configure_grp_irq(struct hif_softc *scn,
			      struct hif_exec_context *hif_ext_group)
{
	int ret = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	int irq = 0;
	int j;

	/* configure external interrupts */
	hif_ext_group->irq_enable = &hif_ahb_exec_grp_irq_enable;
	hif_ext_group->irq_disable = &hif_ahb_exec_grp_irq_disable;
	hif_ext_group->irq_name = &hif_ahb_get_irq_name;
	hif_ext_group->work_complete = &hif_dummy_grp_done;

	/* first pass: resolve every ic irq id to an OS irq number */
	for (j = 0; j < hif_ext_group->numirq; j++) {
		ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
				   ic_irqname[hif_ext_group->irq[j]],
				   hif_ext_group->irq[j], &irq);
		if (ret) {
			dev_err(&pdev->dev, "get irq failed\n");
			ret = -EFAULT;
			goto end;
		}
		ic_irqnum[hif_ext_group->irq[j]] = irq;
		hif_ext_group->os_irq[j] = irq;
	}

	/* second pass: mark unlazy-disable and request each handler */
	for (j = 0; j < hif_ext_group->numirq; j++) {
		irq = hif_ext_group->os_irq[j];

		qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
		qdf_dev_set_irq_status_flags(irq, QDF_IRQ_DISABLE_UNLAZY);
		qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);

		ret = pfrm_request_irq(scn->qdf_dev->dev,
				       irq, hif_ext_group_interrupt_handler,
				       IRQF_TRIGGER_RISING,
				       ic_irqname[hif_ext_group->irq[j]],
				       hif_ext_group);
		if (ret) {
			dev_err(&pdev->dev, "ath_request_irq failed\n");
			ret = -EFAULT;
			goto end;
		}
	}

	/* publish irq_requested only after every handler is installed */
	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
	hif_ext_group->irq_requested = true;
	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
end:
	return ret;
}
388 
/**
 * hif_ahb_deconfigure_grp_irq() - free all exec groups' interrupts
 * @scn: hif context
 *
 * For every exec group that has requested its irqs: clears the
 * irq_requested/irq_enabled flags and unlazy-disable status under the
 * group's irq_lock, then frees the irqs outside the lock to avoid a
 * deadlock with the irq handler.
 *
 * Return: none
 */
void hif_ahb_deconfigure_grp_irq(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i, j;
	int irq = 0;

	/* deconfigure external interrupts */
	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		if (hif_ext_group->irq_requested == true) {
			qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
			hif_ext_group->irq_requested = false;
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				hif_ext_group->irq_enabled = false;
				qdf_dev_clear_irq_status_flags(
							irq,
							QDF_IRQ_DISABLE_UNLAZY);
			}
			qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);

			/* Avoid holding the irq_lock while freeing the irq
			 * as the same lock is being held by the irq handler
			 * while disabling the irq. This causes a deadlock
			 * between free_irq and irq_handler.
			 */
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				pfrm_free_irq(scn->qdf_dev->dev,
					      irq, hif_ext_group);
			}
		}
	}
}
424 
425 irqreturn_t hif_ahb_interrupt_handler(int irq, void *context)
426 {
427 	struct ce_tasklet_entry *tasklet_entry = context;
428 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
429 }
430 
/**
 * hif_target_sync_ahb() : ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interrupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the targets other registers for the first time.
 *
 * Return: 0 on success, -EIO if the fw never signals initialized
 */
int hif_target_sync_ahb(struct hif_softc *scn)
{
	int val = 0;
	int limit = 0;

	/* Keep writing the interrupt-enable mask until a read-back of 0
	 * shows the target consumed it (up to 50 tries, 10 ms apart).
	 */
	while (limit < 50) {
		hif_write32_mb(scn, scn->mem +
			(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS),
			PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
		qdf_mdelay(10);
		val = hif_read32_mb(scn, scn->mem +
			(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
		if (val == 0)
			break;
		limit++;
	}
	/* Re-arm the enables and tell the fw the host is ready */
	hif_write32_mb(scn, scn->mem +
		(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS),
		PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	hif_write32_mb(scn, scn->mem + FW_INDICATOR_ADDRESS, FW_IND_HOST_READY);
	if (HAS_FW_INDICATOR) {
		int wait_limit = 500;
		int fw_ind = 0;

		/* Poll the fw indicator (up to ~500 * 10 ms) for the
		 * INITIALIZED bit, nudging the fw interrupt each round.
		 */
		while (1) {
			fw_ind = hif_read32_mb(scn, scn->mem +
					FW_INDICATOR_ADDRESS);
			if (fw_ind & FW_IND_INITIALIZED)
				break;
			if (wait_limit-- < 0)
				break;
			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				PCIE_INTR_FIRMWARE_MASK);
			qdf_mdelay(10);
		}
		if (wait_limit < 0) {
			hif_info("FW signal timed out");
			return -EIO;
		}
		hif_info("Got FW signal, retries = %x", 500-wait_limit);
	}

	return 0;
}
487 
/**
 * hif_ahb_disable_bus() - Disable the bus
 * @scn : pointer to the hif context
 *
 * This function disables the bus and holds the target in reset state
 * (clock gating and device reset are skipped on targets that do not
 * need them), then unmaps and releases the register regions.
 *
 * Return: none
 */
void hif_ahb_disable_bus(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem;
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	struct resource *memres = NULL;
	int mem_pa_size = 0;
	struct hif_target_info *tgt_info = NULL;
	struct qdf_vbus_resource *vmres = NULL;
	QDF_STATUS status;

	tgt_info = &scn->target_info;
	/*Disable WIFI clock input*/
	if (sc->mem) {
		/* re-fetch the MEM resource to learn the region size */
		status = pfrm_platform_get_resource(
				scn->qdf_dev->dev,
				(struct qdf_pfm_hndl *)pdev, &vmres,
				IORESOURCE_MEM, 0);
		if (QDF_IS_STATUS_ERROR(status)) {
			hif_info("Failed to get IORESOURCE_MEM");
			return;
		}
		memres = (struct resource *)vmres;
		if (memres)
			mem_pa_size = memres->end - memres->start + 1;

		/* Should not be executed on 8074 platform */
		if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA8074V2) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA9574) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA5018) &&
		    (tgt_info->target_type != TARGET_TYPE_QCN6122) &&
		    (tgt_info->target_type != TARGET_TYPE_QCA6018)) {
			hif_ahb_clk_enable_disable(&pdev->dev, 0);

			hif_ahb_device_reset(scn);
		}
		/* QCA5018 has a separately ioremapped CE region; drop it */
		if (tgt_info->target_type == TARGET_TYPE_QCA5018) {
			iounmap(sc->mem_ce);
			sc->mem_ce = NULL;
			scn->mem_ce = NULL;
		}
		mem = (void __iomem *)sc->mem;
		if (mem) {
			pfrm_devm_iounmap(&pdev->dev, mem);
			pfrm_devm_release_mem_region(&pdev->dev, scn->mem_pa,
						     mem_pa_size);
			sc->mem = NULL;
			pld_set_bar_addr(&pdev->dev, NULL);
		}
	}
	scn->mem = NULL;
}
549 
/**
 * hif_ahb_enable_bus() - Enable the bus
 * @ol_sc: hif context
 * @dev: dev
 * @bdev: bus dev (platform device)
 * @bid: bus id (platform_device_id)
 * @type: bus type
 *
 * This function enables the radio bus by enabling necessary
 * clocks and waits for the target to get ready to proceed further
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_ahb_enable_bus(struct hif_softc *ol_sc,
		struct device *dev, void *bdev,
		const struct hif_bus_id *bid,
		enum hif_enable_type type)
{
	int ret = 0;
	int hif_type;
	int target_type;
	const struct platform_device_id *id = (struct platform_device_id *)bid;
	struct platform_device *pdev = bdev;
	struct hif_target_info *tgt_info = NULL;
	struct resource *memres = NULL;
	void __iomem *mem = NULL;
	uint32_t revision_id = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
	QDF_STATUS status;
	struct qdf_vbus_resource *vmres = NULL;

	sc->pdev = (struct pci_dev *)pdev;
	sc->dev = &pdev->dev;
	sc->devid = id->driver_data;

	/* derive hif/target type from the platform driver data */
	ret = hif_get_device_type(id->driver_data, revision_id,
			&hif_type, &target_type);
	if (ret < 0) {
		hif_err("Invalid device ret %d id %d revision_id %d",
			ret, (int)id->driver_data, revision_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (target_type == TARGET_TYPE_QCN6122) {
		/* QCN6122 gets its register space from the PLD layer and
		 * uses PCI-style irq ops instead of platform resources.
		 */
		hif_ahb_get_soc_info_pld(sc, dev);
		hif_update_irq_ops_with_pci(ol_sc);
	} else {
		status = pfrm_platform_get_resource(&pdev->dev,
						    (struct qdf_pfm_hndl *)pdev,
						    &vmres,
						    IORESOURCE_MEM, 0);
		if (QDF_IS_STATUS_ERROR(status)) {
			hif_err("Failed to get IORESOURCE_MEM");
			return status;
		}
		memres = (struct resource *)vmres;
		if (!memres) {
			hif_err("Failed to get IORESOURCE_MEM");
			return QDF_STATUS_E_IO;
		}

		/* Arrange for access to Target SoC registers. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
		status = pfrm_devm_ioremap_resource(
					dev,
					(struct qdf_vbus_resource *)memres,
					&mem);
#else
		status = pfrm_devm_request_and_ioremap(
					dev,
					(struct qdf_vbus_resource *)memres,
					&mem);
#endif
		if (QDF_IS_STATUS_ERROR(status)) {
			hif_err("ath: ioremap error");
			/* NOTE(review): ret is assigned here but the error
			 * path returns status, so this value is unused.
			 */
			ret = PTR_ERR(mem);
			goto err_cleanup1;
		}

		sc->mem = mem;
		pld_set_bar_addr(dev, mem);
		ol_sc->mem = mem;
		ol_sc->mem_pa = memres->start;
	}

	ret = pfrm_dma_set_mask(dev, 32);
	if (ret) {
		hif_err("ath: 32-bit DMA not available");
		status = QDF_STATUS_E_IO;
		goto err_cleanup1;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
	ret = pfrm_dma_set_mask_and_coherent(dev, 32);
#else
	ret = pfrm_dma_set_coherent_mask(dev, 32);
#endif
	if (ret) {
		hif_err("Failed to set dma mask error = %d", ret);
		return QDF_STATUS_E_IO;
	}

	tgt_info = hif_get_target_info_handle((struct hif_opaque_softc *)ol_sc);

	tgt_info->target_type = target_type;
	hif_register_tbl_attach(ol_sc, hif_type);
	hif_target_register_tbl_attach(ol_sc, target_type);
	/*
	 * In QCA5018 CE region moved to SOC outside WCSS block.
	 * Allocate separate I/O remap to access CE registers.
	 */
	if (tgt_info->target_type == TARGET_TYPE_QCA5018) {
		struct hif_softc *scn = HIF_GET_SOFTC(sc);

		sc->mem_ce = ioremap_nocache(HOST_CE_ADDRESS, HOST_CE_SIZE);
		if (IS_ERR(sc->mem_ce)) {
			hif_err("CE: ioremap failed");
			return QDF_STATUS_E_IO;
		}
		ol_sc->mem_ce = sc->mem_ce;
	}

	/* targets in this list manage the radio/clocks themselves */
	if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
			(tgt_info->target_type != TARGET_TYPE_QCA8074V2) &&
			(tgt_info->target_type != TARGET_TYPE_QCA9574) &&
			(tgt_info->target_type != TARGET_TYPE_QCA5018) &&
			(tgt_info->target_type != TARGET_TYPE_QCN6122) &&
			(tgt_info->target_type != TARGET_TYPE_QCA6018)) {
		if (hif_ahb_enable_radio(sc, pdev, id) != 0) {
			hif_err("error in enabling soc");
			return QDF_STATUS_E_IO;
		}

		if (hif_target_sync_ahb(ol_sc) < 0) {
			status = QDF_STATUS_E_IO;
			goto err_target_sync;
		}
	}
	hif_info("X - hif_type = 0x%x, target_type = 0x%x",
		hif_type, target_type);

	return QDF_STATUS_SUCCESS;
err_target_sync:
	/* disable only targets whose radio we enabled above */
	if ((tgt_info->target_type != TARGET_TYPE_QCA8074) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA8074V2) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA9574) &&
	    (tgt_info->target_type != TARGET_TYPE_QCN6122) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA5018) &&
	    (tgt_info->target_type != TARGET_TYPE_QCA6018)) {
		hif_err("Disabling target");
		hif_ahb_disable_bus(ol_sc);
	}
err_cleanup1:
	return status;
}
704 
705 
/**
 * hif_ahb_reset_soc() - reset soc
 * @hif_ctx: HIF context
 *
 * Resets the SoC and holds the target in reset state by delegating to
 * the AHB device-reset helper.
 *
 * Return: void
 */
void hif_ahb_reset_soc(struct hif_softc *hif_ctx)
{
	hif_ahb_device_reset(hif_ctx);
}
721 
722 
/**
 * hif_ahb_nointrs() - disable IRQ
 *
 * @scn: struct hif_softc
 *
 * This function stops interrupt(s): it unregisters the CE irq state
 * and frees whichever irqs were requested -- MSI vectors, the single
 * legacy irq, or the per-CE + exec-group irqs.
 *
 * Return: none
 */
void hif_ahb_nointrs(struct hif_softc *scn)
{
	int i;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *host_ce_conf = hif_state->host_ce_config;

	scn->free_irq_done = true;
	ce_unregister_irq(hif_state, CE_ALL_BITMAP);

	/* nothing to free if irqs were never requested */
	if (scn->request_irq_done == false)
		return;

	if (sc->num_msi_intrs > 0) {
		/* MSI interrupt(s) */
		for (i = 0; i < sc->num_msi_intrs; i++) {
			pfrm_free_irq(scn->qdf_dev->dev, sc->irq + i, sc);
		}
		sc->num_msi_intrs = 0;
	} else {
		if (!scn->per_ce_irq) {
			/* single legacy irq path */
			pfrm_free_irq(scn->qdf_dev->dev, sc->irq, sc);
		} else {
			/* free each per-CE irq that was actually set up */
			for (i = 0; i < scn->ce_count; i++) {
				if (host_ce_conf[i].flags
						& CE_ATTR_DISABLE_INTR)
					continue;
				if (!hif_state->tasklets[i].inited)
					continue;
				pfrm_free_irq(
					scn->qdf_dev->dev,
					ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i],
					&hif_state->tasklets[i]);
			}
			hif_ahb_deconfigure_grp_irq(scn);
		}
	}
	scn->request_irq_done = false;

}
772 
/**
 * hif_ahb_irq_enable() - enable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * This function enables the interrupt for the radio by setting the
 * CE's bit in the host interrupt-enable registers (src, dst, and --
 * on some targets -- the third dst-ring register), or delegates to
 * the PCI path when per-CE irqs are not in use.
 *
 * Return: N/A
 */
void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;
	/* CE registers live in mem_ce when it is mapped separately */
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			/* read-modify-write under irq_reg_lock */
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval |= HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval |= HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Enable destination ring interrupts for
				 * 8074, 8074V2, 6018 and 50xx
				 */
				regval = hif_read32_mb(scn, mem +
					HOST_IE_ADDRESS_3);
				regval |= HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	} else {
		hif_pci_irq_enable(scn, ce_id);
	}
}
827 
/**
 * hif_ahb_irq_disable() - disable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * Mirror of hif_ahb_irq_enable(): clears the CE's bit in the host
 * interrupt-enable registers when per-CE irqs are in use; no-op
 * otherwise (the non-per-CE path has no PCI fallback here).
 *
 * Return: N/A
 */
void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;
	/* CE registers live in mem_ce when it is mapped separately */
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			/* read-modify-write under irq_reg_lock */
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval &= ~HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval &= ~HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Disable destination ring interrupts for
				 * 8074, 8074V2, 6018 and 50xx
				 */
				regval = hif_read32_mb(scn, mem +
					HOST_IE_ADDRESS_3);
				regval &= ~HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	}
}
878 
879 void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
880 {
881 	int i;
882 
883 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
884 	if (hif_ext_group->irq_enabled) {
885 		for (i = 0; i < hif_ext_group->numirq; i++) {
886 			disable_irq_nosync(hif_ext_group->os_irq[i]);
887 		}
888 		hif_ext_group->irq_enabled = false;
889 	}
890 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
891 }
892 
893 void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
894 {
895 	int i;
896 
897 	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
898 	if (hif_ext_group->irq_requested && !hif_ext_group->irq_enabled) {
899 		for (i = 0; i < hif_ext_group->numirq; i++) {
900 			enable_irq(hif_ext_group->os_irq[i]);
901 		}
902 		hif_ext_group->irq_enabled = true;
903 	}
904 	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
905 }
906 
907 /**
908  * hif_ahb_needs_bmi() - return true if the soc needs bmi through the driver
909  * @scn: hif context
910  *
911  * Return: true if soc needs driver bmi otherwise false
912  */
913 bool hif_ahb_needs_bmi(struct hif_softc *scn)
914 {
915 	return !ce_srng_based(scn);
916 }
917 
/**
 * hif_ahb_display_stats() - log the copy-engine statistics
 * @scn: hif context; ignored with an error log when NULL
 *
 * Return: none
 */
void hif_ahb_display_stats(struct hif_softc *scn)
{
	/* guard against callers passing a NULL context */
	if (!scn) {
		hif_err("hif_scn null");
		return;
	}

	hif_display_ce_stats(scn);
}
926 
/**
 * hif_ahb_clear_stats() - reset the copy-engine statistics
 * @scn: hif context; ignored with an error log when its CE state is NULL
 *
 * Return: none
 */
void hif_ahb_clear_stats(struct hif_softc *scn)
{
	struct HIF_CE_state *ce_state = HIF_GET_CE_STATE(scn);

	/* guard against a missing CE state */
	if (!ce_state) {
		hif_err("hif_state null");
		return;
	}

	hif_clear_ce_stats(ce_state);
}
937