1  /*
2   * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3   * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4   *
5   * Permission to use, copy, modify, and/or distribute this software for
6   * any purpose with or without fee is hereby granted, provided that the
7   * above copyright notice and this permission notice appear in all
8   * copies.
9   *
10   * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11   * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12   * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13   * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14   * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15   * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16   * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17   * PERFORMANCE OF THIS SOFTWARE.
18   */
19  
20  /**
21   * DOC: if_ahb.c
22   *
 * C file for AHB bus specific implementations.
24   */
25  
26  #include "hif.h"
27  #include "target_type.h"
28  #include "hif_main.h"
29  #include "hif_debug.h"
30  #include "hif_io32.h"
31  #include "ce_main.h"
32  #include "ce_api.h"
33  #include "ce_tasklet.h"
34  #include "if_ahb.h"
35  #include "if_pci.h"
36  #include "ahb_api.h"
37  #include "pci_api.h"
38  #include "hif_napi.h"
39  #include "qal_vbus_dev.h"
40  #include "qdf_irq.h"
41  
42  #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
43  #define IRQF_DISABLED 0x00000020
44  #endif
45  
46  #define HIF_IC_CE0_IRQ_OFFSET 4
47  #define HIF_IC_MAX_IRQ 58
48  
/* OS irq numbers, filled in as each line is resolved via pfrm_get_irq() */
static uint16_t ic_irqnum[HIF_IC_MAX_IRQ];
/* integrated chip irq names
 *
 * Index order is the device's logical interrupt-line numbering and must not
 * be changed: entries 0-3 are the misc lines, entries starting at
 * HIF_IC_CE0_IRQ_OFFSET (4) are the per-copy-engine lines (ce0..ce11),
 * followed by the DP ring interrupt lines.
 *
 * NOTE(review): 57 initializers for HIF_IC_MAX_IRQ (58) slots; the last
 * entry is implicitly NULL — confirm index 57 is never requested.
 */
const char *ic_irqname[HIF_IC_MAX_IRQ] = {
"misc-pulse1",
"misc-latch",
"sw-exception",
"watchdog",
"ce0",
"ce1",
"ce2",
"ce3",
"ce4",
"ce5",
"ce6",
"ce7",
"ce8",
"ce9",
"ce10",
"ce11",
"host2wbm-desc-feed",
"host2reo-re-injection",
"host2reo-command",
"host2rxdma-monitor-ring3",
"host2rxdma-monitor-ring2",
"host2rxdma-monitor-ring1",
"reo2ost-exception",
"wbm2host-rx-release",
"reo2host-status",
"reo2host-destination-ring4",
"reo2host-destination-ring3",
"reo2host-destination-ring2",
"reo2host-destination-ring1",
"rxdma2host-monitor-destination-mac3",
"rxdma2host-monitor-destination-mac2",
"rxdma2host-monitor-destination-mac1",
"ppdu-end-interrupts-mac3",
"ppdu-end-interrupts-mac2",
"ppdu-end-interrupts-mac1",
"rxdma2host-monitor-status-ring-mac3",
"rxdma2host-monitor-status-ring-mac2",
"rxdma2host-monitor-status-ring-mac1",
"host2rxdma-host-buf-ring-mac3",
"host2rxdma-host-buf-ring-mac2",
"host2rxdma-host-buf-ring-mac1",
"rxdma2host-destination-ring-mac3",
"rxdma2host-destination-ring-mac2",
"rxdma2host-destination-ring-mac1",
"host2tcl-input-ring4",
"host2tcl-input-ring3",
"host2tcl-input-ring2",
"host2tcl-input-ring1",
"wbm2host-tx-completions-ring4",
"wbm2host-tx-completions-ring3",
"wbm2host-tx-completions-ring2",
"wbm2host-tx-completions-ring1",
"tcl2host-status-ring",
"txmon2host-monitor-destination-mac3",
"txmon2host-monitor-destination-mac2",
"txmon2host-monitor-destination-mac1",
"host2tx-monitor-ring1",
};
110  
111  /**
112   * hif_ahb_get_irq_name() - get irqname
113   * @irq_no: irq number
114   *
115   * This function gives irqnumber to irqname
116   * mapping.
117   *
118   * Return: irq name
119   */
hif_ahb_get_irq_name(int irq_no)120  const char *hif_ahb_get_irq_name(int irq_no)
121  {
122  	return ic_irqname[irq_no];
123  }
124  
/**
 * hif_ahb_disable_isr() - disable isr
 * @scn: struct hif_softc
 *
 * This function disables isr and kills tasklets
 *
 * Return: void
 */
void hif_ahb_disable_isr(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	/* Stop exec-context processing first, then release the bus irqs
	 * so nothing new gets scheduled while tasklets are being killed.
	 */
	hif_exec_kill(&scn->osc);
	hif_nointrs(scn);
	/* Kill the per-CE tasklets, then the legacy shared tasklet */
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	/* All deferred work is gone; reset the outstanding-work counters */
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}
143  
/**
 * hif_ahb_dump_registers() - dump bus debug registers
 * @hif_ctx: struct hif_opaque_softc
 *
 * Dump the copy engine debug registers; a failure is only logged.
 *
 * Return: always 0 (errors are reported via the log)
 */
int hif_ahb_dump_registers(struct hif_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	int rc = hif_dump_ce_registers(scn);

	if (rc)
		hif_err("Dump CE Registers Failed status %d", rc);

	return 0;
}
163  
/**
 * hif_ahb_close() - hif_bus_close
 * @scn: pointer to the hif context.
 *
 * Callback for hif_bus_close; AHB has no bus-specific teardown of its
 * own here, so this only releases the copy engine state.
 *
 * Return: n/a
 */
void hif_ahb_close(struct hif_softc *scn)
{
	hif_ce_close(scn);
}
177  
178  /**
179   * hif_ahb_open() - hif_ahb open
180   * @hif_ctx: hif context
181   * @bus_type: bus type
182   *
183   * This is a callback function for hif_bus_open.
184   *
185   * Return: QDF_STATUS
186   */
hif_ahb_open(struct hif_softc * hif_ctx,enum qdf_bus_type bus_type)187  QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
188  {
189  
190  	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
191  
192  	qdf_spinlock_create(&sc->irq_lock);
193  	return hif_ce_open(hif_ctx);
194  }
195  
/**
 * hif_ahb_bus_configure() - Configure the bus
 * @scn: pointer to the hif context.
 *
 * AHB reuses the PCI bus-configure path unchanged.
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_ahb_bus_configure(struct hif_softc *scn)
{
	return hif_pci_bus_configure(scn);
}
208  
hif_ahb_get_bar_addr_pld(struct hif_pci_softc * sc,struct device * dev)209  static void hif_ahb_get_bar_addr_pld(struct hif_pci_softc *sc,
210  				     struct device *dev)
211  {
212  	struct pld_soc_info info;
213  	int ret = 0;
214  
215  	ret = pld_get_soc_info(dev, &info);
216  	sc->mem = info.v_addr;
217  	pld_set_bar_addr(dev, info.v_addr);
218  	sc->ce_sc.ol_sc.mem    = info.v_addr;
219  	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
220  }
221  
hif_ahb_get_soc_cmem_info_pld(struct hif_pci_softc * sc,struct device * dev)222  static void hif_ahb_get_soc_cmem_info_pld(struct hif_pci_softc *sc,
223  					  struct device *dev)
224  {
225  	struct pld_soc_info info;
226  	int ret = 0;
227  	struct hif_softc *scn = HIF_GET_SOFTC(sc);
228  
229  	ret = pld_get_soc_info(dev, &info);
230  	/* dev_mem_info[0] is for CMEM */
231  	scn->cmem_start = info.dev_mem_info[0].start;
232  	scn->cmem_size = info.dev_mem_info[0].size;
233  }
234  
/**
 * hif_ahb_configure_irq_by_ceid() - request the interrupt for one copy engine
 * @scn: hif context
 * @ce_id: copy engine id, must be < CE_COUNT_MAX
 *
 * Resolves the platform irq line for this CE, requests it with the CE's
 * tasklet entry as handler context, records the OS irq number in
 * ic_irqnum[] (used later by hif_ahb_nointrs() to free it), and enables
 * the CE interrupt.
 *
 * Return: 0 on success, -EINVAL for a bad ce_id, -EFAULT on failure
 */
int hif_ahb_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
{
	int ret = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	int irq = 0;

	if (ce_id >= CE_COUNT_MAX)
		return -EINVAL;

	/* CE lines start at HIF_IC_CE0_IRQ_OFFSET in the ic_irqname table */
	ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
			   ic_irqname[HIF_IC_CE0_IRQ_OFFSET + ce_id],
			   HIF_IC_CE0_IRQ_OFFSET + ce_id, &irq);
	if (ret) {
		dev_err(&pdev->dev, "get irq failed\n");
		ret = -EFAULT;
		goto end;
	}

	/* Remember the OS irq number so teardown can free it */
	ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + ce_id] = irq;
	ret = pfrm_request_irq(&pdev->dev, irq,
			       hif_ahb_interrupt_handler,
			       IRQF_TRIGGER_RISING,
			       ic_irqname[HIF_IC_CE0_IRQ_OFFSET + ce_id],
			       &hif_state->tasklets[ce_id]);
	if (ret) {
		dev_err(&pdev->dev, "ath_request_irq failed\n");
		ret = -EFAULT;
		goto end;
	}
	/* Irq is wired up; unmask this CE's interrupt in the IE registers */
	hif_ahb_irq_enable(scn, ce_id);

end:
	return ret;
}
271  
hif_ahb_configure_irq(struct hif_pci_softc * sc)272  int hif_ahb_configure_irq(struct hif_pci_softc *sc)
273  {
274  	int ret = 0;
275  	struct hif_softc *scn = HIF_GET_SOFTC(sc);
276  	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
277  	struct CE_attr *host_ce_conf = hif_state->host_ce_config;
278  	int i;
279  
280  	/* configure per CE interrupts */
281  	for (i = 0; i < scn->ce_count; i++) {
282  		if (host_ce_conf[i].flags & CE_ATTR_DISABLE_INTR)
283  			continue;
284  
285  		if (host_ce_conf[i].flags & CE_ATTR_INIT_ON_DEMAND)
286  			continue;
287  
288  		ret = hif_ahb_configure_irq_by_ceid(scn, i);
289  		if (ret)
290  			goto end;
291  	}
292  
293  end:
294  	return ret;
295  }
296  
/**
 * hif_ahb_configure_grp_irq() - request the ext-group interrupts
 * @scn: hif context
 * @hif_ext_group: execution context whose irq[] lines are to be hooked up
 *
 * Installs the group's enable/disable/name callbacks, resolves every
 * logical line in the group to an OS irq number, then requests each irq
 * with the group as handler context.
 *
 * Return: 0 on success, -EFAULT on failure
 */
int hif_ahb_configure_grp_irq(struct hif_softc *scn,
			      struct hif_exec_context *hif_ext_group)
{
	int ret = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	int irq = 0;
	int j;

	/* configure external interrupts */
	hif_ext_group->irq_enable = &hif_ahb_exec_grp_irq_enable;
	hif_ext_group->irq_disable = &hif_ahb_exec_grp_irq_disable;
	hif_ext_group->irq_name = &hif_ahb_get_irq_name;
	hif_ext_group->work_complete = &hif_dummy_grp_done;

	/* First pass: translate every logical line to an OS irq number */
	for (j = 0; j < hif_ext_group->numirq; j++) {
		ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
				   ic_irqname[hif_ext_group->irq[j]],
				   hif_ext_group->irq[j], &irq);
		if (ret) {
			dev_err(&pdev->dev, "get irq failed\n");
			ret = -EFAULT;
			goto end;
		}
		ic_irqnum[hif_ext_group->irq[j]] = irq;
		hif_ext_group->os_irq[j] = irq;
	}

	/* Second pass: flag unlazy-disable, then request each irq */
	for (j = 0; j < hif_ext_group->numirq; j++) {
		irq = hif_ext_group->os_irq[j];

		qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
		qdf_dev_set_irq_status_flags(irq, QDF_IRQ_DISABLE_UNLAZY);
		qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);

		ret = pfrm_request_irq(scn->qdf_dev->dev,
				       irq, hif_ext_group_interrupt_handler,
				       IRQF_TRIGGER_RISING | IRQF_SHARED,
				       ic_irqname[hif_ext_group->irq[j]],
				       hif_ext_group);
		if (ret) {
			dev_err(&pdev->dev, "ath_request_irq failed\n");
			ret = -EFAULT;
			goto end;
		}
	}

	/* Publish under the lock so enable/disable see consistent state */
	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
	hif_ext_group->irq_requested = true;
	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
end:
	return ret;
}
350  
/**
 * hif_ahb_deconfigure_grp_irq() - release all ext-group interrupts
 * @scn: hif context
 *
 * For every group that has its irqs requested: mark it un-requested and
 * clear the unlazy flag under the irq_lock, then free the irqs with the
 * lock dropped (see the deadlock note below).
 *
 * Return: none
 */
void hif_ahb_deconfigure_grp_irq(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i, j;
	int irq = 0;

	/* tear down external group interrupts */
	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		if (hif_ext_group->irq_requested == true) {
			qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
			hif_ext_group->irq_requested = false;
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				hif_ext_group->irq_enabled = false;
				qdf_dev_clear_irq_status_flags(
							irq,
							QDF_IRQ_DISABLE_UNLAZY);
			}
			qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);

			/* Avoid holding the irq_lock while freeing the irq
			 * as the same lock is being held by the irq handler
			 * while disabling the irq. This causes a deadlock
			 * between free_irq and irq_handler.
			 */
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				pfrm_free_irq(scn->qdf_dev->dev,
					      irq, hif_ext_group);
			}
		}
	}
}
386  
hif_ahb_interrupt_handler(int irq,void * context)387  irqreturn_t hif_ahb_interrupt_handler(int irq, void *context)
388  {
389  	struct ce_tasklet_entry *tasklet_entry = context;
390  	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
391  }
392  
/**
 * hif_ahb_disable_bus() - Disable the bus
 * @scn : pointer to the hif context
 *
 * This function disables the bus and helds the target in reset state
 *
 * Return: none
 */
void hif_ahb_disable_bus(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	void __iomem *mem;
	struct platform_device *pdev = (struct platform_device *)sc->pdev;
	struct resource *memres = NULL;
	int mem_pa_size = 0;
	struct hif_target_info *tgt_info = NULL;
	struct qdf_vbus_resource *vmres = NULL;
	QDF_STATUS status;

	tgt_info = &scn->target_info;
	/*Disable WIFI clock input*/
	if (sc->mem) {
		/* Re-query the MEM resource to learn the region size to
		 * release below.
		 */
		status = pfrm_platform_get_resource(
				scn->qdf_dev->dev,
				(struct qdf_pfm_hndl *)pdev, &vmres,
				IORESOURCE_MEM, 0);
		if (QDF_IS_STATUS_ERROR(status)) {
			hif_info("Failed to get IORESOURCE_MEM");
			return;
		}
		memres = (struct resource *)vmres;
		if (memres)
			mem_pa_size = memres->end - memres->start + 1;

		/* These targets got a separate CE region mapping in
		 * hif_ahb_enable_bus(); undo it first.
		 */
		if (tgt_info->target_type == TARGET_TYPE_QCA5018 ||
		    tgt_info->target_type == TARGET_TYPE_QCA5332) {
			iounmap(sc->mem_ce);
			sc->mem_ce = NULL;
			scn->mem_ce = NULL;
		}
		/* PMM and CMEM mappings only exist if enable_bus made them */
		if (sc->mem_pmm_base) {
			iounmap(sc->mem_pmm_base);
			sc->mem_pmm_base = NULL;
			scn->mem_pmm_base = NULL;
		}
		if (sc->mem_cmem) {
			iounmap(sc->mem_cmem);
			sc->mem_cmem = NULL;
			scn->mem_cmem = NULL;
		}
		/* Finally drop the main BAR mapping and its mem region */
		mem = (void __iomem *)sc->mem;
		if (mem) {
			pfrm_devm_iounmap(&pdev->dev, mem);
			pfrm_devm_release_mem_region(&pdev->dev, scn->mem_pa,
						     mem_pa_size);
			sc->mem = NULL;
			pld_set_bar_addr(&pdev->dev, NULL);
		}
	}
	scn->mem = NULL;
}
454  
455  /**
456   * hif_ahb_enable_bus() - Enable the bus
457   * @ol_sc: HIF context
458   * @dev: dev
459   * @bdev: bus dev
460   * @bid: bus id
461   * @type: bus type
462   *
463   * This function enables the radio bus by enabling necessary
464   * clocks and waits for the target to get ready to proceed further
465   *
466   * Return: QDF_STATUS
467   */
hif_ahb_enable_bus(struct hif_softc * ol_sc,struct device * dev,void * bdev,const struct hif_bus_id * bid,enum hif_enable_type type)468  QDF_STATUS hif_ahb_enable_bus(struct hif_softc *ol_sc,
469  		struct device *dev, void *bdev,
470  		const struct hif_bus_id *bid,
471  		enum hif_enable_type type)
472  {
473  	int ret = 0;
474  	int hif_type;
475  	int target_type;
476  	const struct platform_device_id *id = (struct platform_device_id *)bid;
477  	struct platform_device *pdev = bdev;
478  	struct hif_target_info *tgt_info = NULL;
479  	struct resource *memres = NULL;
480  	void __iomem *mem = NULL;
481  	uint32_t revision_id = 0;
482  	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
483  	QDF_STATUS status;
484  	struct qdf_vbus_resource *vmres = NULL;
485  
486  	sc->pdev = (struct pci_dev *)pdev;
487  	sc->dev = &pdev->dev;
488  	sc->devid = id->driver_data;
489  
490  	ret = hif_get_device_type(id->driver_data, revision_id,
491  			&hif_type, &target_type);
492  	if (ret < 0) {
493  		hif_err("Invalid device ret %d id %d revision_id %d",
494  			ret, (int)id->driver_data, revision_id);
495  		return QDF_STATUS_E_FAILURE;
496  	}
497  
498  	if (target_type == TARGET_TYPE_QCN6122 ||
499  	    target_type == TARGET_TYPE_QCN9160 ||
500  	    target_type == TARGET_TYPE_QCN6432) {
501  		hif_ahb_get_bar_addr_pld(sc, dev);
502  	}
503  
504  	/* 11BE SoC chipsets Need to call this function to get cmem addr */
505  	if (target_type == TARGET_TYPE_QCA5332 ||
506  	    target_type == TARGET_TYPE_QCN6432)
507  		hif_ahb_get_soc_cmem_info_pld(sc, dev);
508  
509  	if (target_type == TARGET_TYPE_QCN6122 ||
510  	    target_type == TARGET_TYPE_QCN9160 ||
511  	    target_type == TARGET_TYPE_QCN6432) {
512  		hif_update_irq_ops_with_pci(ol_sc);
513  	} else {
514  		status = pfrm_platform_get_resource(&pdev->dev,
515  						    (struct qdf_pfm_hndl *)pdev,
516  						    &vmres,
517  						    IORESOURCE_MEM, 0);
518  		if (QDF_IS_STATUS_ERROR(status)) {
519  			hif_err("Failed to get IORESOURCE_MEM");
520  			return status;
521  		}
522  		memres = (struct resource *)vmres;
523  		if (!memres) {
524  			hif_err("Failed to get IORESOURCE_MEM");
525  			return QDF_STATUS_E_IO;
526  		}
527  
528  		/* Arrange for access to Target SoC registers. */
529  #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
530  		status = pfrm_devm_ioremap_resource(
531  					dev,
532  					(struct qdf_vbus_resource *)memres,
533  					&mem);
534  #else
535  		status = pfrm_devm_request_and_ioremap(
536  					dev,
537  					(struct qdf_vbus_resource *)memres,
538  					&mem);
539  #endif
540  		if (QDF_IS_STATUS_ERROR(status)) {
541  			hif_err("ath: ioremap error");
542  			ret = PTR_ERR(mem);
543  			goto err_cleanup1;
544  		}
545  
546  		sc->mem = mem;
547  		pld_set_bar_addr(dev, mem);
548  		ol_sc->mem = mem;
549  		ol_sc->mem_pa = memres->start;
550  	}
551  
552  	ret = pfrm_dma_set_mask(dev, 32);
553  	if (ret) {
554  		hif_err("ath: 32-bit DMA not available");
555  		status = QDF_STATUS_E_IO;
556  		goto err_cleanup1;
557  	}
558  
559  #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
560  	ret = pfrm_dma_set_mask_and_coherent(dev, 32);
561  #else
562  	ret = pfrm_dma_set_coherent_mask(dev, 32);
563  #endif
564  	if (ret) {
565  		hif_err("Failed to set dma mask error = %d", ret);
566  		return QDF_STATUS_E_IO;
567  	}
568  
569  	tgt_info = hif_get_target_info_handle((struct hif_opaque_softc *)ol_sc);
570  
571  	tgt_info->target_type = target_type;
572  	hif_register_tbl_attach(ol_sc, hif_type);
573  	hif_target_register_tbl_attach(ol_sc, target_type);
574  	/*
575  	 * In QCA5018 CE region moved to SOC outside WCSS block.
576  	 * Allocate separate I/O remap to access CE registers.
577  	 */
578  	if (tgt_info->target_type == TARGET_TYPE_QCA5018 ||
579  	    tgt_info->target_type == TARGET_TYPE_QCA5332) {
580  		struct hif_softc *scn = HIF_GET_SOFTC(sc);
581  
582  		sc->mem_ce = qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE);
583  		if (IS_ERR(sc->mem_ce)) {
584  			hif_err("CE: ioremap failed");
585  			return QDF_STATUS_E_IO;
586  		}
587  		ol_sc->mem_ce = sc->mem_ce;
588  		pld_set_bar_addr(dev, sc->mem_ce);
589  	}
590  
591  	if (tgt_info->target_type == TARGET_TYPE_QCA5332) {
592  		struct hif_softc *scn = HIF_GET_SOFTC(sc);
593  
594  		/*
595  		 * In QCA5332 CMEM region is outside WCSS block.
596  		 * Allocate separate I/O remap to access CMEM address.
597  		 */
598  		sc->mem_cmem = qdf_ioremap(HOST_CMEM_ADDRESS, HOST_CMEM_SIZE);
599  		if (IS_ERR(sc->mem_cmem)) {
600  			hif_err("CE: ioremap failed");
601  			return QDF_STATUS_E_IO;
602  		}
603  		ol_sc->mem_cmem = sc->mem_cmem;
604  
605  		/*
606  		 * PMM SCRATCH Register for QCA5332
607  		 */
608  		sc->mem_pmm_base = qdf_ioremap(PMM_SCRATCH_BASE,
609  						   PMM_SCRATCH_SIZE);
610  		if (IS_ERR(sc->mem_pmm_base)) {
611  			hif_err("CE: ioremap failed");
612  			return QDF_STATUS_E_IO;
613  		}
614  		ol_sc->mem_pmm_base = sc->mem_pmm_base;
615  	}
616  
617  	hif_info("X - hif_type = 0x%x, target_type = 0x%x",
618  		hif_type, target_type);
619  
620  	return QDF_STATUS_SUCCESS;
621  err_cleanup1:
622  	return status;
623  }
624  
/**
 * hif_ahb_nointrs() - disable IRQ
 *
 * @scn: struct hif_softc
 *
 * This function stops interrupt(s) and frees every irq that was
 * requested (MSI, shared legacy, or per-CE plus ext-group lines).
 *
 * Return: none
 */
void hif_ahb_nointrs(struct hif_softc *scn)
{
	int i;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_attr *host_ce_conf = hif_state->host_ce_config;

	scn->free_irq_done = true;
	ce_unregister_irq(hif_state, CE_ALL_BITMAP);

	/* Nothing to free if irqs were never requested */
	if (scn->request_irq_done == false)
		return;

	if (sc->num_msi_intrs > 0) {
		/* MSI interrupt(s) */
		for (i = 0; i < sc->num_msi_intrs; i++) {
			pfrm_free_irq(scn->qdf_dev->dev, sc->irq + i, sc);
		}
		sc->num_msi_intrs = 0;
	} else {
		if (!scn->per_ce_irq) {
			/* single shared legacy interrupt line */
			pfrm_free_irq(scn->qdf_dev->dev, sc->irq, sc);
		} else {
			/* One line per CE: free only those that were
			 * actually requested (interrupts enabled and
			 * tasklet inited), using the OS irq numbers cached
			 * in ic_irqnum[] at request time.
			 */
			for (i = 0; i < scn->ce_count; i++) {
				if (host_ce_conf[i].flags
						& CE_ATTR_DISABLE_INTR)
					continue;
				if (!hif_state->tasklets[i].inited)
					continue;
				pfrm_free_irq(
					scn->qdf_dev->dev,
					ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i],
					&hif_state->tasklets[i]);
			}
			hif_ahb_deconfigure_grp_irq(scn);
		}
	}
	scn->request_irq_done = false;

}
674  
/**
 * hif_ahb_irq_enable() - enable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * This function enables the interrupt for the radio by setting this
 * CE's bit in the host interrupt-enable (IE) registers: IE reg 1 for
 * out (source) pipes, IE reg 2 for in (destination) pipes, and
 * additionally IE reg 3 on the listed targets.
 *
 * Return: N/A
 */
void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;
	/* CE registers live in the dedicated CE mapping when present */
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			/* read-modify-write under irq_reg_lock */
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval |= HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval |= HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5332 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Enable destination ring interrupts for
				 * 8074, 8074V2, 6018 and 50xx
				 */
				regval = hif_read32_mb(scn, mem +
					HOST_IE_ADDRESS_3);
				regval |= HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	} else {
		/* Shared-interrupt mode falls back to the PCI path */
		hif_pci_irq_enable(scn, ce_id);
	}
}
730  
/**
 * hif_ahb_irq_disable() - disable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * Mirror of hif_ahb_irq_enable(): clears this CE's bit in the IE
 * registers. Note there is no shared-interrupt fallback here.
 *
 * Return: N/A
 */
void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id)
{
	uint32_t regval;
	uint32_t reg_offset = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct CE_pipe_config *target_ce_conf = &hif_state->target_ce_config[ce_id];
	struct hif_target_info *tgt_info = &scn->target_info;
	/* CE registers live in the dedicated CE mapping when present */
	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;

	if (scn->per_ce_irq) {
		if (target_ce_conf->pipedir & PIPEDIR_OUT) {
			reg_offset = HOST_IE_ADDRESS;
			/* read-modify-write under irq_reg_lock */
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval &= ~HOST_IE_REG1_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
		if (target_ce_conf->pipedir & PIPEDIR_IN) {
			reg_offset = HOST_IE_ADDRESS_2;
			qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
			regval = hif_read32_mb(scn, mem + reg_offset);
			regval &= ~HOST_IE_REG2_CE_BIT(ce_id);
			hif_write32_mb(scn, mem + reg_offset, regval);
			if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
			    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
			    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5332 ||
			    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
			    tgt_info->target_type == TARGET_TYPE_QCA6018) {
				/* Disable destination ring interrupts for
				 * 8074, 8074V2, 6018 and 50xx
				 */
				regval = hif_read32_mb(scn, mem +
					HOST_IE_ADDRESS_3);
				regval &= ~HOST_IE_REG3_CE_BIT(ce_id);

				hif_write32_mb(scn, mem +
					       HOST_IE_ADDRESS_3, regval);
			}
			qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
		}
	}
}
782  
hif_ahb_exec_grp_irq_disable(struct hif_exec_context * hif_ext_group)783  void hif_ahb_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
784  {
785  	int i;
786  
787  	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
788  	if (hif_ext_group->irq_enabled) {
789  		for (i = 0; i < hif_ext_group->numirq; i++) {
790  			disable_irq_nosync(hif_ext_group->os_irq[i]);
791  		}
792  		hif_ext_group->irq_enabled = false;
793  	}
794  	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
795  }
796  
hif_ahb_exec_grp_irq_enable(struct hif_exec_context * hif_ext_group)797  void hif_ahb_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
798  {
799  	int i;
800  
801  	qdf_spin_lock_irqsave(&hif_ext_group->irq_lock);
802  	if (hif_ext_group->irq_requested && !hif_ext_group->irq_enabled) {
803  		for (i = 0; i < hif_ext_group->numirq; i++) {
804  			enable_irq(hif_ext_group->os_irq[i]);
805  		}
806  		hif_ext_group->irq_enabled = true;
807  	}
808  	qdf_spin_unlock_irqrestore(&hif_ext_group->irq_lock);
809  }
810  
811  /**
812   * hif_ahb_needs_bmi() - return true if the soc needs bmi through the driver
813   * @scn: hif context
814   *
815   * Return: true if soc needs driver bmi otherwise false
816   */
hif_ahb_needs_bmi(struct hif_softc * scn)817  bool hif_ahb_needs_bmi(struct hif_softc *scn)
818  {
819  	return !ce_srng_based(scn);
820  }
821  
822  /**
823   * hif_display_ahb_irq_regs() - prints the host interrupt enable (IE) regs
824   * @scn: hif context
825   *
826   * Return: None
827   */
828  
hif_display_ahb_irq_regs(struct hif_softc * scn)829  void hif_display_ahb_irq_regs(struct hif_softc *scn)
830  {
831  	uint32_t regval;
832  	void *mem = scn->mem_ce ? scn->mem_ce : scn->mem;
833  	struct hif_target_info *tgt_info = &scn->target_info;
834  
835  	if (tgt_info->target_type == TARGET_TYPE_QCN6122 ||
836  	    tgt_info->target_type == TARGET_TYPE_QCN9160 ||
837  	    tgt_info->target_type == TARGET_TYPE_QCN6432) {
838  		return;
839  	}
840  	if (scn->per_ce_irq) {
841  		regval = hif_read32_mb(scn, mem + HOST_IE_ADDRESS);
842  		hif_nofl_err("IRQ enable register value 0x%08x", regval);
843  
844  		regval = hif_read32_mb(scn, mem + HOST_IE_ADDRESS_2);
845  		hif_nofl_err("IRQ enable register 2 value 0x%08x", regval);
846  
847  		if (tgt_info->target_type == TARGET_TYPE_QCA8074 ||
848  		    tgt_info->target_type == TARGET_TYPE_QCA8074V2 ||
849  		    tgt_info->target_type == TARGET_TYPE_QCA9574 ||
850  		    tgt_info->target_type == TARGET_TYPE_QCA5332 ||
851  		    tgt_info->target_type == TARGET_TYPE_QCA5018 ||
852  		    tgt_info->target_type == TARGET_TYPE_QCA6018) {
853  			regval = hif_read32_mb(scn, mem +
854  					       HOST_IE_ADDRESS_3);
855  			hif_nofl_err("IRQ enable register 3 value 0x%08x",
856  				     regval);
857  		}
858  	}
859  }
860  
/**
 * hif_ahb_display_stats() - dump AHB irq registers and CE statistics
 * @scn: hif context (may be NULL; logged and ignored)
 *
 * Return: none
 */
void hif_ahb_display_stats(struct hif_softc *scn)
{
	if (!scn) {
		hif_err("hif_scn null");
		return;
	}

	hif_display_ahb_irq_regs(scn);
	hif_display_ce_stats(scn);
}
870  
/**
 * hif_ahb_clear_stats() - reset the copy engine statistics
 * @scn: hif context
 *
 * Return: none
 */
void hif_ahb_clear_stats(struct hif_softc *scn)
{
	struct HIF_CE_state *ce_state = HIF_GET_CE_STATE(scn);

	if (!ce_state) {
		hif_err("hif_state null");
		return;
	}

	hif_clear_ce_stats(ce_state);
}
881