xref: /wlan-dirver/qca-wifi-host-cmn/hif/src/ipcie/if_ipci.c (revision 2f4b444fb7e689b83a4ab0e7b3b38f0bf4def8e0)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7 
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/slab.h>
18 #include <linux/interrupt.h>
19 #include <linux/if_arp.h>
20 #include "hif_io32.h"
21 #include "if_ipci.h"
22 #include "hif.h"
23 #include "target_type.h"
24 #include "hif_main.h"
25 #include "ce_main.h"
26 #include "ce_api.h"
27 #include "ce_internal.h"
28 #include "ce_reg.h"
29 #include "ce_bmi.h"
30 #include "regtable.h"
31 #include "hif_hw_version.h"
32 #include <linux/debugfs.h>
33 #include <linux/seq_file.h>
34 #include "qdf_status.h"
35 #include "qdf_atomic.h"
36 #include "pld_common.h"
37 #include "mp_dev.h"
38 #include "hif_debug.h"
39 
40 #include "ce_tasklet.h"
41 #include "targaddrs.h"
42 #include "hif_exec.h"
43 
44 #include "ipci_api.h"
45 
46 #ifdef FEATURE_RUNTIME_PM
47 inline struct hif_runtime_pm_ctx *hif_ipci_get_rpm_ctx(struct hif_softc *scn)
48 {
49 	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(scn);
50 
51 	return &sc->rpm_ctx;
52 }
53 
54 inline struct device *hif_ipci_get_dev(struct hif_softc *scn)
55 {
56 	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(scn);
57 
58 	return sc->dev;
59 }
60 #endif
61 
/**
 * hif_ipci_enable_power_management() - enable HIF power management
 * @hif_sc: hif context
 * @is_packet_log_enabled: pktlog state; accepted for cross-bus API parity
 *                         but not consulted on the ipci bus
 *
 * Starts runtime PM for this HIF instance.
 *
 * Return: none
 */
void hif_ipci_enable_power_management(struct hif_softc *hif_sc,
				      bool is_packet_log_enabled)
{
	hif_pm_runtime_start(hif_sc);
}
67 
/**
 * hif_ipci_disable_power_management() - disable HIF power management
 * @hif_ctx: hif context
 *
 * Stops runtime PM; counterpart of hif_ipci_enable_power_management().
 *
 * Return: none
 */
void hif_ipci_disable_power_management(struct hif_softc *hif_ctx)
{
	hif_pm_runtime_stop(hif_ctx);
}
72 
/**
 * hif_ipci_display_stats() - dump copy-engine statistics to the log
 * @hif_ctx: hif context
 *
 * Return: none
 */
void hif_ipci_display_stats(struct hif_softc *hif_ctx)
{
	hif_display_ce_stats(hif_ctx);
}
77 
78 void hif_ipci_clear_stats(struct hif_softc *hif_ctx)
79 {
80 	struct hif_ipci_softc *ipci_ctx = HIF_GET_IPCI_SOFTC(hif_ctx);
81 
82 	if (!ipci_ctx) {
83 		hif_err("hif_ctx null");
84 		return;
85 	}
86 	hif_clear_ce_stats(&ipci_ctx->ce_sc);
87 }
88 
89 QDF_STATUS hif_ipci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
90 {
91 	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(hif_ctx);
92 
93 	hif_ctx->bus_type = bus_type;
94 	hif_pm_runtime_open(hif_ctx);
95 
96 	qdf_spinlock_create(&sc->irq_lock);
97 
98 	return hif_ce_open(hif_ctx);
99 }
100 
101 /**
102  * hif_ce_msi_map_ce_to_irq() - map CE to IRQ
103  * @scn: hif context
104  * @ce_id: CE Id
105  *
106  * Return: IRQ number
107  */
108 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
109 {
110 	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
111 
112 	return ipci_scn->ce_msi_irq_num[ce_id];
113 }
114 
/**
 * hif_ipci_bus_configure() - configure the ipci bus
 * @hif_sc: hif context
 *
 * Enables WLAN, configures the copy engines and their IRQs, and records
 * the wake-capable CE IRQ.  On failure, completed steps are unwound in
 * reverse order via the goto chain at the bottom.
 *
 * Return: 0 on success, non-zero on failure
 */
int hif_ipci_bus_configure(struct hif_softc *hif_sc)
{
	int status = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
	uint8_t wake_ce_id;

	hif_ce_prepare_config(hif_sc);

	/* initialize sleep state adjust variables */
	hif_state->sleep_timer_init = true;
	hif_state->keep_awake_count = 0;
	hif_state->fake_sleep = false;
	hif_state->sleep_ticks = 0;

	status = hif_wlan_enable(hif_sc);
	if (status) {
		hif_err("hif_wlan_enable error = %d", status);
		goto timer_free;
	}

	A_TARGET_ACCESS_LIKELY(hif_sc);

	status = hif_config_ce(hif_sc);
	if (status)
		goto disable_wlan;

	/* Determine which CE doubles as the wake interrupt source. */
	status = hif_get_wake_ce_id(hif_sc, &wake_ce_id);
	if (status)
		goto unconfig_ce;

	status = hif_configure_irq(hif_sc);
	if (status < 0)
		goto unconfig_ce;

	hif_sc->wake_irq = hif_ce_msi_map_ce_to_irq(hif_sc, wake_ce_id);
	hif_sc->wake_irq_type = HIF_PM_CE_WAKE;

	hif_info("expecting wake from ce %d, irq %d",
		 wake_ce_id, hif_sc->wake_irq);

	A_TARGET_ACCESS_UNLIKELY(hif_sc);

	return status;

unconfig_ce:
	hif_unconfig_ce(hif_sc);
disable_wlan:
	A_TARGET_ACCESS_UNLIKELY(hif_sc);
	hif_wlan_disable(hif_sc);

timer_free:
	/*
	 * NOTE(review): sleep_timer is presumably created in the CE open
	 * path before this function runs — confirm before reusing this
	 * teardown elsewhere.
	 */
	qdf_timer_stop(&hif_state->sleep_timer);
	qdf_timer_free(&hif_state->sleep_timer);
	hif_state->sleep_timer_init = false;

	hif_err("Failed, status = %d", status);
	return status;
}
173 
/**
 * hif_ipci_close() - bus-specific close for the ipci bus
 * @hif_sc: hif context
 *
 * Releases runtime-PM state and the CE layer; counterpart of
 * hif_ipci_open().
 *
 * Return: none
 */
void hif_ipci_close(struct hif_softc *hif_sc)
{
	hif_pm_runtime_close(hif_sc);
	hif_ce_close(hif_sc);
}
179 
/**
 * hif_ce_srng_msi_free_irq(): free CE msi IRQ
 * @scn: struct hif_softc
 *
 * Walks every CE whose tasklet was initialized (i.e. for which an IRQ was
 * requested), removes its affinity hint and frees the IRQ.
 *
 * Return: ErrorNo
 */
static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
{
	int ret;
	int ce_id, irq;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return ret;

	/* needs to match the ce_id -> irq data mapping
	 * used in the srng parameter configuration
	 */
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		unsigned int msi_data;

		/* Only CEs with an inited tasklet had an IRQ requested. */
		if (!ce_sc->tasklets[ce_id].inited)
			continue;

		msi_data = (ce_id % msi_data_count) + msi_irq_start;
		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);

		hif_ce_irq_remove_affinity_hint(irq);

		hif_debug("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
			  ce_id, msi_data, irq);

		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
	}

	return ret;
}
223 
224 /**
225  * hif_ipci_deconfigure_grp_irq(): deconfigure HW block IRQ
226  * @scn: struct hif_softc
227  *
228  * Return: none
229  */
230 void hif_ipci_deconfigure_grp_irq(struct hif_softc *scn)
231 {
232 	int i, j, irq;
233 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
234 	struct hif_exec_context *hif_ext_group;
235 
236 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
237 		hif_ext_group = hif_state->hif_ext_group[i];
238 		if (hif_ext_group->irq_requested) {
239 			hif_ext_group->irq_requested = false;
240 			for (j = 0; j < hif_ext_group->numirq; j++) {
241 				irq = hif_ext_group->os_irq[j];
242 				pfrm_free_irq(scn->qdf_dev->dev,
243 					      irq, hif_ext_group);
244 			}
245 			hif_ext_group->numirq = 0;
246 		}
247 	}
248 }
249 
250 void hif_ipci_nointrs(struct hif_softc *scn)
251 {
252 	int ret;
253 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
254 
255 	scn->free_irq_done = true;
256 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
257 
258 	if (scn->request_irq_done == false)
259 		return;
260 
261 	hif_ipci_deconfigure_grp_irq(scn);
262 
263 	ret = hif_ce_srng_msi_free_irq(scn);
264 
265 	scn->request_irq_done = false;
266 }
267 
/**
 * hif_ipci_disable_bus() - bus-specific teardown for the ipci bus
 * @hif_ctx: hif context
 *
 * Dumps pipe debug counters, removes the athdiag procfs entry and
 * forgets the mapped register space pointer.
 *
 * Return: none
 */
void hif_ipci_disable_bus(struct hif_softc *scn)
{
	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(scn);
	void __iomem *mem;

	/* Attach did not succeed, all resources have been
	 * freed in error handler
	 */
	if (!sc)
		return;

	mem = (void __iomem *)sc->mem;
	if (mem) {
		hif_dump_pipe_debug_count(scn);
		if (scn->athdiag_procfs_inited) {
			athdiag_procfs_remove();
			scn->athdiag_procfs_inited = false;
		}
		/* Mapping is owned by pld; only drop our reference here. */
		scn->mem = NULL;
	}
	hif_info("X");
}
290 
#ifdef CONFIG_PLD_PCIE_CNSS
/**
 * hif_ipci_prevent_linkdown() - allow or disallow PCIe power collapse
 * @scn: hif context
 * @flag: true to prevent link down (disable power collapse)
 *
 * Forwards the vote to the runtime-PM layer and the platform driver.
 *
 * Return: none
 */
void hif_ipci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	int errno;

	hif_info("wlan: %s pcie power collapse", flag ? "disable" : "enable");
	hif_runtime_prevent_linkdown(scn, flag);

	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
	if (errno)
		hif_err("Failed pld_wlan_pm_control; errno %d", errno);
}
#else
/* Stub: without CNSS platform support there is no link-down control. */
void hif_ipci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
}
#endif
308 
/**
 * hif_ipci_bus_suspend() - prepare the ipci bus for system suspend
 * @scn: hif context
 *
 * Disables all IRQs except the wake IRQ, arms the wake IRQ, then waits
 * for in-flight tasks and the fw-diag CE to drain.  Each failure path
 * unwinds the earlier steps via the goto chain.
 *
 * Return: 0 on success, errno on failure
 */
int hif_ipci_bus_suspend(struct hif_softc *scn)
{
	int ret;

	ret = hif_apps_disable_irqs_except_wake_irq(GET_HIF_OPAQUE_HDL(scn));
	if (ret) {
		hif_err("Failed to disable IRQs");
		goto disable_irq_fail;
	}

	ret = hif_apps_enable_irq_wake(GET_HIF_OPAQUE_HDL(scn));
	if (ret) {
		hif_err("Failed to enable Wake-IRQ");
		goto enable_wake_irq_fail;
	}

	if (QDF_IS_STATUS_ERROR(hif_try_complete_tasks(scn))) {
		hif_err("hif_try_complete_tasks timed-out, so abort suspend");
		ret = -EBUSY;
		goto drain_tasks_fail;
	}

	/*
	 * In an unlikely case, if draining becomes infinite loop,
	 * it returns an error, shall abort the bus suspend.
	 */
	ret = hif_drain_fw_diag_ce(scn);
	if (ret) {
		hif_err("draining fw_diag_ce goes infinite, so abort suspend");
		goto drain_tasks_fail;
	}

	scn->bus_suspended = true;

	return 0;

drain_tasks_fail:
	hif_apps_disable_irq_wake(GET_HIF_OPAQUE_HDL(scn));

enable_wake_irq_fail:
	hif_apps_enable_irqs_except_wake_irq(GET_HIF_OPAQUE_HDL(scn));

disable_irq_fail:
	return ret;
}
354 
355 int hif_ipci_bus_resume(struct hif_softc *scn)
356 {
357 	int ret = 0;
358 
359 	ret = hif_apps_disable_irq_wake(GET_HIF_OPAQUE_HDL(scn));
360 	if (ret) {
361 		hif_err("Failed to disable Wake-IRQ");
362 		goto fail;
363 	}
364 
365 	ret = hif_apps_enable_irqs_except_wake_irq(GET_HIF_OPAQUE_HDL(scn));
366 	if (ret)
367 		hif_err("Failed to enable IRQs");
368 
369 	scn->bus_suspended = false;
370 
371 fail:
372 	return ret;
373 }
374 
/**
 * hif_ipci_bus_suspend_noirq() - noirq-stage suspend check for ipci
 * @scn: hif context
 *
 * Return: 0 when suspend may proceed, -EBUSY when pending tasklets
 *         require the suspend to be aborted
 */
int hif_ipci_bus_suspend_noirq(struct hif_softc *scn)
{
	/*
	 * If it is system suspend case and wake-IRQ received
	 * just before Kernel issuing suspend_noirq, that must
	 * have scheduled CE2 tasklet, so suspend activity can
	 * be aborted.
	 * Similar scenario for runtime suspend case, would be
	 * handled by hif_pm_runtime_check_and_request_resume
	 * in hif_ce_interrupt_handler.
	 *
	 */
	if (!hif_pm_runtime_get_monitor_wake_intr(GET_HIF_OPAQUE_HDL(scn)) &&
	    hif_get_num_active_tasklets(scn)) {
		hif_err("Tasklets are pending, abort sys suspend_noirq");
		return -EBUSY;
	}

	return 0;
}
395 
/**
 * hif_ipci_bus_resume_noirq() - noirq-stage resume for ipci
 * @scn: hif context
 *
 * No noirq-stage resume work is needed on this bus.
 *
 * Return: 0 always
 */
int hif_ipci_bus_resume_noirq(struct hif_softc *scn)
{
	return 0;
}
400 
/**
 * hif_ipci_disable_isr() - kill all HIF interrupt bottom halves
 * @scn: hif context
 *
 * Stops exec-context processing, unhooks IRQs, cancels any pending
 * tasklets and zeroes the active-tasklet accounting.
 *
 * Return: none
 */
void hif_ipci_disable_isr(struct hif_softc *scn)
{
	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(scn);

	hif_exec_kill(&scn->osc);
	hif_nointrs(scn);
	/* Cancel the pending tasklet */
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}
413 
/**
 * hif_ipci_dump_registers() - dump CE registers for debugging
 * @hif_ctx: hif context
 *
 * A dump failure is logged but deliberately not propagated.
 *
 * Return: 0 always
 */
int hif_ipci_dump_registers(struct hif_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (hif_dump_ce_registers(scn))
		hif_err("Dump CE Registers Failed");

	return 0;
}
426 
/**
 * hif_ce_interrupt_handler() - interrupt handler for copy engine
 * @irq: irq number
 * @context: tasklet context (struct ce_tasklet_entry)
 *
 * Requests a runtime resume if the device was runtime-suspended when
 * the interrupt fired, then dispatches to the CE's tasklet machinery.
 *
 * Return: irqreturn_t
 */
static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;

	hif_pm_runtime_check_and_request_resume(
			GET_HIF_OPAQUE_HDL(tasklet_entry->hif_ce_state));
	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}
442 
443 extern const char *ce_name[];
444 
445 /* hif_ce_srng_msi_irq_disable() - disable the irq for msi
446  * @hif_sc: hif context
447  * @ce_id: which ce to disable copy complete interrupts for
448  *
449  * @Return: none
450  */
451 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
452 {
453 	pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
454 				hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
455 
456 }
457 
458 /* hif_ce_srng_msi_irq_enable() - enable the irq for msi
459  * @hif_sc: hif context
460  * @ce_id: which ce to enable copy complete interrupts for
461  *
462  * @Return: none
463  */
464 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
465 {
466 	pfrm_enable_irq(hif_sc->qdf_dev->dev,
467 			hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
468 
469 }
470 
471 /* hif_ce_msi_configure_irq() - configure the irq
472  * @scn: hif context
473  *
474  * @Return: none
475  */
476 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
477 {
478 	int ret;
479 	int ce_id, irq;
480 	uint32_t msi_data_start;
481 	uint32_t msi_data_count;
482 	uint32_t msi_irq_start;
483 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
484 	struct hif_ipci_softc *ipci_sc = HIF_GET_IPCI_SOFTC(scn);
485 	uint8_t wake_ce_id;
486 
487 	ret = hif_get_wake_ce_id(scn, &wake_ce_id);
488 	if (ret)
489 		return ret;
490 
491 	/* do ce irq assignments */
492 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
493 					  &msi_data_count, &msi_data_start,
494 					  &msi_irq_start);
495 	if (ret)
496 		return ret;
497 
498 	scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
499 	scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
500 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
501 
502 	/* needs to match the ce_id -> irq data mapping
503 	 * used in the srng parameter configuration
504 	 */
505 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
506 		unsigned long irqflags = IRQF_SHARED;
507 		unsigned int msi_data = (ce_id % msi_data_count) +
508 			msi_irq_start;
509 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
510 		hif_debug("(ce_id %d, msi_data %d, irq %d tasklet %pK)",
511 			 ce_id, msi_data, irq,
512 			 &ce_sc->tasklets[ce_id]);
513 
514 		/* implies the ce is also initialized */
515 		if (!ce_sc->tasklets[ce_id].inited)
516 			continue;
517 
518 		ipci_sc->ce_msi_irq_num[ce_id] = irq;
519 		ret = pfrm_request_irq(scn->qdf_dev->dev,
520 				       irq, hif_ce_interrupt_handler,
521 				       irqflags,
522 				       ce_name[ce_id],
523 				       &ce_sc->tasklets[ce_id]);
524 		if (ret)
525 			goto free_irq;
526 	}
527 
528 	return ret;
529 
530 free_irq:
531 	/* the request_irq for the last ce_id failed so skip it. */
532 	while (ce_id > 0 && ce_id < scn->ce_count) {
533 		unsigned int msi_data;
534 
535 		ce_id--;
536 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
537 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
538 		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
539 	}
540 
541 	return ret;
542 }
543 
544 /**
545  * hif_exec_grp_irq_disable() - disable the irq for group
546  * @hif_ext_group: hif exec context
547  *
548  * Return: none
549  */
550 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
551 {
552 	int i;
553 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
554 
555 	for (i = 0; i < hif_ext_group->numirq; i++)
556 		pfrm_disable_irq_nosync(scn->qdf_dev->dev,
557 					hif_ext_group->os_irq[i]);
558 }
559 
560 /**
561  * hif_exec_grp_irq_enable() - enable the irq for group
562  * @hif_ext_group: hif exec context
563  *
564  * Return: none
565  */
566 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
567 {
568 	int i;
569 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
570 
571 	for (i = 0; i < hif_ext_group->numirq; i++)
572 		pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]);
573 }
574 
/**
 * hif_ipci_get_irq_name() - name reported for any ipci IRQ
 * @irq_no: irq number (unused; all IRQs share one placeholder name)
 *
 * Return: the constant string "pci-dummy"
 */
const char *hif_ipci_get_irq_name(int irq_no)
{
	return "pci-dummy";
}
579 
580 #ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_ipci_ce_irq_set_affinity_hint() - pin CE IRQs to perf-cluster CPUs
 * @scn: hif context
 *
 * Builds a mask of online CPUs belonging to the performance cluster and
 * applies it as the affinity of every CE IRQ that has interrupts
 * enabled.  IRQ balancing is temporarily disabled around the affinity
 * write so the setting sticks.
 *
 * Return: none
 */
static void hif_ipci_ce_irq_set_affinity_hint(struct hif_softc *scn)
{
	int ret;
	unsigned int cpus;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
	struct hif_ipci_softc *ipci_sc = HIF_GET_IPCI_SOFTC(scn);
	struct CE_attr *host_ce_conf;
	int ce_id;
	qdf_cpu_mask ce_cpu_mask;

	host_ce_conf = ce_sc->host_ce_config;
	qdf_cpumask_clear(&ce_cpu_mask);

	/* Collect every online CPU in the performance cluster. */
	qdf_for_each_online_cpu(cpus) {
		if (qdf_topology_physical_package_id(cpus) ==
			CPU_CLUSTER_TYPE_PERF) {
			qdf_cpumask_set_cpu(cpus,
					    &ce_cpu_mask);
		}
	}
	if (qdf_cpumask_empty(&ce_cpu_mask)) {
		hif_err_rl("Empty cpu_mask, unable to set CE IRQ affinity");
		return;
	}
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		/* CEs with interrupts disabled have no IRQ to re-target. */
		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
			continue;
		qdf_cpumask_clear(&ipci_sc->ce_irq_cpu_mask[ce_id]);
		qdf_cpumask_copy(&ipci_sc->ce_irq_cpu_mask[ce_id],
				 &ce_cpu_mask);
		/* Suspend irqbalance while writing the affinity. */
		qdf_dev_modify_irq_status(ipci_sc->ce_msi_irq_num[ce_id],
					  IRQ_NO_BALANCING, 0);
		ret = qdf_dev_set_irq_affinity(
		       ipci_sc->ce_msi_irq_num[ce_id],
		       (struct qdf_cpu_mask *)&ipci_sc->ce_irq_cpu_mask[ce_id]);
		qdf_dev_modify_irq_status(ipci_sc->ce_msi_irq_num[ce_id],
					  0, IRQ_NO_BALANCING);
		if (ret)
			hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
				   qdf_cpumask_pr_args(
					&ipci_sc->ce_irq_cpu_mask[ce_id]),
					ipci_sc->ce_msi_irq_num[ce_id]);
		else
			hif_debug_rl("Set affinity %*pbl for CE IRQ: %d",
				     qdf_cpumask_pr_args(
				     &ipci_sc->ce_irq_cpu_mask[ce_id]),
				     ipci_sc->ce_msi_irq_num[ce_id]);
	}
}
630 
/**
 * hif_ipci_config_irq_affinity() - boost cores and pin CE IRQ affinity
 * @scn: hif context
 *
 * Return: none
 */
void hif_ipci_config_irq_affinity(struct hif_softc *scn)
{
	hif_core_ctl_set_boost(true);
	/* Set IRQ affinity for CE interrupts*/
	hif_ipci_ce_irq_set_affinity_hint(scn);
}
637 #endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */
638 
639 #ifdef HIF_CPU_CLEAR_AFFINITY
640 void hif_ipci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
641 					    int intr_ctxt_id, int cpu)
642 {
643 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
644 	struct hif_exec_context *hif_ext_group;
645 	int i, ret;
646 
647 	if (intr_ctxt_id < hif_state->hif_num_extgroup) {
648 		hif_ext_group = hif_state->hif_ext_group[intr_ctxt_id];
649 		for (i = 0; i < hif_ext_group->numirq; i++) {
650 			qdf_cpumask_setall(&hif_ext_group->new_cpu_mask[i]);
651 			qdf_cpumask_clear_cpu(cpu,
652 					      &hif_ext_group->new_cpu_mask[i]);
653 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
654 						  IRQ_NO_BALANCING, 0);
655 			ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i],
656 						       (struct qdf_cpu_mask *)
657 						       &hif_ext_group->
658 						       new_cpu_mask[i]);
659 			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
660 						  0, IRQ_NO_BALANCING);
661 			if (ret)
662 				hif_err("Set affinity %*pbl fails for IRQ %d ",
663 					qdf_cpumask_pr_args(&hif_ext_group->
664 							    new_cpu_mask[i]),
665 					hif_ext_group->os_irq[i]);
666 			else
667 				hif_debug("Set affinity %*pbl for IRQ: %d",
668 					  qdf_cpumask_pr_args(&hif_ext_group->
669 							      new_cpu_mask[i]),
670 					  hif_ext_group->os_irq[0]);
671 		}
672 	}
673 }
674 #endif
675 
676 int hif_ipci_configure_grp_irq(struct hif_softc *scn,
677 			       struct hif_exec_context *hif_ext_group)
678 {
679 	int ret = 0;
680 	int irq = 0;
681 	int j;
682 
683 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
684 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
685 	hif_ext_group->irq_name = &hif_ipci_get_irq_name;
686 	hif_ext_group->work_complete = &hif_dummy_grp_done;
687 
688 	for (j = 0; j < hif_ext_group->numirq; j++) {
689 		irq = hif_ext_group->irq[j];
690 
691 		hif_info("request_irq = %d for grp %d",
692 			 irq, hif_ext_group->grp_id);
693 		ret = pfrm_request_irq(scn->qdf_dev->dev, irq,
694 				       hif_ext_group_interrupt_handler,
695 				       IRQF_SHARED | IRQF_NO_SUSPEND,
696 				       "wlan_EXT_GRP",
697 				       hif_ext_group);
698 		if (ret) {
699 			hif_err("request_irq failed ret = %d", ret);
700 			return -EFAULT;
701 		}
702 		hif_ext_group->os_irq[j] = irq;
703 	}
704 	hif_ext_group->irq_requested = true;
705 	return 0;
706 }
707 
708 int hif_configure_irq(struct hif_softc *scn)
709 {
710 	int ret = 0;
711 
712 	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
713 		scn->request_irq_done = false;
714 		return 0;
715 	}
716 
717 	ret = hif_ce_msi_configure_irq(scn);
718 	if (ret == 0)
719 		goto end;
720 
721 	if (ret < 0) {
722 		hif_err("hif_ipci_configure_irq error = %d", ret);
723 		return ret;
724 	}
725 end:
726 	scn->request_irq_done = true;
727 	return 0;
728 }
729 
/**
 * hif_ipci_get_soc_info_pld() - get soc info for ipcie bus from pld target
 * @sc: ipci context
 * @dev: device structure
 *
 * Copies the platform driver's virtual and physical register-space
 * addresses into both the ipci softc and the common hif softc.
 *
 * Return: none
 */
static void hif_ipci_get_soc_info_pld(struct hif_ipci_softc *sc,
				      struct device *dev)
{
	struct pld_soc_info info;

	pld_get_soc_info(dev, &info);
	sc->mem = info.v_addr;
	sc->ce_sc.ol_sc.mem    = info.v_addr;
	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
}
747 
/**
 * hif_ipci_get_soc_info_nopld() - get soc info for ipcie bus for non pld
 *  target
 * @sc: ipci context
 * @dev: device structure
 *
 * Intentionally a no-op: non-pld targets have no platform soc info to
 * fetch, so sc->mem/mem_pa are left untouched.
 *
 * Return: none
 */
static void hif_ipci_get_soc_info_nopld(struct hif_ipci_softc *sc,
					struct device *dev)
{}
758 
759 /**
760  * hif_is_pld_based_target() - verify if the target is pld based
761  * @sc: ipci context
762  * @device_id: device id
763  *
764  * Return: none
765  */
766 static bool hif_is_pld_based_target(struct hif_ipci_softc *sc,
767 				    int device_id)
768 {
769 	if (!pld_have_platform_driver_support(sc->dev))
770 		return false;
771 
772 	switch (device_id) {
773 #ifdef QCA_WIFI_QCA6750
774 	case QCA6750_DEVICE_ID:
775 #endif
776 		return true;
777 	}
778 	return false;
779 }
780 
781 /**
782  * hif_ipci_init_deinit_ops_attach() - attach ops for ipci
783  * @sc: ipci context
784  * @device_id: device id
785  *
786  * Return: none
787  */
788 static void hif_ipci_init_deinit_ops_attach(struct hif_ipci_softc *sc,
789 					    int device_id)
790 {
791 	if (hif_is_pld_based_target(sc, device_id))
792 		sc->hif_ipci_get_soc_info = hif_ipci_get_soc_info_pld;
793 	else
794 		sc->hif_ipci_get_soc_info = hif_ipci_get_soc_info_nopld;
795 }
796 
797 QDF_STATUS hif_ipci_enable_bus(struct hif_softc *ol_sc,
798 			       struct device *dev, void *bdev,
799 			       const struct hif_bus_id *bid,
800 			       enum hif_enable_type type)
801 {
802 	int ret = 0;
803 	uint32_t hif_type, target_type;
804 	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(ol_sc);
805 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
806 	uint16_t revision_id = 0;
807 	struct hif_target_info *tgt_info;
808 	int device_id = QCA6750_DEVICE_ID;
809 
810 	if (!ol_sc) {
811 		hif_err("hif_ctx is NULL");
812 		return QDF_STATUS_E_NOMEM;
813 	}
814 
815 	ret = qdf_set_dma_coherent_mask(dev,
816 					DMA_COHERENT_MASK_DEFAULT);
817 	if (ret) {
818 		hif_err("Failed to set dma mask error = %d", ret);
819 		return qdf_status_from_os_return(ret);
820 	}
821 
822 	sc->dev = dev;
823 	tgt_info = hif_get_target_info_handle(hif_hdl);
824 	hif_ipci_init_deinit_ops_attach(sc, device_id);
825 	sc->hif_ipci_get_soc_info(sc, dev);
826 	hif_debug("hif_enable_pci done");
827 
828 	ret = hif_get_device_type(device_id, revision_id,
829 				  &hif_type, &target_type);
830 	if (ret < 0) {
831 		hif_err("Invalid device id/revision_id");
832 		return QDF_STATUS_E_ABORTED;
833 	}
834 	hif_debug("hif_type = 0x%x, target_type = 0x%x",
835 		 hif_type, target_type);
836 
837 	hif_register_tbl_attach(ol_sc, hif_type);
838 	hif_target_register_tbl_attach(ol_sc, target_type);
839 	sc->use_register_windowing = false;
840 	tgt_info->target_type = target_type;
841 
842 	if (!ol_sc->mem_pa) {
843 		hif_err("BAR0 uninitialized");
844 		return QDF_STATUS_E_ABORTED;
845 	}
846 
847 	return QDF_STATUS_SUCCESS;
848 }
849 
/**
 * hif_ipci_needs_bmi() - check whether the target uses the BMI protocol
 * @scn: hif context
 *
 * SRNG-based targets do not use BMI for bootstrap.
 *
 * Return: true when BMI is required
 */
bool hif_ipci_needs_bmi(struct hif_softc *scn)
{
	return !ce_srng_based(scn);
}
854 
855 #ifdef FORCE_WAKE
/**
 * hif_force_wake_request() - vote to keep the device awake
 * @hif_handle: hif context
 *
 * Sends a force-wake request to the platform driver and polls until the
 * device reports awake or FORCE_WAKE_DELAY_TIMEOUT_MS elapses.  Must be
 * balanced with hif_force_wake_release().
 *
 * Return: 0 on success, -EINVAL on failure
 */
int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
{
	uint32_t timeout = 0;
	struct hif_softc *scn = (struct hif_softc *)hif_handle;
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);

	if (pld_force_wake_request(scn->qdf_dev->dev)) {
		hif_err_rl("force wake request send failed");
		return -EINVAL;
	}

	HIF_STATS_INC(ipci_scn, mhi_force_wake_request_vote, 1);
	while (!pld_is_device_awake(scn->qdf_dev->dev) &&
	       timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS) {
		/* Busy-wait in atomic context, sleep otherwise. */
		if (qdf_in_interrupt())
			qdf_mdelay(FORCE_WAKE_DELAY_MS);
		else
			qdf_sleep(FORCE_WAKE_DELAY_MS);

		timeout += FORCE_WAKE_DELAY_MS;
	}

	if (pld_is_device_awake(scn->qdf_dev->dev) <= 0) {
		hif_err("Unable to wake up mhi");
		HIF_STATS_INC(ipci_scn, mhi_force_wake_failure, 1);
		/* Drop the vote taken above so counters stay balanced. */
		hif_force_wake_release(hif_handle);
		return -EINVAL;
	}
	HIF_STATS_INC(ipci_scn, mhi_force_wake_success, 1);

	HIF_STATS_INC(ipci_scn, soc_force_wake_success, 1);

	return 0;
}
890 
891 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
892 {
893 	int ret;
894 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
895 	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
896 
897 	ret = pld_force_wake_release(scn->qdf_dev->dev);
898 	if (ret) {
899 		hif_err("force wake release failure");
900 		HIF_STATS_INC(ipci_scn, mhi_force_wake_release_failure, 1);
901 		return ret;
902 	}
903 
904 	HIF_STATS_INC(ipci_scn, mhi_force_wake_release_success, 1);
905 
906 	HIF_STATS_INC(ipci_scn, soc_force_wake_release_success, 1);
907 	return 0;
908 }
909 
910 void hif_print_ipci_stats(struct hif_ipci_softc *ipci_handle)
911 {
912 	hif_debug("mhi_force_wake_request_vote: %d",
913 		  ipci_handle->stats.mhi_force_wake_request_vote);
914 	hif_debug("mhi_force_wake_failure: %d",
915 		  ipci_handle->stats.mhi_force_wake_failure);
916 	hif_debug("mhi_force_wake_success: %d",
917 		  ipci_handle->stats.mhi_force_wake_success);
918 	hif_debug("soc_force_wake_register_write_success: %d",
919 		  ipci_handle->stats.soc_force_wake_register_write_success);
920 	hif_debug("soc_force_wake_failure: %d",
921 		  ipci_handle->stats.soc_force_wake_failure);
922 	hif_debug("soc_force_wake_success: %d",
923 		  ipci_handle->stats.soc_force_wake_success);
924 	hif_debug("mhi_force_wake_release_failure: %d",
925 		  ipci_handle->stats.mhi_force_wake_release_failure);
926 	hif_debug("mhi_force_wake_release_success: %d",
927 		  ipci_handle->stats.mhi_force_wake_release_success);
928 	hif_debug("oc_force_wake_release_success: %d",
929 		  ipci_handle->stats.soc_force_wake_release_success);
930 }
931 #endif /* FORCE_WAKE */
932 
933 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
/**
 * hif_prevent_link_low_power_states() - vote against PCIe L1 entry
 * @hif: hif context
 *
 * Waits for any stale EP wake vote to clear, takes the prevent-L1 vote,
 * then polls until the endpoint reports awake.  All failure paths
 * record a counter and return 0 (callers treat this as best-effort).
 *
 * Return: 0 always
 */
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif);
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
	uint32_t start_time = 0, curr_time = 0;
	uint32_t count = 0;

	/* Platform without EP-awake reporting: nothing to do. */
	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
		return 0;

	/* Wait for a previous EP wake vote to reset before voting again. */
	start_time = curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	while (pld_is_pci_ep_awake(scn->qdf_dev->dev) &&
	       curr_time <= start_time + EP_WAKE_RESET_DELAY_TIMEOUT_MS) {
		/* Short busy-polls first, then back off to sleeping. */
		if (count < EP_VOTE_POLL_TIME_CNT) {
			qdf_udelay(EP_VOTE_POLL_TIME_US);
			count++;
		} else {
			qdf_sleep_us(EP_WAKE_RESET_DELAY_US);
		}
		curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	}


	if (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
		hif_err_rl(" EP state reset is not done to prevent l1");
		ipci_scn->ep_awake_reset_fail++;
		return 0;
	}

	if (pld_prevent_l1(scn->qdf_dev->dev)) {
		hif_err_rl("pld prevent l1 failed");
		ipci_scn->prevent_l1_fail++;
		return 0;
	}

	/* Vote taken; now wait for the endpoint to actually wake. */
	count = 0;
	ipci_scn->prevent_l1 = true;
	start_time = curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	while (!pld_is_pci_ep_awake(scn->qdf_dev->dev) &&
	       curr_time <= start_time + EP_WAKE_DELAY_TIMEOUT_MS) {
		if (count < EP_VOTE_POLL_TIME_CNT) {
			qdf_udelay(EP_WAKE_RESET_DELAY_US);
			count++;
		} else {
			qdf_sleep_us(EP_WAKE_DELAY_US);
		}

		curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	}

	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) <= 0) {
		hif_err_rl("Unable to wakeup pci ep");
		ipci_scn->ep_awake_set_fail++;
		return  0;
	}

	return 0;
}
992 
993 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
994 {
995 	struct hif_softc *scn = HIF_GET_SOFTC(hif);
996 	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
997 
998 	if (qdf_likely(ipci_scn->prevent_l1)) {
999 		pld_allow_l1(scn->qdf_dev->dev);
1000 		ipci_scn->prevent_l1 = false;
1001 	}
1002 }
1003 #endif
1004 
/**
 * hif_ipci_enable_grp_irqs() - re-enable all exec group IRQs
 * @scn: hif context
 *
 * Asserts (fatally) if called without a matching prior disable.
 *
 * Return: 0 on success, errno on failure
 */
int hif_ipci_enable_grp_irqs(struct hif_softc *scn)
{
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
	int status;

	/* Enabling IRQs that were never disabled is a caller bug. */
	if (!ipci_scn->grp_irqs_disabled) {
		hif_err("Unbalanced group IRQs Enable called");
		qdf_assert_always(0);
	}

	status = hif_apps_grp_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
	if (!status)
		ipci_scn->grp_irqs_disabled = false;

	return status;
}
1021 
/**
 * hif_ipci_disable_grp_irqs() - disable all exec group IRQs
 * @scn: hif context
 *
 * Asserts (fatally) if the IRQs are already disabled.
 *
 * Return: 0 on success, errno on failure
 */
int hif_ipci_disable_grp_irqs(struct hif_softc *scn)
{
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
	int status;

	/* Disabling twice without an enable in between is a caller bug. */
	if (ipci_scn->grp_irqs_disabled) {
		hif_err("Unbalanced group IRQs disable called");
		qdf_assert_always(0);
	}

	status = hif_apps_grp_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
	if (!status)
		ipci_scn->grp_irqs_disabled = true;

	return status;
}
1038