/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "hif_io32.h"
#include "if_ipci.h"
#include "hif.h"
#include "target_type.h"
#include "hif_main.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_bmi.h"
#include "regtable.h"
#include "hif_hw_version.h"
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "qdf_status.h"
#include "qdf_atomic.h"
#include "pld_common.h"
#include "mp_dev.h"
#include "hif_debug.h"

#include "ce_tasklet.h"
#include "targaddrs.h"
#include "hif_exec.h"

#include "ipci_api.h"

#ifdef FEATURE_RUNTIME_PM
inline struct hif_runtime_pm_ctx *hif_ipci_get_rpm_ctx(struct hif_softc *scn)
{
	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(scn);

	return &sc->rpm_ctx;
}

inline struct device *hif_ipci_get_dev(struct hif_softc *scn)
{
	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(scn);

	return sc->dev;
}
#endif

void hif_ipci_enable_power_management(struct hif_softc *hif_sc,
				      bool is_packet_log_enabled)
{
	hif_pm_runtime_start(hif_sc);
}

void hif_ipci_disable_power_management(struct hif_softc *hif_ctx)
{
	hif_pm_runtime_stop(hif_ctx);
}

void hif_ipci_display_stats(struct hif_softc *hif_ctx)
{
	hif_display_ce_stats(hif_ctx);
}

void hif_ipci_clear_stats(struct hif_softc *hif_ctx)
{
	struct hif_ipci_softc *ipci_ctx = HIF_GET_IPCI_SOFTC(hif_ctx);

	if (!ipci_ctx) {
		hif_err("ipci_ctx null");
		return;
	}
	hif_clear_ce_stats(&ipci_ctx->ce_sc);
}

QDF_STATUS hif_ipci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
{
	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(hif_ctx);

	hif_ctx->bus_type = bus_type;
	hif_pm_runtime_open(hif_ctx);

	qdf_spinlock_create(&sc->irq_lock);

	return hif_ce_open(hif_ctx);
}

/**
 * hif_ce_msi_map_ce_to_irq() - map CE to IRQ
 * @scn: hif context
 * @ce_id: CE id
 *
 * Return: IRQ number, valid once hif_ce_msi_configure_irq() has filled
 *         in the ce_msi_irq_num table
 */
static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
{
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);

	return ipci_scn->ce_msi_irq_num[ce_id];
}

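/*
 * hif_ipci_bus_configure() brings the bus up in this order: prepare the
 * CE configuration, enable the WLAN target, configure the copy engines,
 * look up the wake CE and finally hook up the MSI interrupts.  The error
 * labels in the function unwind in the reverse order of that setup.
 */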
int hif_ipci_bus_configure(struct hif_softc *hif_sc)
{
	int status = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
	uint8_t wake_ce_id;

	hif_ce_prepare_config(hif_sc);

	/* initialize sleep state adjust variables */
	hif_state->sleep_timer_init = true;
	hif_state->keep_awake_count = 0;
	hif_state->fake_sleep = false;
	hif_state->sleep_ticks = 0;

	status = hif_wlan_enable(hif_sc);
	if (status) {
		hif_err("hif_wlan_enable error = %d", status);
		goto timer_free;
	}

	A_TARGET_ACCESS_LIKELY(hif_sc);

	status = hif_config_ce(hif_sc);
	if (status)
		goto disable_wlan;

	status = hif_get_wake_ce_id(hif_sc, &wake_ce_id);
	if (status)
		goto unconfig_ce;

	status = hif_configure_irq(hif_sc);
	if (status < 0)
		goto unconfig_ce;

	hif_sc->wake_irq = hif_ce_msi_map_ce_to_irq(hif_sc, wake_ce_id);
	hif_sc->wake_irq_type = HIF_PM_CE_WAKE;

	hif_info("expecting wake from ce %d, irq %d",
		 wake_ce_id, hif_sc->wake_irq);

	A_TARGET_ACCESS_UNLIKELY(hif_sc);

	return status;

unconfig_ce:
	hif_unconfig_ce(hif_sc);
disable_wlan:
	A_TARGET_ACCESS_UNLIKELY(hif_sc);
	hif_wlan_disable(hif_sc);

timer_free:
	qdf_timer_stop(&hif_state->sleep_timer);
	qdf_timer_free(&hif_state->sleep_timer);
	hif_state->sleep_timer_init = false;

	hif_err("Failed, status = %d", status);
	return status;
}

void hif_ipci_close(struct hif_softc *hif_sc)
{
	hif_pm_runtime_close(hif_sc);
	hif_ce_close(hif_sc);
}

/**
 * hif_ce_srng_msi_free_irq() - free the CE MSI IRQs
 * @scn: struct hif_softc
 *
 * Return: 0 on success, errno otherwise
 */
static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
{
	int ret;
	int ce_id, irq;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return ret;

	/* needs to match the ce_id -> irq data mapping
	 * used in the srng parameter configuration
	 */
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		unsigned int msi_data;

		if (!ce_sc->tasklets[ce_id].inited)
			continue;

		msi_data = (ce_id % msi_data_count) + msi_irq_start;
		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);

		hif_ce_irq_remove_affinity_hint(irq);

		hif_debug("(ce_id %d, msi_data %d, irq %d)",
			  ce_id, msi_data, irq);

		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
	}

	return ret;
}

/**
 * hif_ipci_deconfigure_grp_irq() - deconfigure HW block IRQs
 * @scn: struct hif_softc
 *
 * Return: none
 */
void hif_ipci_deconfigure_grp_irq(struct hif_softc *scn)
{
	int i, j, irq;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		if (hif_ext_group->irq_requested) {
			hif_ext_group->irq_requested = false;
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				pfrm_free_irq(scn->qdf_dev->dev,
					      irq, hif_ext_group);
			}
			hif_ext_group->numirq = 0;
		}
	}
}

void hif_ipci_nointrs(struct hif_softc *scn)
{
	int ret;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	scn->free_irq_done = true;
	ce_unregister_irq(hif_state, CE_ALL_BITMAP);

	if (!scn->request_irq_done)
		return;

	hif_ipci_deconfigure_grp_irq(scn);

	ret = hif_ce_srng_msi_free_irq(scn);

	scn->request_irq_done = false;
}

void hif_ipci_disable_bus(struct hif_softc *scn)
{
	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(scn);
	void __iomem *mem;

	/* Attach did not succeed, so all resources have been
	 * freed in the error handler.
	 */
	if (!sc)
		return;

	mem = (void __iomem *)sc->mem;
	if (mem) {
		hif_dump_pipe_debug_count(scn);
		if (scn->athdiag_procfs_inited) {
			athdiag_procfs_remove();
			scn->athdiag_procfs_inited = false;
		}
		scn->mem = NULL;
	}
	hif_info("X");
}

#ifdef CONFIG_PLD_PCIE_CNSS
void hif_ipci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	int errno;

	hif_info("wlan: %s pcie power collapse", flag ? "disable" : "enable");
	hif_runtime_prevent_linkdown(scn, flag);

	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
	if (errno)
		hif_err("Failed pld_wlan_pm_control; errno %d", errno);
}
#else
void hif_ipci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
}
#endif

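/*
 * hif_ipci_bus_suspend() quiesces the bus for system suspend: all IRQs
 * except the wake IRQ are disabled, the wake IRQ is armed, and then any
 * in-flight tasks and the FW diag copy engine are drained.  A failure in
 * any step unwinds the earlier ones through the labels below.
 */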
int hif_ipci_bus_suspend(struct hif_softc *scn)
{
	int ret;

	ret = hif_apps_disable_irqs_except_wake_irq(GET_HIF_OPAQUE_HDL(scn));
	if (ret) {
		hif_err("Failed to disable IRQs");
		goto disable_irq_fail;
	}

	ret = hif_apps_enable_irq_wake(GET_HIF_OPAQUE_HDL(scn));
	if (ret) {
		hif_err("Failed to enable Wake-IRQ");
		goto enable_wake_irq_fail;
	}

	if (QDF_IS_STATUS_ERROR(hif_try_complete_tasks(scn))) {
		hif_err("hif_try_complete_tasks timed out, so abort suspend");
		ret = -EBUSY;
		goto drain_tasks_fail;
	}

	/*
	 * In the unlikely case the drain never completes, it returns an
	 * error and the bus suspend shall be aborted.
	 */
	ret = hif_drain_fw_diag_ce(scn);
	if (ret) {
		hif_err("fw_diag_ce drain failed, so abort suspend");
		goto drain_tasks_fail;
	}

	scn->bus_suspended = true;

	return 0;

drain_tasks_fail:
	hif_apps_disable_irq_wake(GET_HIF_OPAQUE_HDL(scn));

enable_wake_irq_fail:
	hif_apps_enable_irqs_except_wake_irq(GET_HIF_OPAQUE_HDL(scn));

disable_irq_fail:
	return ret;
}

int hif_ipci_bus_resume(struct hif_softc *scn)
{
	int ret = 0;

	ret = hif_apps_disable_irq_wake(GET_HIF_OPAQUE_HDL(scn));
	if (ret) {
		hif_err("Failed to disable Wake-IRQ");
		goto fail;
	}

	ret = hif_apps_enable_irqs_except_wake_irq(GET_HIF_OPAQUE_HDL(scn));
	if (ret)
		hif_err("Failed to enable IRQs");

	scn->bus_suspended = false;

fail:
	return ret;
}

int hif_ipci_bus_suspend_noirq(struct hif_softc *scn)
{
	/*
	 * In the system suspend case, a wake IRQ received just before the
	 * kernel issues suspend_noirq must have scheduled the CE2 tasklet,
	 * so the suspend can be aborted here.  The equivalent runtime
	 * suspend scenario is handled by
	 * hif_pm_runtime_check_and_request_resume() in
	 * hif_ce_interrupt_handler().
	 */
	if (!hif_pm_runtime_get_monitor_wake_intr(GET_HIF_OPAQUE_HDL(scn)) &&
	    hif_get_num_active_tasklets(scn)) {
		hif_err("Tasklets are pending, abort sys suspend_noirq");
		return -EBUSY;
	}

	return 0;
}

int hif_ipci_bus_resume_noirq(struct hif_softc *scn)
{
	return 0;
}

void hif_ipci_disable_isr(struct hif_softc *scn)
{
	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(scn);

	hif_exec_kill(&scn->osc);
	hif_nointrs(scn);
	/* Cancel the pending tasklet */
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}

int hif_ipci_dump_registers(struct hif_softc *hif_ctx)
{
	int status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	status = hif_dump_ce_registers(scn);

	if (status)
		hif_err("Dump CE Registers Failed");

	return 0;
}

/**
 * hif_ce_interrupt_handler() - interrupt handler for copy engine
 * @irq: irq number
 * @context: tasklet context
 *
 * Return: irqreturn_t
 */
static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;

	hif_pm_runtime_check_and_request_resume(
			GET_HIF_OPAQUE_HDL(tasklet_entry->hif_ce_state));
	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}

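/*
 * Runtime-PM note: the resume request in hif_ce_interrupt_handler() is
 * the runtime-suspend counterpart of the pending-tasklet check in
 * hif_ipci_bus_suspend_noirq().
 */
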
extern const char *ce_name[];

/**
 * hif_ce_srng_msi_irq_disable() - disable the per-CE MSI IRQ
 * @hif_sc: hif context
 * @ce_id: which ce to disable copy complete interrupts for
 *
 * Return: none
 */
static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
{
	pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
				hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}

/**
 * hif_ce_srng_msi_irq_enable() - enable the per-CE MSI IRQ
 * @hif_sc: hif context
 * @ce_id: which ce to enable copy complete interrupts for
 *
 * Return: none
 */
static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
{
	pfrm_enable_irq(hif_sc->qdf_dev->dev,
			hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}

/**
 * hif_ce_msi_configure_irq() - configure the CE MSI IRQs
 * @scn: hif context
 *
 * Return: 0 on success, errno otherwise
 */
static int hif_ce_msi_configure_irq(struct hif_softc *scn)
{
	int ret;
	int ce_id, irq;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
	struct hif_ipci_softc *ipci_sc = HIF_GET_IPCI_SOFTC(scn);
	uint8_t wake_ce_id;

	ret = hif_get_wake_ce_id(scn, &wake_ce_id);
	if (ret)
		return ret;

	/* do ce irq assignments */
	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return ret;

	scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
	scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;

	/* needs to match the ce_id -> irq data mapping
	 * used in the srng parameter configuration
	 */
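	/*
	 * Worked example (illustrative values only): with msi_irq_start = 1
	 * and msi_data_count = 3, CEs 0/1/2 get msi_data 1/2/3 and CE 3
	 * wraps around to msi_data 1 again, sharing that vector.
	 */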
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		unsigned long irqflags = IRQF_SHARED;
		unsigned int msi_data = (ce_id % msi_data_count) +
			msi_irq_start;
		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
		hif_debug("(ce_id %d, msi_data %d, irq %d tasklet %pK)",
			  ce_id, msi_data, irq,
			  &ce_sc->tasklets[ce_id]);

		/* implies the ce is also initialized */
		if (!ce_sc->tasklets[ce_id].inited)
			continue;

		ipci_sc->ce_msi_irq_num[ce_id] = irq;
		ret = pfrm_request_irq(scn->qdf_dev->dev,
				       irq, hif_ce_interrupt_handler,
				       irqflags,
				       ce_name[ce_id],
				       &ce_sc->tasklets[ce_id]);
		if (ret)
			goto free_irq;
	}

	return ret;

free_irq:
	/* the request_irq for the last ce_id failed so skip it. */
	while (ce_id > 0 && ce_id < scn->ce_count) {
		unsigned int msi_data;

		ce_id--;
		msi_data = (ce_id % msi_data_count) + msi_irq_start;
		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
	}

	return ret;
}

/**
 * hif_exec_grp_irq_disable() - disable the irq for group
 * @hif_ext_group: hif exec context
 *
 * Return: none
 */
static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
{
	int i;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);

	for (i = 0; i < hif_ext_group->numirq; i++)
		pfrm_disable_irq_nosync(scn->qdf_dev->dev,
					hif_ext_group->os_irq[i]);
}

/**
 * hif_exec_grp_irq_enable() - enable the irq for group
 * @hif_ext_group: hif exec context
 *
 * Return: none
 */
static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
{
	int i;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);

	for (i = 0; i < hif_ext_group->numirq; i++)
		pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]);
}

const char *hif_ipci_get_irq_name(int irq_no)
{
	return "pci-dummy";
}
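
/*
 * Every external-group IRQ reports the same placeholder name on the
 * integrated PCIe bus; no per-IRQ naming is attempted here.
 */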

#ifdef FEATURE_IRQ_AFFINITY
static
void hif_ipci_irq_set_affinity_hint(struct hif_exec_context *hif_ext_group,
				    bool perf)
{
	int i, ret;
	unsigned int cpus;
	bool mask_set = false;
	int cpu_cluster = perf ? CPU_CLUSTER_TYPE_PERF :
						CPU_CLUSTER_TYPE_LITTLE;

	for (i = 0; i < hif_ext_group->numirq; i++)
		qdf_cpumask_clear(&hif_ext_group->new_cpu_mask[i]);

	for (i = 0; i < hif_ext_group->numirq; i++) {
		qdf_for_each_online_cpu(cpus) {
			if (qdf_topology_physical_package_id(cpus) ==
			    cpu_cluster) {
				qdf_cpumask_set_cpu(cpus,
						    &hif_ext_group->
						    new_cpu_mask[i]);
				mask_set = true;
			}
		}
	}
	for (i = 0; i < hif_ext_group->numirq; i++) {
		if (mask_set) {
			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
						  IRQ_NO_BALANCING, 0);
			ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i],
						       (struct qdf_cpu_mask *)
						       &hif_ext_group->
						       new_cpu_mask[i]);
			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
						  0, IRQ_NO_BALANCING);
			if (ret)
				qdf_debug("Set affinity %*pbl fails for IRQ %d",
					  qdf_cpumask_pr_args(&hif_ext_group->
							      new_cpu_mask[i]),
					  hif_ext_group->os_irq[i]);
		} else {
			qdf_err("Offline CPU: Set affinity fails for IRQ: %d",
				hif_ext_group->os_irq[i]);
		}
	}
}
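
/*
 * Illustrative example (hypothetical topology): on a part where CPUs 4-7
 * form the performance cluster, perf = true above yields
 * new_cpu_mask = 0xf0 for every IRQ in the group, and perf = false the
 * complementary little-cluster mask.
 */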

void hif_ipci_set_grp_intr_affinity(struct hif_softc *scn,
				    uint32_t grp_intr_bitmask, bool perf)
{
	int i;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		if (!(grp_intr_bitmask & BIT(i)))
			continue;

		hif_ext_group = hif_state->hif_ext_group[i];
		hif_ipci_irq_set_affinity_hint(hif_ext_group, perf);
		qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
	}
}
#endif

#ifdef HIF_CPU_PERF_AFFINE_MASK
static void hif_ipci_ce_irq_set_affinity_hint(struct hif_softc *scn)
{
	int ret;
	unsigned int cpus;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
	struct hif_ipci_softc *ipci_sc = HIF_GET_IPCI_SOFTC(scn);
	struct CE_attr *host_ce_conf;
	int ce_id;
	qdf_cpu_mask ce_cpu_mask;

	host_ce_conf = ce_sc->host_ce_config;
	qdf_cpumask_clear(&ce_cpu_mask);

	qdf_for_each_online_cpu(cpus) {
		if (qdf_topology_physical_package_id(cpus) ==
			CPU_CLUSTER_TYPE_PERF) {
			qdf_cpumask_set_cpu(cpus,
					    &ce_cpu_mask);
		}
	}
	if (qdf_cpumask_empty(&ce_cpu_mask)) {
		hif_err_rl("Empty cpu_mask, unable to set CE IRQ affinity");
		return;
	}
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
			continue;
		qdf_cpumask_clear(&ipci_sc->ce_irq_cpu_mask[ce_id]);
		qdf_cpumask_copy(&ipci_sc->ce_irq_cpu_mask[ce_id],
				 &ce_cpu_mask);
		qdf_dev_modify_irq_status(ipci_sc->ce_msi_irq_num[ce_id],
					  IRQ_NO_BALANCING, 0);
		ret = qdf_dev_set_irq_affinity(
		       ipci_sc->ce_msi_irq_num[ce_id],
		       (struct qdf_cpu_mask *)&ipci_sc->ce_irq_cpu_mask[ce_id]);
		qdf_dev_modify_irq_status(ipci_sc->ce_msi_irq_num[ce_id],
					  0, IRQ_NO_BALANCING);
		if (ret)
			hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
				   qdf_cpumask_pr_args(
					&ipci_sc->ce_irq_cpu_mask[ce_id]),
				   ipci_sc->ce_msi_irq_num[ce_id]);
		else
			hif_debug_rl("Set affinity %*pbl for CE IRQ: %d",
				     qdf_cpumask_pr_args(
				     &ipci_sc->ce_irq_cpu_mask[ce_id]),
				     ipci_sc->ce_msi_irq_num[ce_id]);
	}
}

void hif_ipci_config_irq_affinity(struct hif_softc *scn)
{
	hif_core_ctl_set_boost(true);
	/* Set IRQ affinity for CE interrupts */
	hif_ipci_ce_irq_set_affinity_hint(scn);
}
#endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */

#ifdef HIF_CPU_CLEAR_AFFINITY
void hif_ipci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
					    int intr_ctxt_id, int cpu)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i, ret;

	if (intr_ctxt_id < hif_state->hif_num_extgroup) {
		hif_ext_group = hif_state->hif_ext_group[intr_ctxt_id];
		for (i = 0; i < hif_ext_group->numirq; i++) {
			qdf_cpumask_setall(&hif_ext_group->new_cpu_mask[i]);
			qdf_cpumask_clear_cpu(cpu,
					      &hif_ext_group->new_cpu_mask[i]);
			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
						  IRQ_NO_BALANCING, 0);
			ret = qdf_dev_set_irq_affinity(hif_ext_group->os_irq[i],
						       (struct qdf_cpu_mask *)
						       &hif_ext_group->
						       new_cpu_mask[i]);
			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
						  0, IRQ_NO_BALANCING);
			if (ret)
				hif_err("Set affinity %*pbl fails for IRQ %d",
					qdf_cpumask_pr_args(&hif_ext_group->
							    new_cpu_mask[i]),
					hif_ext_group->os_irq[i]);
			else
				hif_debug("Set affinity %*pbl for IRQ: %d",
					  qdf_cpumask_pr_args(&hif_ext_group->
							      new_cpu_mask[i]),
					  hif_ext_group->os_irq[i]);
		}
	}
}
#endif

int hif_ipci_configure_grp_irq(struct hif_softc *scn,
			       struct hif_exec_context *hif_ext_group)
{
	int ret = 0;
	int irq = 0;
	int j;

	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
	hif_ext_group->irq_name = &hif_ipci_get_irq_name;
	hif_ext_group->work_complete = &hif_dummy_grp_done;

	for (j = 0; j < hif_ext_group->numirq; j++) {
		irq = hif_ext_group->irq[j];

		hif_info("request_irq = %d for grp %d",
			 irq, hif_ext_group->grp_id);
		ret = pfrm_request_irq(scn->qdf_dev->dev, irq,
				       hif_ext_group_interrupt_handler,
				       IRQF_SHARED | IRQF_NO_SUSPEND,
				       "wlan_EXT_GRP",
				       hif_ext_group);
		if (ret) {
			hif_err("request_irq failed ret = %d", ret);
			return -EFAULT;
		}
		hif_ext_group->os_irq[j] = irq;
	}
	hif_ext_group->irq_requested = true;
	return 0;
}

int hif_configure_irq(struct hif_softc *scn)
{
	int ret = 0;

	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
		scn->request_irq_done = false;
		return 0;
	}

	ret = hif_ce_msi_configure_irq(scn);
	if (ret == 0)
		goto end;

	if (ret < 0) {
		hif_err("hif_ce_msi_configure_irq error = %d", ret);
		return ret;
	}
end:
	scn->request_irq_done = true;
	return 0;
}

/**
 * hif_ipci_get_soc_info_pld() - get SoC info for the ipcie bus from a pld
 *                               based target
 * @sc: ipci context
 * @dev: device structure
 *
 * Return: none
 */
static void hif_ipci_get_soc_info_pld(struct hif_ipci_softc *sc,
				      struct device *dev)
{
	struct pld_soc_info info;

	pld_get_soc_info(dev, &info);
	sc->mem = info.v_addr;
	sc->ce_sc.ol_sc.mem    = info.v_addr;
	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
}

/**
 * hif_ipci_get_soc_info_nopld() - get SoC info for the ipcie bus for a
 *                                 non-pld target
 * @sc: ipci context
 * @dev: device structure
 *
 * Return: none
 */
static void hif_ipci_get_soc_info_nopld(struct hif_ipci_softc *sc,
					struct device *dev)
{}

/**
 * hif_is_pld_based_target() - verify if the target is pld based
 * @sc: ipci context
 * @device_id: device id
 *
 * Return: true if the target is pld based, false otherwise
 */
static bool hif_is_pld_based_target(struct hif_ipci_softc *sc,
				    int device_id)
{
	if (!pld_have_platform_driver_support(sc->dev))
		return false;

	switch (device_id) {
#ifdef QCA_WIFI_QCA6750
	case QCA6750_DEVICE_ID:
#endif
		return true;
	}
	return false;
}

/**
 * hif_ipci_init_deinit_ops_attach() - attach ops for ipci
 * @sc: ipci context
 * @device_id: device id
 *
 * Return: none
 */
static void hif_ipci_init_deinit_ops_attach(struct hif_ipci_softc *sc,
					    int device_id)
{
	if (hif_is_pld_based_target(sc, device_id))
		sc->hif_ipci_get_soc_info = hif_ipci_get_soc_info_pld;
	else
		sc->hif_ipci_get_soc_info = hif_ipci_get_soc_info_nopld;
}

QDF_STATUS hif_ipci_enable_bus(struct hif_softc *ol_sc,
			       struct device *dev, void *bdev,
			       const struct hif_bus_id *bid,
			       enum hif_enable_type type)
{
	int ret = 0;
	uint32_t hif_type, target_type;
	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(ol_sc);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
	uint16_t revision_id = 0;
	struct hif_target_info *tgt_info;
	int device_id = QCA6750_DEVICE_ID;

	if (!ol_sc) {
		hif_err("hif_ctx is NULL");
		return QDF_STATUS_E_NOMEM;
	}

	ret = qdf_set_dma_coherent_mask(dev,
					DMA_COHERENT_MASK_DEFAULT);
	if (ret) {
		hif_err("Failed to set dma mask error = %d", ret);
		return qdf_status_from_os_return(ret);
	}

	sc->dev = dev;
	tgt_info = hif_get_target_info_handle(hif_hdl);
	hif_ipci_init_deinit_ops_attach(sc, device_id);
	sc->hif_ipci_get_soc_info(sc, dev);
	hif_debug("hif_ipci_enable_bus done");

	ret = hif_get_device_type(device_id, revision_id,
				  &hif_type, &target_type);
	if (ret < 0) {
		hif_err("Invalid device id/revision_id");
		return QDF_STATUS_E_ABORTED;
	}
	hif_debug("hif_type = 0x%x, target_type = 0x%x",
		  hif_type, target_type);

	hif_register_tbl_attach(ol_sc, hif_type);
	hif_target_register_tbl_attach(ol_sc, target_type);
	sc->use_register_windowing = false;
	tgt_info->target_type = target_type;

	if (!ol_sc->mem_pa) {
		hif_err("BAR0 uninitialized");
		return QDF_STATUS_E_ABORTED;
	}

	return QDF_STATUS_SUCCESS;
}

bool hif_ipci_needs_bmi(struct hif_softc *scn)
{
	return !ce_srng_based(scn);
}

#ifdef FORCE_WAKE
int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
{
	uint32_t timeout = 0;
	struct hif_softc *scn = (struct hif_softc *)hif_handle;
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);

	if (pld_force_wake_request(scn->qdf_dev->dev)) {
		hif_err_rl("force wake request send failed");
		return -EINVAL;
	}

	HIF_STATS_INC(ipci_scn, mhi_force_wake_request_vote, 1);
	while (!pld_is_device_awake(scn->qdf_dev->dev) &&
	       timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS) {
		if (qdf_in_interrupt())
			qdf_mdelay(FORCE_WAKE_DELAY_MS);
		else
			qdf_sleep(FORCE_WAKE_DELAY_MS);

		timeout += FORCE_WAKE_DELAY_MS;
	}

	if (pld_is_device_awake(scn->qdf_dev->dev) <= 0) {
		hif_err("Unable to wake up mhi");
		HIF_STATS_INC(ipci_scn, mhi_force_wake_failure, 1);
		hif_force_wake_release(hif_handle);
		return -EINVAL;
	}
	HIF_STATS_INC(ipci_scn, mhi_force_wake_success, 1);

	HIF_STATS_INC(ipci_scn, soc_force_wake_success, 1);

	return 0;
}
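
/*
 * Note: every successful hif_force_wake_request() is expected to be
 * balanced by a hif_force_wake_release(); the request path polls the
 * device for up to FORCE_WAKE_DELAY_TIMEOUT_MS in FORCE_WAKE_DELAY_MS
 * steps before giving up.
 */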

int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
{
	int ret;
	struct hif_softc *scn = (struct hif_softc *)hif_handle;
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);

	ret = pld_force_wake_release(scn->qdf_dev->dev);
	if (ret) {
		hif_err("force wake release failure");
		HIF_STATS_INC(ipci_scn, mhi_force_wake_release_failure, 1);
		return ret;
	}

	HIF_STATS_INC(ipci_scn, mhi_force_wake_release_success, 1);

	HIF_STATS_INC(ipci_scn, soc_force_wake_release_success, 1);
	return 0;
}

void hif_print_ipci_stats(struct hif_ipci_softc *ipci_handle)
{
	hif_debug("mhi_force_wake_request_vote: %d",
		  ipci_handle->stats.mhi_force_wake_request_vote);
	hif_debug("mhi_force_wake_failure: %d",
		  ipci_handle->stats.mhi_force_wake_failure);
	hif_debug("mhi_force_wake_success: %d",
		  ipci_handle->stats.mhi_force_wake_success);
	hif_debug("soc_force_wake_register_write_success: %d",
		  ipci_handle->stats.soc_force_wake_register_write_success);
	hif_debug("soc_force_wake_failure: %d",
		  ipci_handle->stats.soc_force_wake_failure);
	hif_debug("soc_force_wake_success: %d",
		  ipci_handle->stats.soc_force_wake_success);
	hif_debug("mhi_force_wake_release_failure: %d",
		  ipci_handle->stats.mhi_force_wake_release_failure);
	hif_debug("mhi_force_wake_release_success: %d",
		  ipci_handle->stats.mhi_force_wake_release_success);
	hif_debug("soc_force_wake_release_success: %d",
		  ipci_handle->stats.soc_force_wake_release_success);
}
#endif /* FORCE_WAKE */

#ifdef FEATURE_HAL_DELAYED_REG_WRITE
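/*
 * hif_prevent_link_low_power_states() votes against PCIe L1/L1ss entry
 * in two phases: first it polls for the endpoint to go idle (up to
 * EP_WAKE_RESET_DELAY_TIMEOUT_MS), then it casts the pld_prevent_l1()
 * vote and polls again (up to EP_WAKE_DELAY_TIMEOUT_MS) for the endpoint
 * to come awake.  Failures are counted but deliberately not fatal.
 */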
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif);
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
	uint32_t start_time = 0, curr_time = 0;
	uint32_t count = 0;

	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
		return 0;

	start_time = curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	while (pld_is_pci_ep_awake(scn->qdf_dev->dev) &&
	       curr_time <= start_time + EP_WAKE_RESET_DELAY_TIMEOUT_MS) {
		if (count < EP_VOTE_POLL_TIME_CNT) {
			qdf_udelay(EP_VOTE_POLL_TIME_US);
			count++;
		} else {
			qdf_sleep_us(EP_WAKE_RESET_DELAY_US);
		}
		curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	}

	if (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
		hif_err_rl("EP state reset is not done to prevent l1");
		ipci_scn->ep_awake_reset_fail++;
		return 0;
	}

	if (pld_prevent_l1(scn->qdf_dev->dev)) {
		hif_err_rl("pld prevent l1 failed");
		ipci_scn->prevent_l1_fail++;
		return 0;
	}

	count = 0;
	ipci_scn->prevent_l1 = true;
	start_time = curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	while (!pld_is_pci_ep_awake(scn->qdf_dev->dev) &&
	       curr_time <= start_time + EP_WAKE_DELAY_TIMEOUT_MS) {
		if (count < EP_VOTE_POLL_TIME_CNT) {
			qdf_udelay(EP_WAKE_RESET_DELAY_US);
			count++;
		} else {
			qdf_sleep_us(EP_WAKE_DELAY_US);
		}

		curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	}

	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) <= 0) {
		hif_err_rl("Unable to wakeup pci ep");
		ipci_scn->ep_awake_set_fail++;
		return 0;
	}

	return 0;
}

void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif);
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);

	if (qdf_likely(ipci_scn->prevent_l1)) {
		pld_allow_l1(scn->qdf_dev->dev);
		ipci_scn->prevent_l1 = false;
	}
}
#endif

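/*
 * hif_ipci_enable_grp_irqs() and hif_ipci_disable_grp_irqs() must be
 * called in balanced pairs; the grp_irqs_disabled flag asserts if the
 * same operation is requested twice in a row.
 */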
int hif_ipci_enable_grp_irqs(struct hif_softc *scn)
{
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
	int status;

	if (!ipci_scn->grp_irqs_disabled) {
		hif_err("Unbalanced group IRQs enable called");
		qdf_assert_always(0);
	}

	status = hif_apps_grp_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
	if (!status)
		ipci_scn->grp_irqs_disabled = false;

	return status;
}

int hif_ipci_disable_grp_irqs(struct hif_softc *scn)
{
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
	int status;

	if (ipci_scn->grp_irqs_disabled) {
		hif_err("Unbalanced group IRQs disable called");
		qdf_assert_always(0);
	}

	status = hif_apps_grp_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
	if (!status)
		ipci_scn->grp_irqs_disabled = true;

	return status;
}
1104