1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8 
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include <linux/slab.h>
19 #include <linux/interrupt.h>
20 #include <linux/if_arp.h>
21 #include "hif_io32.h"
22 #include "if_ipci.h"
23 #include "hif.h"
24 #include "target_type.h"
25 #include "hif_main.h"
26 #include "ce_main.h"
27 #include "ce_api.h"
28 #include "ce_internal.h"
29 #include "ce_reg.h"
30 #include "ce_bmi.h"
31 #include "regtable.h"
32 #include "hif_hw_version.h"
33 #include <linux/debugfs.h>
34 #include <linux/seq_file.h>
35 #include "qdf_status.h"
36 #include "qdf_atomic.h"
37 #include "pld_common.h"
38 #include "mp_dev.h"
39 #include "hif_debug.h"
40 
41 #include "ce_tasklet.h"
42 #include "targaddrs.h"
43 #include "hif_exec.h"
44 
45 #include "ipci_api.h"
46 
/**
 * hif_ipci_enable_power_management() - enable HIF power management
 * @hif_sc: hif context
 * @is_packet_log_enabled: unused on this bus; kept for bus-API symmetry
 *
 * Starts the runtime-PM framework for this hif instance.
 *
 * Return: none
 */
void hif_ipci_enable_power_management(struct hif_softc *hif_sc,
				      bool is_packet_log_enabled)
{
	hif_rtpm_start(hif_sc);
}
52 
/**
 * hif_ipci_disable_power_management() - disable HIF power management
 * @hif_ctx: hif context
 *
 * Stops the runtime-PM framework started by
 * hif_ipci_enable_power_management().
 *
 * Return: none
 */
void hif_ipci_disable_power_management(struct hif_softc *hif_ctx)
{
	hif_rtpm_stop(hif_ctx);
}
57 
/**
 * hif_ipci_display_stats() - print copy-engine statistics
 * @hif_ctx: hif context
 *
 * Return: none
 */
void hif_ipci_display_stats(struct hif_softc *hif_ctx)
{
	hif_display_ce_stats(hif_ctx);
}
62 
hif_ipci_clear_stats(struct hif_softc * hif_ctx)63 void hif_ipci_clear_stats(struct hif_softc *hif_ctx)
64 {
65 	struct hif_ipci_softc *ipci_ctx = HIF_GET_IPCI_SOFTC(hif_ctx);
66 
67 	if (!ipci_ctx) {
68 		hif_err("hif_ctx null");
69 		return;
70 	}
71 	hif_clear_ce_stats(&ipci_ctx->ce_sc);
72 }
73 
/**
 * hif_ipci_open() - bus-open hook for the ipci bus
 * @hif_ctx: hif context
 * @bus_type: bus type enum recorded on the hif context
 *
 * Records the bus type, opens the runtime-PM context, initializes the
 * per-bus irq spinlock, and delegates the rest to hif_ce_open().
 *
 * Return: QDF_STATUS from hif_ce_open()
 */
QDF_STATUS hif_ipci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
{
	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(hif_ctx);

	hif_ctx->bus_type = bus_type;
	hif_rtpm_open(hif_ctx);

	qdf_spinlock_create(&sc->irq_lock);

	return hif_ce_open(hif_ctx);
}
85 
86 /**
87  * hif_ce_msi_map_ce_to_irq() - map CE to IRQ
88  * @scn: hif context
89  * @ce_id: CE Id
90  *
91  * Return: IRQ number
92  */
hif_ce_msi_map_ce_to_irq(struct hif_softc * scn,int ce_id)93 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
94 {
95 	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
96 
97 	return ipci_scn->ce_msi_irq_num[ce_id];
98 }
99 
/**
 * hif_ipci_bus_configure() - configure the ipci bus
 * @hif_sc: hif context
 *
 * Bring-up sequence: enable WLAN in firmware, configure the copy
 * engines, configure IRQs, then record the wake CE's IRQ on the hif
 * context.  On failure the steps are unwound in reverse order via the
 * goto labels below.
 *
 * Return: 0 for success, non-zero on failure
 */
int hif_ipci_bus_configure(struct hif_softc *hif_sc)
{
	int status = 0;
	uint8_t wake_ce_id;

	hif_ce_prepare_config(hif_sc);

	status = hif_wlan_enable(hif_sc);
	if (status) {
		hif_err("hif_wlan_enable error = %d", status);
		return status;
	}

	/* hint that target access is imminent (kept until bring-up ends) */
	A_TARGET_ACCESS_LIKELY(hif_sc);

	status = hif_config_ce(hif_sc);
	if (status)
		goto disable_wlan;

	status = hif_get_wake_ce_id(hif_sc, &wake_ce_id);
	if (status)
		goto unconfig_ce;

	status = hif_configure_irq(hif_sc);
	if (status < 0)
		goto unconfig_ce;

	/* system wake-ups will arrive through the wake CE's MSI IRQ */
	hif_sc->wake_irq = hif_ce_msi_map_ce_to_irq(hif_sc, wake_ce_id);
	hif_sc->wake_irq_type = HIF_PM_CE_WAKE;

	hif_info("expecting wake from ce %d, irq %d",
		 wake_ce_id, hif_sc->wake_irq);

	A_TARGET_ACCESS_UNLIKELY(hif_sc);

	return status;

unconfig_ce:
	hif_unconfig_ce(hif_sc);
disable_wlan:
	A_TARGET_ACCESS_UNLIKELY(hif_sc);
	hif_wlan_disable(hif_sc);

	hif_err("Failed, status = %d", status);
	return status;
}
146 
/**
 * hif_ipci_close() - bus-close hook; undoes hif_ipci_open()
 * @hif_sc: hif context
 *
 * Return: none
 */
void hif_ipci_close(struct hif_softc *hif_sc)
{
	hif_rtpm_close(hif_sc);
	hif_ce_close(hif_sc);
}
152 
153 /**
154  * hif_ce_srng_msi_free_irq(): free CE msi IRQ
155  * @scn: struct hif_softc
156  *
157  * Return: ErrorNo
158  */
hif_ce_srng_msi_free_irq(struct hif_softc * scn)159 static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
160 {
161 	int ret;
162 	int ce_id, irq;
163 	uint32_t msi_data_start;
164 	uint32_t msi_data_count;
165 	uint32_t msi_irq_start;
166 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
167 
168 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
169 					  &msi_data_count, &msi_data_start,
170 					  &msi_irq_start);
171 	if (ret)
172 		return ret;
173 
174 	/* needs to match the ce_id -> irq data mapping
175 	 * used in the srng parameter configuration
176 	 */
177 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
178 		unsigned int msi_data;
179 
180 		if (!ce_sc->tasklets[ce_id].inited)
181 			continue;
182 
183 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
184 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
185 
186 		hif_ce_irq_remove_affinity_hint(irq);
187 
188 		hif_debug("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
189 			  ce_id, msi_data, irq);
190 
191 		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
192 	}
193 
194 	return ret;
195 }
196 
197 /**
198  * hif_ipci_deconfigure_grp_irq(): deconfigure HW block IRQ
199  * @scn: struct hif_softc
200  *
201  * Return: none
202  */
hif_ipci_deconfigure_grp_irq(struct hif_softc * scn)203 void hif_ipci_deconfigure_grp_irq(struct hif_softc *scn)
204 {
205 	int i, j, irq;
206 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
207 	struct hif_exec_context *hif_ext_group;
208 
209 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
210 		hif_ext_group = hif_state->hif_ext_group[i];
211 		if (hif_ext_group->irq_requested) {
212 			hif_ext_group->irq_requested = false;
213 			for (j = 0; j < hif_ext_group->numirq; j++) {
214 				irq = hif_ext_group->os_irq[j];
215 				pfrm_free_irq(scn->qdf_dev->dev,
216 					      irq, hif_ext_group);
217 			}
218 			hif_ext_group->numirq = 0;
219 		}
220 	}
221 }
222 
hif_ipci_nointrs(struct hif_softc * scn)223 void hif_ipci_nointrs(struct hif_softc *scn)
224 {
225 	int ret;
226 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
227 
228 	scn->free_irq_done = true;
229 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
230 
231 	if (scn->request_irq_done == false)
232 		return;
233 
234 	hif_ipci_deconfigure_grp_irq(scn);
235 
236 	ret = hif_ce_srng_msi_free_irq(scn);
237 
238 	scn->request_irq_done = false;
239 }
240 
hif_ipci_disable_bus(struct hif_softc * scn)241 void hif_ipci_disable_bus(struct hif_softc *scn)
242 {
243 	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(scn);
244 	void __iomem *mem;
245 
246 	/* Attach did not succeed, all resources have been
247 	 * freed in error handler
248 	 */
249 	if (!sc)
250 		return;
251 
252 	mem = (void __iomem *)sc->mem;
253 	if (mem) {
254 		hif_dump_pipe_debug_count(scn);
255 		if (scn->athdiag_procfs_inited) {
256 			athdiag_procfs_remove();
257 			scn->athdiag_procfs_inited = false;
258 		}
259 		scn->mem = NULL;
260 	}
261 	hif_info("X");
262 }
263 
#ifdef CONFIG_PLD_PCIE_CNSS
/**
 * hif_ipci_prevent_linkdown() - allow or disallow PCIe power collapse
 * @scn: hif context
 * @flag: true to prevent link power collapse, false to allow it
 *
 * Forwards the vote to the runtime-PM layer and to the platform
 * driver (pld).  A pld failure is logged but not propagated.
 *
 * Return: none
 */
void hif_ipci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	int errno;

	hif_info("wlan: %s pcie power collapse", flag ? "disable" : "enable");
	hif_runtime_prevent_linkdown(scn, flag);

	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
	if (errno)
		hif_err("Failed pld_wlan_pm_control; errno %d", errno);
}
#else
/* Stub when the cnss platform driver is not available. */
void hif_ipci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
}
#endif
281 
/**
 * hif_ipci_bus_suspend() - prepare the bus for system suspend
 * @scn: hif context
 *
 * Disables all IRQs except the wake IRQ, arms the wake IRQ, then
 * waits for in-flight tasklets/credits and the fw diag CE to drain.
 * Any failure unwinds the already-completed steps in reverse order.
 *
 * Return: 0 on success, -EBUSY/error code and suspend aborted otherwise
 */
int hif_ipci_bus_suspend(struct hif_softc *scn)
{
	int ret;

	ret = hif_apps_disable_irqs_except_wake_irq(GET_HIF_OPAQUE_HDL(scn));
	if (ret) {
		hif_err("Failed to disable IRQs");
		goto disable_irq_fail;
	}

	ret = hif_apps_enable_irq_wake(GET_HIF_OPAQUE_HDL(scn));
	if (ret) {
		hif_err("Failed to enable Wake-IRQ");
		goto enable_wake_irq_fail;
	}

	if (QDF_IS_STATUS_ERROR(hif_try_complete_tasks(scn))) {
		hif_err("hif_try_complete_tasks timed-out, so abort suspend");
		ret = -EBUSY;
		goto drain_tasks_fail;
	}

	/*
	 * In an unlikely case, if draining becomes infinite loop,
	 * it returns an error, shall abort the bus suspend.
	 */
	ret = hif_drain_fw_diag_ce(scn);
	if (ret) {
		hif_err("draining fw_diag_ce goes infinite, so abort suspend");
		goto drain_tasks_fail;
	}

	scn->bus_suspended = true;

	return 0;

	/* unwind in reverse order of the steps above */
drain_tasks_fail:
	hif_apps_disable_irq_wake(GET_HIF_OPAQUE_HDL(scn));

enable_wake_irq_fail:
	hif_apps_enable_irqs_except_wake_irq(GET_HIF_OPAQUE_HDL(scn));

disable_irq_fail:
	return ret;
}
327 
hif_ipci_bus_resume(struct hif_softc * scn)328 int hif_ipci_bus_resume(struct hif_softc *scn)
329 {
330 	int ret = 0;
331 
332 	ret = hif_apps_disable_irq_wake(GET_HIF_OPAQUE_HDL(scn));
333 	if (ret) {
334 		hif_err("Failed to disable Wake-IRQ");
335 		goto fail;
336 	}
337 
338 	ret = hif_apps_enable_irqs_except_wake_irq(GET_HIF_OPAQUE_HDL(scn));
339 	if (ret)
340 		hif_err("Failed to enable IRQs");
341 
342 	scn->bus_suspended = false;
343 
344 fail:
345 	return ret;
346 }
347 
/**
 * hif_ipci_bus_suspend_noirq() - late (noirq) suspend check
 * @scn: hif context
 *
 * Return: 0 to allow suspend, -EBUSY to abort it
 */
int hif_ipci_bus_suspend_noirq(struct hif_softc *scn)
{
	/*
	 * If it is system suspend case and wake-IRQ received
	 * just before Kernel issuing suspend_noirq, that must
	 * have scheduled CE2 tasklet, so suspend activity can
	 * be aborted.
	 * Similar scenario for runtime suspend case, would be
	 * handled by hif_rtpm_check_and_request_resume
	 * in hif_ce_interrupt_handler.
	 *
	 */
	if (!hif_rtpm_get_monitor_wake_intr() &&
	    hif_get_num_active_tasklets(scn)) {
		hif_err("Tasklets are pending, abort sys suspend_noirq");
		return -EBUSY;
	}

	return 0;
}
368 
/**
 * hif_ipci_bus_resume_noirq() - early (noirq) resume hook; nothing to do
 * @scn: hif context
 *
 * Return: 0 always
 */
int hif_ipci_bus_resume_noirq(struct hif_softc *scn)
{
	return 0;
}
373 
/**
 * hif_ipci_disable_isr() - stop all interrupt and bottom-half activity
 * @scn: hif context
 *
 * Kills exec contexts, frees IRQs, cancels CE tasklets and the bus
 * tasklet, then zeroes the active-tasklet counters so waiters in
 * hif_try_complete_tasks() are not left blocked.
 *
 * Return: none
 */
void hif_ipci_disable_isr(struct hif_softc *scn)
{
	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(scn);

	hif_exec_kill(&scn->osc);
	hif_nointrs(scn);
	/* Cancel the pending tasklet */
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}
386 
/**
 * hif_ipci_dump_registers() - dump CE registers for debugging
 * @hif_ctx: hif context
 *
 * A dump failure is logged but not propagated to the caller.
 *
 * Return: 0 always
 */
int hif_ipci_dump_registers(struct hif_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (hif_dump_ce_registers(scn))
		hif_err("Dump CE Registers Failed");

	return 0;
}
399 
400 /**
401  * hif_ce_interrupt_handler() - interrupt handler for copy engine
402  * @irq: irq number
403  * @context: tasklet context
404  *
405  * Return: irqreturn_t
406  */
hif_ce_interrupt_handler(int irq,void * context)407 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
408 {
409 	struct ce_tasklet_entry *tasklet_entry = context;
410 
411 	hif_rtpm_check_and_request_resume(false);
412 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
413 }
414 
415 extern const char *ce_name[];
416 
/* hif_ce_srng_msi_irq_disable() - disable the irq for msi
 * @hif_sc: hif context
 * @ce_id: which ce to disable copy complete interrupts for
 *
 * Non-syncing disable: safe to call from interrupt context.
 *
 * @Return: none
 */
static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
{
	pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
				hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));

}
429 
/* hif_ce_srng_msi_irq_enable() - enable the irq for msi
 * @hif_sc: hif context
 * @ce_id: which ce to enable copy complete interrupts for
 *
 * @Return: none
 */
static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
{
	pfrm_enable_irq(hif_sc->qdf_dev->dev,
			hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));

}
442 
443 /* hif_ce_msi_configure_irq() - configure the irq
444  * @scn: hif context
445  *
446  * @Return: none
447  */
hif_ce_msi_configure_irq(struct hif_softc * scn)448 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
449 {
450 	int ret;
451 	int ce_id, irq;
452 	uint32_t msi_data_start;
453 	uint32_t msi_data_count;
454 	uint32_t msi_irq_start;
455 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
456 	struct hif_ipci_softc *ipci_sc = HIF_GET_IPCI_SOFTC(scn);
457 	uint8_t wake_ce_id;
458 
459 	ret = hif_get_wake_ce_id(scn, &wake_ce_id);
460 	if (ret)
461 		return ret;
462 
463 	/* do ce irq assignments */
464 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
465 					  &msi_data_count, &msi_data_start,
466 					  &msi_irq_start);
467 	if (ret)
468 		return ret;
469 
470 	scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
471 	scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
472 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
473 
474 	/* needs to match the ce_id -> irq data mapping
475 	 * used in the srng parameter configuration
476 	 */
477 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
478 		unsigned long irqflags = IRQF_SHARED;
479 		unsigned int msi_data = (ce_id % msi_data_count) +
480 			msi_irq_start;
481 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
482 		hif_debug("(ce_id %d, msi_data %d, irq %d tasklet %pK)",
483 			 ce_id, msi_data, irq,
484 			 &ce_sc->tasklets[ce_id]);
485 
486 		/* implies the ce is also initialized */
487 		if (!ce_sc->tasklets[ce_id].inited)
488 			continue;
489 
490 		ipci_sc->ce_msi_irq_num[ce_id] = irq;
491 		ret = pfrm_request_irq(scn->qdf_dev->dev,
492 				       irq, hif_ce_interrupt_handler,
493 				       irqflags,
494 				       ce_name[ce_id],
495 				       &ce_sc->tasklets[ce_id]);
496 		if (ret)
497 			goto free_irq;
498 	}
499 
500 	return ret;
501 
502 free_irq:
503 	/* the request_irq for the last ce_id failed so skip it. */
504 	while (ce_id > 0 && ce_id < scn->ce_count) {
505 		unsigned int msi_data;
506 
507 		ce_id--;
508 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
509 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
510 		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
511 	}
512 
513 	return ret;
514 }
515 
516 /**
517  * hif_exec_grp_irq_disable() - disable the irq for group
518  * @hif_ext_group: hif exec context
519  *
520  * Return: none
521  */
hif_exec_grp_irq_disable(struct hif_exec_context * hif_ext_group)522 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
523 {
524 	int i;
525 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
526 
527 	for (i = 0; i < hif_ext_group->numirq; i++)
528 		pfrm_disable_irq_nosync(scn->qdf_dev->dev,
529 					hif_ext_group->os_irq[i]);
530 }
531 
532 /**
533  * hif_exec_grp_irq_enable() - enable the irq for group
534  * @hif_ext_group: hif exec context
535  *
536  * Return: none
537  */
hif_exec_grp_irq_enable(struct hif_exec_context * hif_ext_group)538 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
539 {
540 	int i;
541 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
542 
543 	for (i = 0; i < hif_ext_group->numirq; i++)
544 		pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]);
545 }
546 
/**
 * hif_ipci_get_irq_name() - IRQ-name callback for exec groups
 * @irq_no: irq number (unused; a fixed name is returned)
 *
 * Return: static placeholder name string
 */
const char *hif_ipci_get_irq_name(int irq_no)
{
	return "pci-dummy";
}
551 
552 #ifdef FEATURE_IRQ_AFFINITY
/**
 * hif_ipci_irq_set_affinity_hint() - pin group IRQs to a CPU cluster
 * @hif_ext_group: exec context whose IRQs are being pinned
 * @perf: true to target the perf cluster(s), false for the LITTLE cluster
 *
 * Builds, for every IRQ in the group, a cpumask of online CPUs whose
 * physical package is in the selected cluster bitmap, then applies it
 * through the affinity manager.  If no online CPU matched, only an
 * error is logged and no affinity is applied.
 *
 * Return: none
 */
static
void hif_ipci_irq_set_affinity_hint(struct hif_exec_context *hif_ext_group,
				    bool perf)
{
	int i, ret;
	unsigned int cpus;
	bool mask_set = false;	/* at least one online CPU matched the cluster */
	int package_id;
	int cpu_cluster = perf ? hif_get_perf_cluster_bitmap() :
				 BIT(CPU_CLUSTER_TYPE_LITTLE);

	for (i = 0; i < hif_ext_group->numirq; i++)
		qdf_cpumask_clear(&hif_ext_group->new_cpu_mask[i]);

	/* NOTE(review): the same mask is built for every IRQ index;
	 * presumably kept per-IRQ for later individual updates
	 */
	for (i = 0; i < hif_ext_group->numirq; i++) {
		qdf_for_each_online_cpu(cpus) {
			package_id = qdf_topology_physical_package_id(cpus);
			if (package_id >= 0 && BIT(package_id) & cpu_cluster) {
				qdf_cpumask_set_cpu(cpus,
						    &hif_ext_group->
						    new_cpu_mask[i]);
				mask_set = true;
			}
		}
	}
	for (i = 0; i < hif_ext_group->numirq && i < HIF_MAX_GRP_IRQ; i++) {
		if (mask_set) {
			ret = hif_affinity_mgr_set_qrg_irq_affinity((struct hif_softc *)hif_ext_group->hif,
								    hif_ext_group->os_irq[i],
								    hif_ext_group->grp_id, i,
								    &hif_ext_group->new_cpu_mask[i]);
			if (ret)
				qdf_debug("Set affinity %*pbl fails for IRQ %d ",
					  qdf_cpumask_pr_args(&hif_ext_group->
							      new_cpu_mask[i]),
					  hif_ext_group->os_irq[i]);
		} else {
			qdf_err("Offline CPU: Set affinity fails for IRQ: %d",
				hif_ext_group->os_irq[i]);
		}
	}
}
595 
/**
 * hif_ipci_set_grp_intr_affinity() - set affinity for selected exec groups
 * @scn: hif context
 * @grp_intr_bitmask: bitmask of group indices to update
 * @perf: true for perf-cluster affinity, false for LITTLE cluster
 *
 * Return: none
 */
void hif_ipci_set_grp_intr_affinity(struct hif_softc *scn,
				    uint32_t grp_intr_bitmask, bool perf)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *grp;
	int idx;

	for (idx = 0; idx < hif_state->hif_num_extgroup; idx++) {
		if (!(grp_intr_bitmask & BIT(idx)))
			continue;

		grp = hif_state->hif_ext_group[idx];
		hif_ipci_irq_set_affinity_hint(grp, perf);
		qdf_atomic_set(&grp->force_napi_complete, -1);
	}
}
612 #endif
613 
614 #ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_ipci_ce_irq_set_affinity_hint() - pin CE IRQs to the perf cluster
 * @scn: hif context
 *
 * Builds a cpumask of online CPUs in the perf cluster and applies it
 * to each interrupt-enabled CE's MSI IRQ via the affinity manager.
 * Bails out (with a rate-limited error) if no online perf CPU exists.
 *
 * Return: none
 */
static void hif_ipci_ce_irq_set_affinity_hint(struct hif_softc *scn)
{
	int ret;
	unsigned int cpus;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
	struct hif_ipci_softc *ipci_sc = HIF_GET_IPCI_SOFTC(scn);
	struct CE_attr *host_ce_conf;
	int ce_id;
	qdf_cpu_mask ce_cpu_mask, updated_mask;
	int perf_cpu_cluster = hif_get_perf_cluster_bitmap();
	int package_id;

	host_ce_conf = ce_sc->host_ce_config;
	qdf_cpumask_clear(&ce_cpu_mask);

	qdf_for_each_online_cpu(cpus) {
		package_id = qdf_topology_physical_package_id(cpus);
		if (package_id >= 0 && BIT(package_id) & perf_cpu_cluster) {
			qdf_cpumask_set_cpu(cpus,
					    &ce_cpu_mask);
		}
	}
	if (qdf_cpumask_empty(&ce_cpu_mask)) {
		hif_err_rl("Empty cpu_mask, unable to set CE IRQ affinity");
		return;
	}
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		/* CEs polled rather than interrupt-driven have no IRQ */
		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
			continue;
		qdf_cpumask_copy(&updated_mask, &ce_cpu_mask);
		/* the affinity manager may trim updated_mask in place */
		ret = hif_affinity_mgr_set_ce_irq_affinity(scn, ipci_sc->ce_msi_irq_num[ce_id],
							   ce_id,
							   &updated_mask);
		qdf_cpumask_clear(&ipci_sc->ce_irq_cpu_mask[ce_id]);
		qdf_cpumask_copy(&ipci_sc->ce_irq_cpu_mask[ce_id],
				 &updated_mask);
		if (ret)
			hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
				   qdf_cpumask_pr_args(
					&ipci_sc->ce_irq_cpu_mask[ce_id]),
					ipci_sc->ce_msi_irq_num[ce_id]);
		else
			hif_debug_rl("Set affinity %*pbl for CE IRQ: %d",
				     qdf_cpumask_pr_args(
				     &ipci_sc->ce_irq_cpu_mask[ce_id]),
				     ipci_sc->ce_msi_irq_num[ce_id]);
	}
}
663 
/**
 * hif_ipci_config_irq_affinity() - boost CPUs and pin CE IRQ affinity
 * @scn: hif context
 *
 * Return: none
 */
void hif_ipci_config_irq_affinity(struct hif_softc *scn)
{
	hif_core_ctl_set_boost(true);
	/* Set IRQ affinity for CE interrupts*/
	hif_ipci_ce_irq_set_affinity_hint(scn);
}
670 #endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */
671 
672 #ifdef HIF_CPU_CLEAR_AFFINITY
hif_ipci_config_irq_clear_cpu_affinity(struct hif_softc * scn,int intr_ctxt_id,int cpu)673 void hif_ipci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
674 					    int intr_ctxt_id, int cpu)
675 {
676 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
677 	struct hif_exec_context *hif_ext_group;
678 	int i, ret;
679 
680 	if (intr_ctxt_id < hif_state->hif_num_extgroup) {
681 		hif_ext_group = hif_state->hif_ext_group[intr_ctxt_id];
682 		for (i = 0; i < hif_ext_group->numirq; i++) {
683 			qdf_cpumask_setall(&hif_ext_group->new_cpu_mask[i]);
684 			qdf_cpumask_clear_cpu(cpu,
685 					      &hif_ext_group->new_cpu_mask[i]);
686 			ret = hif_affinity_mgr_set_qrg_irq_affinity((struct hif_softc *)hif_ext_group->hif,
687 								    hif_ext_group->os_irq[i],
688 								    hif_ext_group->grp_id, i,
689 								    &hif_ext_group->new_cpu_mask[i]);
690 			if (ret)
691 				hif_err("Set affinity %*pbl fails for IRQ %d ",
692 					qdf_cpumask_pr_args(&hif_ext_group->
693 							    new_cpu_mask[i]),
694 					hif_ext_group->os_irq[i]);
695 			else
696 				hif_debug("Set affinity %*pbl for IRQ: %d",
697 					  qdf_cpumask_pr_args(&hif_ext_group->
698 							      new_cpu_mask[i]),
699 					  hif_ext_group->os_irq[0]);
700 		}
701 	}
702 }
703 #endif
704 
hif_ipci_configure_grp_irq(struct hif_softc * scn,struct hif_exec_context * hif_ext_group)705 int hif_ipci_configure_grp_irq(struct hif_softc *scn,
706 			       struct hif_exec_context *hif_ext_group)
707 {
708 	int ret = 0;
709 	int irq = 0;
710 	int j;
711 
712 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
713 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
714 	hif_ext_group->irq_name = &hif_ipci_get_irq_name;
715 	hif_ext_group->work_complete = &hif_dummy_grp_done;
716 
717 	for (j = 0; j < hif_ext_group->numirq; j++) {
718 		irq = hif_ext_group->irq[j];
719 
720 		hif_info("request_irq = %d for grp %d",
721 			 irq, hif_ext_group->grp_id);
722 		ret = pfrm_request_irq(scn->qdf_dev->dev, irq,
723 				       hif_ext_group_interrupt_handler,
724 				       IRQF_SHARED | IRQF_NO_SUSPEND,
725 				       "wlan_EXT_GRP",
726 				       hif_ext_group);
727 		if (ret) {
728 			hif_err("request_irq failed ret = %d", ret);
729 			return -EFAULT;
730 		}
731 		hif_ext_group->os_irq[j] = irq;
732 	}
733 	hif_ext_group->irq_requested = true;
734 	return 0;
735 }
736 
/**
 * hif_configure_irq() - configure all HIF interrupts
 * @scn: hif context
 *
 * In polled mode no IRQs are requested at all; otherwise the CE MSI
 * IRQs are configured and request_irq_done is recorded.
 *
 * NOTE(review): a positive return from hif_ce_msi_configure_irq()
 * falls through to the success path — presumably intentional, but
 * confirm against the pld/pfrm return conventions.
 *
 * Return: 0 on success, negative error code otherwise
 */
int hif_configure_irq(struct hif_softc *scn)
{
	int ret = 0;

	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
		scn->request_irq_done = false;
		return 0;
	}

	ret = hif_ce_msi_configure_irq(scn);
	if (ret == 0)
		goto end;

	if (ret < 0) {
		hif_err("hif_ipci_configure_irq error = %d", ret);
		return ret;
	}
end:
	scn->request_irq_done = true;
	return 0;
}
758 
759 /**
760  * hif_ipci_get_soc_info_pld() - get soc info for ipcie bus from pld target
761  * @sc: ipci context
762  * @dev: device structure
763  *
764  * Return: none
765  */
hif_ipci_get_soc_info_pld(struct hif_ipci_softc * sc,struct device * dev)766 static void hif_ipci_get_soc_info_pld(struct hif_ipci_softc *sc,
767 				      struct device *dev)
768 {
769 	struct pld_soc_info info;
770 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
771 
772 	pld_get_soc_info(dev, &info);
773 	sc->mem = info.v_addr;
774 	sc->ce_sc.ol_sc.mem    = info.v_addr;
775 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
776 
777 	scn->target_info.target_version = info.soc_id;
778 	scn->target_info.target_revision = 0;
779 }
780 
781 /**
782  * hif_ipci_get_soc_info_nopld() - get soc info for ipcie bus for non pld target
783  * @sc: ipci context
784  * @dev: device structure
785  *
786  * Return: none
787  */
hif_ipci_get_soc_info_nopld(struct hif_ipci_softc * sc,struct device * dev)788 static void hif_ipci_get_soc_info_nopld(struct hif_ipci_softc *sc,
789 					struct device *dev)
790 {}
791 
792 /**
793  * hif_is_pld_based_target() - verify if the target is pld based
794  * @sc: ipci context
795  * @device_id: device id
796  *
797  * Return: none
798  */
hif_is_pld_based_target(struct hif_ipci_softc * sc,int device_id)799 static bool hif_is_pld_based_target(struct hif_ipci_softc *sc,
800 				    int device_id)
801 {
802 	if (!pld_have_platform_driver_support(sc->dev))
803 		return false;
804 
805 	switch (device_id) {
806 #ifdef QCA_WIFI_QCA6750
807 	case QCA6750_DEVICE_ID:
808 #endif
809 	case WCN6450_DEVICE_ID:
810 		return true;
811 	}
812 	return false;
813 }
814 
815 /**
816  * hif_ipci_init_deinit_ops_attach() - attach ops for ipci
817  * @sc: ipci context
818  * @device_id: device id
819  *
820  * Return: none
821  */
hif_ipci_init_deinit_ops_attach(struct hif_ipci_softc * sc,int device_id)822 static void hif_ipci_init_deinit_ops_attach(struct hif_ipci_softc *sc,
823 					    int device_id)
824 {
825 	if (hif_is_pld_based_target(sc, device_id))
826 		sc->hif_ipci_get_soc_info = hif_ipci_get_soc_info_pld;
827 	else
828 		sc->hif_ipci_get_soc_info = hif_ipci_get_soc_info_nopld;
829 }
830 
/**
 * hif_ipci_enable_bus() - enable the ipci bus
 * @ol_sc: hif context
 * @dev: device structure
 * @bdev: bus-specific device (unused here)
 * @bid: bus id (unused here)
 * @type: enable type (unused here)
 *
 * Sets the DMA mask, fetches soc info via the attached callback,
 * resolves the hif/target type from the fixed ipci device id, and
 * attaches the register tables.
 *
 * NOTE(review): @ol_sc is passed to HIF_GET_IPCI_SOFTC/
 * GET_HIF_OPAQUE_HDL before the NULL check — confirm those macros are
 * pure casts and do not dereference.
 *
 * Return: QDF_STATUS_SUCCESS or a QDF error status
 */
QDF_STATUS hif_ipci_enable_bus(struct hif_softc *ol_sc,
			       struct device *dev, void *bdev,
			       const struct hif_bus_id *bid,
			       enum hif_enable_type type)
{
	int ret = 0;
	uint32_t hif_type, target_type;
	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(ol_sc);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
	uint16_t revision_id = 0;
	struct hif_target_info *tgt_info;
	int device_id = HIF_IPCI_DEVICE_ID;

	if (!ol_sc) {
		hif_err("hif_ctx is NULL");
		return QDF_STATUS_E_NOMEM;
	}

	ret = qdf_set_dma_coherent_mask(dev,
					DMA_COHERENT_MASK_DEFAULT);
	if (ret) {
		hif_err("Failed to set dma mask error = %d", ret);
		return qdf_status_from_os_return(ret);
	}

	sc->dev = dev;
	tgt_info = hif_get_target_info_handle(hif_hdl);
	hif_ipci_init_deinit_ops_attach(sc, device_id);
	sc->hif_ipci_get_soc_info(sc, dev);
	hif_debug("hif_enable_pci done");

	ret = hif_get_device_type(device_id, revision_id,
				  &hif_type, &target_type);
	if (ret < 0) {
		hif_err("Invalid device id/revision_id");
		return QDF_STATUS_E_ABORTED;
	}
	hif_debug("hif_type = 0x%x, target_type = 0x%x",
		 hif_type, target_type);

	hif_register_tbl_attach(ol_sc, hif_type);
	hif_target_register_tbl_attach(ol_sc, target_type);
	sc->use_register_windowing = false;
	tgt_info->target_type = target_type;

	/* mem_pa must have been populated by the soc-info callback */
	if (!ol_sc->mem_pa) {
		hif_err("BAR0 uninitialized");
		return QDF_STATUS_E_ABORTED;
	}

	return QDF_STATUS_SUCCESS;
}
883 
/**
 * hif_ipci_needs_bmi() - check whether the target needs BMI
 * @scn: hif context
 *
 * SRNG-based targets do not use the BMI boot protocol.
 *
 * Return: true if BMI is required
 */
bool hif_ipci_needs_bmi(struct hif_softc *scn)
{
	return !ce_srng_based(scn);
}
888 
889 #ifdef FORCE_WAKE
/**
 * hif_force_wake_request() - vote to force-wake the device over MHI
 * @hif_handle: opaque hif context
 *
 * Sends a pld force-wake request, then polls for the device to become
 * awake (busy-delay in interrupt context, sleep otherwise) for up to
 * FORCE_WAKE_DELAY_TIMEOUT_MS.  The vote is released on failure.
 *
 * Return: 0 on success, -EINVAL on request or wake failure
 */
int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
{
	uint32_t timeout = 0;
	struct hif_softc *scn = (struct hif_softc *)hif_handle;
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);

	if (pld_force_wake_request(scn->qdf_dev->dev)) {
		hif_err_rl("force wake request send failed");
		return -EINVAL;
	}

	HIF_STATS_INC(ipci_scn, mhi_force_wake_request_vote, 1);
	while (!pld_is_device_awake(scn->qdf_dev->dev) &&
	       timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS) {
		/* cannot sleep in interrupt context; busy-wait instead */
		if (qdf_in_interrupt())
			qdf_mdelay(FORCE_WAKE_DELAY_MS);
		else
			qdf_sleep(FORCE_WAKE_DELAY_MS);

		timeout += FORCE_WAKE_DELAY_MS;
	}

	if (pld_is_device_awake(scn->qdf_dev->dev) <= 0) {
		hif_err("Unable to wake up mhi");
		HIF_STATS_INC(ipci_scn, mhi_force_wake_failure, 1);
		/* drop the vote taken above before reporting failure */
		hif_force_wake_release(hif_handle);
		return -EINVAL;
	}
	HIF_STATS_INC(ipci_scn, mhi_force_wake_success, 1);

	HIF_STATS_INC(ipci_scn, soc_force_wake_success, 1);

	return 0;
}
924 
hif_force_wake_release(struct hif_opaque_softc * hif_handle)925 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
926 {
927 	int ret;
928 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
929 	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
930 
931 	ret = pld_force_wake_release(scn->qdf_dev->dev);
932 	if (ret) {
933 		hif_err("force wake release failure");
934 		HIF_STATS_INC(ipci_scn, mhi_force_wake_release_failure, 1);
935 		return ret;
936 	}
937 
938 	HIF_STATS_INC(ipci_scn, mhi_force_wake_release_success, 1);
939 
940 	HIF_STATS_INC(ipci_scn, soc_force_wake_release_success, 1);
941 	return 0;
942 }
943 
hif_print_ipci_stats(struct hif_ipci_softc * ipci_handle)944 void hif_print_ipci_stats(struct hif_ipci_softc *ipci_handle)
945 {
946 	hif_debug("mhi_force_wake_request_vote: %d",
947 		  ipci_handle->stats.mhi_force_wake_request_vote);
948 	hif_debug("mhi_force_wake_failure: %d",
949 		  ipci_handle->stats.mhi_force_wake_failure);
950 	hif_debug("mhi_force_wake_success: %d",
951 		  ipci_handle->stats.mhi_force_wake_success);
952 	hif_debug("soc_force_wake_register_write_success: %d",
953 		  ipci_handle->stats.soc_force_wake_register_write_success);
954 	hif_debug("soc_force_wake_failure: %d",
955 		  ipci_handle->stats.soc_force_wake_failure);
956 	hif_debug("soc_force_wake_success: %d",
957 		  ipci_handle->stats.soc_force_wake_success);
958 	hif_debug("mhi_force_wake_release_failure: %d",
959 		  ipci_handle->stats.mhi_force_wake_release_failure);
960 	hif_debug("mhi_force_wake_release_success: %d",
961 		  ipci_handle->stats.mhi_force_wake_release_success);
962 	hif_debug("oc_force_wake_release_success: %d",
963 		  ipci_handle->stats.soc_force_wake_release_success);
964 }
965 #endif /* FORCE_WAKE */
966 
967 #if defined(FEATURE_HAL_DELAYED_REG_WRITE) || \
968 	defined(FEATURE_HIF_DELAYED_REG_WRITE)
/**
 * hif_prevent_link_low_power_states() - keep the PCIe link out of L1
 * @hif: opaque hif context
 *
 * Sequence: wait for the EP awake state to reset, vote to prevent L1,
 * then poll until the EP reports awake again.  Each poll phase spins
 * (udelay) for the first EP_VOTE_POLL_TIME_CNT iterations before
 * falling back to sleeping waits.  All failures are counted on the
 * ipci context but deliberately reported as success (return 0) —
 * callers are not expected to handle them.
 *
 * Return: 0 always (failures recorded in ipci stats counters)
 */
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif);
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
	uint32_t start_time = 0, curr_time = 0;
	uint32_t count = 0;

	/* platform does not expose EP awake state: nothing to do */
	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
		return 0;

	if (hif_is_ep_vote_access_disabled(scn)) {
		hif_info_high("EP access disabled in flight skip vote");
		return 0;
	}

	/* wait for the EP awake indication to clear before voting */
	start_time = curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	while (pld_is_pci_ep_awake(scn->qdf_dev->dev) &&
	       curr_time <= start_time + EP_WAKE_RESET_DELAY_TIMEOUT_MS) {
		if (count < EP_VOTE_POLL_TIME_CNT) {
			qdf_udelay(EP_VOTE_POLL_TIME_US);
			count++;
		} else {
			qdf_sleep_us(EP_WAKE_RESET_DELAY_US);
		}
		curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	}


	if (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
		hif_err_rl(" EP state reset is not done to prevent l1");
		ipci_scn->ep_awake_reset_fail++;
		return 0;
	}

	if (pld_prevent_l1(scn->qdf_dev->dev)) {
		hif_err_rl("pld prevent l1 failed");
		ipci_scn->prevent_l1_fail++;
		return 0;
	}

	/* vote taken; now wait for the EP to report awake */
	count = 0;
	ipci_scn->prevent_l1 = true;
	start_time = curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	while (!pld_is_pci_ep_awake(scn->qdf_dev->dev) &&
	       curr_time <= start_time + EP_WAKE_DELAY_TIMEOUT_MS) {
		if (count < EP_VOTE_POLL_TIME_CNT) {
			qdf_udelay(EP_WAKE_RESET_DELAY_US);
			count++;
		} else {
			qdf_sleep_us(EP_WAKE_DELAY_US);
		}

		curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	}

	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) <= 0) {
		hif_err_rl("Unable to wakeup pci ep");
		ipci_scn->ep_awake_set_fail++;
		return  0;
	}

	return 0;
}
1032 
hif_allow_link_low_power_states(struct hif_opaque_softc * hif)1033 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
1034 {
1035 	struct hif_softc *scn = HIF_GET_SOFTC(hif);
1036 	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
1037 
1038 	if (qdf_likely(ipci_scn->prevent_l1)) {
1039 		pld_allow_l1(scn->qdf_dev->dev);
1040 		ipci_scn->prevent_l1 = false;
1041 	}
1042 }
1043 #endif
1044 
1045 #ifndef QCA_WIFI_WCN6450
hif_ipci_enable_grp_irqs(struct hif_softc * scn)1046 int hif_ipci_enable_grp_irqs(struct hif_softc *scn)
1047 {
1048 	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
1049 	int status;
1050 
1051 	if (!ipci_scn->grp_irqs_disabled) {
1052 		hif_err("Unbalanced group IRQs Enable called");
1053 		qdf_assert_always(0);
1054 	}
1055 
1056 	status = hif_apps_grp_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
1057 	if (!status)
1058 		ipci_scn->grp_irqs_disabled = false;
1059 
1060 	return status;
1061 }
1062 
hif_ipci_disable_grp_irqs(struct hif_softc * scn)1063 int hif_ipci_disable_grp_irqs(struct hif_softc *scn)
1064 {
1065 	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
1066 	int status;
1067 
1068 	if (ipci_scn->grp_irqs_disabled) {
1069 		hif_err("Unbalanced group IRQs disable called");
1070 		qdf_assert_always(0);
1071 	}
1072 
1073 	status = hif_apps_grp_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
1074 	if (!status)
1075 		ipci_scn->grp_irqs_disabled = true;
1076 
1077 	return status;
1078 }
1079 #endif
1080