xref: /wlan-dirver/platform/icnss2/main.c (revision 2f4117dcd235d3e2887b05fad04e6d5cf6fabb2a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2015-2020, 2021, The Linux Foundation. All rights reserved.
4  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #define pr_fmt(fmt) "icnss2: " fmt
8 
9 #include <linux/of_address.h>
10 #include <linux/clk.h>
11 #include <linux/iommu.h>
12 #include <linux/export.h>
13 #include <linux/err.h>
14 #include <linux/of.h>
15 #include <linux/of_device.h>
16 #include <linux/init.h>
17 #include <linux/io.h>
18 #include <linux/module.h>
19 #include <linux/kernel.h>
20 #include <linux/debugfs.h>
21 #include <linux/seq_file.h>
22 #include <linux/slab.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/interrupt.h>
25 #include <linux/sched.h>
26 #include <linux/delay.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/thread_info.h>
29 #include <linux/uaccess.h>
30 #include <linux/etherdevice.h>
32 #include <linux/of_irq.h>
33 #include <linux/pm_runtime.h>
34 #include <linux/soc/qcom/qmi.h>
35 #include <linux/sysfs.h>
36 #include <linux/thermal.h>
37 #include <soc/qcom/memory_dump.h>
38 #include <soc/qcom/secure_buffer.h>
39 #include <soc/qcom/socinfo.h>
40 #include <soc/qcom/qcom_ramdump.h>
41 #include <linux/soc/qcom/smem.h>
42 #include <linux/soc/qcom/smem_state.h>
43 #include <linux/remoteproc.h>
44 #include <linux/remoteproc/qcom_rproc.h>
45 #include <linux/soc/qcom/pdr.h>
47 #include <trace/hooks/remoteproc.h>
48 #ifdef SLATE_MODULE_ENABLED
49 #include <linux/soc/qcom/slatecom_interface.h>
50 #include <linux/soc/qcom/slate_events_bridge_intf.h>
51 #include <uapi/linux/slatecom_interface.h>
52 #endif
53 #include "main.h"
54 #include "qmi.h"
55 #include "debug.h"
56 #include "power.h"
57 #include "genl.h"
58 
59 #define MAX_PROP_SIZE			32
60 #define NUM_LOG_PAGES			10
61 #define NUM_LOG_LONG_PAGES		4
62 #define ICNSS_MAGIC			0x5abc5abc
63 
64 #define ICNSS_WLAN_SERVICE_NAME					"wlan/fw"
65 #define ICNSS_WLANPD_NAME					"msm/modem/wlan_pd"
66 #define ICNSS_DEFAULT_FEATURE_MASK 0x01
67 
68 #define ICNSS_M3_SEGMENT(segment)		"wcnss_" segment
69 #define ICNSS_M3_SEGMENT_PHYAREG		"phyareg"
70 #define ICNSS_M3_SEGMENT_PHYA			"phydbg"
71 #define ICNSS_M3_SEGMENT_WMACREG		"wmac0reg"
72 #define ICNSS_M3_SEGMENT_WCSSDBG		"WCSSDBG"
73 #define ICNSS_M3_SEGMENT_PHYAM3			"PHYAPDMEM"
74 
75 #define ICNSS_QUIRKS_DEFAULT		BIT(FW_REJUVENATE_ENABLE)
76 #define ICNSS_MAX_PROBE_CNT		2
77 
78 #define ICNSS_BDF_TYPE_DEFAULT         ICNSS_BDF_ELF
79 
80 #define PROBE_TIMEOUT                 15000
81 #define SMP2P_SOC_WAKE_TIMEOUT        500
82 #ifdef CONFIG_ICNSS2_DEBUG
83 static unsigned long qmi_timeout = 3000;
84 module_param(qmi_timeout, ulong, 0600);
85 #define WLFW_TIMEOUT                    msecs_to_jiffies(qmi_timeout)
86 #else
87 #define WLFW_TIMEOUT                    msecs_to_jiffies(3000)
88 #endif
89 
90 #define ICNSS_RECOVERY_TIMEOUT		60000
91 #define ICNSS_WPSS_SSR_TIMEOUT          5000
92 #define ICNSS_CAL_TIMEOUT		40000
93 
94 static struct icnss_priv *penv;
95 static struct work_struct wpss_loader;
96 static struct work_struct wpss_ssr_work;
97 uint64_t dynamic_feature_mask = ICNSS_DEFAULT_FEATURE_MASK;
98 
99 #define ICNSS_EVENT_PENDING			2989
100 
101 #define ICNSS_EVENT_SYNC			BIT(0)
102 #define ICNSS_EVENT_UNINTERRUPTIBLE		BIT(1)
103 #define ICNSS_EVENT_SYNC_UNINTERRUPTIBLE	(ICNSS_EVENT_UNINTERRUPTIBLE | \
104 						 ICNSS_EVENT_SYNC)
105 #define ICNSS_DMS_QMI_CONNECTION_WAIT_MS 50
106 #define ICNSS_DMS_QMI_CONNECTION_WAIT_RETRY 200
107 
108 #define SMP2P_GET_MAX_RETRY		4
109 #define SMP2P_GET_RETRY_DELAY_MS	500
110 
111 #define RAMDUMP_NUM_DEVICES		256
112 #define ICNSS_RAMDUMP_NAME		"icnss_ramdump"
113 
114 #define WLAN_EN_TEMP_THRESHOLD		5000
115 #define WLAN_EN_DELAY			500
116 
117 #define ICNSS_RPROC_LEN			10
118 static DEFINE_IDA(rd_minor_id);
119 
120 enum icnss_pdr_cause_index {
121 	ICNSS_FW_CRASH,
122 	ICNSS_ROOT_PD_CRASH,
123 	ICNSS_ROOT_PD_SHUTDOWN,
124 	ICNSS_HOST_ERROR,
125 };
126 
127 static const char * const icnss_pdr_cause[] = {
128 	[ICNSS_FW_CRASH] = "FW crash",
129 	[ICNSS_ROOT_PD_CRASH] = "Root PD crashed",
130 	[ICNSS_ROOT_PD_SHUTDOWN] = "Root PD shutdown",
131 	[ICNSS_HOST_ERROR] = "Host error",
132 };
133 
134 static void icnss_set_plat_priv(struct icnss_priv *priv)
135 {
136 	penv = priv;
137 }
138 
139 static struct icnss_priv *icnss_get_plat_priv(void)
140 {
141 	return penv;
142 }
143 
144 static inline void icnss_wpss_unload(struct icnss_priv *priv)
145 {
146 	if (priv && priv->rproc) {
147 		rproc_shutdown(priv->rproc);
148 		rproc_put(priv->rproc);
149 		priv->rproc = NULL;
150 	}
151 }
152 
153 static ssize_t icnss_sysfs_store(struct kobject *kobj,
154 				 struct kobj_attribute *attr,
155 				 const char *buf, size_t count)
156 {
157 	struct icnss_priv *priv = icnss_get_plat_priv();
158 
159 	if (!priv)
160 		return count;
161 
162 	icnss_pr_dbg("Received shutdown indication");
163 
164 	atomic_set(&priv->is_shutdown, true);
165 	if ((priv->wpss_supported || priv->rproc_fw_download) &&
166 	    priv->device_id == ADRASTEA_DEVICE_ID)
167 		icnss_wpss_unload(priv);
168 	return count;
169 }
170 
171 static struct kobj_attribute icnss_sysfs_attribute =
172 __ATTR(shutdown, 0660, NULL, icnss_sysfs_store);
173 
174 static void icnss_pm_stay_awake(struct icnss_priv *priv)
175 {
176 	if (atomic_inc_return(&priv->pm_count) != 1)
177 		return;
178 
179 	icnss_pr_vdbg("PM stay awake, state: 0x%lx, count: %d\n", priv->state,
180 		     atomic_read(&priv->pm_count));
181 
182 	pm_stay_awake(&priv->pdev->dev);
183 
184 	priv->stats.pm_stay_awake++;
185 }
186 
187 static void icnss_pm_relax(struct icnss_priv *priv)
188 {
189 	int r = atomic_dec_return(&priv->pm_count);
190 
191 	WARN_ON(r < 0);
192 
193 	if (r != 0)
194 		return;
195 
196 	icnss_pr_vdbg("PM relax, state: 0x%lx, count: %d\n", priv->state,
197 		     atomic_read(&priv->pm_count));
198 
199 	pm_relax(&priv->pdev->dev);
200 	priv->stats.pm_relax++;
201 }
202 
203 char *icnss_driver_event_to_str(enum icnss_driver_event_type type)
204 {
205 	switch (type) {
206 	case ICNSS_DRIVER_EVENT_SERVER_ARRIVE:
207 		return "SERVER_ARRIVE";
208 	case ICNSS_DRIVER_EVENT_SERVER_EXIT:
209 		return "SERVER_EXIT";
210 	case ICNSS_DRIVER_EVENT_FW_READY_IND:
211 		return "FW_READY";
212 	case ICNSS_DRIVER_EVENT_REGISTER_DRIVER:
213 		return "REGISTER_DRIVER";
214 	case ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
215 		return "UNREGISTER_DRIVER";
216 	case ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN:
217 		return "PD_SERVICE_DOWN";
218 	case ICNSS_DRIVER_EVENT_FW_EARLY_CRASH_IND:
219 		return "FW_EARLY_CRASH_IND";
220 	case ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
221 		return "IDLE_SHUTDOWN";
222 	case ICNSS_DRIVER_EVENT_IDLE_RESTART:
223 		return "IDLE_RESTART";
224 	case ICNSS_DRIVER_EVENT_FW_INIT_DONE_IND:
225 		return "FW_INIT_DONE";
226 	case ICNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
227 		return "QDSS_TRACE_REQ_MEM";
228 	case ICNSS_DRIVER_EVENT_QDSS_TRACE_SAVE:
229 		return "QDSS_TRACE_SAVE";
230 	case ICNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
231 		return "QDSS_TRACE_FREE";
232 	case ICNSS_DRIVER_EVENT_M3_DUMP_UPLOAD_REQ:
233 		return "M3_DUMP_UPLOAD";
234 	case ICNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA:
235 		return "QDSS_TRACE_REQ_DATA";
236 	case ICNSS_DRIVER_EVENT_SUBSYS_RESTART_LEVEL:
237 		return "SUBSYS_RESTART_LEVEL";
238 	case ICNSS_DRIVER_EVENT_MAX:
239 		return "EVENT_MAX";
240 	}
241 
242 	return "UNKNOWN";
243 }
244 
245 char *icnss_soc_wake_event_to_str(enum icnss_soc_wake_event_type type)
246 {
247 	switch (type) {
248 	case ICNSS_SOC_WAKE_REQUEST_EVENT:
249 		return "SOC_WAKE_REQUEST";
250 	case ICNSS_SOC_WAKE_RELEASE_EVENT:
251 		return "SOC_WAKE_RELEASE";
252 	case ICNSS_SOC_WAKE_EVENT_MAX:
253 		return "SOC_EVENT_MAX";
254 	}
255 
256 	return "UNKNOWN";
257 }
258 
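/* Queue a driver event on priv->event_wq for icnss_driver_event_work().
 * With ICNSS_EVENT_SYNC in flags the caller blocks until the handler
 * completes the event (interruptibly unless ICNSS_EVENT_UNINTERRUPTIBLE
 * is also set); a wakeup source is held while the event is pending.
 */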
259 int icnss_driver_event_post(struct icnss_priv *priv,
260 			    enum icnss_driver_event_type type,
261 			    u32 flags, void *data)
262 {
263 	struct icnss_driver_event *event;
264 	unsigned long irq_flags;
265 	gfp_t gfp = GFP_KERNEL;
266 	int ret = 0;
267 
268 	if (!priv)
269 		return -ENODEV;
270 
271 	icnss_pr_dbg("Posting event: %s(%d), %s, flags: 0x%x, state: 0x%lx\n",
272 		     icnss_driver_event_to_str(type), type, current->comm,
273 		     flags, priv->state);
274 
275 	if (type >= ICNSS_DRIVER_EVENT_MAX) {
276 		icnss_pr_err("Invalid Event type: %d, can't post", type);
277 		return -EINVAL;
278 	}
279 
280 	if (in_interrupt() || irqs_disabled())
281 		gfp = GFP_ATOMIC;
282 
283 	event = kzalloc(sizeof(*event), gfp);
284 	if (!event)
285 		return -ENOMEM;
286 
287 	icnss_pm_stay_awake(priv);
288 
289 	event->type = type;
290 	event->data = data;
291 	init_completion(&event->complete);
292 	event->ret = ICNSS_EVENT_PENDING;
293 	event->sync = !!(flags & ICNSS_EVENT_SYNC);
294 
295 	spin_lock_irqsave(&priv->event_lock, irq_flags);
296 	list_add_tail(&event->list, &priv->event_list);
297 	spin_unlock_irqrestore(&priv->event_lock, irq_flags);
298 
299 	priv->stats.events[type].posted++;
300 	queue_work(priv->event_wq, &priv->event_work);
301 
302 	if (!(flags & ICNSS_EVENT_SYNC))
303 		goto out;
304 
305 	if (flags & ICNSS_EVENT_UNINTERRUPTIBLE)
306 		wait_for_completion(&event->complete);
307 	else
308 		ret = wait_for_completion_interruptible(&event->complete);
309 
310 	icnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
311 		     icnss_driver_event_to_str(type), type, priv->state, ret,
312 		     event->ret);
313 
314 	spin_lock_irqsave(&priv->event_lock, irq_flags);
315 	if (ret == -ERESTARTSYS && event->ret == ICNSS_EVENT_PENDING) {
316 		event->sync = false;
317 		spin_unlock_irqrestore(&priv->event_lock, irq_flags);
318 		ret = -EINTR;
319 		goto out;
320 	}
321 	spin_unlock_irqrestore(&priv->event_lock, irq_flags);
322 
323 	ret = event->ret;
324 	kfree(event);
325 
326 out:
327 	icnss_pm_relax(priv);
328 	return ret;
329 }
330 
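/* Same posting scheme as icnss_driver_event_post(), but for SOC wake
 * request/release events serviced by icnss_soc_wake_msg_work() on
 * priv->soc_wake_wq.
 */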
331 int icnss_soc_wake_event_post(struct icnss_priv *priv,
332 			      enum icnss_soc_wake_event_type type,
333 			      u32 flags, void *data)
334 {
335 	struct icnss_soc_wake_event *event;
336 	unsigned long irq_flags;
337 	gfp_t gfp = GFP_KERNEL;
338 	int ret = 0;
339 
340 	if (!priv)
341 		return -ENODEV;
342 
343 	icnss_pr_soc_wake("Posting event: %s(%d), %s, flags: 0x%x, state: 0x%lx\n",
344 			  icnss_soc_wake_event_to_str(type),
345 			  type, current->comm, flags, priv->state);
346 
347 	if (type >= ICNSS_SOC_WAKE_EVENT_MAX) {
348 		icnss_pr_err("Invalid Event type: %d, can't post", type);
349 		return -EINVAL;
350 	}
351 
352 	if (in_interrupt() || irqs_disabled())
353 		gfp = GFP_ATOMIC;
354 
355 	event = kzalloc(sizeof(*event), gfp);
356 	if (!event)
357 		return -ENOMEM;
358 
359 	icnss_pm_stay_awake(priv);
360 
361 	event->type = type;
362 	event->data = data;
363 	init_completion(&event->complete);
364 	event->ret = ICNSS_EVENT_PENDING;
365 	event->sync = !!(flags & ICNSS_EVENT_SYNC);
366 
367 	spin_lock_irqsave(&priv->soc_wake_msg_lock, irq_flags);
368 	list_add_tail(&event->list, &priv->soc_wake_msg_list);
369 	spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
370 
371 	priv->stats.soc_wake_events[type].posted++;
372 	queue_work(priv->soc_wake_wq, &priv->soc_wake_msg_work);
373 
374 	if (!(flags & ICNSS_EVENT_SYNC))
375 		goto out;
376 
377 	if (flags & ICNSS_EVENT_UNINTERRUPTIBLE)
378 		wait_for_completion(&event->complete);
379 	else
380 		ret = wait_for_completion_interruptible(&event->complete);
381 
382 	icnss_pr_soc_wake("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
383 			  icnss_soc_wake_event_to_str(type),
384 			  type, priv->state, ret, event->ret);
385 
386 	spin_lock_irqsave(&priv->soc_wake_msg_lock, irq_flags);
387 	if (ret == -ERESTARTSYS && event->ret == ICNSS_EVENT_PENDING) {
388 		event->sync = false;
389 		spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
390 		ret = -EINTR;
391 		goto out;
392 	}
393 	spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
394 
395 	ret = event->ret;
396 	kfree(event);
397 
398 out:
399 	icnss_pm_relax(priv);
400 	return ret;
401 }
402 
403 bool icnss_is_fw_ready(void)
404 {
405 	if (!penv)
406 		return false;
407 	else
408 		return test_bit(ICNSS_FW_READY, &penv->state);
409 }
410 EXPORT_SYMBOL(icnss_is_fw_ready);
411 
412 void icnss_block_shutdown(bool status)
413 {
414 	if (!penv)
415 		return;
416 
417 	if (status) {
418 		set_bit(ICNSS_BLOCK_SHUTDOWN, &penv->state);
419 		reinit_completion(&penv->unblock_shutdown);
420 	} else {
421 		clear_bit(ICNSS_BLOCK_SHUTDOWN, &penv->state);
422 		complete(&penv->unblock_shutdown);
423 	}
424 }
425 EXPORT_SYMBOL(icnss_block_shutdown);
426 
427 bool icnss_is_fw_down(void)
428 {
430 	struct icnss_priv *priv = icnss_get_plat_priv();
431 
432 	if (!priv)
433 		return false;
434 
435 	return test_bit(ICNSS_FW_DOWN, &priv->state) ||
436 		test_bit(ICNSS_PD_RESTART, &priv->state) ||
437 		test_bit(ICNSS_REJUVENATE, &priv->state);
438 }
439 EXPORT_SYMBOL(icnss_is_fw_down);
440 
441 unsigned long icnss_get_device_config(void)
442 {
443 	struct icnss_priv *priv = icnss_get_plat_priv();
444 
445 	if (!priv)
446 		return 0;
447 
448 	return priv->device_config;
449 }
450 EXPORT_SYMBOL(icnss_get_device_config);
451 
452 bool icnss_is_rejuvenate(void)
453 {
454 	if (!penv)
455 		return false;
456 	else
457 		return test_bit(ICNSS_REJUVENATE, &penv->state);
458 }
459 EXPORT_SYMBOL(icnss_is_rejuvenate);
460 
461 bool icnss_is_pdr(void)
462 {
463 	if (!penv)
464 		return false;
465 	else
466 		return test_bit(ICNSS_PDR, &penv->state);
467 }
468 EXPORT_SYMBOL(icnss_is_pdr);
469 
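/* Send a message to FW over the given outbound SMP2P entry: a rolling
 * sequence number is placed at ICNSS_SMEM_SEQ_NO_POS with msg_id in the
 * low bits, while ICNSS_RESET_MSG clears the entry instead. SOC wake
 * request/release additionally waits up to SMP2P_SOC_WAKE_TIMEOUT ms for
 * the FW ack (fw_soc_wake_ack_handler()).
 */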
470 static int icnss_send_smp2p(struct icnss_priv *priv,
471 			    enum icnss_smp2p_msg_id msg_id,
472 			    enum smp2p_out_entry smp2p_entry)
473 {
474 	unsigned int value = 0;
475 	int ret;
476 
477 	if (!priv || IS_ERR(priv->smp2p_info[smp2p_entry].smem_state))
478 		return -EINVAL;
479 
480 	/* No need to check FW_DOWN for ICNSS_RESET_MSG */
481 	if (msg_id == ICNSS_RESET_MSG) {
482 		priv->smp2p_info[smp2p_entry].seq = 0;
483 		ret = qcom_smem_state_update_bits(
484 				priv->smp2p_info[smp2p_entry].smem_state,
485 				ICNSS_SMEM_VALUE_MASK,
486 				0);
487 		if (ret)
488 			icnss_pr_err("Error in SMP2P send, ret: %d, %s\n",
489 				     ret, icnss_smp2p_str[smp2p_entry]);
490 
491 		return ret;
492 	}
493 
494 	if (test_bit(ICNSS_FW_DOWN, &priv->state) ||
495 	    !test_bit(ICNSS_FW_READY, &priv->state)) {
496 		icnss_pr_smp2p("FW down, ignoring SMP2P send, state: 0x%lx\n",
497 			       priv->state);
498 		return -EINVAL;
499 	}
500 
501 	value |= priv->smp2p_info[smp2p_entry].seq++;
502 	value <<= ICNSS_SMEM_SEQ_NO_POS;
503 	value |= msg_id;
504 
505 	icnss_pr_smp2p("Sending SMP2P value: 0x%X\n", value);
506 
507 	if (msg_id == ICNSS_SOC_WAKE_REQ || msg_id == ICNSS_SOC_WAKE_REL)
508 		reinit_completion(&penv->smp2p_soc_wake_wait);
509 
510 	ret = qcom_smem_state_update_bits(
511 			priv->smp2p_info[smp2p_entry].smem_state,
512 			ICNSS_SMEM_VALUE_MASK,
513 			value);
514 	if (ret) {
515 		icnss_pr_smp2p("Error in SMP2P send ret: %d, %s\n", ret,
516 			       icnss_smp2p_str[smp2p_entry]);
517 	} else {
518 		if (msg_id == ICNSS_SOC_WAKE_REQ ||
519 		    msg_id == ICNSS_SOC_WAKE_REL) {
520 			if (!wait_for_completion_timeout(
521 					&priv->smp2p_soc_wake_wait,
522 					msecs_to_jiffies(SMP2P_SOC_WAKE_TIMEOUT))) {
523 				icnss_pr_err("SMP2P SOC wake ack timeout, msg: %d, %s\n", msg_id,
524 					     icnss_smp2p_str[smp2p_entry]);
525 				if (!test_bit(ICNSS_FW_DOWN, &priv->state))
526 					ICNSS_ASSERT(0);
527 			}
528 		}
529 	}
530 
531 	return ret;
532 }
533 
534 bool icnss_is_low_power(void)
535 {
536 	if (!penv)
537 		return false;
538 	else
539 		return test_bit(ICNSS_LOW_POWER, &penv->state);
540 }
541 EXPORT_SYMBOL(icnss_is_low_power);
542 
543 static irqreturn_t fw_error_fatal_handler(int irq, void *ctx)
544 {
545 	struct icnss_priv *priv = ctx;
546 
547 	if (priv)
548 		priv->force_err_fatal = true;
549 
550 	icnss_pr_err("Received force error fatal request from FW\n");
551 
552 	return IRQ_HANDLED;
553 }
554 
555 static irqreturn_t fw_crash_indication_handler(int irq, void *ctx)
556 {
557 	struct icnss_priv *priv = ctx;
558 	struct icnss_uevent_fw_down_data fw_down_data = {0};
559 
560 	icnss_pr_err("Received early crash indication from FW\n");
561 
562 	if (priv) {
563 		if (priv->wpss_self_recovery_enabled)
564 			mod_timer(&priv->wpss_ssr_timer,
565 				  jiffies + msecs_to_jiffies(ICNSS_WPSS_SSR_TIMEOUT));
566 
567 		set_bit(ICNSS_FW_DOWN, &priv->state);
568 		icnss_ignore_fw_timeout(true);
569 
570 		if (test_bit(ICNSS_FW_READY, &priv->state)) {
571 			clear_bit(ICNSS_FW_READY, &priv->state);
572 			fw_down_data.crashed = true;
573 			icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN,
574 						 &fw_down_data);
575 		}
576 	}
577 
578 	icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_FW_EARLY_CRASH_IND,
579 				0, NULL);
580 
581 	return IRQ_HANDLED;
582 }
583 
584 static void register_fw_error_notifications(struct device *dev)
585 {
586 	struct icnss_priv *priv = dev_get_drvdata(dev);
587 	struct device_node *dev_node;
588 	int irq = 0, ret = 0;
589 
590 	if (!priv)
591 		return;
592 
593 	dev_node = of_find_node_by_name(NULL, "qcom,smp2p_map_wlan_1_in");
594 	if (!dev_node) {
595 		icnss_pr_err("Failed to get smp2p node for force-fatal-error\n");
596 		return;
597 	}
598 
599 	icnss_pr_dbg("smp2p node->name=%s\n", dev_node->name);
600 
601 	if (strcmp("qcom,smp2p_map_wlan_1_in", dev_node->name) == 0) {
602 		ret = irq = of_irq_get_byname(dev_node,
603 					      "qcom,smp2p-force-fatal-error");
604 		if (ret < 0) {
605 			icnss_pr_err("Unable to get force-fatal-error irq %d\n",
606 				     irq);
607 			return;
608 		}
609 	}
610 
611 	ret = devm_request_threaded_irq(dev, irq, NULL, fw_error_fatal_handler,
612 					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
613 					"wlanfw-err", priv);
614 	if (ret < 0) {
615 		icnss_pr_err("Unable to register error fatal IRQ handler, irq: %d, ret: %d\n",
616 			     irq, ret);
617 		return;
618 	}
619 	icnss_pr_dbg("FW force error fatal handler registered irq = %d\n", irq);
620 	priv->fw_error_fatal_irq = irq;
621 }
622 
623 static void register_early_crash_notifications(struct device *dev)
624 {
625 	struct icnss_priv *priv = dev_get_drvdata(dev);
626 	struct device_node *dev_node;
627 	int irq = 0, ret = 0;
628 
629 	if (!priv)
630 		return;
631 
632 	dev_node = of_find_node_by_name(NULL, "qcom,smp2p_map_wlan_1_in");
633 	if (!dev_node) {
634 		icnss_pr_err("Failed to get smp2p node for early-crash-ind\n");
635 		return;
636 	}
637 
638 	icnss_pr_dbg("smp2p node->name=%s\n", dev_node->name);
639 
640 	if (strcmp("qcom,smp2p_map_wlan_1_in", dev_node->name) == 0) {
641 		ret = irq = of_irq_get_byname(dev_node,
642 					      "qcom,smp2p-early-crash-ind");
643 		if (ret < 0) {
644 			icnss_pr_err("Unable to get early-crash-ind irq %d\n",
645 				     irq);
646 			return;
647 		}
648 	}
649 
650 	ret = devm_request_threaded_irq(dev, irq, NULL,
651 					fw_crash_indication_handler,
652 					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
653 					"wlanfw-early-crash-ind", priv);
654 	if (ret < 0) {
655 		icnss_pr_err("Unable to register early crash indication IRQ handler, irq: %d, ret: %d\n",
656 			     irq, ret);
657 		return;
658 	}
659 	icnss_pr_dbg("FW crash indication handler registered irq = %d\n", irq);
660 	priv->fw_early_crash_irq = irq;
661 }
662 
663 static int icnss_get_temperature(struct icnss_priv *priv, int *temp)
664 {
665 	struct thermal_zone_device *thermal_dev;
666 	const char *tsens;
667 	int ret;
668 
669 	ret = of_property_read_string(priv->pdev->dev.of_node,
670 				      "tsens",
671 				      &tsens);
672 	if (ret)
673 		return ret;
674 
675 	icnss_pr_dbg("Thermal Sensor is %s\n", tsens);
676 	thermal_dev = thermal_zone_get_zone_by_name(tsens);
677 	if (IS_ERR(thermal_dev)) {
678 		icnss_pr_err("Failed to get thermal zone, ret: %ld\n",
679 			     PTR_ERR(thermal_dev));
680 		return PTR_ERR(thermal_dev);
681 	}
682 
683 	ret = thermal_zone_get_temp(thermal_dev, temp);
684 	if (ret)
685 		icnss_pr_err("Failed to get temperature, ret: %d\n", ret);
686 
687 	return ret;
688 }
689 
690 static irqreturn_t fw_soc_wake_ack_handler(int irq, void *ctx)
691 {
692 	struct icnss_priv *priv = ctx;
693 
694 	if (priv)
695 		complete(&priv->smp2p_soc_wake_wait);
696 
697 	return IRQ_HANDLED;
698 }
699 
700 static void register_soc_wake_notif(struct device *dev)
701 {
702 	struct icnss_priv *priv = dev_get_drvdata(dev);
703 	struct device_node *dev_node;
704 	int irq = 0, ret = 0;
705 
706 	if (!priv)
707 		return;
708 
709 	dev_node = of_find_node_by_name(NULL, "qcom,smp2p_map_wlan_2_in");
710 	if (!dev_node) {
711 		icnss_pr_err("Failed to get smp2p node for soc-wake-ack\n");
712 		return;
713 	}
714 
715 	icnss_pr_dbg("smp2p node->name=%s\n", dev_node->name);
716 
717 	if (strcmp("qcom,smp2p_map_wlan_2_in", dev_node->name) == 0) {
718 		ret = irq = of_irq_get_byname(dev_node,
719 					      "qcom,smp2p-soc-wake-ack");
720 		if (ret < 0) {
721 			icnss_pr_err("Unable to get soc wake ack irq %d\n",
722 				     irq);
723 			return;
724 		}
725 	}
726 
727 	ret = devm_request_threaded_irq(dev, irq, NULL,
728 					fw_soc_wake_ack_handler,
729 					IRQF_ONESHOT | IRQF_TRIGGER_RISING |
730 					IRQF_TRIGGER_FALLING,
731 					"wlanfw-soc-wake-ack", priv);
732 	if (ret < 0) {
733 		icnss_pr_err("Unable to register SOC wake ACK IRQ handler, irq: %d, ret: %d\n",
734 			     irq, ret);
735 		return;
736 	}
737 	icnss_pr_dbg("FW SOC Wake ACK handler registered irq = %d\n", irq);
738 	priv->fw_soc_wake_ack_irq = irq;
739 }
740 
741 
742 int icnss_call_driver_uevent(struct icnss_priv *priv,
743 				    enum icnss_uevent uevent, void *data)
744 {
745 	struct icnss_uevent_data uevent_data;
746 
747 	if (!priv->ops || !priv->ops->uevent)
748 		return 0;
749 
750 	icnss_pr_dbg("Calling driver uevent state: 0x%lx, uevent: %d\n",
751 		     priv->state, uevent);
752 
753 	uevent_data.uevent = uevent;
754 	uevent_data.data = data;
755 
756 	return priv->ops->uevent(&priv->pdev->dev, &uevent_data);
757 }
758 
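/* Fetch the WLAN MAC address from the DMS QMI service and, when valid,
 * push it to FW. If the DT property use-nv-mac is set, the DMS query is
 * retried up to ICNSS_DMS_QMI_CONNECTION_WAIT_RETRY times and the driver
 * asserts if a provisioned MAC still cannot be obtained.
 */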
759 static int icnss_setup_dms_mac(struct icnss_priv *priv)
760 {
761 	int i;
762 	int ret = 0;
763 
764 	ret = icnss_qmi_get_dms_mac(priv);
765 	if (ret == 0 && priv->dms.mac_valid)
766 		goto qmi_send;
767 
768 	/* The DT property use-nv-mac forces use of the DMS MAC address for
769 	 * WLAN, so assert if it cannot be fetched from DMS even after retries.
770 	 */
771 	if (priv->use_nv_mac) {
772 		for (i = 0; i < ICNSS_DMS_QMI_CONNECTION_WAIT_RETRY; i++) {
773 			if (priv->dms.mac_valid)
774 				break;
775 
776 			ret = icnss_qmi_get_dms_mac(priv);
777 			if (ret != -EAGAIN)
778 				break;
779 			msleep(ICNSS_DMS_QMI_CONNECTION_WAIT_MS);
780 		}
781 		if (!priv->dms.nv_mac_not_prov && !priv->dms.mac_valid) {
782 			icnss_pr_err("Unable to get MAC from DMS after retries\n");
783 			ICNSS_ASSERT(0);
784 			return -EINVAL;
785 		}
786 	}
787 qmi_send:
788 	if (priv->dms.mac_valid)
789 		ret = icnss_wlfw_wlan_mac_req_send_sync(priv,
790 							priv->dms.mac,
791 							ARRAY_SIZE(priv->dms.mac));
792 	return ret;
793 }
794 
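/* Look up the outbound SMP2P smem state for the given entry, retrying up
 * to SMP2P_GET_MAX_RETRY times with SMP2P_GET_RETRY_DELAY_MS between
 * attempts before asserting.
 */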
795 static void icnss_get_smp2p_info(struct icnss_priv *priv,
796 				 enum smp2p_out_entry smp2p_entry)
797 {
798 	int retry = 0;
799 	int error;
800 
801 	if (priv->smp2p_info[smp2p_entry].smem_state)
802 		return;
803 retry:
804 	priv->smp2p_info[smp2p_entry].smem_state =
805 		qcom_smem_state_get(&priv->pdev->dev,
806 				    icnss_smp2p_str[smp2p_entry],
807 				    &priv->smp2p_info[smp2p_entry].smem_bit);
808 	if (IS_ERR(priv->smp2p_info[smp2p_entry].smem_state)) {
809 		if (retry++ < SMP2P_GET_MAX_RETRY) {
810 			error = PTR_ERR(priv->smp2p_info[smp2p_entry].smem_state);
811 			icnss_pr_err("Failed to get smem state, ret: %d, Entry: %s\n",
812 				     error, icnss_smp2p_str[smp2p_entry]);
813 			msleep(SMP2P_GET_RETRY_DELAY_MS);
814 			goto retry;
815 		}
816 		ICNSS_ASSERT(0);
817 		return;
818 	}
819 
820 	icnss_pr_dbg("Got smem state, Entry: %s\n", icnss_smp2p_str[smp2p_entry]);
821 }
822 
823 static inline
824 void icnss_set_wlan_en_delay(struct icnss_priv *priv)
825 {
826 	if (priv->wlan_en_delay_ms_user > WLAN_EN_DELAY) {
827 		priv->wlan_en_delay_ms = priv->wlan_en_delay_ms_user;
828 	} else {
829 		priv->wlan_en_delay_ms = WLAN_EN_DELAY;
830 	}
831 }
832 
833 static enum wlfw_wlan_rf_subtype_v01 icnss_rf_subtype_value_to_type(u32 val)
834 {
835 	switch (val) {
836 	case WLAN_RF_SLATE:
837 		return WLFW_WLAN_RF_SLATE_V01;
838 	case WLAN_RF_APACHE:
839 		return WLFW_WLAN_RF_APACHE_V01;
840 	default:
841 		return WLFW_WLAN_RF_SUBTYPE_MAX_VAL_V01;
842 	}
843 }
844 
845 #ifdef SLATE_MODULE_ENABLED
846 static void icnss_send_wlan_boot_init(void)
847 {
848 	send_wlan_state(GMI_MGR_WLAN_BOOT_INIT);
849 	icnss_pr_info("sent wlan boot init command\n");
850 }
851 
852 static void icnss_send_wlan_boot_complete(void)
853 {
854 	send_wlan_state(GMI_MGR_WLAN_BOOT_COMPLETE);
855 	icnss_pr_info("sent wlan boot complete command\n");
856 }
857 
858 static int icnss_wait_for_slate_complete(struct icnss_priv *priv)
859 {
860 	if (!test_bit(ICNSS_SLATE_UP, &priv->state)) {
861 		reinit_completion(&priv->slate_boot_complete);
862 		icnss_pr_err("Waiting for slate boot up notification, 0x%lx\n",
863 			     priv->state);
864 		wait_for_completion(&priv->slate_boot_complete);
865 	}
866 
867 	if (!test_bit(ICNSS_SLATE_UP, &priv->state))
868 		return -EINVAL;
869 
870 	icnss_send_wlan_boot_init();
871 
872 	return 0;
873 }
874 #else
875 static void icnss_send_wlan_boot_complete(void)
876 {
877 }
878 
879 static int icnss_wait_for_slate_complete(struct icnss_priv *priv)
880 {
881 	return 0;
882 }
883 #endif
884 
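/* Handle WLFW QMI server arrival: connect to the FW service, exchange
 * indication-register/host-cap/capability messages, map the BAR and MHI
 * state regions on WCN6750/WCN6450, download BDF/regdb files, and hook up
 * the SMP2P entries and FW error interrupts.
 */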
885 static int icnss_driver_event_server_arrive(struct icnss_priv *priv,
886 						 void *data)
887 {
888 	int ret = 0;
889 	int temp = 0;
890 	bool ignore_assert = false;
891 	enum wlfw_wlan_rf_subtype_v01 rf_subtype;
892 
893 	if (!priv)
894 		return -ENODEV;
895 
896 	set_bit(ICNSS_WLFW_EXISTS, &priv->state);
897 	clear_bit(ICNSS_FW_DOWN, &priv->state);
898 	clear_bit(ICNSS_FW_READY, &priv->state);
899 
900 	if (priv->is_slate_rfa) {
901 		ret = icnss_wait_for_slate_complete(priv);
902 		if (ret == -EINVAL) {
903 			icnss_pr_err("Wait for slate boot completion failed\n");
904 			return ret;
905 		}
906 	}
907 
908 	icnss_ignore_fw_timeout(false);
909 
910 	if (test_bit(ICNSS_WLFW_CONNECTED, &priv->state)) {
911 		icnss_pr_err("QMI Server already in Connected State\n");
912 		ICNSS_ASSERT(0);
913 	}
914 
915 	ret = icnss_connect_to_fw_server(priv, data);
916 	if (ret)
917 		goto fail;
918 
919 	set_bit(ICNSS_WLFW_CONNECTED, &priv->state);
920 
921 	ret = wlfw_ind_register_send_sync_msg(priv);
922 	if (ret < 0) {
923 		if (ret == -EALREADY) {
924 			ret = 0;
925 			goto qmi_registered;
926 		}
927 		ignore_assert = true;
928 		goto fail;
929 	}
930 
931 	if (priv->is_rf_subtype_valid) {
932 		rf_subtype = icnss_rf_subtype_value_to_type(priv->rf_subtype);
933 		if (rf_subtype != WLFW_WLAN_RF_SUBTYPE_MAX_VAL_V01) {
934 			ret = wlfw_wlan_hw_init_cfg_msg(priv, rf_subtype);
935 			if (ret < 0)
936 				icnss_pr_dbg("Sending rf_subtype failed ret %d\n",
937 					     ret);
938 		} else {
939 			icnss_pr_dbg("Invalid rf subtype %d in DT\n",
940 				     priv->rf_subtype);
941 		}
942 	}
943 
944 	if (priv->device_id == WCN6750_DEVICE_ID ||
945 	    priv->device_id == WCN6450_DEVICE_ID) {
946 		if (!icnss_get_temperature(priv, &temp)) {
947 			icnss_pr_dbg("Temperature: %d\n", temp);
948 			if (temp < WLAN_EN_TEMP_THRESHOLD)
949 				icnss_set_wlan_en_delay(priv);
950 		}
951 
952 		ret = wlfw_host_cap_send_sync(priv);
953 		if (ret < 0)
954 			goto fail;
955 	}
956 
957 	if (priv->device_id == ADRASTEA_DEVICE_ID) {
958 		if (!priv->msa_va) {
959 			icnss_pr_err("Invalid MSA address\n");
960 			ret = -EINVAL;
961 			goto fail;
962 		}
963 
964 		ret = wlfw_msa_mem_info_send_sync_msg(priv);
965 		if (ret < 0) {
966 			ignore_assert = true;
967 			goto fail;
968 		}
969 
970 		ret = wlfw_msa_ready_send_sync_msg(priv);
971 		if (ret < 0) {
972 			ignore_assert = true;
973 			goto fail;
974 		}
975 	}
976 
977 	if (priv->device_id == WCN6450_DEVICE_ID)
978 		icnss_hw_power_off(priv);
979 
980 	ret = wlfw_cap_send_sync_msg(priv);
981 	if (ret < 0) {
982 		ignore_assert = true;
983 		goto fail;
984 	}
985 
986 	ret = icnss_hw_power_on(priv);
987 	if (ret)
988 		goto fail;
989 
990 	if (priv->device_id == WCN6750_DEVICE_ID ||
991 	    priv->device_id == WCN6450_DEVICE_ID) {
992 		ret = wlfw_device_info_send_msg(priv);
993 		if (ret < 0) {
994 			ignore_assert = true;
995 			goto device_info_failure;
996 		}
997 
998 		priv->mem_base_va = devm_ioremap(&priv->pdev->dev,
999 						 priv->mem_base_pa,
1000 						 priv->mem_base_size);
1001 		if (!priv->mem_base_va) {
1002 			icnss_pr_err("Ioremap failed for bar address\n");
			ret = -ENOMEM;
1003 			goto device_info_failure;
1004 		}
1005 
1006 		icnss_pr_dbg("Non-Secured Bar Address pa: %pa, va: 0x%pK\n",
1007 			     &priv->mem_base_pa,
1008 			     priv->mem_base_va);
1009 
1010 		if (priv->mhi_state_info_pa)
1011 			priv->mhi_state_info_va = devm_ioremap(&priv->pdev->dev,
1012 						priv->mhi_state_info_pa,
1013 						PAGE_SIZE);
1014 		if (!priv->mhi_state_info_va)
1015 			icnss_pr_err("Ioremap failed for MHI info address\n");
1016 
1017 		icnss_pr_dbg("MHI state info Address pa: %pa, va: 0x%pK\n",
1018 			     &priv->mhi_state_info_pa,
1019 			     priv->mhi_state_info_va);
1020 	}
1021 
1022 	if (priv->bdf_download_support) {
1023 		icnss_wlfw_bdf_dnld_send_sync(priv, ICNSS_BDF_REGDB);
1024 
1025 		ret = icnss_wlfw_bdf_dnld_send_sync(priv,
1026 						    priv->ctrl_params.bdf_type);
1027 		if (ret < 0)
1028 			goto device_info_failure;
1029 	}
1030 
1031 	if (priv->device_id == WCN6450_DEVICE_ID) {
1032 		ret = icnss_wlfw_qdss_dnld_send_sync(priv);
1033 		if (ret < 0)
1034 			icnss_pr_info("Failed to download qdss config file for WCN6450, ret = %d\n",
1035 				      ret);
1036 	}
1037 
1038 	if (priv->device_id == WCN6750_DEVICE_ID ||
1039 	    priv->device_id == WCN6450_DEVICE_ID) {
1040 		if (!priv->fw_soc_wake_ack_irq)
1041 			register_soc_wake_notif(&priv->pdev->dev);
1042 
1043 		icnss_get_smp2p_info(priv, ICNSS_SMP2P_OUT_SOC_WAKE);
1044 		icnss_get_smp2p_info(priv, ICNSS_SMP2P_OUT_EP_POWER_SAVE);
1045 	}
1046 
1047 	if (priv->wpss_supported)
1048 		icnss_get_smp2p_info(priv, ICNSS_SMP2P_OUT_POWER_SAVE);
1049 
1050 	if (priv->device_id == ADRASTEA_DEVICE_ID) {
1051 		if (priv->bdf_download_support) {
1052 			ret = wlfw_cal_report_req(priv);
1053 			if (ret < 0)
1054 				goto device_info_failure;
1055 		}
1056 
1057 		wlfw_dynamic_feature_mask_send_sync_msg(priv,
1058 							dynamic_feature_mask);
1059 	}
1060 
1061 	if (!priv->fw_error_fatal_irq)
1062 		register_fw_error_notifications(&priv->pdev->dev);
1063 
1064 	if (!priv->fw_early_crash_irq)
1065 		register_early_crash_notifications(&priv->pdev->dev);
1066 
1067 	if (priv->psf_supported)
1068 		queue_work(priv->soc_update_wq, &priv->soc_update_work);
1069 
1070 	return ret;
1071 
1072 device_info_failure:
1073 	icnss_hw_power_off(priv);
1074 fail:
1075 	ICNSS_ASSERT(ignore_assert);
1076 qmi_registered:
1077 	return ret;
1078 }
1079 
1080 static int icnss_driver_event_server_exit(struct icnss_priv *priv)
1081 {
1082 	if (!priv)
1083 		return -ENODEV;
1084 
1085 	icnss_pr_info("WLAN FW Service Disconnected: 0x%lx\n", priv->state);
1086 
1087 	icnss_clear_server(priv);
1088 
1089 	if (priv->psf_supported)
1090 		priv->last_updated_voltage = 0;
1091 
1092 	return 0;
1093 }
1094 
1095 static int icnss_call_driver_probe(struct icnss_priv *priv)
1096 {
1097 	int ret = 0;
1098 	int probe_cnt = 0;
1099 
1100 	if (!priv->ops || !priv->ops->probe)
1101 		return 0;
1102 
1103 	if (test_bit(ICNSS_DRIVER_PROBED, &priv->state))
1104 		return -EINVAL;
1105 
1106 	icnss_pr_dbg("Calling driver probe state: 0x%lx\n", priv->state);
1107 
1108 	icnss_hw_power_on(priv);
1109 
1110 	icnss_block_shutdown(true);
1111 	while (probe_cnt < ICNSS_MAX_PROBE_CNT) {
1112 		ret = priv->ops->probe(&priv->pdev->dev);
1113 		probe_cnt++;
1114 		if (ret != -EPROBE_DEFER)
1115 			break;
1116 	}
1117 	if (ret < 0) {
1118 		icnss_pr_err("Driver probe failed: %d, state: 0x%lx, probe_cnt: %d\n",
1119 			     ret, priv->state, probe_cnt);
1120 		icnss_block_shutdown(false);
1121 		goto out;
1122 	}
1123 
1124 	icnss_block_shutdown(false);
1125 	set_bit(ICNSS_DRIVER_PROBED, &priv->state);
1126 
1127 	return 0;
1128 
1129 out:
1130 	icnss_hw_power_off(priv);
1131 	return ret;
1132 }
1133 
1134 static int icnss_call_driver_shutdown(struct icnss_priv *priv)
1135 {
1136 	if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
1137 		goto out;
1138 
1139 	if (!priv->ops || !priv->ops->shutdown)
1140 		goto out;
1141 
1142 	if (test_bit(ICNSS_SHUTDOWN_DONE, &priv->state))
1143 		goto out;
1144 
1145 	icnss_pr_dbg("Calling driver shutdown state: 0x%lx\n", priv->state);
1146 
1147 	priv->ops->shutdown(&priv->pdev->dev);
1148 	set_bit(ICNSS_SHUTDOWN_DONE, &priv->state);
1149 
1150 out:
1151 	return 0;
1152 }
1153 
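/* Called once FW is back up after a PD restart: clear the recovery state
 * bits and either reinit the registered driver or, if it was never
 * probed, fall back to the probe path.
 */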
1154 static int icnss_pd_restart_complete(struct icnss_priv *priv)
1155 {
1156 	int ret = 0;
1157 
1158 	icnss_pm_relax(priv);
1159 
1160 	icnss_call_driver_shutdown(priv);
1161 
1162 	clear_bit(ICNSS_PDR, &priv->state);
1163 	clear_bit(ICNSS_REJUVENATE, &priv->state);
1164 	clear_bit(ICNSS_PD_RESTART, &priv->state);
1165 	clear_bit(ICNSS_LOW_POWER, &priv->state);
1166 	priv->early_crash_ind = false;
1167 	priv->is_ssr = false;
1168 
1169 	if (!priv->ops || !priv->ops->reinit)
1170 		goto out;
1171 
1172 	if (test_bit(ICNSS_FW_DOWN, &priv->state)) {
1173 		icnss_pr_err("FW is in bad state, state: 0x%lx\n",
1174 			     priv->state);
1175 		goto out;
1176 	}
1177 
1178 	if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
1179 		goto call_probe;
1180 
1181 	icnss_pr_dbg("Calling driver reinit state: 0x%lx\n", priv->state);
1182 
1183 	icnss_hw_power_on(priv);
1184 
1185 	icnss_block_shutdown(true);
1186 
1187 	ret = priv->ops->reinit(&priv->pdev->dev);
1188 	if (ret < 0) {
1189 		icnss_fatal_err("Driver reinit failed: %d, state: 0x%lx\n",
1190 				ret, priv->state);
1191 		if (!priv->allow_recursive_recovery)
1192 			ICNSS_ASSERT(false);
1193 		icnss_block_shutdown(false);
1194 		goto out_power_off;
1195 	}
1196 
1197 	icnss_block_shutdown(false);
1198 	clear_bit(ICNSS_SHUTDOWN_DONE, &priv->state);
1199 	return 0;
1200 
1201 call_probe:
1202 	return icnss_call_driver_probe(priv);
1203 
1204 out_power_off:
1205 	icnss_hw_power_off(priv);
1206 
1207 out:
1208 	return ret;
1209 }
1210 
1211 
1212 static int icnss_driver_event_fw_ready_ind(struct icnss_priv *priv, void *data)
1213 {
1214 	int ret = 0;
1215 
1216 	if (!priv)
1217 		return -ENODEV;
1218 
1219 	del_timer(&priv->recovery_timer);
1220 	set_bit(ICNSS_FW_READY, &priv->state);
1221 	clear_bit(ICNSS_MODE_ON, &priv->state);
1222 	atomic_set(&priv->soc_wake_ref_count, 0);
1223 
1224 	if (priv->device_id == WCN6750_DEVICE_ID ||
1225 	    priv->device_id == WCN6450_DEVICE_ID)
1226 		icnss_free_qdss_mem(priv);
1227 
1228 	icnss_pr_info("WLAN FW is ready: 0x%lx\n", priv->state);
1229 
1230 	icnss_hw_power_off(priv);
1231 
1232 	if (!priv->pdev) {
1233 		icnss_pr_err("Device is not ready\n");
1234 		ret = -ENODEV;
1235 		goto out;
1236 	}
1237 
1238 	if (priv->is_slate_rfa && test_bit(ICNSS_SLATE_UP, &priv->state))
1239 		icnss_send_wlan_boot_complete();
1240 
1241 	if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
1242 		ret = icnss_pd_restart_complete(priv);
1243 	} else {
1244 		if (priv->wpss_supported)
1245 			icnss_setup_dms_mac(priv);
1246 		ret = icnss_call_driver_probe(priv);
1247 	}
1248 
1249 	icnss_vreg_unvote(priv);
1250 
1251 out:
1252 	return ret;
1253 }
1254 
1255 static int icnss_driver_event_fw_init_done(struct icnss_priv *priv, void *data)
1256 {
1257 	int ret = 0;
1258 
1259 	if (!priv)
1260 		return -ENODEV;
1261 
1262 	icnss_pr_info("WLAN FW Initialization done: 0x%lx\n", priv->state);
1263 
1264 	if (priv->device_id == WCN6750_DEVICE_ID) {
1265 		ret = icnss_wlfw_qdss_dnld_send_sync(priv);
1266 		if (ret < 0)
1267 			icnss_pr_info("Failed to download qdss config file for WCN6750, ret = %d\n",
1268 				      ret);
1269 	}
1270 
1271 	if (test_bit(ICNSS_COLD_BOOT_CAL, &priv->state)) {
1272 		mod_timer(&priv->recovery_timer,
1273 			  jiffies + msecs_to_jiffies(ICNSS_CAL_TIMEOUT));
1274 		ret = wlfw_wlan_mode_send_sync_msg(priv,
1275 			(enum wlfw_driver_mode_enum_v01)ICNSS_CALIBRATION);
1276 	} else {
1277 		icnss_driver_event_fw_ready_ind(priv, NULL);
1278 	}
1279 
1280 	return ret;
1281 }
1282 
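/* Allocate DMA-coherent buffers for the QDSS trace segments requested by
 * FW. Allocation is best effort: on the first failure the remaining
 * segments are dropped and qdss_mem_seg_len is trimmed accordingly.
 */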
1283 int icnss_alloc_qdss_mem(struct icnss_priv *priv)
1284 {
1285 	struct platform_device *pdev = priv->pdev;
1286 	struct icnss_fw_mem *qdss_mem = priv->qdss_mem;
1287 	int i, j;
1288 
1289 	for (i = 0; i < priv->qdss_mem_seg_len; i++) {
1290 		if (!qdss_mem[i].va && qdss_mem[i].size) {
1291 			qdss_mem[i].va =
1292 				dma_alloc_coherent(&pdev->dev,
1293 						   qdss_mem[i].size,
1294 						   &qdss_mem[i].pa,
1295 						   GFP_KERNEL);
1296 			if (!qdss_mem[i].va) {
1297 				icnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chunk-ID: %d\n",
1298 					     qdss_mem[i].size,
1299 					     qdss_mem[i].type, i);
1300 				break;
1301 			}
1302 		}
1303 	}
1304 
1305 	/* Best-effort allocation for QDSS trace */
1306 	if (i < priv->qdss_mem_seg_len) {
1307 		for (j = i; j < priv->qdss_mem_seg_len; j++) {
1308 			qdss_mem[j].type = 0;
1309 			qdss_mem[j].size = 0;
1310 		}
1311 		priv->qdss_mem_seg_len = i;
1312 	}
1313 
1314 	return 0;
1315 }
1316 
1317 void icnss_free_qdss_mem(struct icnss_priv *priv)
1318 {
1319 	struct platform_device *pdev = priv->pdev;
1320 	struct icnss_fw_mem *qdss_mem = priv->qdss_mem;
1321 	int i;
1322 
1323 	for (i = 0; i < priv->qdss_mem_seg_len; i++) {
1324 		if (qdss_mem[i].va && qdss_mem[i].size) {
1325 			icnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n",
1326 				     &qdss_mem[i].pa, qdss_mem[i].size,
1327 				     qdss_mem[i].type);
1328 			dma_free_coherent(&pdev->dev,
1329 					  qdss_mem[i].size, qdss_mem[i].va,
1330 					  qdss_mem[i].pa);
1331 			qdss_mem[i].va = NULL;
1332 			qdss_mem[i].pa = 0;
1333 			qdss_mem[i].size = 0;
1334 			qdss_mem[i].type = 0;
1335 		}
1336 	}
1337 	priv->qdss_mem_seg_len = 0;
1338 }
1339 
1340 static int icnss_qdss_trace_req_mem_hdlr(struct icnss_priv *priv)
1341 {
1342 	int ret = 0;
1343 
1344 	ret = icnss_alloc_qdss_mem(priv);
1345 	if (ret < 0)
1346 		return ret;
1347 
1348 	return wlfw_qdss_trace_mem_info_send_sync(priv);
1349 }
1350 
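/* Translate a FW-provided physical address range into the kernel virtual
 * address inside the allocated QDSS segments; *seg_id returns the index
 * of the matching segment (or qdss_mem_seg_len when none matches).
 */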
1351 static void *icnss_qdss_trace_pa_to_va(struct icnss_priv *priv,
1352 				       u64 pa, u32 size, int *seg_id)
1353 {
1354 	int i = 0;
1355 	struct icnss_fw_mem *qdss_mem = priv->qdss_mem;
1356 	u64 offset = 0;
1357 	void *va = NULL;
1358 	u64 local_pa;
1359 	u32 local_size;
1360 
1361 	for (i = 0; i < priv->qdss_mem_seg_len; i++) {
1362 		local_pa = (u64)qdss_mem[i].pa;
1363 		local_size = (u32)qdss_mem[i].size;
1364 		if (pa == local_pa && size <= local_size) {
1365 			va = qdss_mem[i].va;
1366 			break;
1367 		}
1368 		if (pa > local_pa &&
1369 		    pa < local_pa + local_size &&
1370 		    pa + size <= local_pa + local_size) {
1371 			offset = pa - local_pa;
1372 			va = qdss_mem[i].va + offset;
1373 			break;
1374 		}
1375 	}
1376 
1377 	*seg_id = i;
1378 	return va;
1379 }
1380 
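/* Push QDSS trace data to userspace over the ICNSS generic netlink
 * channel: either every allocated segment (mem_seg_len == 0) or only the
 * address ranges specified by FW in the save indication.
 */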
1381 static int icnss_qdss_trace_save_hdlr(struct icnss_priv *priv,
1382 				      void *data)
1383 {
1384 	struct icnss_qmi_event_qdss_trace_save_data *event_data = data;
1385 	struct icnss_fw_mem *qdss_mem = priv->qdss_mem;
1386 	int ret = 0;
1387 	int i;
1388 	void *va = NULL;
1389 	u64 pa;
1390 	u32 size;
1391 	int seg_id = 0;
1392 
1393 	if (!priv->qdss_mem_seg_len) {
1394 		icnss_pr_err("Memory for QDSS trace is not available\n");
1395 		return -ENOMEM;
1396 	}
1397 
1398 	if (event_data->mem_seg_len == 0) {
1399 		for (i = 0; i < priv->qdss_mem_seg_len; i++) {
1400 			ret = icnss_genl_send_msg(qdss_mem[i].va,
1401 						  ICNSS_GENL_MSG_TYPE_QDSS,
1402 						  event_data->file_name,
1403 						  qdss_mem[i].size);
1404 			if (ret < 0) {
1405 				icnss_pr_err("Failed to save QDSS data: %d\n",
1406 					     ret);
1407 				break;
1408 			}
1409 		}
1410 	} else {
1411 		for (i = 0; i < event_data->mem_seg_len; i++) {
1412 			pa = event_data->mem_seg[i].addr;
1413 			size = event_data->mem_seg[i].size;
1414 			va = icnss_qdss_trace_pa_to_va(priv, pa,
1415 						       size, &seg_id);
1416 			if (!va) {
1417 				icnss_pr_err("Failed to find matching va for pa %pa\n",
1418 					     &pa);
1419 				ret = -EINVAL;
1420 				break;
1421 			}
1422 			ret = icnss_genl_send_msg(va, ICNSS_GENL_MSG_TYPE_QDSS,
1423 						  event_data->file_name, size);
1424 			if (ret < 0) {
1425 				icnss_pr_err("Failed to save QDSS data: %d\n",
1426 					     ret);
1427 				break;
1428 			}
1429 		}
1430 	}
1431 
1432 	kfree(data);
1433 	return ret;
1434 }
1435 
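/* Decrement v only if the result would remain >= 1; returns the would-be
 * new value, which is < 1 when no decrement was performed.
 */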
1436 static inline int icnss_atomic_dec_if_greater_one(atomic_t *v)
1437 {
1438 	int dec, c = atomic_read(v);
1439 
1440 	do {
1441 		dec = c - 1;
1442 		if (unlikely(dec < 1))
1443 			break;
1444 	} while (!atomic_try_cmpxchg(v, &c, dec));
1445 
1446 	return dec;
1447 }
1448 
1449 static int icnss_qdss_trace_req_data_hdlr(struct icnss_priv *priv,
1450 					  void *data)
1451 {
1452 	int ret = 0;
1453 	struct icnss_qmi_event_qdss_trace_save_data *event_data = data;
1454 
1455 	if (!priv)
1456 		return -ENODEV;
1457 
1458 	if (!data)
1459 		return -EINVAL;
1460 
1461 	ret = icnss_wlfw_qdss_data_send_sync(priv, event_data->file_name,
1462 					     event_data->total_size);
1463 
1464 	kfree(data);
1465 	return ret;
1466 }
1467 
1468 static int icnss_event_soc_wake_request(struct icnss_priv *priv, void *data)
1469 {
1470 	int ret = 0;
1471 
1472 	if (!priv)
1473 		return -ENODEV;
1474 
1475 	if (atomic_inc_not_zero(&priv->soc_wake_ref_count)) {
1476 		icnss_pr_soc_wake("SOC awake after posting work, Ref count: %d",
1477 				  atomic_read(&priv->soc_wake_ref_count));
1478 		return 0;
1479 	}
1480 
1481 	ret = icnss_send_smp2p(priv, ICNSS_SOC_WAKE_REQ,
1482 			       ICNSS_SMP2P_OUT_SOC_WAKE);
1483 	if (!ret)
1484 		atomic_inc(&priv->soc_wake_ref_count);
1485 
1486 	return ret;
1487 }
1488 
1489 static int icnss_event_soc_wake_release(struct icnss_priv *priv, void *data)
1490 {
1491 	int ret = 0;
1492 
1493 	if (!priv)
1494 		return -ENODEV;
1495 
1496 	if (atomic_dec_if_positive(&priv->soc_wake_ref_count)) {
1497 		icnss_pr_soc_wake("Wake release not called, Ref count: %d\n",
1498 				  atomic_read(&priv->soc_wake_ref_count));
1499 		return 0;
1500 	}
1501 
1502 	ret = icnss_send_smp2p(priv, ICNSS_SOC_WAKE_REL,
1503 			       ICNSS_SMP2P_OUT_SOC_WAKE);
1504 	return ret;
1505 }
1506 
1507 static int icnss_driver_event_register_driver(struct icnss_priv *priv,
1508 							 void *data)
1509 {
1510 	int ret = 0;
1511 	int probe_cnt = 0;
1512 
1513 	if (priv->ops)
1514 		return -EEXIST;
1515 
1516 	priv->ops = data;
1517 
1518 	if (test_bit(SKIP_QMI, &priv->ctrl_params.quirks))
1519 		set_bit(ICNSS_FW_READY, &priv->state);
1520 
1521 	if (test_bit(ICNSS_FW_DOWN, &priv->state)) {
1522 		icnss_pr_err("FW is in bad state, state: 0x%lx\n",
1523 			     priv->state);
1524 		return -ENODEV;
1525 	}
1526 
1527 	if (!test_bit(ICNSS_FW_READY, &priv->state)) {
1528 		icnss_pr_dbg("FW is not ready yet, state: 0x%lx\n",
1529 			     priv->state);
1530 		goto out;
1531 	}
1532 
1533 	ret = icnss_hw_power_on(priv);
1534 	if (ret)
1535 		goto out;
1536 
1537 	icnss_block_shutdown(true);
1538 	while (probe_cnt < ICNSS_MAX_PROBE_CNT) {
1539 		ret = priv->ops->probe(&priv->pdev->dev);
1540 		probe_cnt++;
1541 		if (ret != -EPROBE_DEFER)
1542 			break;
1543 	}
1544 	if (ret) {
1545 		icnss_pr_err("Driver probe failed: %d, state: 0x%lx, probe_cnt: %d\n",
1546 			     ret, priv->state, probe_cnt);
1547 		icnss_block_shutdown(false);
1548 		goto power_off;
1549 	}
1550 
1551 	icnss_block_shutdown(false);
1552 	set_bit(ICNSS_DRIVER_PROBED, &priv->state);
1553 
1554 	return 0;
1555 
1556 power_off:
1557 	icnss_hw_power_off(priv);
1558 out:
1559 	return ret;
1560 }
1561 
1562 static int icnss_driver_event_unregister_driver(struct icnss_priv *priv,
1563 							 void *data)
1564 {
1565 	if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state)) {
1566 		priv->ops = NULL;
1567 		goto out;
1568 	}
1569 
1570 	set_bit(ICNSS_DRIVER_UNLOADING, &priv->state);
1571 
1572 	icnss_block_shutdown(true);
1573 
1574 	if (priv->ops)
1575 		priv->ops->remove(&priv->pdev->dev);
1576 
1577 	icnss_block_shutdown(false);
1578 
1579 	clear_bit(ICNSS_DRIVER_UNLOADING, &priv->state);
1580 	clear_bit(ICNSS_DRIVER_PROBED, &priv->state);
1581 
1582 	priv->ops = NULL;
1583 
1584 	icnss_hw_power_off(priv);
1585 
1586 out:
1587 	return 0;
1588 }
1589 
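/* Common FW-crash bookkeeping: mark PD restart in progress, send
 * ICNSS_UEVENT_FW_DOWN to the registered driver and ack a rejuvenate
 * indication when FW requested one.
 */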
1590 static int icnss_fw_crashed(struct icnss_priv *priv,
1591 			    struct icnss_event_pd_service_down_data *event_data)
1592 {
1593 	struct icnss_uevent_fw_down_data fw_down_data = {0};
1594 
1595 	icnss_pr_dbg("FW crashed, state: 0x%lx\n", priv->state);
1596 
1597 	set_bit(ICNSS_PD_RESTART, &priv->state);
1598 
1599 	icnss_pm_stay_awake(priv);
1600 
1601 	if (test_bit(ICNSS_DRIVER_PROBED, &priv->state) &&
1602 	    test_bit(ICNSS_FW_READY, &priv->state)) {
1603 		clear_bit(ICNSS_FW_READY, &priv->state);
1604 		fw_down_data.crashed = true;
1605 		icnss_call_driver_uevent(priv,
1606 					 ICNSS_UEVENT_FW_DOWN,
1607 					 &fw_down_data);
1608 	}
1609 
1610 	if (event_data && event_data->fw_rejuvenate)
1611 		wlfw_rejuvenate_ack_send_sync_msg(priv);
1612 
1613 	return 0;
1614 }
1615 
1616 int icnss_update_hang_event_data(struct icnss_priv *priv,
1617 				 struct icnss_uevent_hang_data *hang_data)
1618 {
1619 	if (!priv->hang_event_data_va)
1620 		return -EINVAL;
1621 
1622 	priv->hang_event_data = kmemdup(priv->hang_event_data_va,
1623 					priv->hang_event_data_len,
1624 					GFP_ATOMIC);
1625 	if (!priv->hang_event_data)
1626 		return -ENOMEM;
1627 
1628 	/* Update the hang event params */
1629 	hang_data->hang_event_data = priv->hang_event_data;
1630 	hang_data->hang_event_data_len = priv->hang_event_data_len;
1631 
1632 	return 0;
1633 }
1634 
1635 int icnss_send_hang_event_data(struct icnss_priv *priv)
1636 {
1637 	struct icnss_uevent_hang_data hang_data = {0};
1638 	int ret = 0xFF;
1639 
1640 	if (priv->early_crash_ind) {
1641 		ret = icnss_update_hang_event_data(priv, &hang_data);
1642 		if (ret)
1643 			icnss_pr_err("Unable to allocate memory for Hang event data\n");
1644 	}
1645 	icnss_call_driver_uevent(priv, ICNSS_UEVENT_HANG_DATA,
1646 				 &hang_data);
1647 
1648 	if (!ret) {
1649 		kfree(priv->hang_event_data);
1650 		priv->hang_event_data = NULL;
1651 	}
1652 
1653 	return 0;
1654 }
1655 
1656 static int icnss_driver_event_pd_service_down(struct icnss_priv *priv,
1657 					      void *data)
1658 {
1659 	struct icnss_event_pd_service_down_data *event_data = data;
1660 
1661 	if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state)) {
1662 		icnss_ignore_fw_timeout(false);
1663 		goto out;
1664 	}
1665 
1666 	if (priv->force_err_fatal)
1667 		ICNSS_ASSERT(0);
1668 
1669 	if (priv->device_id == WCN6750_DEVICE_ID ||
1670 	    priv->device_id == WCN6450_DEVICE_ID) {
1671 		icnss_send_smp2p(priv, ICNSS_RESET_MSG,
1672 				 ICNSS_SMP2P_OUT_SOC_WAKE);
1673 		icnss_send_smp2p(priv, ICNSS_RESET_MSG,
1674 				 ICNSS_SMP2P_OUT_EP_POWER_SAVE);
1675 	}
1676 
1677 	if (priv->wpss_supported)
1678 		icnss_send_smp2p(priv, ICNSS_RESET_MSG,
1679 				 ICNSS_SMP2P_OUT_POWER_SAVE);
1680 
1681 	icnss_send_hang_event_data(priv);
1682 
1683 	if (priv->early_crash_ind) {
1684 		icnss_pr_dbg("PD Down ignored as early indication is processed: %d, state: 0x%lx\n",
1685 			     event_data->crashed, priv->state);
1686 		goto out;
1687 	}
1688 
1689 	if (test_bit(ICNSS_PD_RESTART, &priv->state) && event_data->crashed) {
1690 		icnss_fatal_err("PD Down while recovery in progress, crashed: %d, state: 0x%lx\n",
1691 				event_data->crashed, priv->state);
1692 		if (!priv->allow_recursive_recovery)
1693 			ICNSS_ASSERT(0);
1694 		goto out;
1695 	}
1696 
1697 	if (!test_bit(ICNSS_PD_RESTART, &priv->state))
1698 		icnss_fw_crashed(priv, event_data);
1699 
1700 out:
1701 	kfree(data);
1702 
1703 	return 0;
1704 }
1705 
1706 static int icnss_driver_event_early_crash_ind(struct icnss_priv *priv,
1707 					      void *data)
1708 {
1709 	if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state)) {
1710 		icnss_ignore_fw_timeout(false);
1711 		goto out;
1712 	}
1713 
1714 	priv->early_crash_ind = true;
1715 	icnss_fw_crashed(priv, NULL);
1716 
1717 out:
1718 	kfree(data);
1719 
1720 	return 0;
1721 }
1722 
1723 static int icnss_driver_event_idle_shutdown(struct icnss_priv *priv,
1724 					    void *data)
1725 {
1726 	int ret = 0;
1727 
1728 	if (!priv->ops || !priv->ops->idle_shutdown)
1729 		return 0;
1730 
1731 	if (priv->is_ssr || test_bit(ICNSS_PDR, &priv->state) ||
1732 	    test_bit(ICNSS_REJUVENATE, &priv->state)) {
1733 		icnss_pr_err("SSR/PDR is already in progress during idle shutdown callback\n");
1734 		ret = -EBUSY;
1735 	} else {
1736 		icnss_pr_dbg("Calling driver idle shutdown, state: 0x%lx\n",
1737 								priv->state);
1738 		icnss_block_shutdown(true);
1739 		ret = priv->ops->idle_shutdown(&priv->pdev->dev);
1740 		icnss_block_shutdown(false);
1741 	}
1742 
1743 	return ret;
1744 }
1745 
1746 static int icnss_driver_event_idle_restart(struct icnss_priv *priv,
1747 					   void *data)
1748 {
1749 	int ret = 0;
1750 
1751 	if (!priv->ops || !priv->ops->idle_restart)
1752 		return 0;
1753 
1754 	if (priv->is_ssr || test_bit(ICNSS_PDR, &priv->state) ||
1755 	    test_bit(ICNSS_REJUVENATE, &priv->state)) {
1756 		icnss_pr_err("SSR/PDR is already in progress during idle restart callback\n");
1757 		ret = -EBUSY;
1758 	} else {
1759 		icnss_pr_dbg("Calling driver idle restart, state: 0x%lx\n",
1760 								priv->state);
1761 		icnss_block_shutdown(true);
1762 		ret = priv->ops->idle_restart(&priv->pdev->dev);
1763 		icnss_block_shutdown(false);
1764 	}
1765 
1766 	return ret;
1767 }
1768 
1769 static int icnss_qdss_trace_free_hdlr(struct icnss_priv *priv)
1770 {
1771 	icnss_free_qdss_mem(priv);
1772 
1773 	return 0;
1774 }
1775 
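/* Upload the M3 dump segments reported by FW: ioremap each segment,
 * hand it to the matching qcom_dump() device, then send the upload-done
 * response carrying the aggregated status.
 */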
1776 static int icnss_m3_dump_upload_req_hdlr(struct icnss_priv *priv,
1777 					 void *data)
1778 {
1779 	struct icnss_m3_upload_segments_req_data *event_data = data;
1780 	struct qcom_dump_segment segment;
1781 	int i, status = 0, ret = 0;
1782 	struct list_head head;
1783 
1784 	if (!dump_enabled()) {
1785 		icnss_pr_info("Dump collection is not enabled\n");
1786 		return ret;
1787 	}
1788 
1789 	if (IS_ERR_OR_NULL(priv->m3_dump_phyareg) ||
1790 	    IS_ERR_OR_NULL(priv->m3_dump_phydbg) ||
1791 	    IS_ERR_OR_NULL(priv->m3_dump_wmac0reg) ||
1792 	    IS_ERR_OR_NULL(priv->m3_dump_wcssdbg) ||
1793 	    IS_ERR_OR_NULL(priv->m3_dump_phyapdmem))
1794 		return ret;
1795 
1796 	INIT_LIST_HEAD(&head);
1797 
1798 	for (i = 0; i < event_data->no_of_valid_segments; i++) {
1799 		memset(&segment, 0, sizeof(segment));
1800 
1801 		segment.va = devm_ioremap(&priv->pdev->dev,
1802 					  event_data->m3_segment[i].addr,
1803 					  event_data->m3_segment[i].size);
1804 		if (!segment.va) {
1805 			icnss_pr_err("Failed to ioremap M3 Dump region");
1806 			ret = -ENOMEM;
1807 			goto send_resp;
1808 		}
1809 
1810 		segment.size = event_data->m3_segment[i].size;
1811 
1812 		list_add(&segment.node, &head);
1813 		icnss_pr_dbg("Started dump collection for %s segment\n",
1814 			     event_data->m3_segment[i].name);
1815 
1816 		switch (event_data->m3_segment[i].type) {
1817 		case QMI_M3_SEGMENT_PHYAREG_V01:
1818 			ret = qcom_dump(&head, priv->m3_dump_phyareg->dev);
1819 			break;
1820 		case QMI_M3_SEGMENT_PHYDBG_V01:
1821 			ret = qcom_dump(&head, priv->m3_dump_phydbg->dev);
1822 			break;
1823 		case QMI_M3_SEGMENT_WMAC0_REG_V01:
1824 			ret = qcom_dump(&head, priv->m3_dump_wmac0reg->dev);
1825 			break;
1826 		case QMI_M3_SEGMENT_WCSSDBG_V01:
1827 			ret = qcom_dump(&head, priv->m3_dump_wcssdbg->dev);
1828 			break;
1829 		case QMI_M3_SEGMENT_PHYAPDMEM_V01:
1830 			ret = qcom_dump(&head, priv->m3_dump_phyapdmem->dev);
1831 			break;
1832 		default:
1833 			icnss_pr_err("Invalid Segment type: %d",
1834 				     event_data->m3_segment[i].type);
1835 		}
1836 
1837 		if (ret) {
1838 			status = ret;
1839 			icnss_pr_err("Failed to dump m3 %s segment, err = %d\n",
1840 				     event_data->m3_segment[i].name, ret);
1841 		}
1842 		list_del(&segment.node);
1843 	}
1844 send_resp:
1845 	icnss_wlfw_m3_dump_upload_done_send_sync(priv, event_data->pdev_id,
1846 						 status);
1847 
1848 	return ret;
1849 }
1850 
1851 static int icnss_subsys_restart_level(struct icnss_priv *priv, void *data)
1852 {
1853 	int ret = 0;
1854 	struct icnss_subsys_restart_level_data *event_data = data;
1855 
1856 	if (!priv)
1857 		return -ENODEV;
1858 
1859 	if (!data)
1860 		return -EINVAL;
1861 
1862 	ret = wlfw_subsys_restart_level_msg(priv, event_data->restart_level);
1863 
1864 	kfree(data);
1865 
1866 	return ret;
1867 }
1868 
1869 static void icnss_wpss_self_recovery(struct work_struct *wpss_load_work)
1870 {
1871 	int ret;
1872 	struct icnss_priv *priv = icnss_get_plat_priv();
1873 
1874 	rproc_shutdown(priv->rproc);
1875 	ret = rproc_boot(priv->rproc);
1876 	if (ret) {
1877 		icnss_pr_err("Failed to self-recover wpss rproc, ret: %d\n", ret);
1878 		rproc_put(priv->rproc);
1879 	}
1880 }
1881 
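/* Worker for priv->event_wq: drain priv->event_list and dispatch each
 * event to its handler; synchronous posters are completed with the
 * handler's return value, asynchronous events are freed here.
 */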
1882 static void icnss_driver_event_work(struct work_struct *work)
1883 {
1884 	struct icnss_priv *priv =
1885 		container_of(work, struct icnss_priv, event_work);
1886 	struct icnss_driver_event *event;
1887 	unsigned long flags;
1888 	int ret;
1889 
1890 	icnss_pm_stay_awake(priv);
1891 
1892 	spin_lock_irqsave(&priv->event_lock, flags);
1893 
1894 	while (!list_empty(&priv->event_list)) {
1895 		event = list_first_entry(&priv->event_list,
1896 					 struct icnss_driver_event, list);
1897 		list_del(&event->list);
1898 		spin_unlock_irqrestore(&priv->event_lock, flags);
1899 
1900 		icnss_pr_dbg("Processing event: %s%s(%d), state: 0x%lx\n",
1901 			     icnss_driver_event_to_str(event->type),
1902 			     event->sync ? "-sync" : "", event->type,
1903 			     priv->state);
1904 
1905 		switch (event->type) {
1906 		case ICNSS_DRIVER_EVENT_SERVER_ARRIVE:
1907 			ret = icnss_driver_event_server_arrive(priv,
1908 								 event->data);
1909 			break;
1910 		case ICNSS_DRIVER_EVENT_SERVER_EXIT:
1911 			ret = icnss_driver_event_server_exit(priv);
1912 			break;
1913 		case ICNSS_DRIVER_EVENT_FW_READY_IND:
1914 			ret = icnss_driver_event_fw_ready_ind(priv,
1915 								 event->data);
1916 			break;
1917 		case ICNSS_DRIVER_EVENT_REGISTER_DRIVER:
1918 			ret = icnss_driver_event_register_driver(priv,
1919 								 event->data);
1920 			break;
1921 		case ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
1922 			ret = icnss_driver_event_unregister_driver(priv,
1923 								   event->data);
1924 			break;
1925 		case ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN:
1926 			ret = icnss_driver_event_pd_service_down(priv,
1927 								 event->data);
1928 			break;
1929 		case ICNSS_DRIVER_EVENT_FW_EARLY_CRASH_IND:
1930 			ret = icnss_driver_event_early_crash_ind(priv,
1931 								 event->data);
1932 			break;
1933 		case ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
1934 			ret = icnss_driver_event_idle_shutdown(priv,
1935 							       event->data);
1936 			break;
1937 		case ICNSS_DRIVER_EVENT_IDLE_RESTART:
1938 			ret = icnss_driver_event_idle_restart(priv,
1939 							      event->data);
1940 			break;
1941 		case ICNSS_DRIVER_EVENT_FW_INIT_DONE_IND:
1942 			ret = icnss_driver_event_fw_init_done(priv,
1943 							      event->data);
1944 			break;
1945 		case ICNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
1946 			ret = icnss_qdss_trace_req_mem_hdlr(priv);
1947 			break;
1948 		case ICNSS_DRIVER_EVENT_QDSS_TRACE_SAVE:
1949 			ret = icnss_qdss_trace_save_hdlr(priv,
1950 							 event->data);
1951 			break;
1952 		case ICNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
1953 			ret = icnss_qdss_trace_free_hdlr(priv);
1954 			break;
1955 		case ICNSS_DRIVER_EVENT_M3_DUMP_UPLOAD_REQ:
1956 			ret = icnss_m3_dump_upload_req_hdlr(priv, event->data);
1957 			break;
1958 		case ICNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA:
1959 			ret = icnss_qdss_trace_req_data_hdlr(priv,
1960 							     event->data);
1961 			break;
1962 		case ICNSS_DRIVER_EVENT_SUBSYS_RESTART_LEVEL:
1963 			ret = icnss_subsys_restart_level(priv, event->data);
1964 			break;
1965 		default:
1966 			icnss_pr_err("Invalid Event type: %d", event->type);
1967 			kfree(event);
1968 			continue;
1969 		}
1970 
1971 		priv->stats.events[event->type].processed++;
1972 
1973 		icnss_pr_dbg("Event Processed: %s%s(%d), ret: %d, state: 0x%lx\n",
1974 			     icnss_driver_event_to_str(event->type),
1975 			     event->sync ? "-sync" : "", event->type, ret,
1976 			     priv->state);
1977 
1978 		spin_lock_irqsave(&priv->event_lock, flags);
1979 		if (event->sync) {
1980 			event->ret = ret;
1981 			complete(&event->complete);
1982 			continue;
1983 		}
1984 		spin_unlock_irqrestore(&priv->event_lock, flags);
1985 
1986 		kfree(event);
1987 
1988 		spin_lock_irqsave(&priv->event_lock, flags);
1989 	}
1990 	spin_unlock_irqrestore(&priv->event_lock, flags);
1991 
1992 	icnss_pm_relax(priv);
1993 }
1994 
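/*
 * Worker that drains priv->soc_wake_msg_list; mirrors icnss_driver_event_work()
 * but handles only SOC wake request/release events under soc_wake_msg_lock.
 */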
1995 static void icnss_soc_wake_msg_work(struct work_struct *work)
1996 {
1997 	struct icnss_priv *priv =
1998 		container_of(work, struct icnss_priv, soc_wake_msg_work);
1999 	struct icnss_soc_wake_event *event;
2000 	unsigned long flags;
2001 	int ret;
2002 
2003 	icnss_pm_stay_awake(priv);
2004 
2005 	spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
2006 
2007 	while (!list_empty(&priv->soc_wake_msg_list)) {
2008 		event = list_first_entry(&priv->soc_wake_msg_list,
2009 					 struct icnss_soc_wake_event, list);
2010 		list_del(&event->list);
2011 		spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
2012 
2013 		icnss_pr_soc_wake("Processing event: %s%s(%d), state: 0x%lx\n",
2014 				  icnss_soc_wake_event_to_str(event->type),
2015 				  event->sync ? "-sync" : "", event->type,
2016 				  priv->state);
2017 
2018 		switch (event->type) {
2019 		case ICNSS_SOC_WAKE_REQUEST_EVENT:
2020 			ret = icnss_event_soc_wake_request(priv,
2021 							   event->data);
2022 			break;
2023 		case ICNSS_SOC_WAKE_RELEASE_EVENT:
2024 			ret = icnss_event_soc_wake_release(priv,
2025 							   event->data);
2026 			break;
2027 		default:
2028 			icnss_pr_err("Invalid Event type: %d", event->type);
2029 			kfree(event);
2030 			continue;
2031 		}
2032 
2033 		priv->stats.soc_wake_events[event->type].processed++;
2034 
2035 		icnss_pr_soc_wake("Event Processed: %s%s(%d), ret: %d, state: 0x%lx\n",
2036 				  icnss_soc_wake_event_to_str(event->type),
2037 				  event->sync ? "-sync" : "", event->type, ret,
2038 				  priv->state);
2039 
2040 		spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
2041 		if (event->sync) {
2042 			event->ret = ret;
2043 			complete(&event->complete);
2044 			continue;
2045 		}
2046 		spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
2047 
2048 		kfree(event);
2049 
2050 		spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
2051 	}
2052 	spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
2053 
2054 	icnss_pm_relax(priv);
2055 }
2056 
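/*
 * Collect the MSA0 region (priv->msa_va, priv->msa_mem_size) through
 * qcom_dump() when dump collection is enabled and the dump device exists.
 */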
2057 static int icnss_msa0_ramdump(struct icnss_priv *priv)
2058 {
2059 	int ret = 0;
2060 	struct qcom_dump_segment segment;
2061 	struct icnss_ramdump_info *msa0_dump_dev = priv->msa0_dump_dev;
2062 	struct list_head head;
2063 
2064 	if (!dump_enabled()) {
2065 		icnss_pr_info("Dump collection is not enabled\n");
2066 		return ret;
2067 	}
2068 
2069 	if (IS_ERR_OR_NULL(msa0_dump_dev))
2070 		return ret;
2071 
2072 	INIT_LIST_HEAD(&head);
2073 
2074 	memset(&segment, 0, sizeof(segment));
2075 
2076 	segment.va = priv->msa_va;
2077 	segment.size = priv->msa_mem_size;
2078 
2079 	list_add(&segment.node, &head);
2080 
2081 	if (!msa0_dump_dev->dev) {
		icnss_pr_err("MSA0 dump device not found\n");
2083 		return 0;
2084 	}
2085 
2086 	ret = qcom_dump(&head, msa0_dump_dev->dev);
2087 	if (ret) {
2088 		icnss_pr_err("Failed to dump msa0, err = %d\n", ret);
2089 		return ret;
2090 	}
2091 
2092 	list_del(&segment.node);
2093 	return ret;
2094 }
2095 
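/*
 * On a graceful (non-crash) modem shutdown: unregister the WLAN driver if
 * priv->is_shutdown was set, wait out a blocked shutdown if needed, and send
 * the modem shutdown indication to WLAN FW over QMI.
 */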
2096 static void icnss_update_state_send_modem_shutdown(struct icnss_priv *priv,
2097 							void *data)
2098 {
2099 	struct qcom_ssr_notify_data *notif = data;
2100 	int ret = 0;
2101 
2102 	if (!notif->crashed) {
2103 		if (atomic_read(&priv->is_shutdown)) {
2104 			atomic_set(&priv->is_shutdown, false);
2105 			if (!test_bit(ICNSS_PD_RESTART, &priv->state) &&
2106 				!test_bit(ICNSS_SHUTDOWN_DONE, &priv->state) &&
2107 				!test_bit(ICNSS_BLOCK_SHUTDOWN, &priv->state)) {
2108 				clear_bit(ICNSS_FW_READY, &priv->state);
2109 				icnss_driver_event_post(priv,
2110 					  ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
2111 					  ICNSS_EVENT_SYNC_UNINTERRUPTIBLE,
2112 					  NULL);
2113 			}
2114 		}
2115 
2116 		if (test_bit(ICNSS_BLOCK_SHUTDOWN, &priv->state)) {
2117 			if (!wait_for_completion_timeout(
2118 					&priv->unblock_shutdown,
2119 					msecs_to_jiffies(PROBE_TIMEOUT)))
2120 				icnss_pr_err("modem block shutdown timeout\n");
2121 		}
2122 
2123 		ret = wlfw_send_modem_shutdown_msg(priv);
2124 		if (ret < 0)
			icnss_pr_err("Failed to send modem shutdown indication: %d\n",
2126 				     ret);
2127 	}
2128 }
2129 
2130 static char *icnss_qcom_ssr_notify_state_to_str(enum qcom_ssr_notify_type code)
2131 {
2132 	switch (code) {
2133 	case QCOM_SSR_BEFORE_POWERUP:
2134 		return "BEFORE_POWERUP";
2135 	case QCOM_SSR_AFTER_POWERUP:
2136 		return "AFTER_POWERUP";
2137 	case QCOM_SSR_BEFORE_SHUTDOWN:
2138 		return "BEFORE_SHUTDOWN";
2139 	case QCOM_SSR_AFTER_SHUTDOWN:
2140 		return "AFTER_SHUTDOWN";
2141 	default:
2142 		return "UNKNOWN";
2143 	}
}
2145 
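/*
 * Early WPSS SSR notifier: as soon as a WPSS shutdown is announced, mark the
 * FW as down and start ignoring FW response timeouts.
 */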
2146 static int icnss_wpss_early_notifier_nb(struct notifier_block *nb,
2147 					unsigned long code,
2148 					void *data)
2149 {
2150 	struct icnss_priv *priv = container_of(nb, struct icnss_priv,
2151 					       wpss_early_ssr_nb);
2152 
2153 	icnss_pr_vdbg("WPSS-EARLY-Notify: event %s(%lu)\n",
2154 		      icnss_qcom_ssr_notify_state_to_str(code), code);
2155 
2156 	if (code == QCOM_SSR_BEFORE_SHUTDOWN) {
2157 		set_bit(ICNSS_FW_DOWN, &priv->state);
2158 		icnss_ignore_fw_timeout(true);
2159 	}
2160 
2161 	return NOTIFY_DONE;
2162 }
2163 
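/*
 * WPSS SSR notifier: collects the MSA0 dump after shutdown; on
 * QCOM_SSR_BEFORE_SHUTDOWN it marks the FW down, updates recovery stats,
 * sends an FW_DOWN uevent to the WLAN driver and posts a PD_SERVICE_DOWN
 * event, arming the recovery timer when the root PD actually crashed.
 */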
2164 static int icnss_wpss_notifier_nb(struct notifier_block *nb,
2165 				  unsigned long code,
2166 				  void *data)
2167 {
2168 	struct icnss_event_pd_service_down_data *event_data;
2169 	struct qcom_ssr_notify_data *notif = data;
2170 	struct icnss_priv *priv = container_of(nb, struct icnss_priv,
2171 					       wpss_ssr_nb);
2172 	struct icnss_uevent_fw_down_data fw_down_data = {0};
2173 
2174 	icnss_pr_vdbg("WPSS-Notify: event %s(%lu)\n",
2175 		      icnss_qcom_ssr_notify_state_to_str(code), code);
2176 
2177 	if (code == QCOM_SSR_AFTER_SHUTDOWN) {
2178 		icnss_pr_info("Collecting msa0 segment dump\n");
2179 		icnss_msa0_ramdump(priv);
2180 		goto out;
2181 	}
2182 
2183 	if (code != QCOM_SSR_BEFORE_SHUTDOWN)
2184 		goto out;
2185 
2186 	if (priv->wpss_self_recovery_enabled)
2187 		del_timer(&priv->wpss_ssr_timer);
2188 
2189 	priv->is_ssr = true;
2190 
2191 	icnss_pr_info("WPSS went down, state: 0x%lx, crashed: %d\n",
2192 		      priv->state, notif->crashed);
2193 
2194 	if (priv->device_id == ADRASTEA_DEVICE_ID)
2195 		icnss_update_state_send_modem_shutdown(priv, data);
2196 
2197 	set_bit(ICNSS_FW_DOWN, &priv->state);
2198 	icnss_ignore_fw_timeout(true);
2199 
2200 	if (notif->crashed)
2201 		priv->stats.recovery.root_pd_crash++;
2202 	else
2203 		priv->stats.recovery.root_pd_shutdown++;
2204 
2205 	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
2206 
2207 	if (event_data == NULL)
2208 		return notifier_from_errno(-ENOMEM);
2209 
2210 	event_data->crashed = notif->crashed;
2211 
2212 	fw_down_data.crashed = !!notif->crashed;
2213 	if (test_bit(ICNSS_FW_READY, &priv->state)) {
2214 		clear_bit(ICNSS_FW_READY, &priv->state);
2215 		fw_down_data.crashed = !!notif->crashed;
2216 		icnss_call_driver_uevent(priv,
2217 					 ICNSS_UEVENT_FW_DOWN,
2218 					 &fw_down_data);
2219 	}
2220 	icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
2221 				ICNSS_EVENT_SYNC, event_data);
2222 
2223 	if (notif->crashed)
2224 		mod_timer(&priv->recovery_timer,
2225 			  jiffies + msecs_to_jiffies(ICNSS_RECOVERY_TIMEOUT));
2226 out:
	icnss_pr_vdbg("Exit %s, state: 0x%lx\n", __func__, priv->state);
2228 	return NOTIFY_OK;
2229 }
2230 
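/*
 * Modem (MPSS) SSR notifier: handles hibernate-style shutdowns when low power
 * support is enabled, collects the MSA0 dump after a crash, and otherwise
 * follows the same FW-down/PD_SERVICE_DOWN flow as the WPSS notifier. When a
 * PDR registration exists, only the FW_DOWN uevent is sent here and the rest
 * is left to the PDR callback.
 */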
2231 static int icnss_modem_notifier_nb(struct notifier_block *nb,
2232 				  unsigned long code,
2233 				  void *data)
2234 {
2235 	struct icnss_event_pd_service_down_data *event_data;
2236 	struct qcom_ssr_notify_data *notif = data;
2237 	struct icnss_priv *priv = container_of(nb, struct icnss_priv,
2238 					       modem_ssr_nb);
2239 	struct icnss_uevent_fw_down_data fw_down_data = {0};
2240 
2241 	icnss_pr_vdbg("Modem-Notify: event %s(%lu)\n",
2242 		      icnss_qcom_ssr_notify_state_to_str(code), code);
2243 
2244 	switch (code) {
2245 	case QCOM_SSR_BEFORE_SHUTDOWN:
2246 		if (priv->is_slate_rfa)
2247 			complete(&priv->slate_boot_complete);
2248 
2249 		if (!notif->crashed &&
2250 		    priv->low_power_support) { /* Hibernate */
2251 			if (test_bit(ICNSS_MODE_ON, &priv->state))
2252 				icnss_driver_event_post(
2253 					priv, ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN,
2254 					ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
2255 			set_bit(ICNSS_LOW_POWER, &priv->state);
2256 		}
2257 		break;
2258 	case QCOM_SSR_AFTER_SHUTDOWN:
2259 		/* Collect ramdump only when there was a crash. */
2260 		if (notif->crashed) {
2261 			icnss_pr_info("Collecting msa0 segment dump\n");
2262 			icnss_msa0_ramdump(priv);
2263 		}
2264 
2265 		goto out;
2266 	default:
2267 		goto out;
2268 	}
2269 
2270 	priv->is_ssr = true;
2271 
2272 	if (notif->crashed) {
2273 		priv->stats.recovery.root_pd_crash++;
2274 		priv->root_pd_shutdown = false;
2275 	} else {
2276 		priv->stats.recovery.root_pd_shutdown++;
2277 		priv->root_pd_shutdown = true;
2278 	}
2279 
2280 	icnss_update_state_send_modem_shutdown(priv, data);
2281 
2282 	if (test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
2283 		set_bit(ICNSS_FW_DOWN, &priv->state);
2284 		icnss_ignore_fw_timeout(true);
2285 
2286 		if (test_bit(ICNSS_FW_READY, &priv->state)) {
2287 			clear_bit(ICNSS_FW_READY, &priv->state);
2288 			fw_down_data.crashed = !!notif->crashed;
2289 			icnss_call_driver_uevent(priv,
2290 						 ICNSS_UEVENT_FW_DOWN,
2291 						 &fw_down_data);
2292 		}
2293 		goto out;
2294 	}
2295 
2296 	icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
2297 		      priv->state, notif->crashed);
2298 
2299 	set_bit(ICNSS_FW_DOWN, &priv->state);
2300 
2301 	icnss_ignore_fw_timeout(true);
2302 
2303 	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
2304 
2305 	if (event_data == NULL)
2306 		return notifier_from_errno(-ENOMEM);
2307 
2308 	event_data->crashed = notif->crashed;
2309 
2310 	fw_down_data.crashed = !!notif->crashed;
2311 	if (test_bit(ICNSS_FW_READY, &priv->state)) {
2312 		clear_bit(ICNSS_FW_READY, &priv->state);
2313 		fw_down_data.crashed = !!notif->crashed;
2314 		icnss_call_driver_uevent(priv,
2315 					 ICNSS_UEVENT_FW_DOWN,
2316 					 &fw_down_data);
2317 	}
2318 	icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
2319 				ICNSS_EVENT_SYNC, event_data);
2320 
2321 	if (notif->crashed)
2322 		mod_timer(&priv->recovery_timer,
2323 			  jiffies + msecs_to_jiffies(ICNSS_RECOVERY_TIMEOUT));
2324 out:
	icnss_pr_vdbg("Exit %s, state: 0x%lx\n", __func__, priv->state);
2326 	return NOTIFY_OK;
2327 }
2328 
2329 static int icnss_wpss_early_ssr_register_notifier(struct icnss_priv *priv)
2330 {
2331 	int ret = 0;
2332 
2333 	priv->wpss_early_ssr_nb.notifier_call = icnss_wpss_early_notifier_nb;
2334 
2335 	priv->wpss_early_notify_handler =
2336 		qcom_register_early_ssr_notifier("wpss",
2337 						 &priv->wpss_early_ssr_nb);
2338 
2339 	if (IS_ERR(priv->wpss_early_notify_handler)) {
2340 		ret = PTR_ERR(priv->wpss_early_notify_handler);
2341 		icnss_pr_err("WPSS register early notifier failed: %d\n", ret);
2342 	}
2343 
2344 	return ret;
2345 }
2346 
2347 static int icnss_wpss_ssr_register_notifier(struct icnss_priv *priv)
2348 {
2349 	int ret = 0;
2350 
2351 	priv->wpss_ssr_nb.notifier_call = icnss_wpss_notifier_nb;
2352 	/*
2353 	 * Assign priority of icnss wpss notifier callback over IPA
2354 	 * modem notifier callback which is 0
2355 	 */
2356 	priv->wpss_ssr_nb.priority = 1;
2357 
2358 	priv->wpss_notify_handler =
2359 		qcom_register_ssr_notifier("wpss", &priv->wpss_ssr_nb);
2360 
2361 	if (IS_ERR(priv->wpss_notify_handler)) {
2362 		ret = PTR_ERR(priv->wpss_notify_handler);
2363 		icnss_pr_err("WPSS register notifier failed: %d\n", ret);
2364 	}
2365 
2366 	set_bit(ICNSS_SSR_REGISTERED, &priv->state);
2367 
2368 	return ret;
2369 }
2370 
2371 #ifdef SLATE_MODULE_ENABLED
2372 static int icnss_slate_event_notifier_nb(struct notifier_block *nb,
2373 					 unsigned long event, void *data)
2374 {
	icnss_pr_info("Received slate event 0x%lx\n", event);
2376 
2377 	if (event == SLATE_STATUS) {
2378 		struct icnss_priv *priv = container_of(nb, struct icnss_priv,
2379 						       seb_nb);
2380 		enum boot_status status = *(enum boot_status *)data;
2381 
2382 		if (status == SLATE_READY) {
2383 			icnss_pr_dbg("Slate ready received, state: 0x%lx\n",
2384 				     priv->state);
2385 			set_bit(ICNSS_SLATE_READY, &priv->state);
2386 			set_bit(ICNSS_SLATE_UP, &priv->state);
2387 			complete(&priv->slate_boot_complete);
2388 		}
2389 	}
2390 
2391 	return NOTIFY_OK;
2392 }
2393 
2394 static int icnss_register_slate_event_notifier(struct icnss_priv *priv)
2395 {
2396 	int ret = 0;
2397 
2398 	priv->seb_nb.notifier_call = icnss_slate_event_notifier_nb;
2399 
2400 	priv->seb_handle = seb_register_for_slate_event(SLATE_STATUS,
2401 							&priv->seb_nb);
2402 	if (IS_ERR_OR_NULL(priv->seb_handle)) {
2403 		ret = priv->seb_handle ? PTR_ERR(priv->seb_handle) : -EINVAL;
2404 		icnss_pr_err("SLATE event register notifier failed: %d\n",
2405 			     ret);
2406 	}
2407 
2408 	return ret;
2409 }
2410 
2411 static int icnss_unregister_slate_event_notifier(struct icnss_priv *priv)
2412 {
2413 	int ret = 0;
2414 
2415 	ret = seb_unregister_for_slate_event(priv->seb_handle, &priv->seb_nb);
2416 	if (ret < 0)
2417 		icnss_pr_err("Slate event unregister failed: %d\n", ret);
2418 
2419 	return ret;
2420 }
2421 
2422 static int icnss_slate_notifier_nb(struct notifier_block *nb,
2423 				   unsigned long code,
2424 				   void *data)
2425 {
2426 	struct icnss_priv *priv = container_of(nb, struct icnss_priv,
2427 					       slate_ssr_nb);
2428 	int ret = 0;
2429 
2430 	icnss_pr_vdbg("Slate-subsys-notify: event %lu\n", code);
2431 
2432 	if (code == QCOM_SSR_AFTER_POWERUP &&
2433 	    test_bit(ICNSS_SLATE_READY, &priv->state)) {
2434 		set_bit(ICNSS_SLATE_UP, &priv->state);
2435 		complete(&priv->slate_boot_complete);
2436 		icnss_pr_dbg("Slate boot complete, state: 0x%lx\n",
2437 			     priv->state);
2438 	} else if (code == QCOM_SSR_BEFORE_SHUTDOWN &&
2439 		   test_bit(ICNSS_SLATE_UP, &priv->state)) {
2440 		clear_bit(ICNSS_SLATE_UP, &priv->state);
2441 		if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
2442 			icnss_pr_err("PD_RESTART in progress 0x%lx\n",
2443 				     priv->state);
2444 			goto skip_pdr;
2445 		}
2446 
2447 		icnss_pr_dbg("Initiating PDR 0x%lx\n", priv->state);
2448 		ret = icnss_trigger_recovery(&priv->pdev->dev);
2449 		if (ret < 0) {
2450 			icnss_fatal_err("Fail to trigger PDR: ret: %d, state: 0x%lx\n",
2451 					ret, priv->state);
2452 			goto skip_pdr;
2453 		}
2454 	}
2455 
2456 skip_pdr:
2457 	return NOTIFY_OK;
2458 }
2459 
2460 static int icnss_slate_ssr_register_notifier(struct icnss_priv *priv)
2461 {
2462 	int ret = 0;
2463 
2464 	priv->slate_ssr_nb.notifier_call = icnss_slate_notifier_nb;
2465 
2466 	priv->slate_notify_handler =
2467 		qcom_register_ssr_notifier("slatefw", &priv->slate_ssr_nb);
2468 
2469 	if (IS_ERR(priv->slate_notify_handler)) {
2470 		ret = PTR_ERR(priv->slate_notify_handler);
2471 		icnss_pr_err("SLATE register notifier failed: %d\n", ret);
2472 	}
2473 
2474 	set_bit(ICNSS_SLATE_SSR_REGISTERED, &priv->state);
2475 
2476 	return ret;
2477 }
2478 
2479 static int icnss_slate_ssr_unregister_notifier(struct icnss_priv *priv)
2480 {
2481 	if (!test_and_clear_bit(ICNSS_SLATE_SSR_REGISTERED, &priv->state))
2482 		return 0;
2483 
2484 	qcom_unregister_ssr_notifier(priv->slate_notify_handler,
2485 				     &priv->slate_ssr_nb);
2486 	priv->slate_notify_handler = NULL;
2487 
2488 	return 0;
2489 }
2490 #else
2491 static int icnss_register_slate_event_notifier(struct icnss_priv *priv)
2492 {
2493 	return 0;
2494 }
2495 
2496 static int icnss_unregister_slate_event_notifier(struct icnss_priv *priv)
2497 {
2498 	return 0;
2499 }
2500 
2501 static int icnss_slate_ssr_register_notifier(struct icnss_priv *priv)
2502 {
2503 	return 0;
2504 }
2505 
2506 static int icnss_slate_ssr_unregister_notifier(struct icnss_priv *priv)
2507 {
2508 	return 0;
2509 }
2510 #endif
2511 
2512 static int icnss_modem_ssr_register_notifier(struct icnss_priv *priv)
2513 {
2514 	int ret = 0;
2515 
2516 	priv->modem_ssr_nb.notifier_call = icnss_modem_notifier_nb;
2517 	/*
2518 	 * Assign priority of icnss modem notifier callback over IPA
2519 	 * modem notifier callback which is 0
2520 	 */
2521 	priv->modem_ssr_nb.priority = 1;
2522 
2523 	priv->modem_notify_handler =
2524 		qcom_register_ssr_notifier("mpss", &priv->modem_ssr_nb);
2525 
2526 	if (IS_ERR(priv->modem_notify_handler)) {
2527 		ret = PTR_ERR(priv->modem_notify_handler);
2528 		icnss_pr_err("Modem register notifier failed: %d\n", ret);
2529 	}
2530 
2531 	set_bit(ICNSS_SSR_REGISTERED, &priv->state);
2532 
2533 	return ret;
2534 }
2535 
2536 static void icnss_wpss_early_ssr_unregister_notifier(struct icnss_priv *priv)
2537 {
2538 	if (IS_ERR(priv->wpss_early_notify_handler))
2539 		return;
2540 
2541 	qcom_unregister_early_ssr_notifier(priv->wpss_early_notify_handler,
2542 					   &priv->wpss_early_ssr_nb);
2543 	priv->wpss_early_notify_handler = NULL;
2544 }
2545 
2546 static int icnss_wpss_ssr_unregister_notifier(struct icnss_priv *priv)
2547 {
2548 	if (!test_and_clear_bit(ICNSS_SSR_REGISTERED, &priv->state))
2549 		return 0;
2550 
2551 	qcom_unregister_ssr_notifier(priv->wpss_notify_handler,
2552 				     &priv->wpss_ssr_nb);
2553 	priv->wpss_notify_handler = NULL;
2554 
2555 	return 0;
2556 }
2557 
2558 static int icnss_modem_ssr_unregister_notifier(struct icnss_priv *priv)
2559 {
2560 	if (!test_and_clear_bit(ICNSS_SSR_REGISTERED, &priv->state))
2561 		return 0;
2562 
2563 	qcom_unregister_ssr_notifier(priv->modem_notify_handler,
2564 				     &priv->modem_ssr_nb);
2565 	priv->modem_notify_handler = NULL;
2566 
2567 	return 0;
2568 }
2569 
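/*
 * Protection domain restart (PDR) state callback: on SERVREG_SERVICE_STATE_DOWN
 * it classifies the cause (host-triggered PDR, FW crash or root PD shutdown),
 * marks the FW down, notifies the WLAN driver and posts a PD_SERVICE_DOWN
 * event; SERVREG_SERVICE_STATE_UP simply clears ICNSS_FW_DOWN.
 */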
2570 static void icnss_pdr_notifier_cb(int state, char *service_path, void *priv_cb)
2571 {
2572 	struct icnss_priv *priv = priv_cb;
2573 	struct icnss_event_pd_service_down_data *event_data;
2574 	struct icnss_uevent_fw_down_data fw_down_data = {0};
2575 	enum icnss_pdr_cause_index cause = ICNSS_ROOT_PD_CRASH;
2576 
2577 	if (!priv)
2578 		return;
2579 
	icnss_pr_dbg("PD service notification: 0x%x state: 0x%lx\n",
2581 		     state, priv->state);
2582 
2583 	switch (state) {
2584 	case SERVREG_SERVICE_STATE_DOWN:
2585 		event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
2586 
2587 		if (!event_data)
2588 			return;
2589 
2590 		event_data->crashed = true;
2591 
2592 		if (!priv->is_ssr) {
2593 			set_bit(ICNSS_PDR, &penv->state);
2594 			if (test_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state)) {
2595 				cause = ICNSS_HOST_ERROR;
2596 				priv->stats.recovery.pdr_host_error++;
2597 			} else {
2598 				cause = ICNSS_FW_CRASH;
2599 				priv->stats.recovery.pdr_fw_crash++;
2600 			}
2601 		} else if (priv->root_pd_shutdown) {
2602 			cause = ICNSS_ROOT_PD_SHUTDOWN;
2603 			event_data->crashed = false;
2604 		}
2605 
2606 		icnss_pr_info("PD service down, state: 0x%lx: cause: %s\n",
2607 			      priv->state, icnss_pdr_cause[cause]);
2608 
2609 		if (!test_bit(ICNSS_FW_DOWN, &priv->state)) {
2610 			set_bit(ICNSS_FW_DOWN, &priv->state);
2611 			icnss_ignore_fw_timeout(true);
2612 
2613 			if (test_bit(ICNSS_FW_READY, &priv->state)) {
2614 				clear_bit(ICNSS_FW_READY, &priv->state);
2615 				fw_down_data.crashed = event_data->crashed;
2616 				icnss_call_driver_uevent(priv,
2617 							 ICNSS_UEVENT_FW_DOWN,
2618 							 &fw_down_data);
2619 			}
2620 		}
2621 		clear_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
2622 
2623 		if (event_data->crashed)
2624 			mod_timer(&priv->recovery_timer,
2625 				  jiffies +
2626 				  msecs_to_jiffies(ICNSS_RECOVERY_TIMEOUT));
2627 
2628 		icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
2629 					ICNSS_EVENT_SYNC, event_data);
2630 		break;
2631 	case SERVREG_SERVICE_STATE_UP:
2632 		clear_bit(ICNSS_FW_DOWN, &priv->state);
2633 		break;
2634 	default:
2635 		break;
2636 	}
2637 	return;
2638 }
2639 
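/*
 * Register with the PDR framework for the "wlan/fw" service in the modem WLAN
 * protection domain so icnss_pdr_notifier_cb() is invoked on state changes.
 */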
2640 static int icnss_pd_restart_enable(struct icnss_priv *priv)
2641 {
2642 	struct pdr_handle *handle = NULL;
2643 	struct pdr_service *service = NULL;
2644 	int err = 0;
2645 
2646 	handle = pdr_handle_alloc(icnss_pdr_notifier_cb, priv);
2647 	if (IS_ERR_OR_NULL(handle)) {
		err = handle ? PTR_ERR(handle) : -EINVAL;
2649 		icnss_pr_err("Failed to alloc pdr handle, err %d", err);
2650 		goto out;
2651 	}
2652 	service = pdr_add_lookup(handle, ICNSS_WLAN_SERVICE_NAME, ICNSS_WLANPD_NAME);
2653 	if (IS_ERR_OR_NULL(service)) {
		err = service ? PTR_ERR(service) : -EINVAL;
2655 		icnss_pr_err("Failed to add lookup, err %d", err);
2656 		goto out;
2657 	}
2658 	priv->pdr_handle = handle;
2659 	priv->pdr_service = service;
2660 	set_bit(ICNSS_PDR_REGISTERED, &priv->state);
2661 
	icnss_pr_info("PDR registered successfully");
2663 out:
2664 	return err;
2665 }
2666 
2667 static void icnss_pdr_unregister_notifier(struct icnss_priv *priv)
2668 {
2669 	if (!test_and_clear_bit(ICNSS_PDR_REGISTERED, &priv->state))
2670 		return;
2671 
2672 	pdr_handle_release(priv->pdr_handle);
2673 }
2674 
2675 static int icnss_ramdump_devnode_init(struct icnss_priv *priv)
2676 {
2677 	int ret = 0;
2678 
2679 	priv->icnss_ramdump_class = class_create(THIS_MODULE, ICNSS_RAMDUMP_NAME);
2680 	if (IS_ERR_OR_NULL(priv->icnss_ramdump_class)) {
2681 		ret = PTR_ERR(priv->icnss_ramdump_class);
		icnss_pr_err("%s: Class create failed for ramdump devices (%d)\n", __func__, ret);
2683 		return ret;
2684 	}
2685 
2686 	ret = alloc_chrdev_region(&priv->icnss_ramdump_dev, 0, RAMDUMP_NUM_DEVICES,
2687 				  ICNSS_RAMDUMP_NAME);
2688 	if (ret < 0) {
2689 		icnss_pr_err("%s: Unable to allocate major\n", __func__);
2690 		goto fail_alloc_major;
2691 	}
2692 	return 0;
2693 
2694 fail_alloc_major:
2695 	class_destroy(priv->icnss_ramdump_class);
2696 	return ret;
2697 }
2698 
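/*
 * Allocate an icnss_ramdump_info, reserve a minor number from rd_minor_id and
 * create the character device through which the ramdump is exposed. Returns
 * an ERR_PTR on failure (or NULL for an invalid name).
 */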
2699 void *icnss_create_ramdump_device(struct icnss_priv *priv, const char *dev_name)
2700 {
2701 	int ret = 0;
2702 	struct icnss_ramdump_info *ramdump_info;
2703 
	if (!dev_name) {
		icnss_pr_err("%s: Invalid device name.\n", __func__);
		return NULL;
	}

	ramdump_info = kzalloc(sizeof(*ramdump_info), GFP_KERNEL);
	if (!ramdump_info)
		return ERR_PTR(-ENOMEM);
2712 
2713 	snprintf(ramdump_info->name, ARRAY_SIZE(ramdump_info->name), "icnss_%s", dev_name);
2714 
2715 	ramdump_info->minor = ida_simple_get(&rd_minor_id, 0, RAMDUMP_NUM_DEVICES, GFP_KERNEL);
2716 	if (ramdump_info->minor < 0) {
2717 		icnss_pr_err("%s: No more minor numbers left! rc:%d\n", __func__,
2718 			     ramdump_info->minor);
2719 		ret = -ENODEV;
2720 		goto fail_out_of_minors;
2721 	}
2722 
2723 	ramdump_info->dev = device_create(priv->icnss_ramdump_class, NULL,
2724 					  MKDEV(MAJOR(priv->icnss_ramdump_dev),
2725 					  ramdump_info->minor),
2726 					  ramdump_info, ramdump_info->name);
2727 	if (IS_ERR_OR_NULL(ramdump_info->dev)) {
2728 		ret = PTR_ERR(ramdump_info->dev);
2729 		icnss_pr_err("%s: Device create failed for %s (%d)\n", __func__,
2730 			     ramdump_info->name, ret);
2731 		goto fail_device_create;
2732 	}
2733 	return (void *)ramdump_info;
2734 
2735 fail_device_create:
2736 	ida_simple_remove(&rd_minor_id, ramdump_info->minor);
2737 fail_out_of_minors:
2738 	kfree(ramdump_info);
2739 	return ERR_PTR(ret);
2740 }
2741 
2742 static int icnss_register_ramdump_devices(struct icnss_priv *priv)
2743 {
2744 	int ret = 0;
2745 
2746 	if (!priv || !priv->pdev) {
2747 		icnss_pr_err("Platform priv or pdev is NULL\n");
2748 		return -EINVAL;
2749 	}
2750 
2751 	ret = icnss_ramdump_devnode_init(priv);
2752 	if (ret)
2753 		return ret;
2754 
2755 	priv->msa0_dump_dev = icnss_create_ramdump_device(priv, "wcss_msa0");
2756 
2757 	if (IS_ERR_OR_NULL(priv->msa0_dump_dev) || !priv->msa0_dump_dev->dev) {
2758 		icnss_pr_err("Failed to create msa0 dump device!");
2759 		return -ENOMEM;
2760 	}
2761 
2762 	if (priv->device_id == WCN6750_DEVICE_ID ||
2763 	    priv->device_id == WCN6450_DEVICE_ID) {
2764 		priv->m3_dump_phyareg = icnss_create_ramdump_device(priv,
2765 						ICNSS_M3_SEGMENT(
2766 						ICNSS_M3_SEGMENT_PHYAREG));
2767 
2768 		if (IS_ERR_OR_NULL(priv->m3_dump_phyareg) ||
2769 		    !priv->m3_dump_phyareg->dev) {
2770 			icnss_pr_err("Failed to create m3 dump for Phyareg segment device!");
2771 			return -ENOMEM;
2772 		}
2773 
2774 		priv->m3_dump_phydbg = icnss_create_ramdump_device(priv,
2775 						ICNSS_M3_SEGMENT(
2776 						ICNSS_M3_SEGMENT_PHYA));
2777 
2778 		if (IS_ERR_OR_NULL(priv->m3_dump_phydbg) ||
2779 		    !priv->m3_dump_phydbg->dev) {
2780 			icnss_pr_err("Failed to create m3 dump for Phydbg segment device!");
2781 			return -ENOMEM;
2782 		}
2783 
2784 		priv->m3_dump_wmac0reg = icnss_create_ramdump_device(priv,
2785 						ICNSS_M3_SEGMENT(
2786 						ICNSS_M3_SEGMENT_WMACREG));
2787 
2788 		if (IS_ERR_OR_NULL(priv->m3_dump_wmac0reg) ||
2789 		    !priv->m3_dump_wmac0reg->dev) {
2790 			icnss_pr_err("Failed to create m3 dump for Wmac0reg segment device!");
2791 			return -ENOMEM;
2792 		}
2793 
2794 		priv->m3_dump_wcssdbg = icnss_create_ramdump_device(priv,
2795 						ICNSS_M3_SEGMENT(
2796 						ICNSS_M3_SEGMENT_WCSSDBG));
2797 
2798 		if (IS_ERR_OR_NULL(priv->m3_dump_wcssdbg) ||
2799 		    !priv->m3_dump_wcssdbg->dev) {
2800 			icnss_pr_err("Failed to create m3 dump for Wcssdbg segment device!");
2801 			return -ENOMEM;
2802 		}
2803 
2804 		priv->m3_dump_phyapdmem = icnss_create_ramdump_device(priv,
2805 						ICNSS_M3_SEGMENT(
2806 						ICNSS_M3_SEGMENT_PHYAM3));
2807 
2808 		if (IS_ERR_OR_NULL(priv->m3_dump_phyapdmem) ||
2809 		    !priv->m3_dump_phyapdmem->dev) {
2810 			icnss_pr_err("Failed to create m3 dump for Phyapdmem segment device!");
2811 			return -ENOMEM;
2812 		}
2813 	}
2814 
2815 	return 0;
2816 }
2817 
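/*
 * Wire up recovery support: ramdump devices, then WPSS or modem SSR notifiers
 * depending on the platform (plus slate notifiers when applicable), and
 * finally PDR, all subject to the RECOVERY_DISABLE/SSR_ONLY/PDR_ONLY quirks.
 */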
2818 static int icnss_enable_recovery(struct icnss_priv *priv)
2819 {
2820 	int ret;
2821 
2822 	if (test_bit(RECOVERY_DISABLE, &priv->ctrl_params.quirks)) {
2823 		icnss_pr_dbg("Recovery disabled through module parameter\n");
2824 		return 0;
2825 	}
2826 
2827 	if (test_bit(PDR_ONLY, &priv->ctrl_params.quirks)) {
2828 		icnss_pr_dbg("SSR disabled through module parameter\n");
2829 		goto enable_pdr;
2830 	}
2831 
2832 	ret = icnss_register_ramdump_devices(priv);
2833 	if (ret)
2834 		return ret;
2835 
2836 	if (priv->wpss_supported) {
2837 		icnss_wpss_early_ssr_register_notifier(priv);
2838 		icnss_wpss_ssr_register_notifier(priv);
2839 		return 0;
2840 	}
2841 
2842 	if (!(priv->rproc_fw_download))
2843 		icnss_modem_ssr_register_notifier(priv);
2844 
2845 	if (priv->is_slate_rfa) {
2846 		icnss_slate_ssr_register_notifier(priv);
2847 		icnss_register_slate_event_notifier(priv);
2848 	}
2849 
2850 	if (test_bit(SSR_ONLY, &priv->ctrl_params.quirks)) {
2851 		icnss_pr_dbg("PDR disabled through module parameter\n");
2852 		return 0;
2853 	}
2854 
2855 enable_pdr:
2856 	ret = icnss_pd_restart_enable(priv);
2857 
2858 	if (ret)
2859 		return ret;
2860 
2861 	return 0;
2862 }
2863 
2864 static int icnss_dev_id_match(struct icnss_priv *priv,
2865 			      struct device_info *dev_info)
2866 {
2867 	while (dev_info->device_id) {
2868 		if (priv->device_id == dev_info->device_id)
2869 			return 1;
2870 		dev_info++;
2871 	}
2872 	return 0;
2873 }
2874 
2875 static int icnss_tcdev_get_max_state(struct thermal_cooling_device *tcdev,
2876 					unsigned long *thermal_state)
2877 {
2878 	struct icnss_thermal_cdev *icnss_tcdev = tcdev->devdata;
2879 
2880 	*thermal_state = icnss_tcdev->max_thermal_state;
2881 
2882 	return 0;
2883 }
2884 
2885 static int icnss_tcdev_get_cur_state(struct thermal_cooling_device *tcdev,
2886 					unsigned long *thermal_state)
2887 {
2888 	struct icnss_thermal_cdev *icnss_tcdev = tcdev->devdata;
2889 
2890 	*thermal_state = icnss_tcdev->curr_thermal_state;
2891 
2892 	return 0;
2893 }
2894 
2895 static int icnss_tcdev_set_cur_state(struct thermal_cooling_device *tcdev,
2896 					unsigned long thermal_state)
2897 {
2898 	struct icnss_thermal_cdev *icnss_tcdev = tcdev->devdata;
2899 	struct device *dev = &penv->pdev->dev;
2900 	int ret = 0;
2901 
2902 
2903 	if (!penv->ops || !penv->ops->set_therm_cdev_state)
2904 		return 0;
2905 
2906 	if (thermal_state > icnss_tcdev->max_thermal_state)
2907 		return -EINVAL;
2908 
	icnss_pr_vdbg("Cooling device set current state: %ld, for cdev id %d",
2910 		      thermal_state, icnss_tcdev->tcdev_id);
2911 
2912 	mutex_lock(&penv->tcdev_lock);
2913 	ret = penv->ops->set_therm_cdev_state(dev, thermal_state,
2914 					      icnss_tcdev->tcdev_id);
2915 	if (!ret)
2916 		icnss_tcdev->curr_thermal_state = thermal_state;
2917 	mutex_unlock(&penv->tcdev_lock);
2918 	if (ret) {
		icnss_pr_err("Setting current thermal state failed: %d, for cdev id %d",
2920 			     ret, icnss_tcdev->tcdev_id);
2921 		return ret;
2922 	}
2923 
2924 	return 0;
2925 }
2926 
2927 static struct thermal_cooling_device_ops icnss_cooling_ops = {
2928 	.get_max_state = icnss_tcdev_get_max_state,
2929 	.get_cur_state = icnss_tcdev_get_cur_state,
2930 	.set_cur_state = icnss_tcdev_set_cur_state,
2931 };
2932 
2933 int icnss_thermal_cdev_register(struct device *dev, unsigned long max_state,
2934 			   int tcdev_id)
2935 {
2936 	struct icnss_priv *priv = dev_get_drvdata(dev);
2937 	struct icnss_thermal_cdev *icnss_tcdev = NULL;
2938 	char cdev_node_name[THERMAL_NAME_LENGTH] = "";
2939 	struct device_node *dev_node;
2940 	int ret = 0;
2941 
2942 	icnss_tcdev = kzalloc(sizeof(*icnss_tcdev), GFP_KERNEL);
2943 	if (!icnss_tcdev)
2944 		return -ENOMEM;
2945 
2946 	icnss_tcdev->tcdev_id = tcdev_id;
2947 	icnss_tcdev->max_thermal_state = max_state;
2948 
2949 	snprintf(cdev_node_name, THERMAL_NAME_LENGTH,
2950 		 "qcom,icnss_cdev%d", tcdev_id);
2951 
	dev_node = of_find_node_by_name(NULL, cdev_node_name);
	if (!dev_node) {
		icnss_pr_err("Failed to get cooling device node\n");
		kfree(icnss_tcdev);
		return -EINVAL;
	}
2957 
2958 	icnss_pr_dbg("tcdev node->name=%s\n", dev_node->name);
2959 
2960 	if (of_find_property(dev_node, "#cooling-cells", NULL)) {
2961 		icnss_tcdev->tcdev = thermal_of_cooling_device_register(
2962 						dev_node,
2963 						cdev_node_name, icnss_tcdev,
2964 						&icnss_cooling_ops);
		if (IS_ERR_OR_NULL(icnss_tcdev->tcdev)) {
			ret = PTR_ERR(icnss_tcdev->tcdev);
			icnss_pr_err("Cooling device register failed: %d, for cdev id %d\n",
				     ret, icnss_tcdev->tcdev_id);
			kfree(icnss_tcdev);
		} else {
2970 			icnss_pr_dbg("Cooling device registered for cdev id %d",
2971 				     icnss_tcdev->tcdev_id);
2972 			list_add(&icnss_tcdev->tcdev_list,
2973 				 &priv->icnss_tcdev_list);
2974 		}
	} else {
		icnss_pr_dbg("Cooling device registration not supported");
		kfree(icnss_tcdev);
		ret = -EOPNOTSUPP;
	}
2979 
2980 	return ret;
2981 }
2982 EXPORT_SYMBOL(icnss_thermal_cdev_register);
2983 
2984 void icnss_thermal_cdev_unregister(struct device *dev, int tcdev_id)
2985 {
2986 	struct icnss_priv *priv = dev_get_drvdata(dev);
2987 	struct icnss_thermal_cdev *icnss_tcdev = NULL;
2988 
2989 	while (!list_empty(&priv->icnss_tcdev_list)) {
2990 		icnss_tcdev = list_first_entry(&priv->icnss_tcdev_list,
2991 					       struct icnss_thermal_cdev,
2992 					       tcdev_list);
2993 		thermal_cooling_device_unregister(icnss_tcdev->tcdev);
2994 		list_del(&icnss_tcdev->tcdev_list);
2995 		kfree(icnss_tcdev);
2996 	}
2997 }
2998 EXPORT_SYMBOL(icnss_thermal_cdev_unregister);
2999 
3000 int icnss_get_curr_therm_cdev_state(struct device *dev,
3001 				    unsigned long *thermal_state,
3002 				    int tcdev_id)
3003 {
3004 	struct icnss_priv *priv = dev_get_drvdata(dev);
3005 	struct icnss_thermal_cdev *icnss_tcdev = NULL;
3006 
3007 	mutex_lock(&priv->tcdev_lock);
3008 	list_for_each_entry(icnss_tcdev, &priv->icnss_tcdev_list, tcdev_list) {
3009 		if (icnss_tcdev->tcdev_id != tcdev_id)
3010 			continue;
3011 
3012 		*thermal_state = icnss_tcdev->curr_thermal_state;
3013 		mutex_unlock(&priv->tcdev_lock);
3014 		icnss_pr_dbg("Cooling device current state: %ld, for cdev id %d",
3015 			     icnss_tcdev->curr_thermal_state, tcdev_id);
3016 		return 0;
3017 	}
3018 	mutex_unlock(&priv->tcdev_lock);
3019 	icnss_pr_dbg("Cooling device ID not found: %d", tcdev_id);
3020 	return -EINVAL;
3021 }
3022 EXPORT_SYMBOL(icnss_get_curr_therm_cdev_state);
3023 
3024 int icnss_qmi_send(struct device *dev, int type, void *cmd,
3025 		  int cmd_len, void *cb_ctx,
3026 		  int (*cb)(void *ctx, void *event, int event_len))
3027 {
3028 	struct icnss_priv *priv = icnss_get_plat_priv();
3029 	int ret;
3030 
3031 	if (!priv)
3032 		return -ENODEV;
3033 
3034 	if (!test_bit(ICNSS_WLFW_CONNECTED, &priv->state))
3035 		return -EINVAL;
3036 
3037 	priv->get_info_cb = cb;
3038 	priv->get_info_cb_ctx = cb_ctx;
3039 
3040 	ret = icnss_wlfw_get_info_send_sync(priv, type, cmd, cmd_len);
3041 	if (ret) {
3042 		priv->get_info_cb = NULL;
3043 		priv->get_info_cb_ctx = NULL;
3044 	}
3045 
3046 	return ret;
3047 }
3048 EXPORT_SYMBOL(icnss_qmi_send);
3049 
3050 int __icnss_register_driver(struct icnss_driver_ops *ops,
3051 			    struct module *owner, const char *mod_name)
3052 {
3053 	int ret = 0;
3054 	struct icnss_priv *priv = icnss_get_plat_priv();
3055 
3056 	if (!priv || !priv->pdev) {
3057 		ret = -ENODEV;
3058 		goto out;
3059 	}
3060 
3061 	icnss_pr_dbg("Registering driver, state: 0x%lx\n", priv->state);
3062 
3063 	if (priv->ops) {
3064 		icnss_pr_err("Driver already registered\n");
3065 		ret = -EEXIST;
3066 		goto out;
3067 	}
3068 
3069 	if (!ops->dev_info) {
		icnss_pr_err("WLAN driver devinfo is NULL, rejecting WLAN driver load");
3071 		return -EINVAL;
3072 	}
3073 
3074 	if (!icnss_dev_id_match(priv, ops->dev_info)) {
3075 		icnss_pr_err("WLAN driver dev name is %s, not supported by platform driver\n",
3076 			     ops->dev_info->name);
3077 		return -ENODEV;
3078 	}
3079 
3080 	if (!ops->probe || !ops->remove) {
3081 		ret = -EINVAL;
3082 		goto out;
3083 	}
3084 
3085 	ret = icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_REGISTER_DRIVER,
3086 				      0, ops);
3087 
3088 	if (ret == -EINTR)
3089 		ret = 0;
3090 
3091 out:
3092 	return ret;
3093 }
3094 EXPORT_SYMBOL(__icnss_register_driver);
3095 
3096 int icnss_unregister_driver(struct icnss_driver_ops *ops)
3097 {
3098 	int ret;
3099 	struct icnss_priv *priv = icnss_get_plat_priv();
3100 
3101 	if (!priv || !priv->pdev) {
3102 		ret = -ENODEV;
3103 		goto out;
3104 	}
3105 
3106 	icnss_pr_dbg("Unregistering driver, state: 0x%lx\n", priv->state);
3107 
3108 	if (!priv->ops) {
3109 		icnss_pr_err("Driver not registered\n");
3110 		ret = -ENOENT;
3111 		goto out;
3112 	}
3113 
3114 	ret = icnss_driver_event_post(priv,
3115 					 ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
3116 				      ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
3117 out:
3118 	return ret;
3119 }
3120 EXPORT_SYMBOL(icnss_unregister_driver);
3121 
3122 static struct icnss_msi_config msi_config_wcn6750 = {
3123 	.total_vectors = 28,
3124 	.total_users = 2,
3125 	.users = (struct icnss_msi_user[]) {
3126 		{ .name = "CE", .num_vectors = 10, .base_vector = 0 },
3127 		{ .name = "DP", .num_vectors = 18, .base_vector = 10 },
3128 	},
3129 };
3130 
3131 static struct icnss_msi_config msi_config_wcn6450 = {
3132 	.total_vectors = 10,
3133 	.total_users = 1,
3134 	.users = (struct icnss_msi_user[]) {
3135 		{ .name = "CE", .num_vectors = 10, .base_vector = 0 },
3136 	},
3137 };
3138 
3139 static int icnss_get_msi_assignment(struct icnss_priv *priv)
3140 {
3141 	if (priv->device_id == WCN6750_DEVICE_ID)
3142 		priv->msi_config = &msi_config_wcn6750;
3143 	else
3144 		priv->msi_config = &msi_config_wcn6450;
3145 
3146 	return 0;
3147 }
3148 
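/*
 * Look up the MSI vector block reserved for a user (e.g. "CE" or "DP") in the
 * per-chip msi_config table and return its vector count, base vector, and the
 * user base data (base vector offset by priv->msi_base_data).
 */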
3149 int icnss_get_user_msi_assignment(struct device *dev, char *user_name,
3150 				 int *num_vectors, u32 *user_base_data,
3151 				 u32 *base_vector)
3152 {
3153 	struct icnss_priv *priv = dev_get_drvdata(dev);
3154 	struct icnss_msi_config *msi_config;
3155 	int idx;
3156 
3157 	if (!priv)
3158 		return -ENODEV;
3159 
3160 	msi_config = priv->msi_config;
3161 	if (!msi_config) {
3162 		icnss_pr_err("MSI is not supported.\n");
3163 		return -EINVAL;
3164 	}
3165 
3166 	for (idx = 0; idx < msi_config->total_users; idx++) {
3167 		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
3168 			*num_vectors = msi_config->users[idx].num_vectors;
3169 			*user_base_data = msi_config->users[idx].base_vector
3170 				+ priv->msi_base_data;
3171 			*base_vector = msi_config->users[idx].base_vector;
3172 
3173 			icnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
3174 				    user_name, *num_vectors, *user_base_data,
3175 				    *base_vector);
3176 
3177 			return 0;
3178 		}
3179 	}
3180 
3181 	icnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);
3182 
3183 	return -EINVAL;
3184 }
3185 EXPORT_SYMBOL(icnss_get_user_msi_assignment);
3186 
3187 int icnss_get_msi_irq(struct device *dev, unsigned int vector)
3188 {
3189 	struct icnss_priv *priv = dev_get_drvdata(dev);
3190 	int irq_num;
3191 
3192 	irq_num = priv->srng_irqs[vector];
3193 	icnss_pr_dbg("Get IRQ number %d for vector index %d\n",
3194 		     irq_num, vector);
3195 
3196 	return irq_num;
3197 }
3198 EXPORT_SYMBOL(icnss_get_msi_irq);
3199 
3200 void icnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
3201 			   u32 *msi_addr_high)
3202 {
3203 	struct icnss_priv *priv = dev_get_drvdata(dev);
3204 
3205 	*msi_addr_low = lower_32_bits(priv->msi_addr_iova);
3206 	*msi_addr_high = upper_32_bits(priv->msi_addr_iova);
3207 
3208 }
3209 EXPORT_SYMBOL(icnss_get_msi_address);
3210 
3211 int icnss_ce_request_irq(struct device *dev, unsigned int ce_id,
3212 	irqreturn_t (*handler)(int, void *),
3213 		unsigned long flags, const char *name, void *ctx)
3214 {
3215 	int ret = 0;
3216 	unsigned int irq;
3217 	struct ce_irq_list *irq_entry;
3218 	struct icnss_priv *priv = dev_get_drvdata(dev);
3219 
3220 	if (!priv || !priv->pdev) {
3221 		ret = -ENODEV;
3222 		goto out;
3223 	}
3224 
3225 	icnss_pr_vdbg("CE request IRQ: %d, state: 0x%lx\n", ce_id, priv->state);
3226 
3227 	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
3228 		icnss_pr_err("Invalid CE ID, ce_id: %d\n", ce_id);
3229 		ret = -EINVAL;
3230 		goto out;
3231 	}
3232 	irq = priv->ce_irqs[ce_id];
3233 	irq_entry = &priv->ce_irq_list[ce_id];
3234 
3235 	if (irq_entry->handler || irq_entry->irq) {
3236 		icnss_pr_err("IRQ already requested: %d, ce_id: %d\n",
3237 			     irq, ce_id);
3238 		ret = -EEXIST;
3239 		goto out;
3240 	}
3241 
3242 	ret = request_irq(irq, handler, flags, name, ctx);
3243 	if (ret) {
3244 		icnss_pr_err("IRQ request failed: %d, ce_id: %d, ret: %d\n",
3245 			     irq, ce_id, ret);
3246 		goto out;
3247 	}
3248 	irq_entry->irq = irq;
3249 	irq_entry->handler = handler;
3250 
3251 	icnss_pr_vdbg("IRQ requested: %d, ce_id: %d\n", irq, ce_id);
3252 
3253 	penv->stats.ce_irqs[ce_id].request++;
3254 out:
3255 	return ret;
3256 }
3257 EXPORT_SYMBOL(icnss_ce_request_irq);
3258 
3259 int icnss_ce_free_irq(struct device *dev, unsigned int ce_id, void *ctx)
3260 {
3261 	int ret = 0;
3262 	unsigned int irq;
3263 	struct ce_irq_list *irq_entry;
3264 
3265 	if (!penv || !penv->pdev || !dev) {
3266 		ret = -ENODEV;
3267 		goto out;
3268 	}
3269 
3270 	icnss_pr_vdbg("CE free IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
3271 
3272 	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
3273 		icnss_pr_err("Invalid CE ID to free, ce_id: %d\n", ce_id);
3274 		ret = -EINVAL;
3275 		goto out;
3276 	}
3277 
3278 	irq = penv->ce_irqs[ce_id];
3279 	irq_entry = &penv->ce_irq_list[ce_id];
3280 	if (!irq_entry->handler || !irq_entry->irq) {
3281 		icnss_pr_err("IRQ not requested: %d, ce_id: %d\n", irq, ce_id);
3282 		ret = -EEXIST;
3283 		goto out;
3284 	}
3285 	free_irq(irq, ctx);
3286 	irq_entry->irq = 0;
3287 	irq_entry->handler = NULL;
3288 
3289 	penv->stats.ce_irqs[ce_id].free++;
3290 out:
3291 	return ret;
3292 }
3293 EXPORT_SYMBOL(icnss_ce_free_irq);
3294 
3295 void icnss_enable_irq(struct device *dev, unsigned int ce_id)
3296 {
3297 	unsigned int irq;
3298 
3299 	if (!penv || !penv->pdev || !dev) {
3300 		icnss_pr_err("Platform driver not initialized\n");
3301 		return;
3302 	}
3303 
3304 	icnss_pr_vdbg("Enable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
3305 		     penv->state);
3306 
3307 	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
3308 		icnss_pr_err("Invalid CE ID to enable IRQ, ce_id: %d\n", ce_id);
3309 		return;
3310 	}
3311 
3312 	penv->stats.ce_irqs[ce_id].enable++;
3313 
3314 	irq = penv->ce_irqs[ce_id];
3315 	enable_irq(irq);
3316 }
3317 EXPORT_SYMBOL(icnss_enable_irq);
3318 
3319 void icnss_disable_irq(struct device *dev, unsigned int ce_id)
3320 {
3321 	unsigned int irq;
3322 
3323 	if (!penv || !penv->pdev || !dev) {
3324 		icnss_pr_err("Platform driver not initialized\n");
3325 		return;
3326 	}
3327 
3328 	icnss_pr_vdbg("Disable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
3329 		     penv->state);
3330 
3331 	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
3332 		icnss_pr_err("Invalid CE ID to disable IRQ, ce_id: %d\n",
3333 			     ce_id);
3334 		return;
3335 	}
3336 
3337 	irq = penv->ce_irqs[ce_id];
3338 	disable_irq(irq);
3339 
3340 	penv->stats.ce_irqs[ce_id].disable++;
3341 }
3342 EXPORT_SYMBOL(icnss_disable_irq);
3343 
3344 int icnss_get_soc_info(struct device *dev, struct icnss_soc_info *info)
3345 {
3346 	char *fw_build_timestamp = NULL;
3347 	struct icnss_priv *priv = dev_get_drvdata(dev);
3348 
3349 	if (!priv) {
3350 		icnss_pr_err("Platform driver not initialized\n");
3351 		return -EINVAL;
3352 	}
3353 
3354 	info->v_addr = priv->mem_base_va;
3355 	info->p_addr = priv->mem_base_pa;
3356 	info->chip_id = priv->chip_info.chip_id;
3357 	info->chip_family = priv->chip_info.chip_family;
3358 	info->board_id = priv->board_id;
3359 	info->soc_id = priv->soc_id;
3360 	info->fw_version = priv->fw_version_info.fw_version;
3361 	fw_build_timestamp = priv->fw_version_info.fw_build_timestamp;
3362 	fw_build_timestamp[WLFW_MAX_TIMESTAMP_LEN] = '\0';
3363 	strlcpy(info->fw_build_timestamp,
3364 		priv->fw_version_info.fw_build_timestamp,
3365 		WLFW_MAX_TIMESTAMP_LEN + 1);
3366 	strlcpy(info->fw_build_id, priv->fw_build_id,
3367 	        ICNSS_WLFW_MAX_BUILD_ID_LEN + 1);
3368 	info->rd_card_chain_cap = priv->rd_card_chain_cap;
3369 	info->phy_he_channel_width_cap = priv->phy_he_channel_width_cap;
3370 	info->phy_qam_cap = priv->phy_qam_cap;
3371 
3372 	return 0;
3373 }
3374 EXPORT_SYMBOL(icnss_get_soc_info);
3375 
3376 int icnss_get_mhi_state(struct device *dev)
3377 {
3378 	struct icnss_priv *priv = dev_get_drvdata(dev);
3379 
3380 	if (!priv) {
3381 		icnss_pr_err("Platform driver not initialized\n");
3382 		return -EINVAL;
3383 	}
3384 
3385 	if (!priv->mhi_state_info_va)
3386 		return -ENOMEM;
3387 
3388 	return ioread32(priv->mhi_state_info_va);
3389 }
3390 EXPORT_SYMBOL(icnss_get_mhi_state);
3391 
3392 int icnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode)
3393 {
3394 	int ret;
3395 	struct icnss_priv *priv;
3396 
3397 	if (!dev)
3398 		return -ENODEV;
3399 
3400 	priv = dev_get_drvdata(dev);
3401 
3402 	if (!priv) {
3403 		icnss_pr_err("Platform driver not initialized\n");
3404 		return -EINVAL;
3405 	}
3406 
	if (test_bit(ICNSS_FW_DOWN, &priv->state) ||
	    !test_bit(ICNSS_FW_READY, &priv->state)) {
3409 		icnss_pr_err("FW down, ignoring fw_log_mode state: 0x%lx\n",
3410 			     priv->state);
3411 		return -EINVAL;
3412 	}
3413 
3414 	icnss_pr_dbg("FW log mode: %u\n", fw_log_mode);
3415 
3416 	ret = wlfw_ini_send_sync_msg(priv, fw_log_mode);
3417 	if (ret)
3418 		icnss_pr_err("Fail to send ini, ret = %d, fw_log_mode: %u\n",
3419 			     ret, fw_log_mode);
3420 	return ret;
3421 }
3422 EXPORT_SYMBOL(icnss_set_fw_log_mode);
3423 
3424 int icnss_force_wake_request(struct device *dev)
3425 {
3426 	struct icnss_priv *priv;
3427 
3428 	if (!dev)
3429 		return -ENODEV;
3430 
3431 	priv = dev_get_drvdata(dev);
3432 
3433 	if (!priv) {
3434 		icnss_pr_err("Platform driver not initialized\n");
3435 		return -EINVAL;
3436 	}
3437 
3438 	if (test_bit(ICNSS_FW_DOWN, &priv->state) ||
3439 	    !test_bit(ICNSS_FW_READY, &priv->state)) {
3440 		icnss_pr_soc_wake("FW down, ignoring SOC Wake request state: 0x%lx\n",
3441 				  priv->state);
3442 		return -EINVAL;
3443 	}
3444 
3445 	if (atomic_inc_not_zero(&priv->soc_wake_ref_count)) {
3446 		icnss_pr_soc_wake("SOC already awake, Ref count: %d",
3447 				  atomic_read(&priv->soc_wake_ref_count));
3448 		return 0;
3449 	}
3450 
3451 	icnss_pr_soc_wake("Calling SOC Wake request");
3452 
3453 	icnss_soc_wake_event_post(priv, ICNSS_SOC_WAKE_REQUEST_EVENT,
3454 				  0, NULL);
3455 
3456 	return 0;
3457 }
3458 EXPORT_SYMBOL(icnss_force_wake_request);
3459 
3460 int icnss_force_wake_release(struct device *dev)
3461 {
3462 	struct icnss_priv *priv;
3463 
3464 	if (!dev)
3465 		return -ENODEV;
3466 
3467 	priv = dev_get_drvdata(dev);
3468 
3469 	if (!priv) {
3470 		icnss_pr_err("Platform driver not initialized\n");
3471 		return -EINVAL;
3472 	}
3473 
3474 	if (test_bit(ICNSS_FW_DOWN, &priv->state) ||
3475 	    !test_bit(ICNSS_FW_READY, &priv->state)) {
3476 		icnss_pr_soc_wake("FW down, ignoring SOC Wake release state: 0x%lx\n",
3477 				  priv->state);
3478 		return -EINVAL;
3479 	}
3480 
	icnss_pr_soc_wake("Calling SOC Wake release");
3482 
3483 	if (atomic_read(&priv->soc_wake_ref_count) &&
3484 	    icnss_atomic_dec_if_greater_one(&priv->soc_wake_ref_count)) {
3485 		icnss_pr_soc_wake("SOC previous release pending, Ref count: %d",
3486 				  atomic_read(&priv->soc_wake_ref_count));
3487 		return 0;
3488 	}
3489 
3490 	icnss_soc_wake_event_post(priv, ICNSS_SOC_WAKE_RELEASE_EVENT,
3491 				  0, NULL);
3492 
3493 	return 0;
3494 }
3495 EXPORT_SYMBOL(icnss_force_wake_release);
3496 
3497 int icnss_is_device_awake(struct device *dev)
3498 {
3499 	struct icnss_priv *priv = dev_get_drvdata(dev);
3500 
3501 	if (!priv) {
3502 		icnss_pr_err("Platform driver not initialized\n");
3503 		return -EINVAL;
3504 	}
3505 
3506 	return atomic_read(&priv->soc_wake_ref_count);
3507 }
3508 EXPORT_SYMBOL(icnss_is_device_awake);
3509 
3510 int icnss_is_pci_ep_awake(struct device *dev)
3511 {
3512 	struct icnss_priv *priv = dev_get_drvdata(dev);
3513 
3514 	if (!priv) {
3515 		icnss_pr_err("Platform driver not initialized\n");
3516 		return -EINVAL;
3517 	}
3518 
3519 	if (!priv->mhi_state_info_va)
3520 		return -ENOMEM;
3521 
3522 	return ioread32(priv->mhi_state_info_va + ICNSS_PCI_EP_WAKE_OFFSET);
3523 }
3524 EXPORT_SYMBOL(icnss_is_pci_ep_awake);
3525 
3526 int icnss_athdiag_read(struct device *dev, uint32_t offset,
3527 		       uint32_t mem_type, uint32_t data_len,
3528 		       uint8_t *output)
3529 {
3530 	int ret = 0;
3531 	struct icnss_priv *priv = dev_get_drvdata(dev);
3532 
3533 	if (priv->magic != ICNSS_MAGIC) {
3534 		icnss_pr_err("Invalid drvdata for diag read: dev %pK, data %pK, magic 0x%x\n",
3535 			     dev, priv, priv->magic);
3536 		return -EINVAL;
3537 	}
3538 
3539 	if (!output || data_len == 0
3540 	    || data_len > WLFW_MAX_DATA_SIZE) {
3541 		icnss_pr_err("Invalid parameters for diag read: output %pK, data_len %u\n",
3542 			     output, data_len);
3543 		ret = -EINVAL;
3544 		goto out;
3545 	}
3546 
3547 	if (!test_bit(ICNSS_FW_READY, &priv->state) ||
3548 	    !test_bit(ICNSS_POWER_ON, &priv->state)) {
3549 		icnss_pr_err("Invalid state for diag read: 0x%lx\n",
3550 			     priv->state);
3551 		ret = -EINVAL;
3552 		goto out;
3553 	}
3554 
3555 	ret = wlfw_athdiag_read_send_sync_msg(priv, offset, mem_type,
3556 					      data_len, output);
3557 out:
3558 	return ret;
3559 }
3560 EXPORT_SYMBOL(icnss_athdiag_read);
3561 
3562 int icnss_athdiag_write(struct device *dev, uint32_t offset,
3563 			uint32_t mem_type, uint32_t data_len,
3564 			uint8_t *input)
3565 {
3566 	int ret = 0;
3567 	struct icnss_priv *priv = dev_get_drvdata(dev);
3568 
3569 	if (priv->magic != ICNSS_MAGIC) {
3570 		icnss_pr_err("Invalid drvdata for diag write: dev %pK, data %pK, magic 0x%x\n",
3571 			     dev, priv, priv->magic);
3572 		return -EINVAL;
3573 	}
3574 
3575 	if (!input || data_len == 0
3576 	    || data_len > WLFW_MAX_DATA_SIZE) {
3577 		icnss_pr_err("Invalid parameters for diag write: input %pK, data_len %u\n",
3578 			     input, data_len);
3579 		ret = -EINVAL;
3580 		goto out;
3581 	}
3582 
3583 	if (!test_bit(ICNSS_FW_READY, &priv->state) ||
3584 	    !test_bit(ICNSS_POWER_ON, &priv->state)) {
3585 		icnss_pr_err("Invalid state for diag write: 0x%lx\n",
3586 			     priv->state);
3587 		ret = -EINVAL;
3588 		goto out;
3589 	}
3590 
3591 	ret = wlfw_athdiag_write_send_sync_msg(priv, offset, mem_type,
3592 					       data_len, input);
3593 out:
3594 	return ret;
3595 }
3596 EXPORT_SYMBOL(icnss_athdiag_write);
3597 
3598 int icnss_wlan_enable(struct device *dev, struct icnss_wlan_enable_cfg *config,
3599 		      enum icnss_driver_mode mode,
3600 		      const char *host_version)
3601 {
3602 	struct icnss_priv *priv = dev_get_drvdata(dev);
3603 	int temp = 0, ret = 0;
3604 
3605 	if (test_bit(ICNSS_FW_DOWN, &priv->state) ||
3606 	    !test_bit(ICNSS_FW_READY, &priv->state)) {
3607 		icnss_pr_err("FW down, ignoring wlan_enable state: 0x%lx\n",
3608 			     priv->state);
3609 		return -EINVAL;
3610 	}
3611 
3612 	if (test_bit(ICNSS_MODE_ON, &priv->state)) {
3613 		icnss_pr_err("Already Mode on, ignoring wlan_enable state: 0x%lx\n",
3614 			     priv->state);
3615 		return -EINVAL;
3616 	}
3617 
3618 	if (priv->wpss_supported &&
3619 	    !priv->dms.nv_mac_not_prov && !priv->dms.mac_valid)
3620 		icnss_setup_dms_mac(priv);
3621 
3622 	if (priv->device_id == WCN6750_DEVICE_ID) {
3623 		if (!icnss_get_temperature(priv, &temp)) {
3624 			icnss_pr_dbg("Temperature: %d\n", temp);
3625 			if (temp < WLAN_EN_TEMP_THRESHOLD)
3626 				icnss_set_wlan_en_delay(priv);
3627 		}
3628 	}
3629 
3630 	if (priv->device_id == WCN6450_DEVICE_ID)
3631 		icnss_hw_power_off(priv);
3632 
3633 	ret = icnss_send_wlan_enable_to_fw(priv, config, mode, host_version);
3634 
3635 	if (priv->device_id == WCN6450_DEVICE_ID)
3636 		icnss_hw_power_on(priv);
3637 
3638 	return ret;
3639 }
3640 EXPORT_SYMBOL(icnss_wlan_enable);
3641 
3642 int icnss_wlan_disable(struct device *dev, enum icnss_driver_mode mode)
3643 {
3644 	struct icnss_priv *priv = dev_get_drvdata(dev);
3645 
3646 	if (test_bit(ICNSS_FW_DOWN, &priv->state)) {
3647 		icnss_pr_dbg("FW down, ignoring wlan_disable state: 0x%lx\n",
3648 			     priv->state);
3649 		return 0;
3650 	}
3651 
3652 	return icnss_send_wlan_disable_to_fw(priv);
3653 }
3654 EXPORT_SYMBOL(icnss_wlan_disable);
3655 
3656 bool icnss_is_qmi_disable(struct device *dev)
3657 {
	return test_bit(SKIP_QMI, &penv->ctrl_params.quirks);
3659 }
3660 EXPORT_SYMBOL(icnss_is_qmi_disable);
3661 
3662 int icnss_get_ce_id(struct device *dev, int irq)
3663 {
3664 	int i;
3665 
3666 	if (!penv || !penv->pdev || !dev)
3667 		return -ENODEV;
3668 
3669 	for (i = 0; i < ICNSS_MAX_IRQ_REGISTRATIONS; i++) {
3670 		if (penv->ce_irqs[i] == irq)
3671 			return i;
3672 	}
3673 
3674 	icnss_pr_err("No matching CE id for irq %d\n", irq);
3675 
3676 	return -EINVAL;
3677 }
3678 EXPORT_SYMBOL(icnss_get_ce_id);
3679 
3680 int icnss_get_irq(struct device *dev, int ce_id)
3681 {
3682 	int irq;
3683 
3684 	if (!penv || !penv->pdev || !dev)
3685 		return -ENODEV;
3686 
3687 	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS)
3688 		return -EINVAL;
3689 
3690 	irq = penv->ce_irqs[ce_id];
3691 
3692 	return irq;
3693 }
3694 EXPORT_SYMBOL(icnss_get_irq);
3695 
3696 struct iommu_domain *icnss_smmu_get_domain(struct device *dev)
3697 {
3698 	struct icnss_priv *priv = dev_get_drvdata(dev);
3699 
3700 	if (!priv) {
3701 		icnss_pr_err("Invalid drvdata: dev %pK\n", dev);
3702 		return NULL;
3703 	}
3704 	return priv->iommu_domain;
3705 }
3706 EXPORT_SYMBOL(icnss_smmu_get_domain);
3707 
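/*
 * Map a physical buffer into the driver-managed IPA IOVA range: the next
 * page-aligned IOVA is taken from smmu_iova_ipa_current, IOMMU_CACHE is added
 * for dma-coherent devices, and the device-visible address is returned in
 * *iova_addr.
 */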
3708 int icnss_smmu_map(struct device *dev,
3709 		   phys_addr_t paddr, uint32_t *iova_addr, size_t size)
3710 {
3711 	struct icnss_priv *priv = dev_get_drvdata(dev);
3712 	int flag = IOMMU_READ | IOMMU_WRITE;
3713 	bool dma_coherent = false;
3714 	unsigned long iova;
3715 	int prop_len = 0;
3716 	size_t len;
3717 	int ret = 0;
3718 
3719 	if (!priv) {
3720 		icnss_pr_err("Invalid drvdata: dev %pK, data %pK\n",
3721 			     dev, priv);
3722 		return -EINVAL;
3723 	}
3724 
3725 	if (!iova_addr) {
3726 		icnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n",
3727 			     &paddr, size);
3728 		return -EINVAL;
3729 	}
3730 
3731 	len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
3732 	iova = roundup(priv->smmu_iova_ipa_current, PAGE_SIZE);
3733 
3734 	if (of_get_property(dev->of_node, "qcom,iommu-geometry", &prop_len) &&
3735 	    iova >= priv->smmu_iova_ipa_start + priv->smmu_iova_ipa_len) {
3736 		icnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
3737 			     iova,
3738 			     &priv->smmu_iova_ipa_start,
3739 			     priv->smmu_iova_ipa_len);
3740 		return -ENOMEM;
3741 	}
3742 
3743 	dma_coherent = of_property_read_bool(dev->of_node, "dma-coherent");
3744 	icnss_pr_dbg("dma-coherent is %s\n",
3745 		     dma_coherent ? "enabled" : "disabled");
3746 	if (dma_coherent)
3747 		flag |= IOMMU_CACHE;
3748 
3749 	icnss_pr_dbg("IOMMU Map: iova %lx, len %zu\n", iova, len);
3750 
3751 	ret = iommu_map(priv->iommu_domain, iova,
3752 			rounddown(paddr, PAGE_SIZE), len,
3753 			flag);
3754 	if (ret) {
3755 		icnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
3756 		return ret;
3757 	}
3758 
3759 	priv->smmu_iova_ipa_current = iova + len;
3760 	*iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE));
3761 
	icnss_pr_dbg("Physical addr mapped to IOVA 0x%x\n", *iova_addr);
3763 	return 0;
3764 }
3765 EXPORT_SYMBOL(icnss_smmu_map);
3766 
3767 int icnss_smmu_unmap(struct device *dev,
3768 		     uint32_t iova_addr, size_t size)
3769 {
3770 	struct icnss_priv *priv = dev_get_drvdata(dev);
3771 	unsigned long iova;
3772 	size_t len, unmapped_len;
3773 
3774 	if (!priv) {
3775 		icnss_pr_err("Invalid drvdata: dev %pK, data %pK\n",
3776 			     dev, priv);
3777 		return -EINVAL;
3778 	}
3779 
3780 	if (!iova_addr) {
3781 		icnss_pr_err("iova_addr is NULL, size %zu\n",
3782 			     size);
3783 		return -EINVAL;
3784 	}
3785 
3786 	len = roundup(size + iova_addr - rounddown(iova_addr, PAGE_SIZE),
3787 		      PAGE_SIZE);
3788 	iova = rounddown(iova_addr, PAGE_SIZE);
3789 
3790 	if (iova >= priv->smmu_iova_ipa_start + priv->smmu_iova_ipa_len) {
3791 		icnss_pr_err("Out of IOVA space during unmap, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
3792 			     iova,
3793 			     &priv->smmu_iova_ipa_start,
3794 			     priv->smmu_iova_ipa_len);
3795 		return -ENOMEM;
3796 	}
3797 
3798 	icnss_pr_dbg("IOMMU Unmap: iova %lx, len %zu\n",
3799 		     iova, len);
3800 
3801 	unmapped_len = iommu_unmap(priv->iommu_domain, iova, len);
3802 	if (unmapped_len != len) {
3803 		icnss_pr_err("Failed to unmap, %zu\n", unmapped_len);
3804 		return -EINVAL;
3805 	}
3806 
3807 	priv->smmu_iova_ipa_current = iova;
3808 	return 0;
3809 }
3810 EXPORT_SYMBOL(icnss_smmu_unmap);
3811 
3812 unsigned int icnss_socinfo_get_serial_number(struct device *dev)
3813 {
3814 	return socinfo_get_serial_number();
3815 }
3816 EXPORT_SYMBOL(icnss_socinfo_get_serial_number);
3817 
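/*
 * Trigger WLAN FW recovery: on WPSS-based platforms assert an SSR over SMP2P,
 * otherwise request a PD restart through the PDR framework. Sets
 * ICNSS_HOST_TRIGGERED_PDR on success so the cause can be classified later.
 */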
3818 int icnss_trigger_recovery(struct device *dev)
3819 {
3820 	int ret = 0;
3821 	struct icnss_priv *priv = dev_get_drvdata(dev);
3822 
3823 	if (priv->magic != ICNSS_MAGIC) {
3824 		icnss_pr_err("Invalid drvdata: magic 0x%x\n", priv->magic);
3825 		ret = -EINVAL;
3826 		goto out;
3827 	}
3828 
3829 	if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
3830 		icnss_pr_err("PD recovery already in progress: state: 0x%lx\n",
3831 			     priv->state);
3832 		ret = -EPERM;
3833 		goto out;
3834 	}
3835 
3836 	if (priv->wpss_supported) {
3837 		icnss_pr_vdbg("Initiate Root PD restart");
3838 		ret = icnss_send_smp2p(priv, ICNSS_TRIGGER_SSR,
3839 				       ICNSS_SMP2P_OUT_POWER_SAVE);
3840 		if (!ret)
3841 			set_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
3842 		return ret;
3843 	}
3844 
3845 	if (!test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
3846 		icnss_pr_err("PD restart not enabled to trigger recovery: state: 0x%lx\n",
3847 			     priv->state);
3848 		ret = -EOPNOTSUPP;
3849 		goto out;
3850 	}
3851 
3852 	icnss_pr_warn("Initiate PD restart at WLAN FW, state: 0x%lx\n",
3853 		      priv->state);
3854 
3855 	ret = pdr_restart_pd(priv->pdr_handle, priv->pdr_service);
3856 
3857 	if (!ret)
3858 		set_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
3859 
3860 out:
3861 	return ret;
3862 }
3863 EXPORT_SYMBOL(icnss_trigger_recovery);
3864 
3865 int icnss_idle_shutdown(struct device *dev)
3866 {
3867 	struct icnss_priv *priv = dev_get_drvdata(dev);
3868 
3869 	if (!priv) {
3870 		icnss_pr_err("Invalid drvdata: dev %pK", dev);
3871 		return -EINVAL;
3872 	}
3873 
3874 	if (priv->is_ssr || test_bit(ICNSS_PDR, &priv->state) ||
3875 	    test_bit(ICNSS_REJUVENATE, &priv->state)) {
3876 		icnss_pr_err("SSR/PDR is already in-progress during idle shutdown\n");
3877 		return -EBUSY;
3878 	}
3879 
3880 	return icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN,
3881 					ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
3882 }
3883 EXPORT_SYMBOL(icnss_idle_shutdown);
3884 
3885 int icnss_idle_restart(struct device *dev)
3886 {
3887 	struct icnss_priv *priv = dev_get_drvdata(dev);
3888 
3889 	if (!priv) {
3890 		icnss_pr_err("Invalid drvdata: dev %pK", dev);
3891 		return -EINVAL;
3892 	}
3893 
3894 	if (priv->is_ssr || test_bit(ICNSS_PDR, &priv->state) ||
3895 	    test_bit(ICNSS_REJUVENATE, &priv->state)) {
3896 		icnss_pr_err("SSR/PDR is already in-progress during idle restart\n");
3897 		return -EBUSY;
3898 	}
3899 
3900 	return icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_IDLE_RESTART,
3901 					ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
3902 }
3903 EXPORT_SYMBOL(icnss_idle_restart);
3904 
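/*
 * Ask the WLAN FW to exit power save over SMP2P. Skipped while
 * ICNSS_PD_RESTART is set or ICNSS_MODE_ON is not set.
 */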
3905 int icnss_exit_power_save(struct device *dev)
3906 {
3907 	struct icnss_priv *priv = dev_get_drvdata(dev);
3908 
3909 	icnss_pr_vdbg("Calling Exit Power Save\n");
3910 
3911 	if (test_bit(ICNSS_PD_RESTART, &priv->state) ||
3912 	    !test_bit(ICNSS_MODE_ON, &priv->state))
3913 		return 0;
3914 
3915 	return icnss_send_smp2p(priv, ICNSS_POWER_SAVE_EXIT,
3916 				ICNSS_SMP2P_OUT_POWER_SAVE);
3917 }
3918 EXPORT_SYMBOL(icnss_exit_power_save);
3919 
3920 int icnss_prevent_l1(struct device *dev)
3921 {
3922 	struct icnss_priv *priv = dev_get_drvdata(dev);
3923 
3924 	if (test_bit(ICNSS_PD_RESTART, &priv->state) ||
3925 	    !test_bit(ICNSS_MODE_ON, &priv->state))
3926 		return 0;
3927 
3928 	return icnss_send_smp2p(priv, ICNSS_PCI_EP_POWER_SAVE_EXIT,
3929 				ICNSS_SMP2P_OUT_EP_POWER_SAVE);
3930 }
3931 EXPORT_SYMBOL(icnss_prevent_l1);
3932 
3933 void icnss_allow_l1(struct device *dev)
3934 {
3935 	struct icnss_priv *priv = dev_get_drvdata(dev);
3936 
3937 	if (test_bit(ICNSS_PD_RESTART, &priv->state) ||
3938 	    !test_bit(ICNSS_MODE_ON, &priv->state))
3939 		return;
3940 
3941 	icnss_send_smp2p(priv, ICNSS_PCI_EP_POWER_SAVE_ENTER,
3942 			 ICNSS_SMP2P_OUT_EP_POWER_SAVE);
3943 }
3944 EXPORT_SYMBOL(icnss_allow_l1);
3945 
3946 void icnss_allow_recursive_recovery(struct device *dev)
3947 {
3948 	struct icnss_priv *priv = dev_get_drvdata(dev);
3949 
3950 	priv->allow_recursive_recovery = true;
3951 
3952 	icnss_pr_info("Recursive recovery allowed for WLAN\n");
3953 }
3954 
3955 void icnss_disallow_recursive_recovery(struct device *dev)
3956 {
3957 	struct icnss_priv *priv = dev_get_drvdata(dev);
3958 
3959 	priv->allow_recursive_recovery = false;
3960 
3961 	icnss_pr_info("Recursive recovery disallowed for WLAN\n");
3962 }
3963 
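/*
 * Expose a "shutdown_wlan" kobject under /sys/kernel with the store
 * attribute defined elsewhere in this file; priv->is_shutdown starts
 * out cleared.
 */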
3964 static int icnss_create_shutdown_sysfs(struct icnss_priv *priv)
3965 {
3966 	struct kobject *icnss_kobject;
3967 	int ret = 0;
3968 
3969 	atomic_set(&priv->is_shutdown, false);
3970 
3971 	icnss_kobject = kobject_create_and_add("shutdown_wlan", kernel_kobj);
3972 	if (!icnss_kobject) {
3973 		icnss_pr_err("Unable to create shutdown_wlan kernel object");
3974 		return -EINVAL;
3975 	}
3976 
3977 	priv->icnss_kobject = icnss_kobject;
3978 
3979 	ret = sysfs_create_file(icnss_kobject, &icnss_sysfs_attribute.attr);
3980 	if (ret) {
3981 		icnss_pr_err("Unable to create icnss sysfs file, err: %d\n", ret);
3982 		return ret;
3983 	}
3984 
3985 	return ret;
3986 }
3987 
3988 static void icnss_destroy_shutdown_sysfs(struct icnss_priv *priv)
3989 {
3990 	struct kobject *icnss_kobject;
3991 
3992 	icnss_kobject = priv->icnss_kobject;
3993 	if (icnss_kobject)
3994 		kobject_put(icnss_kobject);
3995 }
3996 
3997 static ssize_t qdss_tr_start_store(struct device *dev,
3998 				   struct device_attribute *attr,
3999 				   const char *buf, size_t count)
4000 {
4001 	struct icnss_priv *priv = dev_get_drvdata(dev);
4002 
4003 	wlfw_qdss_trace_start(priv);
4004 	icnss_pr_dbg("Received QDSS start command\n");
4005 	return count;
4006 }
4007 
4008 static ssize_t qdss_tr_stop_store(struct device *dev,
4009 				  struct device_attribute *attr,
4010 				  const char *user_buf, size_t count)
4011 {
4012 	struct icnss_priv *priv = dev_get_drvdata(dev);
4013 	u32 option = 0;
4014 
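	/*
	 * "%du" converts a decimal integer; the trailing 'u' is only a
	 * literal match, so a plain number still yields one conversion.
	 */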
4015 	if (sscanf(user_buf, "%du", &option) != 1)
4016 		return -EINVAL;
4017 
4018 	wlfw_qdss_trace_stop(priv, option);
4019 	icnss_pr_dbg("Received QDSS stop command\n");
4020 	return count;
4021 }
4022 
4023 static ssize_t qdss_conf_download_store(struct device *dev,
4024 					struct device_attribute *attr,
4025 					const char *buf, size_t count)
4026 {
4027 	struct icnss_priv *priv = dev_get_drvdata(dev);
4028 
4029 	icnss_wlfw_qdss_dnld_send_sync(priv);
4030 	icnss_pr_dbg("Received QDSS download config command\n");
4031 	return count;
4032 }
4033 
4034 static ssize_t hw_trc_override_store(struct device *dev,
4035 				     struct device_attribute *attr,
4036 				     const char *buf, size_t count)
4037 {
4038 	struct icnss_priv *priv = dev_get_drvdata(dev);
4039 	int tmp = 0;
4040 
4041 	if (sscanf(buf, "%du", &tmp) != 1)
4042 		return -EINVAL;
4043 
4044 	priv->hw_trc_override = tmp;
4045 	icnss_pr_dbg("Received QDSS hw_trc_override indication\n");
4046 	return count;
4047 }
4048 
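/*
 * Worker for the wpss_boot sysfs node: look up the WPSS remoteproc via
 * the "qcom,rproc-handle" DT phandle and boot it, dropping the rproc
 * reference again if the boot fails.
 */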
4049 static void icnss_wpss_load(struct work_struct *wpss_load_work)
4050 {
4051 	struct icnss_priv *priv = icnss_get_plat_priv();
4052 	phandle rproc_phandle;
4053 	int ret;
4054 
4055 	if (of_property_read_u32(priv->pdev->dev.of_node, "qcom,rproc-handle",
4056 				 &rproc_phandle)) {
4057 		icnss_pr_err("error reading rproc phandle\n");
4058 		return;
4059 	}
4060 
4061 	priv->rproc = rproc_get_by_phandle(rproc_phandle);
4062 	if (IS_ERR_OR_NULL(priv->rproc)) {
4063 		icnss_pr_err("rproc not found");
4064 		return;
4065 	}
4066 
4067 	ret = rproc_boot(priv->rproc);
4068 	if (ret) {
4069 		icnss_pr_err("Failed to boot wpss rproc, ret: %d", ret);
4070 		rproc_put(priv->rproc);
4071 	}
4072 }
4073 
4074 static ssize_t wpss_boot_store(struct device *dev,
4075 			       struct device_attribute *attr,
4076 			       const char *buf, size_t count)
4077 {
4078 	struct icnss_priv *priv = dev_get_drvdata(dev);
4079 	int wpss_rproc = 0;
4080 
4081 	if (!priv->wpss_supported && !priv->rproc_fw_download)
4082 		return count;
4083 
4084 	if (sscanf(buf, "%du", &wpss_rproc) != 1) {
4085 		icnss_pr_err("Failed to read wpss rproc info");
4086 		return -EINVAL;
4087 	}
4088 
4089 	icnss_pr_dbg("WPSS Remote Processor: %s", wpss_rproc ? "GET" : "PUT");
4090 
4091 	if (wpss_rproc == 1)
4092 		schedule_work(&wpss_loader);
4093 	else if (wpss_rproc == 0)
4094 		icnss_wpss_unload(priv);
4095 
4096 	return count;
4097 }
4098 
4099 static ssize_t wlan_en_delay_store(struct device *dev,
4100 			       struct device_attribute *attr,
4101 			       const char *buf, size_t count)
4102 {
4103 	struct icnss_priv *priv = dev_get_drvdata(dev);
4104 	uint32_t wlan_en_delay = 0;
4105 
4106 	if (priv->device_id == ADRASTEA_DEVICE_ID)
4107 		return count;
4108 
4109 	if (sscanf(buf, "%du", &wlan_en_delay) != 1) {
4110 		icnss_pr_err("Failed to read wlan_en_delay");
4111 		return -EINVAL;
4112 	}
4113 
4114 	icnss_pr_dbg("WLAN_EN delay: %ums\n", wlan_en_delay);
4115 	priv->wlan_en_delay_ms_user = wlan_en_delay;
4116 
4117 	return count;
4118 }
4119 
4120 static DEVICE_ATTR_WO(qdss_tr_start);
4121 static DEVICE_ATTR_WO(qdss_tr_stop);
4122 static DEVICE_ATTR_WO(qdss_conf_download);
4123 static DEVICE_ATTR_WO(hw_trc_override);
4124 static DEVICE_ATTR_WO(wpss_boot);
4125 static DEVICE_ATTR_WO(wlan_en_delay);
4126 
4127 static struct attribute *icnss_attrs[] = {
4128 	&dev_attr_qdss_tr_start.attr,
4129 	&dev_attr_qdss_tr_stop.attr,
4130 	&dev_attr_qdss_conf_download.attr,
4131 	&dev_attr_hw_trc_override.attr,
4132 	&dev_attr_wpss_boot.attr,
4133 	&dev_attr_wlan_en_delay.attr,
4134 	NULL,
4135 };
4136 
4137 static struct attribute_group icnss_attr_group = {
4138 	.attrs = icnss_attrs,
4139 };
4140 
4141 static int icnss_create_sysfs_link(struct icnss_priv *priv)
4142 {
4143 	struct device *dev = &priv->pdev->dev;
4144 	int ret;
4145 
4146 	ret = sysfs_create_link(kernel_kobj, &dev->kobj, "icnss");
4147 	if (ret) {
4148 		icnss_pr_err("Failed to create icnss link, err = %d\n",
4149 			     ret);
4150 		goto out;
4151 	}
4152 
4153 	return 0;
4154 out:
4155 	return ret;
4156 }
4157 
4158 static void icnss_remove_sysfs_link(struct icnss_priv *priv)
4159 {
4160 	sysfs_remove_link(kernel_kobj, "icnss");
4161 }
4162 
4163 static int icnss_sysfs_create(struct icnss_priv *priv)
4164 {
4165 	int ret = 0;
4166 
4167 	ret = devm_device_add_group(&priv->pdev->dev,
4168 				    &icnss_attr_group);
4169 	if (ret) {
4170 		icnss_pr_err("Failed to create icnss device group, err = %d\n",
4171 			     ret);
4172 		goto out;
4173 	}
4174 
4175 	icnss_create_sysfs_link(priv);
4176 
4177 	ret = icnss_create_shutdown_sysfs(priv);
4178 	if (ret)
4179 		goto remove_icnss_group;
4180 
4181 	return 0;
4182 remove_icnss_group:
4183 	devm_device_remove_group(&priv->pdev->dev, &icnss_attr_group);
4184 out:
4185 	return ret;
4186 }
4187 
4188 static void icnss_sysfs_destroy(struct icnss_priv *priv)
4189 {
4190 	icnss_destroy_shutdown_sysfs(priv);
4191 	icnss_remove_sysfs_link(priv);
4192 	devm_device_remove_group(&priv->pdev->dev, &icnss_attr_group);
4193 }
4194 
4195 static int icnss_resource_parse(struct icnss_priv *priv)
4196 {
4197 	int ret = 0, i = 0, irq = 0;
4198 	struct platform_device *pdev = priv->pdev;
4199 	struct device *dev = &pdev->dev;
4200 	struct resource *res;
4201 	u32 int_prop;
4202 
4203 	ret = icnss_get_vreg(priv);
4204 	if (ret) {
4205 		icnss_pr_err("Failed to get vreg, err = %d\n", ret);
4206 		goto out;
4207 	}
4208 
4209 	ret = icnss_get_clk(priv);
4210 	if (ret) {
4211 		icnss_pr_err("Failed to get clocks, err = %d\n", ret);
4212 		goto put_vreg;
4213 	}
4214 
4215 	if (of_property_read_bool(pdev->dev.of_node, "qcom,psf-supported")) {
4216 		ret = icnss_get_psf_info(priv);
4217 		if (ret < 0)
4218 			goto put_clk;
4219 		priv->psf_supported = true;
4220 	}
4221 
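	/*
	 * Adrastea targets use a memory-mapped register base ("membase")
	 * plus per-CE interrupts; WCN6750/WCN6450 instead map an MSI
	 * target address and pick up SRNG interrupt vectors.
	 */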
4222 	if (priv->device_id == ADRASTEA_DEVICE_ID) {
4223 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
4224 						   "membase");
4225 		if (!res) {
4226 			icnss_pr_err("Memory base not found in DT\n");
4227 			ret = -EINVAL;
4228 			goto put_clk;
4229 		}
4230 
4231 		priv->mem_base_pa = res->start;
4232 		priv->mem_base_va = devm_ioremap(dev, priv->mem_base_pa,
4233 						 resource_size(res));
4234 		if (!priv->mem_base_va) {
4235 			icnss_pr_err("Memory base ioremap failed: phy addr: %pa\n",
4236 				     &priv->mem_base_pa);
4237 			ret = -EINVAL;
4238 			goto put_clk;
4239 		}
4240 		icnss_pr_dbg("MEM_BASE pa: %pa, va: 0x%pK\n",
4241 			     &priv->mem_base_pa,
4242 			     priv->mem_base_va);
4243 
4244 		for (i = 0; i < ICNSS_MAX_IRQ_REGISTRATIONS; i++) {
4245 			irq = platform_get_irq(pdev, i);
4246 			if (irq < 0) {
4247 				icnss_pr_err("Failed to get IRQ %d\n", i);
4248 				ret = -ENODEV;
4249 				goto put_clk;
4250 			} else {
4251 				priv->ce_irqs[i] = irq;
4252 			}
4253 		}
4254 
4255 		if (of_property_read_bool(pdev->dev.of_node,
4256 					  "qcom,is_low_power")) {
4257 			priv->low_power_support = true;
4258 			icnss_pr_dbg("Deep Sleep/Hibernate mode supported\n");
4259 		}
4260 
4261 		if (of_property_read_u32(pdev->dev.of_node, "qcom,rf_subtype",
4262 					 &priv->rf_subtype) == 0) {
4263 			priv->is_rf_subtype_valid = true;
4264 			icnss_pr_dbg("RF subtype 0x%x\n", priv->rf_subtype);
4265 		}
4266 
4267 		if (of_property_read_bool(pdev->dev.of_node,
4268 					  "qcom,is_slate_rfa")) {
4269 			priv->is_slate_rfa = true;
4270 			icnss_pr_err("SLATE rfa is enabled\n");
4271 		}
4272 	} else if (priv->device_id == WCN6750_DEVICE_ID ||
4273 		   priv->device_id == WCN6450_DEVICE_ID) {
4274 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
4275 						   "msi_addr");
4276 		if (!res) {
4277 			icnss_pr_err("MSI address not found in DT\n");
4278 			ret = -EINVAL;
4279 			goto put_clk;
4280 		}
4281 
4282 		priv->msi_addr_pa = res->start;
4283 		priv->msi_addr_iova = dma_map_resource(dev, priv->msi_addr_pa,
4284 						       PAGE_SIZE,
4285 						       DMA_FROM_DEVICE, 0);
4286 		if (dma_mapping_error(dev, priv->msi_addr_iova)) {
4287 			icnss_pr_err("MSI: failed to map msi address\n");
4288 			priv->msi_addr_iova = 0;
4289 			ret = -ENOMEM;
4290 			goto put_clk;
4291 		}
4292 		icnss_pr_dbg("MSI Addr pa: %pa, iova: %pad\n",
4293 			     &priv->msi_addr_pa,
4294 			     &priv->msi_addr_iova);
4295 
4296 		ret = of_property_read_u32_index(dev->of_node,
4297 						 "interrupts",
4298 						 1,
4299 						 &int_prop);
4300 		if (ret) {
4301 			icnss_pr_dbg("Read interrupt prop failed");
4302 			goto put_clk;
4303 		}
4304 
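		/*
		 * The second "interrupts" cell holds the GIC SPI number;
		 * add the SPI offset (32) to get the absolute interrupt ID
		 * used as the MSI base data.
		 */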
4305 		priv->msi_base_data = int_prop + 32;
4306 		icnss_pr_dbg("MSI Base Data: %u, IRQ Index: %u\n",
4307 			     priv->msi_base_data, int_prop);
4308 
4309 		icnss_get_msi_assignment(priv);
4310 		for (i = 0; i < priv->msi_config->total_vectors; i++) {
4311 			irq = platform_get_irq(priv->pdev, i);
4312 			if (irq < 0) {
4313 				icnss_pr_err("Failed to get IRQ %d\n", i);
4314 				ret = -ENODEV;
4315 				goto put_clk;
4316 			} else {
4317 				priv->srng_irqs[i] = irq;
4318 			}
4319 		}
4320 	}
4321 
4322 	return 0;
4323 
4324 put_clk:
4325 	icnss_put_clk(priv);
4326 put_vreg:
4327 	icnss_put_vreg(priv);
4328 out:
4329 	return ret;
4330 }
4331 
4332 static int icnss_msa_dt_parse(struct icnss_priv *priv)
4333 {
4334 	int ret = 0;
4335 	struct platform_device *pdev = priv->pdev;
4336 	struct device *dev = &pdev->dev;
4337 	struct device_node *np = NULL;
4338 	u64 prop_size = 0;
4339 	const __be32 *addrp = NULL;
4340 
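	/*
	 * MSA memory comes either from a fixed carve-out referenced by the
	 * "qcom,wlan-msa-fixed-region" phandle or, failing that, from a
	 * DMA-coherent allocation of "qcom,wlan-msa-memory" bytes.
	 */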
4341 	np = of_parse_phandle(dev->of_node,
4342 			      "qcom,wlan-msa-fixed-region", 0);
4343 	if (np) {
4344 		addrp = of_get_address(np, 0, &prop_size, NULL);
4345 		if (!addrp) {
4346 			icnss_pr_err("Failed to get assigned-addresses property\n");
4347 			ret = -EINVAL;
4348 			of_node_put(np);
4349 			goto out;
4350 		}
4351 
4352 		priv->msa_pa = of_translate_address(np, addrp);
4353 		if (priv->msa_pa == OF_BAD_ADDR) {
4354 			icnss_pr_err("Failed to translate MSA PA from device-tree\n");
4355 			ret = -EINVAL;
4356 			of_node_put(np);
4357 			goto out;
4358 		}
4359 
4360 		of_node_put(np);
4361 
4362 		priv->msa_va = memremap(priv->msa_pa,
4363 					(unsigned long)prop_size, MEMREMAP_WT);
4364 		if (!priv->msa_va) {
4365 			icnss_pr_err("MSA PA memremap failed: phy addr: %pa\n",
4366 				     &priv->msa_pa);
4367 			ret = -EINVAL;
4368 			goto out;
4369 		}
4370 		priv->msa_mem_size = prop_size;
4371 	} else {
4372 		ret = of_property_read_u32(dev->of_node, "qcom,wlan-msa-memory",
4373 					   &priv->msa_mem_size);
4374 		if (ret || priv->msa_mem_size == 0) {
4375 			icnss_pr_err("Failed to get MSA memory size: %u, ret: %d\n",
4376 				     priv->msa_mem_size, ret);
4377 			goto out;
4378 		}
4379 
4380 		priv->msa_va = dmam_alloc_coherent(&pdev->dev,
4381 				priv->msa_mem_size, &priv->msa_pa, GFP_KERNEL);
4382 
4383 		if (!priv->msa_va) {
4384 			icnss_pr_err("DMA alloc failed for MSA\n");
4385 			ret = -ENOMEM;
4386 			goto out;
4387 		}
4388 	}
4389 
4390 	icnss_pr_dbg("MSA pa: %pa, MSA va: 0x%pK MSA Memory Size: 0x%x\n",
4391 		     &priv->msa_pa, (void *)priv->msa_va, priv->msa_mem_size);
4392 
4393 	priv->use_prefix_path = of_property_read_bool(priv->pdev->dev.of_node,
4394 						      "qcom,fw-prefix");
4395 	return 0;
4396 
4397 out:
4398 	return ret;
4399 }
4400 
4401 static int icnss_smmu_fault_handler(struct iommu_domain *domain,
4402 				    struct device *dev, unsigned long iova,
4403 				    int flags, void *handler_token)
4404 {
4405 	struct icnss_priv *priv = handler_token;
4406 	struct icnss_uevent_fw_down_data fw_down_data = {0};
4407 
4408 	icnss_fatal_err("SMMU fault happened with IOVA 0x%lx\n", iova);
4409 
4410 	if (!priv) {
4411 		icnss_pr_err("priv is NULL\n");
4412 		return -ENODEV;
4413 	}
4414 
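	/*
	 * Treat the SMMU fault as a firmware crash: notify the WLAN driver
	 * and then kick off recovery before returning to the IOMMU core.
	 */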
4415 	if (test_bit(ICNSS_FW_READY, &priv->state)) {
4416 		fw_down_data.crashed = true;
4417 		icnss_call_driver_uevent(priv, ICNSS_UEVENT_SMMU_FAULT,
4418 					 &fw_down_data);
4419 		icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN,
4420 					 &fw_down_data);
4421 	}
4422 
4423 	icnss_trigger_recovery(&priv->pdev->dev);
4424 
4425 	/* IOMMU driver requires -ENOSYS return value to print debug info. */
4426 	return -ENOSYS;
4427 }
4428 
4429 static int icnss_smmu_dt_parse(struct icnss_priv *priv)
4430 {
4431 	int ret = 0;
4432 	struct platform_device *pdev = priv->pdev;
4433 	struct device *dev = &pdev->dev;
4434 	const char *iommu_dma_type;
4435 	struct resource *res;
4436 	u32 addr_win[2];
4437 
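	/*
	 * "qcom,iommu-dma-addr-pool" provides the IOVA base and length;
	 * a "fastmap" qcom,iommu-dma setting indicates SMMU stage-1
	 * translation is in use for this device.
	 */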
4438 	ret = of_property_read_u32_array(dev->of_node,
4439 					 "qcom,iommu-dma-addr-pool",
4440 					 addr_win,
4441 					 ARRAY_SIZE(addr_win));
4442 
4443 	if (ret) {
4444 		icnss_pr_err("SMMU IOVA base not found\n");
4445 	} else {
4446 		priv->smmu_iova_start = addr_win[0];
4447 		priv->smmu_iova_len = addr_win[1];
4448 		icnss_pr_dbg("SMMU IOVA start: %pa, len: %zx\n",
4449 			     &priv->smmu_iova_start,
4450 			     priv->smmu_iova_len);
4451 
4452 		priv->iommu_domain =
4453 			iommu_get_domain_for_dev(&pdev->dev);
4454 
4455 		ret = of_property_read_string(dev->of_node, "qcom,iommu-dma",
4456 					      &iommu_dma_type);
4457 		if (!ret && !strcmp("fastmap", iommu_dma_type)) {
4458 			icnss_pr_dbg("SMMU S1 stage enabled\n");
4459 			priv->smmu_s1_enable = true;
4460 			if (priv->device_id == WCN6750_DEVICE_ID ||
4461 			    priv->device_id == WCN6450_DEVICE_ID)
4462 				iommu_set_fault_handler(priv->iommu_domain,
4463 						icnss_smmu_fault_handler,
4464 						priv);
4465 		}
4466 
4467 		res = platform_get_resource_byname(pdev,
4468 						   IORESOURCE_MEM,
4469 						   "smmu_iova_ipa");
4470 		if (!res) {
4471 			icnss_pr_err("SMMU IOVA IPA not found\n");
4472 		} else {
4473 			priv->smmu_iova_ipa_start = res->start;
4474 			priv->smmu_iova_ipa_current = res->start;
4475 			priv->smmu_iova_ipa_len = resource_size(res);
4476 			icnss_pr_dbg("SMMU IOVA IPA start: %pa, len: %zx\n",
4477 				     &priv->smmu_iova_ipa_start,
4478 				     priv->smmu_iova_ipa_len);
4479 		}
4480 	}
4481 
4482 	return 0;
4483 }
4484 
4485 int icnss_get_iova(struct icnss_priv *priv, u64 *addr, u64 *size)
4486 {
4487 	if (!priv)
4488 		return -ENODEV;
4489 
4490 	if (!priv->smmu_iova_len)
4491 		return -EINVAL;
4492 
4493 	*addr = priv->smmu_iova_start;
4494 	*size = priv->smmu_iova_len;
4495 
4496 	return 0;
4497 }
4498 
4499 int icnss_get_iova_ipa(struct icnss_priv *priv, u64 *addr, u64 *size)
4500 {
4501 	if (!priv)
4502 		return -ENODEV;
4503 
4504 	if (!priv->smmu_iova_ipa_len)
4505 		return -EINVAL;
4506 
4507 	*addr = priv->smmu_iova_ipa_start;
4508 	*size = priv->smmu_iova_ipa_len;
4509 
4510 	return 0;
4511 }
4512 
4513 void icnss_add_fw_prefix_name(struct icnss_priv *priv, char *prefix_name,
4514 			      char *name)
4515 {
4516 	if (!priv)
4517 		return;
4518 
4519 	if (!priv->use_prefix_path) {
4520 		scnprintf(prefix_name, ICNSS_MAX_FILE_NAME, "%s", name);
4521 		return;
4522 	}
4523 
4524 	if (priv->device_id == ADRASTEA_DEVICE_ID)
4525 		scnprintf(prefix_name, ICNSS_MAX_FILE_NAME,
4526 			  ADRASTEA_PATH_PREFIX "%s", name);
4527 	else if (priv->device_id == WCN6750_DEVICE_ID)
4528 		scnprintf(prefix_name, ICNSS_MAX_FILE_NAME,
4529 			  QCA6750_PATH_PREFIX "%s", name);
4530 	else if (priv->device_id == WCN6450_DEVICE_ID)
4531 		scnprintf(prefix_name, ICNSS_MAX_FILE_NAME,
4532 			  WCN6450_PATH_PREFIX "%s", name);
4533 	icnss_pr_dbg("File added with prefix: %s\n", prefix_name);
4534 }
4535 
4536 static const struct platform_device_id icnss_platform_id_table[] = {
4537 	{ .name = "wcn6750", .driver_data = WCN6750_DEVICE_ID, },
4538 	{ .name = "adrastea", .driver_data = ADRASTEA_DEVICE_ID, },
4539 	{ .name = "wcn6450", .driver_data = WCN6450_DEVICE_ID, },
4540 	{ },
4541 };
4542 
4543 static const struct of_device_id icnss_dt_match[] = {
4544 	{
4545 		.compatible = "qcom,wcn6750",
4546 		.data = (void *)&icnss_platform_id_table[0]},
4547 	{
4548 		.compatible = "qcom,icnss",
4549 		.data = (void *)&icnss_platform_id_table[1]},
4550 	{
4551 		.compatible = "qcom,wcn6450",
4552 		.data = (void *)&icnss_platform_id_table[2]},
4553 	{ },
4554 };
4555 
4556 MODULE_DEVICE_TABLE(of, icnss_dt_match);
4557 
4558 static void icnss_init_control_params(struct icnss_priv *priv)
4559 {
4560 	priv->ctrl_params.qmi_timeout = WLFW_TIMEOUT;
4561 	priv->ctrl_params.quirks = ICNSS_QUIRKS_DEFAULT;
4562 	priv->ctrl_params.bdf_type = ICNSS_BDF_TYPE_DEFAULT;
4563 
4564 	if (priv->device_id == WCN6750_DEVICE_ID ||
4565 	    of_property_read_bool(priv->pdev->dev.of_node,
4566 				  "wpss-support-enable"))
4567 		priv->wpss_supported = true;
4568 
4569 	if (of_property_read_bool(priv->pdev->dev.of_node,
4570 				  "bdf-download-support"))
4571 		priv->bdf_download_support = true;
4572 
4573 	if (of_property_read_bool(priv->pdev->dev.of_node,
4574 				  "rproc-fw-download"))
4575 		priv->rproc_fw_download = true;
4576 
4577 	if (priv->bdf_download_support && priv->device_id == ADRASTEA_DEVICE_ID)
4578 		priv->ctrl_params.bdf_type = ICNSS_BDF_BIN;
4579 }
4580 
4581 static void icnss_read_device_configs(struct icnss_priv *priv)
4582 {
4583 	if (of_property_read_bool(priv->pdev->dev.of_node,
4584 				  "wlan-ipa-disabled")) {
4585 		set_bit(ICNSS_IPA_DISABLED, &priv->device_config);
4586 	}
4587 
4588 	if (of_property_read_bool(priv->pdev->dev.of_node,
4589 				  "qcom,wpss-self-recovery"))
4590 		priv->wpss_self_recovery_enabled = true;
4591 }
4592 
4593 static inline void icnss_runtime_pm_init(struct icnss_priv *priv)
4594 {
4595 	pm_runtime_get_sync(&priv->pdev->dev);
4596 	pm_runtime_forbid(&priv->pdev->dev);
4597 	pm_runtime_set_active(&priv->pdev->dev);
4598 	pm_runtime_enable(&priv->pdev->dev);
4599 }
4600 
4601 static inline void icnss_runtime_pm_deinit(struct icnss_priv *priv)
4602 {
4603 	pm_runtime_disable(&priv->pdev->dev);
4604 	pm_runtime_allow(&priv->pdev->dev);
4605 	pm_runtime_put_sync(&priv->pdev->dev);
4606 }
4607 
4608 static inline bool icnss_use_nv_mac(struct icnss_priv *priv)
4609 {
4610 	return of_property_read_bool(priv->pdev->dev.of_node,
4611 				     "use-nv-mac");
4612 }
4613 
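/*
 * Vendor hook invoked when an rproc's recovery setting changes; for the
 * WPSS rproc, mirror recovery_disabled into the driver's M3 SSR restart
 * level via a driver event.
 */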
4614 static void rproc_restart_level_notifier(void *data, struct rproc *rproc)
4615 {
4616 	struct icnss_subsys_restart_level_data *restart_level_data;
4617 
4618 	icnss_pr_info("rproc name: %s, recovery disabled: %d\n",
4619 		      rproc->name, rproc->recovery_disabled);
4620 
4621 	restart_level_data = kzalloc(sizeof(*restart_level_data), GFP_ATOMIC);
4622 	if (!restart_level_data)
4623 		return;
4624 
4625 	if (strnstr(rproc->name, "wpss", ICNSS_RPROC_LEN)) {
4626 		if (rproc->recovery_disabled)
4627 			restart_level_data->restart_level = ICNSS_DISABLE_M3_SSR;
4628 		else
4629 			restart_level_data->restart_level = ICNSS_ENABLE_M3_SSR;
4630 
4631 		icnss_driver_event_post(penv, ICNSS_DRIVER_EVENT_SUBSYS_RESTART_LEVEL,
4632 					0, restart_level_data);
4633 	}
4634 }
4635 
4636 #if IS_ENABLED(CONFIG_WCNSS_MEM_PRE_ALLOC)
4637 static void icnss_initialize_mem_pool(unsigned long device_id)
4638 {
4639 	cnss_initialize_prealloc_pool(device_id);
4640 }
4641 static void icnss_deinitialize_mem_pool(void)
4642 {
4643 	cnss_deinitialize_prealloc_pool();
4644 }
4645 #else
4646 static void icnss_initialize_mem_pool(unsigned long device_id)
4647 {
4648 }
4649 static void icnss_deinitialize_mem_pool(void)
4650 {
4651 }
4652 #endif
4653 
4654 static int icnss_probe(struct platform_device *pdev)
4655 {
4656 	int ret = 0;
4657 	struct device *dev = &pdev->dev;
4658 	struct icnss_priv *priv;
4659 	const struct of_device_id *of_id;
4660 	const struct platform_device_id *device_id;
4661 
4662 	if (dev_get_drvdata(dev)) {
4663 		icnss_pr_err("Driver is already initialized\n");
4664 		return -EEXIST;
4665 	}
4666 
4667 	of_id = of_match_device(icnss_dt_match, &pdev->dev);
4668 	if (!of_id || !of_id->data) {
4669 		icnss_pr_err("Failed to find of match device!\n");
4670 		ret = -ENODEV;
4671 		goto out_reset_drvdata;
4672 	}
4673 
4674 	device_id = of_id->data;
4675 
4676 	icnss_pr_dbg("Platform driver probe\n");
4677 
4678 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
4679 	if (!priv)
4680 		return -ENOMEM;
4681 
4682 	priv->magic = ICNSS_MAGIC;
4683 	dev_set_drvdata(dev, priv);
4684 
4685 	priv->pdev = pdev;
4686 	priv->device_id = device_id->driver_data;
4687 	priv->is_chain1_supported = true;
4688 	INIT_LIST_HEAD(&priv->vreg_list);
4689 	INIT_LIST_HEAD(&priv->clk_list);
4690 	icnss_allow_recursive_recovery(dev);
4691 
4692 	icnss_initialize_mem_pool(priv->device_id);
4693 
4694 	icnss_init_control_params(priv);
4695 
4696 	icnss_read_device_configs(priv);
4697 
4698 	ret = icnss_resource_parse(priv);
4699 	if (ret)
4700 		goto out_reset_drvdata;
4701 
4702 	ret = icnss_msa_dt_parse(priv);
4703 	if (ret)
4704 		goto out_free_resources;
4705 
4706 	ret = icnss_smmu_dt_parse(priv);
4707 	if (ret)
4708 		goto out_free_resources;
4709 
4710 	spin_lock_init(&priv->event_lock);
4711 	spin_lock_init(&priv->on_off_lock);
4712 	spin_lock_init(&priv->soc_wake_msg_lock);
4713 	mutex_init(&priv->dev_lock);
4714 	mutex_init(&priv->tcdev_lock);
4715 
4716 	priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
4717 	if (!priv->event_wq) {
4718 		icnss_pr_err("Workqueue creation failed\n");
4719 		ret = -EFAULT;
4720 		goto smmu_cleanup;
4721 	}
4722 
4723 	INIT_WORK(&priv->event_work, icnss_driver_event_work);
4724 	INIT_LIST_HEAD(&priv->event_list);
4725 
4726 	if (priv->is_slate_rfa)
4727 		init_completion(&priv->slate_boot_complete);
4728 
4729 	ret = icnss_register_fw_service(priv);
4730 	if (ret < 0) {
4731 		icnss_pr_err("fw service registration failed: %d\n", ret);
4732 		goto out_destroy_wq;
4733 	}
4734 
4735 	icnss_enable_recovery(priv);
4736 
4737 	icnss_debugfs_create(priv);
4738 
4739 	icnss_sysfs_create(priv);
4740 
4741 	ret = device_init_wakeup(&priv->pdev->dev, true);
4742 	if (ret)
4743 		icnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
4744 			     ret);
4745 
4746 	icnss_set_plat_priv(priv);
4747 
4748 	init_completion(&priv->unblock_shutdown);
4749 
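	/*
	 * WCN6750/WCN6450 only: set up the SOC-wake workqueue and message
	 * list, the generic netlink interface, runtime PM and the AOP
	 * interface, and start in the cold boot calibration state.
	 */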
4750 	if (priv->device_id == WCN6750_DEVICE_ID ||
4751 	    priv->device_id == WCN6450_DEVICE_ID) {
4752 		priv->soc_wake_wq = alloc_workqueue("icnss_soc_wake_event",
4753 						    WQ_UNBOUND|WQ_HIGHPRI, 1);
4754 		if (!priv->soc_wake_wq) {
4755 			icnss_pr_err("Soc wake Workqueue creation failed\n");
4756 			ret = -EFAULT;
4757 			goto out_unregister_fw_service;
4758 		}
4759 
4760 		INIT_WORK(&priv->soc_wake_msg_work, icnss_soc_wake_msg_work);
4761 		INIT_LIST_HEAD(&priv->soc_wake_msg_list);
4762 
4763 		ret = icnss_genl_init();
4764 		if (ret < 0)
4765 			icnss_pr_err("ICNSS genl init failed %d\n", ret);
4766 
4767 		init_completion(&priv->smp2p_soc_wake_wait);
4768 		icnss_runtime_pm_init(priv);
4769 		icnss_aop_interface_init(priv);
4770 		set_bit(ICNSS_COLD_BOOT_CAL, &priv->state);
4771 		priv->bdf_download_support = true;
4772 		register_trace_android_vh_rproc_recovery_set(rproc_restart_level_notifier, NULL);
4773 	}
4774 
4775 	if (priv->wpss_supported) {
4776 		ret = icnss_dms_init(priv);
4777 		if (ret)
4778 			icnss_pr_err("ICNSS DMS init failed %d\n", ret);
4779 		priv->use_nv_mac = icnss_use_nv_mac(priv);
4780 		icnss_pr_dbg("NV MAC feature is %s\n",
4781 			     priv->use_nv_mac ? "Mandatory":"Not Mandatory");
4782 	}
4783 
4784 	if (priv->wpss_supported || priv->rproc_fw_download)
4785 		INIT_WORK(&wpss_loader, icnss_wpss_load);
4786 
4787 	timer_setup(&priv->recovery_timer,
4788 		    icnss_recovery_timeout_hdlr, 0);
4789 
4790 	if (priv->wpss_self_recovery_enabled) {
4791 		INIT_WORK(&wpss_ssr_work, icnss_wpss_self_recovery);
4792 		timer_setup(&priv->wpss_ssr_timer,
4793 			    icnss_wpss_ssr_timeout_hdlr, 0);
4794 	}
4795 
4796 	INIT_LIST_HEAD(&priv->icnss_tcdev_list);
4797 
4798 	icnss_pr_info("Platform driver probed successfully\n");
4799 
4800 	return 0;
4801 
4802 out_unregister_fw_service:
4803 	icnss_unregister_fw_service(priv);
4804 out_destroy_wq:
4805 	destroy_workqueue(priv->event_wq);
4806 smmu_cleanup:
4807 	priv->iommu_domain = NULL;
4808 out_free_resources:
4809 	icnss_put_resources(priv);
4810 out_reset_drvdata:
4811 	icnss_deinitialize_mem_pool();
4812 	dev_set_drvdata(dev, NULL);
4813 	return ret;
4814 }
4815 
4816 void icnss_destroy_ramdump_device(struct icnss_ramdump_info *ramdump_info)
4817 {
4819 	if (IS_ERR_OR_NULL(ramdump_info))
4820 		return;
4821 
4822 	device_unregister(ramdump_info->dev);
4823 
4824 	ida_simple_remove(&rd_minor_id, ramdump_info->minor);
4825 
4826 	kfree(ramdump_info);
4827 }
4828 
4829 static void icnss_unregister_power_supply_notifier(struct icnss_priv *priv)
4830 {
4831 	if (priv->batt_psy)
4832 		power_supply_put(priv->batt_psy);
4833 
4834 	if (priv->psf_supported) {
4835 		flush_workqueue(priv->soc_update_wq);
4836 		destroy_workqueue(priv->soc_update_wq);
4837 		power_supply_unreg_notifier(&priv->psf_nb);
4838 	}
4839 }
4840 
4841 static int icnss_remove(struct platform_device *pdev)
4842 {
4843 	struct icnss_priv *priv = dev_get_drvdata(&pdev->dev);
4844 
4845 	icnss_pr_info("Removing driver: state: 0x%lx\n", priv->state);
4846 
4847 	del_timer(&priv->recovery_timer);
4848 
4849 	if (priv->wpss_self_recovery_enabled)
4850 		del_timer(&priv->wpss_ssr_timer);
4851 
4852 	device_init_wakeup(&priv->pdev->dev, false);
4853 
4854 	icnss_debugfs_destroy(priv);
4855 
4856 	icnss_unregister_power_supply_notifier(priv);
4857 
4858 	icnss_sysfs_destroy(priv);
4859 
4860 	complete_all(&priv->unblock_shutdown);
4861 
4862 	if (priv->is_slate_rfa) {
4863 		complete(&priv->slate_boot_complete);
4864 		icnss_slate_ssr_unregister_notifier(priv);
4865 		icnss_unregister_slate_event_notifier(priv);
4866 	}
4867 
4868 	icnss_destroy_ramdump_device(priv->msa0_dump_dev);
4869 
4870 	if (priv->wpss_supported) {
4871 		icnss_dms_deinit(priv);
4872 		icnss_wpss_early_ssr_unregister_notifier(priv);
4873 		icnss_wpss_ssr_unregister_notifier(priv);
4874 	} else {
4875 		icnss_modem_ssr_unregister_notifier(priv);
4876 		icnss_pdr_unregister_notifier(priv);
4877 	}
4878 
4879 	if (priv->device_id == WCN6750_DEVICE_ID ||
4880 	    priv->device_id == WCN6450_DEVICE_ID) {
4881 		icnss_genl_exit();
4882 		icnss_runtime_pm_deinit(priv);
4883 		unregister_trace_android_vh_rproc_recovery_set(rproc_restart_level_notifier, NULL);
4884 		complete_all(&priv->smp2p_soc_wake_wait);
4885 		icnss_destroy_ramdump_device(priv->m3_dump_phyareg);
4886 		icnss_destroy_ramdump_device(priv->m3_dump_phydbg);
4887 		icnss_destroy_ramdump_device(priv->m3_dump_wmac0reg);
4888 		icnss_destroy_ramdump_device(priv->m3_dump_wcssdbg);
4889 		icnss_destroy_ramdump_device(priv->m3_dump_phyapdmem);
4890 		if (priv->soc_wake_wq)
4891 			destroy_workqueue(priv->soc_wake_wq);
4892 		icnss_aop_interface_deinit(priv);
4893 	}
4894 
4895 	class_destroy(priv->icnss_ramdump_class);
4896 	unregister_chrdev_region(priv->icnss_ramdump_dev, RAMDUMP_NUM_DEVICES);
4897 
4898 	icnss_unregister_fw_service(priv);
4899 	if (priv->event_wq)
4900 		destroy_workqueue(priv->event_wq);
4901 
4902 	priv->iommu_domain = NULL;
4903 
4904 	icnss_hw_power_off(priv);
4905 
4906 	icnss_put_resources(priv);
4907 
4908 	icnss_deinitialize_mem_pool();
4909 
4910 	dev_set_drvdata(&pdev->dev, NULL);
4911 
4912 	return 0;
4913 }
4914 
4915 void icnss_recovery_timeout_hdlr(struct timer_list *t)
4916 {
4917 	struct icnss_priv *priv = from_timer(priv, t, recovery_timer);
4918 
4919 	/* This is to handle if slate is not up and modem SSR is triggered */
4920 	if (priv->is_slate_rfa && !test_bit(ICNSS_SLATE_UP, &priv->state))
4921 		return;
4922 
4923 	icnss_pr_err("Timeout waiting for FW Ready 0x%lx\n", priv->state);
4924 	ICNSS_ASSERT(0);
4925 }
4926 
4927 void icnss_wpss_ssr_timeout_hdlr(struct timer_list *t)
4928 {
4929 	struct icnss_priv *priv = from_timer(priv, t, wpss_ssr_timer);
4930 
4931 	icnss_pr_err("Timeout waiting for WPSS SSR notification 0x%lx\n",
4932 		      priv->state);
4933 	schedule_work(&wpss_ssr_work);
4934 }
4935 
4936 #ifdef CONFIG_PM_SLEEP
4937 static int icnss_pm_suspend(struct device *dev)
4938 {
4939 	struct icnss_priv *priv = dev_get_drvdata(dev);
4940 	int ret = 0;
4941 
4942 	if (priv->magic != ICNSS_MAGIC) {
4943 		icnss_pr_err("Invalid drvdata for pm suspend: dev %pK, data %pK, magic 0x%x\n",
4944 			     dev, priv, priv->magic);
4945 		return -EINVAL;
4946 	}
4947 
4948 	icnss_pr_vdbg("PM Suspend, state: 0x%lx\n", priv->state);
4949 
4950 	if (!priv->ops || !priv->ops->pm_suspend ||
4951 	    IS_ERR(priv->smp2p_info[ICNSS_SMP2P_OUT_POWER_SAVE].smem_state) ||
4952 	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
4953 		return 0;
4954 
4955 	ret = priv->ops->pm_suspend(dev);
4956 
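	/*
	 * On a successful driver suspend, WCN6750/WCN6450 also signal the
	 * FW to enter power save over SMP2P, unless a PD restart is in
	 * progress or MODE_ON is not set.
	 */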
4957 	if (ret == 0) {
4958 		if (priv->device_id == WCN6750_DEVICE_ID ||
4959 		    priv->device_id == WCN6450_DEVICE_ID) {
4960 			if (test_bit(ICNSS_PD_RESTART, &priv->state) ||
4961 			    !test_bit(ICNSS_MODE_ON, &priv->state))
4962 				return 0;
4963 
4964 			ret = icnss_send_smp2p(priv, ICNSS_POWER_SAVE_ENTER,
4965 					       ICNSS_SMP2P_OUT_POWER_SAVE);
4966 		}
4967 		priv->stats.pm_suspend++;
4968 		set_bit(ICNSS_PM_SUSPEND, &priv->state);
4969 	} else {
4970 		priv->stats.pm_suspend_err++;
4971 	}
4972 	return ret;
4973 }
4974 
4975 static int icnss_pm_resume(struct device *dev)
4976 {
4977 	struct icnss_priv *priv = dev_get_drvdata(dev);
4978 	int ret = 0;
4979 
4980 	if (priv->magic != ICNSS_MAGIC) {
4981 		icnss_pr_err("Invalid drvdata for pm resume: dev %pK, data %pK, magic 0x%x\n",
4982 			     dev, priv, priv->magic);
4983 		return -EINVAL;
4984 	}
4985 
4986 	icnss_pr_vdbg("PM resume, state: 0x%lx\n", priv->state);
4987 
4988 	if (!priv->ops || !priv->ops->pm_resume ||
4989 	    IS_ERR(priv->smp2p_info[ICNSS_SMP2P_OUT_POWER_SAVE].smem_state) ||
4990 	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
4991 		goto out;
4992 
4993 	ret = priv->ops->pm_resume(dev);
4994 
4995 out:
4996 	if (ret == 0) {
4997 		priv->stats.pm_resume++;
4998 		clear_bit(ICNSS_PM_SUSPEND, &priv->state);
4999 	} else {
5000 		priv->stats.pm_resume_err++;
5001 	}
5002 	return ret;
5003 }
5004 
5005 static int icnss_pm_suspend_noirq(struct device *dev)
5006 {
5007 	struct icnss_priv *priv = dev_get_drvdata(dev);
5008 	int ret = 0;
5009 
5010 	if (priv->magic != ICNSS_MAGIC) {
5011 		icnss_pr_err("Invalid drvdata for pm suspend_noirq: dev %pK, data %pK, magic 0x%x\n",
5012 			     dev, priv, priv->magic);
5013 		return -EINVAL;
5014 	}
5015 
5016 	icnss_pr_vdbg("PM suspend_noirq, state: 0x%lx\n", priv->state);
5017 
5018 	if (!priv->ops || !priv->ops->suspend_noirq ||
5019 	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
5020 		goto out;
5021 
5022 	ret = priv->ops->suspend_noirq(dev);
5023 
5024 out:
5025 	if (ret == 0) {
5026 		priv->stats.pm_suspend_noirq++;
5027 		set_bit(ICNSS_PM_SUSPEND_NOIRQ, &priv->state);
5028 	} else {
5029 		priv->stats.pm_suspend_noirq_err++;
5030 	}
5031 	return ret;
5032 }
5033 
5034 static int icnss_pm_resume_noirq(struct device *dev)
5035 {
5036 	struct icnss_priv *priv = dev_get_drvdata(dev);
5037 	int ret = 0;
5038 
5039 	if (priv->magic != ICNSS_MAGIC) {
5040 		icnss_pr_err("Invalid drvdata for pm resume_noirq: dev %pK, data %pK, magic 0x%x\n",
5041 			     dev, priv, priv->magic);
5042 		return -EINVAL;
5043 	}
5044 
5045 	icnss_pr_vdbg("PM resume_noirq, state: 0x%lx\n", priv->state);
5046 
5047 	if (!priv->ops || !priv->ops->resume_noirq ||
5048 	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
5049 		goto out;
5050 
5051 	ret = priv->ops->resume_noirq(dev);
5052 
5053 out:
5054 	if (ret == 0) {
5055 		priv->stats.pm_resume_noirq++;
5056 		clear_bit(ICNSS_PM_SUSPEND_NOIRQ, &priv->state);
5057 	} else {
5058 		priv->stats.pm_resume_noirq_err++;
5059 	}
5060 	return ret;
5061 }
5062 
5063 static int icnss_pm_runtime_suspend(struct device *dev)
5064 {
5065 	struct icnss_priv *priv = dev_get_drvdata(dev);
5066 	int ret = 0;
5067 
5068 	if (priv->device_id == ADRASTEA_DEVICE_ID) {
5069 		icnss_pr_err("Ignore runtime suspend\n");
5070 		goto out;
5071 	}
5072 
5073 	if (priv->magic != ICNSS_MAGIC) {
5074 		icnss_pr_err("Invalid drvdata for runtime suspend: dev %pK, data %pK, magic 0x%x\n",
5075 			     dev, priv, priv->magic);
5076 		return -EINVAL;
5077 	}
5078 
5079 	if (!priv->ops || !priv->ops->runtime_suspend ||
5080 	    IS_ERR(priv->smp2p_info[ICNSS_SMP2P_OUT_POWER_SAVE].smem_state))
5081 		goto out;
5082 
5083 	icnss_pr_vdbg("Runtime suspend\n");
5084 	ret = priv->ops->runtime_suspend(dev);
5085 	if (!ret) {
5086 		if (test_bit(ICNSS_PD_RESTART, &priv->state) ||
5087 		    !test_bit(ICNSS_MODE_ON, &priv->state))
5088 			return 0;
5089 
5090 		ret = icnss_send_smp2p(priv, ICNSS_POWER_SAVE_ENTER,
5091 				       ICNSS_SMP2P_OUT_POWER_SAVE);
5092 	}
5093 out:
5094 	return ret;
5095 }
5096 
5097 static int icnss_pm_runtime_resume(struct device *dev)
5098 {
5099 	struct icnss_priv *priv = dev_get_drvdata(dev);
5100 	int ret = 0;
5101 
5102 	if (priv->device_id == ADRASTEA_DEVICE_ID) {
5103 		icnss_pr_err("Ignore runtime resume\n");
5104 		goto out;
5105 	}
5106 
5107 	if (priv->magic != ICNSS_MAGIC) {
5108 		icnss_pr_err("Invalid drvdata for runtime resume: dev %pK, data %pK, magic 0x%x\n",
5109 			     dev, priv, priv->magic);
5110 		return -EINVAL;
5111 	}
5112 
5113 	if (!priv->ops || !priv->ops->runtime_resume ||
5114 	    IS_ERR(priv->smp2p_info[ICNSS_SMP2P_OUT_POWER_SAVE].smem_state))
5115 		goto out;
5116 
5117 	icnss_pr_vdbg("Runtime resume, state: 0x%lx\n", priv->state);
5118 
5119 	ret = priv->ops->runtime_resume(dev);
5120 
5121 out:
5122 	return ret;
5123 }
5124 
5125 static int icnss_pm_runtime_idle(struct device *dev)
5126 {
5127 	struct icnss_priv *priv = dev_get_drvdata(dev);
5128 
5129 	if (priv->device_id == ADRASTEA_DEVICE_ID) {
5130 		icnss_pr_err("Ignore runtime idle\n");
5131 		goto out;
5132 	}
5133 
5134 	icnss_pr_vdbg("Runtime idle\n");
5135 
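	/*
	 * Queue an autosuspend request instead of suspending from the idle
	 * callback; returning -EBUSY tells the PM core not to suspend now.
	 */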
5136 	pm_request_autosuspend(dev);
5137 
5138 out:
5139 	return -EBUSY;
5140 }
5141 #endif
5142 
5143 static const struct dev_pm_ops icnss_pm_ops = {
5144 	SET_SYSTEM_SLEEP_PM_OPS(icnss_pm_suspend,
5145 				icnss_pm_resume)
5146 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(icnss_pm_suspend_noirq,
5147 				      icnss_pm_resume_noirq)
5148 	SET_RUNTIME_PM_OPS(icnss_pm_runtime_suspend, icnss_pm_runtime_resume,
5149 			   icnss_pm_runtime_idle)
5150 };
5151 
5152 static struct platform_driver icnss_driver = {
5153 	.probe  = icnss_probe,
5154 	.remove = icnss_remove,
5155 	.driver = {
5156 		.name = "icnss2",
5157 		.pm = &icnss_pm_ops,
5158 		.of_match_table = icnss_dt_match,
5159 	},
5160 };
5161 
5162 static int __init icnss_initialize(void)
5163 {
5164 	icnss_debug_init();
5165 	return platform_driver_register(&icnss_driver);
5166 }
5167 
5168 static void __exit icnss_exit(void)
5169 {
5170 	platform_driver_unregister(&icnss_driver);
5171 	icnss_debug_deinit();
5172 }
5173 
5175 module_init(icnss_initialize);
5176 module_exit(icnss_exit);
5177 
5178 MODULE_LICENSE("GPL v2");
5179 MODULE_DESCRIPTION("iWCN CORE platform driver");
5180