1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2015-2020, 2021, The Linux Foundation. All rights reserved.
4  * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #define pr_fmt(fmt) "icnss2: " fmt
8 
9 #include <linux/of_address.h>
10 #include <linux/clk.h>
11 #include <linux/iommu.h>
12 #include <linux/export.h>
13 #include <linux/err.h>
14 #include <linux/of.h>
15 #include <linux/of_device.h>
16 #include <linux/init.h>
17 #include <linux/io.h>
18 #include <linux/module.h>
19 #include <linux/kernel.h>
20 #include <linux/debugfs.h>
21 #include <linux/seq_file.h>
22 #include <linux/slab.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/interrupt.h>
25 #include <linux/sched.h>
26 #include <linux/delay.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/thread_info.h>
29 #include <linux/uaccess.h>
30 #include <linux/etherdevice.h>
31 #include <linux/of.h>
32 #include <linux/of_irq.h>
33 #include <linux/pm_runtime.h>
34 #include <linux/soc/qcom/qmi.h>
35 #include <linux/sysfs.h>
36 #include <linux/thermal.h>
37 #include <soc/qcom/memory_dump.h>
38 #include <soc/qcom/secure_buffer.h>
39 #include <soc/qcom/socinfo.h>
40 #include <soc/qcom/qcom_ramdump.h>
41 #include <linux/soc/qcom/smem.h>
42 #include <linux/soc/qcom/smem_state.h>
43 #include <linux/remoteproc.h>
44 #include <linux/remoteproc/qcom_rproc.h>
45 #include <linux/soc/qcom/pdr.h>
46 #include <linux/remoteproc.h>
47 #include <linux/version.h>
48 #include <trace/hooks/remoteproc.h>
49 #ifdef CONFIG_SLATE_MODULE_ENABLED
50 #include <linux/soc/qcom/slatecom_interface.h>
51 #include <linux/soc/qcom/slate_events_bridge_intf.h>
52 #include <uapi/linux/slatecom_interface.h>
53 #endif
54 #include "main.h"
55 #include "qmi.h"
56 #include "debug.h"
57 #include "power.h"
58 #include "genl.h"
59 
60 #define MAX_PROP_SIZE			32
61 #define NUM_LOG_PAGES			10
62 #define NUM_LOG_LONG_PAGES		4
63 #define ICNSS_MAGIC			0x5abc5abc
64 
65 #define ICNSS_WLAN_SERVICE_NAME					"wlan/fw"
66 #define ICNSS_WLANPD_NAME					"msm/modem/wlan_pd"
67 #define ICNSS_DEFAULT_FEATURE_MASK 0x01
68 
69 #define ICNSS_M3_SEGMENT(segment)		"wcnss_"segment
70 #define ICNSS_M3_SEGMENT_PHYAREG		"phyareg"
71 #define ICNSS_M3_SEGMENT_PHYA			"phydbg"
72 #define ICNSS_M3_SEGMENT_WMACREG		"wmac0reg"
73 #define ICNSS_M3_SEGMENT_WCSSDBG		"WCSSDBG"
74 #define ICNSS_M3_SEGMENT_PHYAM3			"PHYAPDMEM"
75 
76 #define ICNSS_QUIRKS_DEFAULT		BIT(FW_REJUVENATE_ENABLE)
77 #define ICNSS_MAX_PROBE_CNT		2
78 
79 #define ICNSS_BDF_TYPE_DEFAULT         ICNSS_BDF_ELF
80 
81 #define PROBE_TIMEOUT                 15000
82 #define SMP2P_SOC_WAKE_TIMEOUT        500
83 #ifdef CONFIG_ICNSS2_DEBUG
84 static unsigned long qmi_timeout = 3000;
85 module_param(qmi_timeout, ulong, 0600);
86 #define WLFW_TIMEOUT                    msecs_to_jiffies(qmi_timeout)
87 #else
88 #define WLFW_TIMEOUT                    msecs_to_jiffies(3000)
89 #endif
90 
91 #define ICNSS_RECOVERY_TIMEOUT		60000
92 #define ICNSS_WPSS_SSR_TIMEOUT          5000
93 #define ICNSS_CAL_TIMEOUT		40000
94 
95 static struct icnss_priv *penv;
96 static struct work_struct wpss_loader;
97 static struct work_struct wpss_ssr_work;
98 uint64_t dynamic_feature_mask = ICNSS_DEFAULT_FEATURE_MASK;
99 
100 #define ICNSS_EVENT_PENDING			2989
101 
102 #define ICNSS_EVENT_SYNC			BIT(0)
103 #define ICNSS_EVENT_UNINTERRUPTIBLE		BIT(1)
104 #define ICNSS_EVENT_SYNC_UNINTERRUPTIBLE	(ICNSS_EVENT_UNINTERRUPTIBLE | \
105 						 ICNSS_EVENT_SYNC)
106 #define ICNSS_DMS_QMI_CONNECTION_WAIT_MS 50
107 #define ICNSS_DMS_QMI_CONNECTION_WAIT_RETRY 200
108 
109 #define SMP2P_GET_MAX_RETRY		4
110 #define SMP2P_GET_RETRY_DELAY_MS	500
111 
112 #define RAMDUMP_NUM_DEVICES		256
113 #define ICNSS_RAMDUMP_NAME		"icnss_ramdump"
114 
115 #define WLAN_EN_TEMP_THRESHOLD		5000
116 #define WLAN_EN_DELAY			500
117 
118 #define ICNSS_RPROC_LEN			100
119 static DEFINE_IDA(rd_minor_id);
120 
121 enum icnss_pdr_cause_index {
122 	ICNSS_FW_CRASH,
123 	ICNSS_ROOT_PD_CRASH,
124 	ICNSS_ROOT_PD_SHUTDOWN,
125 	ICNSS_HOST_ERROR,
126 };
127 
128 static const char * const icnss_pdr_cause[] = {
129 	[ICNSS_FW_CRASH] = "FW crash",
130 	[ICNSS_ROOT_PD_CRASH] = "Root PD crashed",
131 	[ICNSS_ROOT_PD_SHUTDOWN] = "Root PD shutdown",
132 	[ICNSS_HOST_ERROR] = "Host error",
133 };
134 
135 static void icnss_set_plat_priv(struct icnss_priv *priv)
136 {
137 	penv = priv;
138 }
139 
140 static struct icnss_priv *icnss_get_plat_priv(void)
141 {
142 	return penv;
143 }
144 
145 static inline void icnss_wpss_unload(struct icnss_priv *priv)
146 {
147 	if (priv && priv->rproc) {
148 		rproc_shutdown(priv->rproc);
149 		rproc_put(priv->rproc);
150 		priv->rproc = NULL;
151 	}
152 }
153 
154 static ssize_t icnss_sysfs_store(struct kobject *kobj,
155 				 struct kobj_attribute *attr,
156 				 const char *buf, size_t count)
157 {
158 	struct icnss_priv *priv = icnss_get_plat_priv();
159 
160 	if (!priv)
161 		return count;
162 
163 	icnss_pr_dbg("Received shutdown indication");
164 
165 	atomic_set(&priv->is_shutdown, true);
166 	if ((priv->wpss_supported || priv->rproc_fw_download) &&
167 	    priv->device_id == ADRASTEA_DEVICE_ID)
168 		icnss_wpss_unload(priv);
169 	return count;
170 }
171 
172 static struct kobj_attribute icnss_sysfs_attribute =
173 __ATTR(shutdown, 0660, NULL, icnss_sysfs_store);
174 
175 static void icnss_pm_stay_awake(struct icnss_priv *priv)
176 {
177 	if (atomic_inc_return(&priv->pm_count) != 1)
178 		return;
179 
180 	icnss_pr_vdbg("PM stay awake, state: 0x%lx, count: %d\n", priv->state,
181 		     atomic_read(&priv->pm_count));
182 
183 	pm_stay_awake(&priv->pdev->dev);
184 
185 	priv->stats.pm_stay_awake++;
186 }
187 
188 static void icnss_pm_relax(struct icnss_priv *priv)
189 {
190 	int r = atomic_dec_return(&priv->pm_count);
191 
192 	WARN_ON(r < 0);
193 
194 	if (r != 0)
195 		return;
196 
197 	icnss_pr_vdbg("PM relax, state: 0x%lx, count: %d\n", priv->state,
198 		     atomic_read(&priv->pm_count));
199 
200 	pm_relax(&priv->pdev->dev);
201 	priv->stats.pm_relax++;
202 }
203 
204 char *icnss_driver_event_to_str(enum icnss_driver_event_type type)
205 {
206 	switch (type) {
207 	case ICNSS_DRIVER_EVENT_SERVER_ARRIVE:
208 		return "SERVER_ARRIVE";
209 	case ICNSS_DRIVER_EVENT_SERVER_EXIT:
210 		return "SERVER_EXIT";
211 	case ICNSS_DRIVER_EVENT_FW_READY_IND:
212 		return "FW_READY";
213 	case ICNSS_DRIVER_EVENT_REGISTER_DRIVER:
214 		return "REGISTER_DRIVER";
215 	case ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
216 		return "UNREGISTER_DRIVER";
217 	case ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN:
218 		return "PD_SERVICE_DOWN";
219 	case ICNSS_DRIVER_EVENT_FW_EARLY_CRASH_IND:
220 		return "FW_EARLY_CRASH_IND";
221 	case ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
222 		return "IDLE_SHUTDOWN";
223 	case ICNSS_DRIVER_EVENT_IDLE_RESTART:
224 		return "IDLE_RESTART";
225 	case ICNSS_DRIVER_EVENT_FW_INIT_DONE_IND:
226 		return "FW_INIT_DONE";
227 	case ICNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
228 		return "QDSS_TRACE_REQ_MEM";
229 	case ICNSS_DRIVER_EVENT_QDSS_TRACE_SAVE:
230 		return "QDSS_TRACE_SAVE";
231 	case ICNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
232 		return "QDSS_TRACE_FREE";
233 	case ICNSS_DRIVER_EVENT_M3_DUMP_UPLOAD_REQ:
234 		return "M3_DUMP_UPLOAD";
235 	case ICNSS_DRIVER_EVENT_IMS_WFC_CALL_IND:
236 		return "IMS_WFC_CALL_IND";
237 	case ICNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND:
238 		return "WLFW_TWT_CFG_IND";
239 	case ICNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA:
240 		return "QDSS_TRACE_REQ_DATA";
241 	case ICNSS_DRIVER_EVENT_SUBSYS_RESTART_LEVEL:
242 		return "SUBSYS_RESTART_LEVEL";
243 	case ICNSS_DRIVER_EVENT_MAX:
244 		return "EVENT_MAX";
245 	}
246 
247 	return "UNKNOWN";
248 };
249 
250 char *icnss_soc_wake_event_to_str(enum icnss_soc_wake_event_type type)
251 {
252 	switch (type) {
253 	case ICNSS_SOC_WAKE_REQUEST_EVENT:
254 		return "SOC_WAKE_REQUEST";
255 	case ICNSS_SOC_WAKE_RELEASE_EVENT:
256 		return "SOC_WAKE_RELEASE";
257 	case ICNSS_SOC_WAKE_EVENT_MAX:
258 		return "SOC_EVENT_MAX";
259 	}
260 
261 	return "UNKNOWN";
262 };
263 
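/* Allocate a driver event, queue it on the event workqueue and, for
 * ICNSS_EVENT_SYNC callers, wait for the handler to complete. GFP_ATOMIC
 * is used when the caller cannot sleep.
 */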
264 int icnss_driver_event_post(struct icnss_priv *priv,
265 			    enum icnss_driver_event_type type,
266 			    u32 flags, void *data)
267 {
268 	struct icnss_driver_event *event;
269 	unsigned long irq_flags;
270 	int gfp = GFP_KERNEL;
271 	int ret = 0;
272 
273 	if (!priv)
274 		return -ENODEV;
275 
276 	icnss_pr_dbg("Posting event: %s(%d), %s, flags: 0x%x, state: 0x%lx\n",
277 		     icnss_driver_event_to_str(type), type, current->comm,
278 		     flags, priv->state);
279 
280 	if (type >= ICNSS_DRIVER_EVENT_MAX) {
281 		icnss_pr_err("Invalid Event type: %d, can't post", type);
282 		return -EINVAL;
283 	}
284 
285 	if (in_interrupt() || !preemptible() || rcu_preempt_depth())
286 		gfp = GFP_ATOMIC;
287 
288 	event = kzalloc(sizeof(*event), gfp);
289 	if (event == NULL)
290 		return -ENOMEM;
291 
292 	icnss_pm_stay_awake(priv);
293 
294 	event->type = type;
295 	event->data = data;
296 	init_completion(&event->complete);
297 	event->ret = ICNSS_EVENT_PENDING;
298 	event->sync = !!(flags & ICNSS_EVENT_SYNC);
299 
300 	spin_lock_irqsave(&priv->event_lock, irq_flags);
301 	list_add_tail(&event->list, &priv->event_list);
302 	spin_unlock_irqrestore(&priv->event_lock, irq_flags);
303 
304 	priv->stats.events[type].posted++;
305 	queue_work(priv->event_wq, &priv->event_work);
306 
307 	if (!(flags & ICNSS_EVENT_SYNC))
308 		goto out;
309 
310 	if (flags & ICNSS_EVENT_UNINTERRUPTIBLE)
311 		wait_for_completion(&event->complete);
312 	else
313 		ret = wait_for_completion_interruptible(&event->complete);
314 
315 	icnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
316 		     icnss_driver_event_to_str(type), type, priv->state, ret,
317 		     event->ret);
318 
319 	spin_lock_irqsave(&priv->event_lock, irq_flags);
320 	if (ret == -ERESTARTSYS && event->ret == ICNSS_EVENT_PENDING) {
321 		event->sync = false;
322 		spin_unlock_irqrestore(&priv->event_lock, irq_flags);
323 		ret = -EINTR;
324 		goto out;
325 	}
326 	spin_unlock_irqrestore(&priv->event_lock, irq_flags);
327 
328 	ret = event->ret;
329 	kfree(event);
330 
331 out:
332 	icnss_pm_relax(priv);
333 	return ret;
334 }
335 
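/* Same flow as icnss_driver_event_post(), but for SOC wake request/release
 * events handled on the dedicated soc_wake workqueue.
 */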
336 int icnss_soc_wake_event_post(struct icnss_priv *priv,
337 			      enum icnss_soc_wake_event_type type,
338 			      u32 flags, void *data)
339 {
340 	struct icnss_soc_wake_event *event;
341 	unsigned long irq_flags;
342 	int gfp = GFP_KERNEL;
343 	int ret = 0;
344 
345 	if (!priv)
346 		return -ENODEV;
347 
348 	icnss_pr_soc_wake("Posting event: %s(%d), %s, flags: 0x%x, state: 0x%lx\n",
349 			  icnss_soc_wake_event_to_str(type),
350 			  type, current->comm, flags, priv->state);
351 
352 	if (type >= ICNSS_SOC_WAKE_EVENT_MAX) {
353 		icnss_pr_err("Invalid Event type: %d, can't post", type);
354 		return -EINVAL;
355 	}
356 
357 	if (in_interrupt() || irqs_disabled())
358 		gfp = GFP_ATOMIC;
359 
360 	event = kzalloc(sizeof(*event), gfp);
361 	if (!event)
362 		return -ENOMEM;
363 
364 	icnss_pm_stay_awake(priv);
365 
366 	event->type = type;
367 	event->data = data;
368 	init_completion(&event->complete);
369 	event->ret = ICNSS_EVENT_PENDING;
370 	event->sync = !!(flags & ICNSS_EVENT_SYNC);
371 
372 	spin_lock_irqsave(&priv->soc_wake_msg_lock, irq_flags);
373 	list_add_tail(&event->list, &priv->soc_wake_msg_list);
374 	spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
375 
376 	priv->stats.soc_wake_events[type].posted++;
377 	queue_work(priv->soc_wake_wq, &priv->soc_wake_msg_work);
378 
379 	if (!(flags & ICNSS_EVENT_SYNC))
380 		goto out;
381 
382 	if (flags & ICNSS_EVENT_UNINTERRUPTIBLE)
383 		wait_for_completion(&event->complete);
384 	else
385 		ret = wait_for_completion_interruptible(&event->complete);
386 
387 	icnss_pr_soc_wake("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
388 			  icnss_soc_wake_event_to_str(type),
389 			  type, priv->state, ret, event->ret);
390 
391 	spin_lock_irqsave(&priv->soc_wake_msg_lock, irq_flags);
392 	if (ret == -ERESTARTSYS && event->ret == ICNSS_EVENT_PENDING) {
393 		event->sync = false;
394 		spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
395 		ret = -EINTR;
396 		goto out;
397 	}
398 	spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
399 
400 	ret = event->ret;
401 	kfree(event);
402 
403 out:
404 	icnss_pm_relax(priv);
405 	return ret;
406 }
407 
408 bool icnss_is_fw_ready(void)
409 {
410 	if (!penv)
411 		return false;
412 	else
413 		return test_bit(ICNSS_FW_READY, &penv->state);
414 }
415 EXPORT_SYMBOL(icnss_is_fw_ready);
416 
417 void icnss_block_shutdown(bool status)
418 {
419 	if (!penv)
420 		return;
421 
422 	if (status) {
423 		set_bit(ICNSS_BLOCK_SHUTDOWN, &penv->state);
424 		reinit_completion(&penv->unblock_shutdown);
425 	} else {
426 		clear_bit(ICNSS_BLOCK_SHUTDOWN, &penv->state);
427 		complete(&penv->unblock_shutdown);
428 	}
429 }
430 EXPORT_SYMBOL(icnss_block_shutdown);
431 
432 bool icnss_is_fw_down(void)
433 {
434 
435 	struct icnss_priv *priv = icnss_get_plat_priv();
436 
437 	if (!priv)
438 		return false;
439 
440 	return test_bit(ICNSS_FW_DOWN, &priv->state) ||
441 		test_bit(ICNSS_PD_RESTART, &priv->state) ||
442 		test_bit(ICNSS_REJUVENATE, &priv->state);
443 }
444 EXPORT_SYMBOL(icnss_is_fw_down);
445 
446 unsigned long icnss_get_device_config(void)
447 {
448 	struct icnss_priv *priv = icnss_get_plat_priv();
449 
450 	if (!priv)
451 		return 0;
452 
453 	return priv->device_config;
454 }
455 EXPORT_SYMBOL(icnss_get_device_config);
456 
457 bool icnss_is_rejuvenate(void)
458 {
459 	if (!penv)
460 		return false;
461 	else
462 		return test_bit(ICNSS_REJUVENATE, &penv->state);
463 }
464 EXPORT_SYMBOL(icnss_is_rejuvenate);
465 
466 bool icnss_is_pdr(void)
467 {
468 	if (!penv)
469 		return false;
470 	else
471 		return test_bit(ICNSS_PDR, &penv->state);
472 }
473 EXPORT_SYMBOL(icnss_is_pdr);
474 
475 static bool icnss_is_smp2p_valid(struct icnss_priv *priv,
476 			  enum smp2p_out_entry smp2p_entry)
477 {
478 	if (priv->device_id == WCN6750_DEVICE_ID ||
479 	    priv->device_id == WCN6450_DEVICE_ID ||
480 	    priv->wpss_supported)
481 		return IS_ERR_OR_NULL(priv->smp2p_info[smp2p_entry].smem_state);
482 	else
483 		return 0;
484 }
485 
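/* Send a message to FW over the given outbound SMP2P entry. The value
 * written is (seq << ICNSS_SMEM_SEQ_NO_POS) | msg_id; ICNSS_RESET_MSG
 * clears the state unconditionally, other messages are dropped while FW
 * is down. SOC wake request/release waits for the FW ack with a timeout.
 */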
486 static int icnss_send_smp2p(struct icnss_priv *priv,
487 			    enum icnss_smp2p_msg_id msg_id,
488 			    enum smp2p_out_entry smp2p_entry)
489 {
490 	unsigned int value = 0;
491 	int ret;
492 
493 	if (!priv || icnss_is_smp2p_valid(priv, smp2p_entry))
494 		return -EINVAL;
495 
496 	/* No Need to check FW_DOWN for ICNSS_RESET_MSG */
497 	if (msg_id == ICNSS_RESET_MSG) {
498 		priv->smp2p_info[smp2p_entry].seq = 0;
499 		ret = qcom_smem_state_update_bits(
500 				priv->smp2p_info[smp2p_entry].smem_state,
501 				ICNSS_SMEM_VALUE_MASK,
502 				0);
503 		if (ret)
504 			icnss_pr_err("Error in SMP2P send, ret: %d, %s\n",
505 				     ret, icnss_smp2p_str[smp2p_entry]);
506 
507 		return ret;
508 	}
509 
510 	if (test_bit(ICNSS_FW_DOWN, &priv->state) ||
511 	    !test_bit(ICNSS_FW_READY, &priv->state)) {
512 		icnss_pr_smp2p("FW down, skip sending SMP2P message, state: 0x%lx\n",
513 				  priv->state);
514 		return -EINVAL;
515 	}
516 
517 	value |= priv->smp2p_info[smp2p_entry].seq++;
518 	value <<= ICNSS_SMEM_SEQ_NO_POS;
519 	value |= msg_id;
520 
521 	icnss_pr_smp2p("Sending SMP2P value: 0x%X\n", value);
522 
523 	if (msg_id == ICNSS_SOC_WAKE_REQ || msg_id == ICNSS_SOC_WAKE_REL)
524 		reinit_completion(&penv->smp2p_soc_wake_wait);
525 
526 	ret = qcom_smem_state_update_bits(
527 			priv->smp2p_info[smp2p_entry].smem_state,
528 			ICNSS_SMEM_VALUE_MASK,
529 			value);
530 	if (ret) {
531 		icnss_pr_smp2p("Error in SMP2P send ret: %d, %s\n", ret,
532 			       icnss_smp2p_str[smp2p_entry]);
533 	} else {
534 		if (msg_id == ICNSS_SOC_WAKE_REQ ||
535 		    msg_id == ICNSS_SOC_WAKE_REL) {
536 			if (!wait_for_completion_timeout(
537 					&priv->smp2p_soc_wake_wait,
538 					msecs_to_jiffies(SMP2P_SOC_WAKE_TIMEOUT))) {
539 				icnss_pr_err("SMP2P Soc Wake timeout msg %d, %s\n", msg_id,
540 					     icnss_smp2p_str[smp2p_entry]);
541 				if (!test_bit(ICNSS_FW_DOWN, &priv->state))
542 					ICNSS_ASSERT(0);
543 			}
544 		}
545 	}
546 
547 	return ret;
548 }
549 
550 bool icnss_is_low_power(void)
551 {
552 	if (!penv)
553 		return false;
554 	else
555 		return test_bit(ICNSS_LOW_POWER, &penv->state);
556 }
557 EXPORT_SYMBOL(icnss_is_low_power);
558 
559 static irqreturn_t fw_error_fatal_handler(int irq, void *ctx)
560 {
561 	struct icnss_priv *priv = ctx;
562 
563 	if (priv)
564 		priv->force_err_fatal = true;
565 
566 	icnss_pr_err("Received force error fatal request from FW\n");
567 
568 	return IRQ_HANDLED;
569 }
570 
571 static irqreturn_t fw_crash_indication_handler(int irq, void *ctx)
572 {
573 	struct icnss_priv *priv = ctx;
574 	struct icnss_uevent_fw_down_data fw_down_data = {0};
575 
576 	icnss_pr_err("Received early crash indication from FW\n");
577 
578 	if (priv) {
579 		if (priv->wpss_self_recovery_enabled)
580 			mod_timer(&priv->wpss_ssr_timer,
581 				  jiffies + msecs_to_jiffies(ICNSS_WPSS_SSR_TIMEOUT));
582 
583 		set_bit(ICNSS_FW_DOWN, &priv->state);
584 		icnss_ignore_fw_timeout(true);
585 
586 		if (test_bit(ICNSS_FW_READY, &priv->state)) {
587 			clear_bit(ICNSS_FW_READY, &priv->state);
588 			fw_down_data.crashed = true;
589 			icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN,
590 						 &fw_down_data);
591 		}
592 	}
593 
594 	icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_FW_EARLY_CRASH_IND,
595 				0, NULL);
596 
597 	return IRQ_HANDLED;
598 }
599 
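/* Hook the "qcom,smp2p-force-fatal-error" interrupt from the
 * qcom,smp2p_map_wlan_1_in DT node so a force-error-fatal request from FW
 * can be handled.
 */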
600 static void register_fw_error_notifications(struct device *dev)
601 {
602 	struct icnss_priv *priv = dev_get_drvdata(dev);
603 	struct device_node *dev_node;
604 	int irq = 0, ret = 0;
605 
606 	if (!priv)
607 		return;
608 
609 	dev_node = of_find_node_by_name(NULL, "qcom,smp2p_map_wlan_1_in");
610 	if (!dev_node) {
611 		icnss_pr_err("Failed to get smp2p node for force-fatal-error\n");
612 		return;
613 	}
614 
615 	icnss_pr_dbg("smp2p node->name=%s\n", dev_node->name);
616 
617 	if (strcmp("qcom,smp2p_map_wlan_1_in", dev_node->name) == 0) {
618 		ret = irq = of_irq_get_byname(dev_node,
619 					      "qcom,smp2p-force-fatal-error");
620 		if (ret < 0) {
621 			icnss_pr_err("Unable to get force-fatal-error irq %d\n",
622 				     irq);
623 			return;
624 		}
625 	}
626 
627 	ret = devm_request_threaded_irq(dev, irq, NULL, fw_error_fatal_handler,
628 					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
629 					"wlanfw-err", priv);
630 	if (ret < 0) {
631 		icnss_pr_err("Unable to register for error fatal IRQ handler %d ret = %d",
632 			     irq, ret);
633 		return;
634 	}
635 	icnss_pr_dbg("FW force error fatal handler registered irq = %d\n", irq);
636 	priv->fw_error_fatal_irq = irq;
637 }
638 
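/* Hook the "qcom,smp2p-early-crash-ind" interrupt so the host is notified
 * as soon as FW detects a crash, ahead of the PD service down notification.
 */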
639 static void register_early_crash_notifications(struct device *dev)
640 {
641 	struct icnss_priv *priv = dev_get_drvdata(dev);
642 	struct device_node *dev_node;
643 	int irq = 0, ret = 0;
644 
645 	if (!priv)
646 		return;
647 
648 	dev_node = of_find_node_by_name(NULL, "qcom,smp2p_map_wlan_1_in");
649 	if (!dev_node) {
650 		icnss_pr_err("Failed to get smp2p node for early-crash-ind\n");
651 		return;
652 	}
653 
654 	icnss_pr_dbg("smp2p node->name=%s\n", dev_node->name);
655 
656 	if (strcmp("qcom,smp2p_map_wlan_1_in", dev_node->name) == 0) {
657 		ret = irq = of_irq_get_byname(dev_node,
658 					      "qcom,smp2p-early-crash-ind");
659 		if (ret < 0) {
660 			icnss_pr_err("Unable to get early-crash-ind irq %d\n",
661 				     irq);
662 			return;
663 		}
664 	}
665 
666 	ret = devm_request_threaded_irq(dev, irq, NULL,
667 					fw_crash_indication_handler,
668 					IRQF_ONESHOT | IRQF_TRIGGER_RISING,
669 					"wlanfw-early-crash-ind", priv);
670 	if (ret < 0) {
671 		icnss_pr_err("Unable to register for early crash indication IRQ handler %d ret = %d",
672 			     irq, ret);
673 		return;
674 	}
675 	icnss_pr_dbg("FW crash indication handler registered irq = %d\n", irq);
676 	priv->fw_early_crash_irq = irq;
677 }
678 
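/* Read the thermal zone named by the "tsens" DT property and return its
 * current temperature through *temp.
 */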
679 static int icnss_get_temperature(struct icnss_priv *priv, int *temp)
680 {
681 	struct thermal_zone_device *thermal_dev;
682 	const char *tsens;
683 	int ret;
684 
685 	ret = of_property_read_string(priv->pdev->dev.of_node,
686 				      "tsens",
687 				      &tsens);
688 	if (ret)
689 		return ret;
690 
691 	icnss_pr_dbg("Thermal Sensor is %s\n", tsens);
692 	thermal_dev = thermal_zone_get_zone_by_name(tsens);
693 	if (IS_ERR_OR_NULL(thermal_dev)) {
694 		icnss_pr_err("Fail to get thermal zone. ret: %ld",
695 			     PTR_ERR(thermal_dev));
696 		return PTR_ERR(thermal_dev);
697 	}
698 
699 	ret = thermal_zone_get_temp(thermal_dev, temp);
700 	if (ret)
701 		icnss_pr_err("Fail to get temperature. ret: %d", ret);
702 
703 	return ret;
704 }
705 
706 static irqreturn_t fw_soc_wake_ack_handler(int irq, void *ctx)
707 {
708 	struct icnss_priv *priv = ctx;
709 
710 	if (priv)
711 		complete(&priv->smp2p_soc_wake_wait);
712 
713 	return IRQ_HANDLED;
714 }
715 
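/* Hook the "qcom,smp2p-soc-wake-ack" interrupt used by FW to acknowledge
 * SOC wake request/release messages sent over SMP2P.
 */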
716 static void register_soc_wake_notif(struct device *dev)
717 {
718 	struct icnss_priv *priv = dev_get_drvdata(dev);
719 	struct device_node *dev_node;
720 	int irq = 0, ret = 0;
721 
722 	if (!priv)
723 		return;
724 
725 	dev_node = of_find_node_by_name(NULL, "qcom,smp2p_map_wlan_2_in");
726 	if (!dev_node) {
727 		icnss_pr_err("Failed to get smp2p node for soc-wake-ack\n");
728 		return;
729 	}
730 
731 	icnss_pr_dbg("smp2p node->name=%s\n", dev_node->name);
732 
733 	if (strcmp("qcom,smp2p_map_wlan_2_in", dev_node->name) == 0) {
734 		ret = irq = of_irq_get_byname(dev_node,
735 					      "qcom,smp2p-soc-wake-ack");
736 		if (ret < 0) {
737 			icnss_pr_err("Unable to get soc wake ack irq %d\n",
738 				     irq);
739 			return;
740 		}
741 	}
742 
743 	ret = devm_request_threaded_irq(dev, irq, NULL,
744 					fw_soc_wake_ack_handler,
745 					IRQF_ONESHOT | IRQF_TRIGGER_RISING |
746 					IRQF_TRIGGER_FALLING,
747 					"wlanfw-soc-wake-ack", priv);
748 	if (ret < 0) {
749 		icnss_pr_err("Unable to register for SOC Wake ACK IRQ handler %d ret = %d",
750 			     irq, ret);
751 		return;
752 	}
753 	icnss_pr_dbg("FW SOC Wake ACK handler registered irq = %d\n", irq);
754 	priv->fw_soc_wake_ack_irq = irq;
755 }
756 
757 
758 int icnss_call_driver_uevent(struct icnss_priv *priv,
759 				    enum icnss_uevent uevent, void *data)
760 {
761 	struct icnss_uevent_data uevent_data;
762 
763 	if (!priv->ops || !priv->ops->uevent)
764 		return 0;
765 
766 	icnss_pr_dbg("Calling driver uevent state: 0x%lx, uevent: %d\n",
767 		     priv->state, uevent);
768 
769 	uevent_data.uevent = uevent;
770 	uevent_data.data = data;
771 
772 	return priv->ops->uevent(&priv->pdev->dev, &uevent_data);
773 }
774 
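/* Fetch the WLAN MAC address from DMS over QMI and, if valid, send it to
 * FW. When the use-nv-mac DT property is set, retry the DMS query and
 * assert if a provisioned MAC still cannot be obtained.
 */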
775 static int icnss_setup_dms_mac(struct icnss_priv *priv)
776 {
777 	int i;
778 	int ret = 0;
779 
780 	ret = icnss_qmi_get_dms_mac(priv);
781 	if (ret == 0 && priv->dms.mac_valid)
782 		goto qmi_send;
783 
784 	/* DTSI property use-nv-mac is used to force DMS MAC address for WLAN.
785 	 * Thus assert on failure to get MAC from DMS even after retries
786 	 */
787 	if (priv->use_nv_mac) {
788 		for (i = 0; i < ICNSS_DMS_QMI_CONNECTION_WAIT_RETRY; i++) {
789 			if (priv->dms.mac_valid)
790 				break;
791 
792 			ret = icnss_qmi_get_dms_mac(priv);
793 			if (ret != -EAGAIN)
794 				break;
795 			msleep(ICNSS_DMS_QMI_CONNECTION_WAIT_MS);
796 		}
797 		if (!priv->dms.nv_mac_not_prov && !priv->dms.mac_valid) {
798 			icnss_pr_err("Unable to get MAC from DMS after retries\n");
799 			ICNSS_ASSERT(0);
800 			return -EINVAL;
801 		}
802 	}
803 qmi_send:
804 	if (priv->dms.mac_valid)
805 		ret =
806 		icnss_wlfw_wlan_mac_req_send_sync(priv, priv->dms.mac,
807 						  ARRAY_SIZE(priv->dms.mac));
808 	return ret;
809 }
810 
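/* Look up the outbound SMP2P smem state for the given entry and cache it
 * in priv->smp2p_info[], retrying a few times before asserting on failure.
 */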
811 static void icnss_get_smp2p_info(struct icnss_priv *priv,
812 				 enum smp2p_out_entry smp2p_entry)
813 {
814 	int retry = 0;
815 	int error;
816 
817 	if (priv->smp2p_info[smp2p_entry].smem_state)
818 		return;
819 retry:
820 	priv->smp2p_info[smp2p_entry].smem_state =
821 		qcom_smem_state_get(&priv->pdev->dev,
822 				    icnss_smp2p_str[smp2p_entry],
823 				    &priv->smp2p_info[smp2p_entry].smem_bit);
824 	if (icnss_is_smp2p_valid(priv, smp2p_entry)) {
825 		if (retry++ < SMP2P_GET_MAX_RETRY) {
826 			error = PTR_ERR(priv->smp2p_info[smp2p_entry].smem_state);
827 			icnss_pr_err("Failed to get smem state, ret: %d Entry: %s",
828 				     error, icnss_smp2p_str[smp2p_entry]);
829 			msleep(SMP2P_GET_RETRY_DELAY_MS);
830 			goto retry;
831 		}
832 		ICNSS_ASSERT(0);
833 		return;
834 	}
835 
836 	icnss_pr_dbg("smem state, Entry: %s", icnss_smp2p_str[smp2p_entry]);
837 }
838 
839 static inline
840 void icnss_set_wlan_en_delay(struct icnss_priv *priv)
841 {
842 	if (priv->wlan_en_delay_ms_user > WLAN_EN_DELAY) {
843 		priv->wlan_en_delay_ms = priv->wlan_en_delay_ms_user;
844 	} else {
845 		priv->wlan_en_delay_ms = WLAN_EN_DELAY;
846 	}
847 }
848 
849 static enum wlfw_wlan_rf_subtype_v01 icnss_rf_subtype_value_to_type(u32 val)
850 {
851 	switch (val) {
852 	case WLAN_RF_SLATE:
853 		return WLFW_WLAN_RF_SLATE_V01;
854 	case WLAN_RF_APACHE:
855 		return WLFW_WLAN_RF_APACHE_V01;
856 	default:
857 		return WLFW_WLAN_RF_SUBTYPE_MAX_VAL_V01;
858 	}
859 }
860 
861 #ifdef CONFIG_SLATE_MODULE_ENABLED
862 static void icnss_send_wlan_boot_init(void)
863 {
864 	send_wlan_state(GMI_MGR_WLAN_BOOT_INIT);
865 	icnss_pr_info("sent wlan boot init command\n");
866 }
867 
868 static void icnss_send_wlan_boot_complete(void)
869 {
870 	send_wlan_state(GMI_MGR_WLAN_BOOT_COMPLETE);
871 	icnss_pr_info("sent wlan boot complete command\n");
872 }
873 
874 static int icnss_wait_for_slate_complete(struct icnss_priv *priv)
875 {
876 	if (!test_bit(ICNSS_SLATE_UP, &priv->state)) {
877 		reinit_completion(&priv->slate_boot_complete);
878 		icnss_pr_err("Waiting for slate boot up notification, 0x%lx\n",
879 			     priv->state);
880 		wait_for_completion(&priv->slate_boot_complete);
881 	}
882 
883 	if (!test_bit(ICNSS_SLATE_UP, &priv->state))
884 		return -EINVAL;
885 
886 	icnss_send_wlan_boot_init();
887 
888 	return 0;
889 }
890 #else
891 static void icnss_send_wlan_boot_complete(void)
892 {
893 }
894 
895 static int icnss_wait_for_slate_complete(struct icnss_priv *priv)
896 {
897 	return 0;
898 }
899 #endif
900 
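/* QMI server arrive handler: connect to the WLFW service and run the
 * initial bring-up sequence (indication registration, host capabilities,
 * MSA/device info, BDF/regdb download, SMP2P and error IRQ setup) based on
 * the device type.
 */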
901 static int icnss_driver_event_server_arrive(struct icnss_priv *priv,
902 						 void *data)
903 {
904 	int ret = 0;
905 	int temp = 0;
906 	bool ignore_assert = false;
907 	enum wlfw_wlan_rf_subtype_v01 rf_subtype;
908 
909 	if (!priv)
910 		return -ENODEV;
911 
912 	set_bit(ICNSS_WLFW_EXISTS, &priv->state);
913 	clear_bit(ICNSS_FW_DOWN, &priv->state);
914 	clear_bit(ICNSS_FW_READY, &priv->state);
915 
916 	if (priv->is_slate_rfa) {
917 		ret = icnss_wait_for_slate_complete(priv);
918 		if (ret == -EINVAL) {
919 			icnss_pr_err("Slate complete failed\n");
920 			return ret;
921 		}
922 	}
923 
924 	icnss_ignore_fw_timeout(false);
925 
926 	if (test_bit(ICNSS_WLFW_CONNECTED, &priv->state)) {
927 		icnss_pr_err("QMI Server already in Connected State\n");
928 		ICNSS_ASSERT(0);
929 	}
930 
931 	ret = icnss_connect_to_fw_server(priv, data);
932 	if (ret)
933 		goto fail;
934 
935 	set_bit(ICNSS_WLFW_CONNECTED, &priv->state);
936 
937 	if (priv->device_id == ADRASTEA_DEVICE_ID) {
938 		ret = icnss_hw_power_on(priv);
939 		if (ret)
940 			goto fail;
941 	}
942 
943 	ret = wlfw_ind_register_send_sync_msg(priv);
944 	if (ret < 0) {
945 		if (ret == -EALREADY) {
946 			ret = 0;
947 			goto qmi_registered;
948 		}
949 		ignore_assert = true;
950 		goto fail;
951 	}
952 
953 	if (priv->is_rf_subtype_valid) {
954 		rf_subtype = icnss_rf_subtype_value_to_type(priv->rf_subtype);
955 		if (rf_subtype != WLFW_WLAN_RF_SUBTYPE_MAX_VAL_V01) {
956 			ret = wlfw_wlan_hw_init_cfg_msg(priv, rf_subtype);
957 			if (ret < 0)
958 				icnss_pr_dbg("Sending rf_subtype failed ret %d\n",
959 					     ret);
960 		} else {
961 			icnss_pr_dbg("Invalid rf subtype %d in DT\n",
962 				     priv->rf_subtype);
963 		}
964 	}
965 
966 	if (priv->device_id == WCN6750_DEVICE_ID ||
967 	    priv->device_id == WCN6450_DEVICE_ID) {
968 		if (!icnss_get_temperature(priv, &temp)) {
969 			icnss_pr_dbg("Temperature: %d\n", temp);
970 			if (temp < WLAN_EN_TEMP_THRESHOLD)
971 				icnss_set_wlan_en_delay(priv);
972 		}
973 
974 		ret = wlfw_host_cap_send_sync(priv);
975 		if (ret < 0)
976 			goto fail;
977 	}
978 
979 	if (priv->device_id == ADRASTEA_DEVICE_ID) {
980 		if (!priv->msa_va) {
981 			icnss_pr_err("Invalid MSA address\n");
982 			ret = -EINVAL;
983 			goto fail;
984 		}
985 
986 		ret = wlfw_msa_mem_info_send_sync_msg(priv);
987 		if (ret < 0) {
988 			ignore_assert = true;
989 			goto fail;
990 		}
991 
992 		ret = wlfw_msa_ready_send_sync_msg(priv);
993 		if (ret < 0) {
994 			ignore_assert = true;
995 			goto fail;
996 		}
997 	}
998 
999 	if (priv->device_id == WCN6450_DEVICE_ID)
1000 		icnss_hw_power_off(priv);
1001 
1002 	ret = wlfw_cap_send_sync_msg(priv);
1003 	if (ret < 0) {
1004 		ignore_assert = true;
1005 		goto fail;
1006 	}
1007 
1008 	if (priv->device_id == ADRASTEA_DEVICE_ID && priv->is_chain1_supported) {
1009 		ret = icnss_power_on_chain1_reg(priv);
1010 		if (ret) {
1011 			ignore_assert = true;
1012 			goto fail;
1013 		}
1014 	}
1015 
1016 	if (priv->device_id == WCN6750_DEVICE_ID ||
1017 	    priv->device_id == WCN6450_DEVICE_ID) {
1018 		ret = icnss_hw_power_on(priv);
1019 		if (ret)
1020 			goto fail;
1021 
1022 		ret = wlfw_device_info_send_msg(priv);
1023 		if (ret < 0) {
1024 			ignore_assert = true;
1025 			goto  device_info_failure;
1026 		}
1027 
1028 		priv->mem_base_va = devm_ioremap(&priv->pdev->dev,
1029 						 priv->mem_base_pa,
1030 						 priv->mem_base_size);
1031 		if (!priv->mem_base_va) {
1032 			icnss_pr_err("Ioremap failed for bar address\n");
1033 			goto device_info_failure;
1034 		}
1035 
1036 		icnss_pr_dbg("Non-Secured Bar Address pa: %pa, va: 0x%pK\n",
1037 			     &priv->mem_base_pa,
1038 			     priv->mem_base_va);
1039 
1040 		if (priv->mhi_state_info_pa)
1041 			priv->mhi_state_info_va = devm_ioremap(&priv->pdev->dev,
1042 						priv->mhi_state_info_pa,
1043 						PAGE_SIZE);
1044 		if (!priv->mhi_state_info_va)
1045 			icnss_pr_err("Ioremap failed for MHI info address\n");
1046 
1047 		icnss_pr_dbg("MHI state info Address pa: %pa, va: 0x%pK\n",
1048 			     &priv->mhi_state_info_pa,
1049 			     priv->mhi_state_info_va);
1050 	}
1051 
1052 	if (priv->bdf_download_support) {
1053 		icnss_wlfw_bdf_dnld_send_sync(priv, ICNSS_BDF_REGDB);
1054 
1055 		ret = icnss_wlfw_bdf_dnld_send_sync(priv,
1056 						    priv->ctrl_params.bdf_type);
1057 		if (ret < 0)
1058 			goto device_info_failure;
1059 	}
1060 
1061 	if (priv->device_id == WCN6450_DEVICE_ID) {
1062 		ret = icnss_wlfw_qdss_dnld_send_sync(priv);
1063 		if (ret < 0)
1064 			icnss_pr_info("Failed to download qdss config file for WCN6450, ret = %d\n",
1065 				      ret);
1066 	}
1067 
1068 	if (priv->device_id == WCN6750_DEVICE_ID ||
1069 	    priv->device_id == WCN6450_DEVICE_ID) {
1070 		if (!priv->fw_soc_wake_ack_irq)
1071 			register_soc_wake_notif(&priv->pdev->dev);
1072 
1073 		icnss_get_smp2p_info(priv, ICNSS_SMP2P_OUT_SOC_WAKE);
1074 		icnss_get_smp2p_info(priv, ICNSS_SMP2P_OUT_EP_POWER_SAVE);
1075 	}
1076 
1077 	if (priv->wpss_supported)
1078 		icnss_get_smp2p_info(priv, ICNSS_SMP2P_OUT_POWER_SAVE);
1079 
1080 	if (priv->device_id == ADRASTEA_DEVICE_ID) {
1081 		if (priv->bdf_download_support) {
1082 			ret = wlfw_cal_report_req(priv);
1083 			if (ret < 0)
1084 				goto device_info_failure;
1085 		}
1086 
1087 		wlfw_dynamic_feature_mask_send_sync_msg(priv,
1088 							dynamic_feature_mask);
1089 	}
1090 
1091 	if (!priv->fw_error_fatal_irq)
1092 		register_fw_error_notifications(&priv->pdev->dev);
1093 
1094 	if (!priv->fw_early_crash_irq)
1095 		register_early_crash_notifications(&priv->pdev->dev);
1096 
1097 	if (priv->psf_supported)
1098 		queue_work(priv->soc_update_wq, &priv->soc_update_work);
1099 
1100 	return ret;
1101 
1102 device_info_failure:
1103 	icnss_hw_power_off(priv);
1104 fail:
1105 	ICNSS_ASSERT(ignore_assert);
1106 qmi_registered:
1107 	return ret;
1108 }
1109 
1110 static int icnss_driver_event_server_exit(struct icnss_priv *priv)
1111 {
1112 	if (!priv)
1113 		return -ENODEV;
1114 
1115 	icnss_pr_info("WLAN FW Service Disconnected: 0x%lx\n", priv->state);
1116 
1117 	icnss_clear_server(priv);
1118 
1119 	if (priv->psf_supported)
1120 		priv->last_updated_voltage = 0;
1121 
1122 	return 0;
1123 }
1124 
1125 static int icnss_call_driver_probe(struct icnss_priv *priv)
1126 {
1127 	int ret = 0;
1128 	int probe_cnt = 0;
1129 
1130 	if (!priv->ops || !priv->ops->probe)
1131 		return 0;
1132 
1133 	if (test_bit(ICNSS_DRIVER_PROBED, &priv->state))
1134 		return -EINVAL;
1135 
1136 	icnss_pr_dbg("Calling driver probe state: 0x%lx\n", priv->state);
1137 
1138 	icnss_hw_power_on(priv);
1139 
1140 	icnss_block_shutdown(true);
1141 	while (probe_cnt < ICNSS_MAX_PROBE_CNT) {
1142 		ret = priv->ops->probe(&priv->pdev->dev);
1143 		probe_cnt++;
1144 		if (ret != -EPROBE_DEFER)
1145 			break;
1146 	}
1147 	if (ret < 0) {
1148 		icnss_pr_err("Driver probe failed: %d, state: 0x%lx, probe_cnt: %d\n",
1149 			     ret, priv->state, probe_cnt);
1150 		icnss_block_shutdown(false);
1151 		goto out;
1152 	}
1153 
1154 	icnss_block_shutdown(false);
1155 	set_bit(ICNSS_DRIVER_PROBED, &priv->state);
1156 
1157 	return 0;
1158 
1159 out:
1160 	icnss_hw_power_off(priv);
1161 	return ret;
1162 }
1163 
1164 static int icnss_call_driver_shutdown(struct icnss_priv *priv)
1165 {
1166 	if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
1167 		goto out;
1168 
1169 	if (!priv->ops || !priv->ops->shutdown)
1170 		goto out;
1171 
1172 	if (test_bit(ICNSS_SHUTDOWN_DONE, &priv->state))
1173 		goto out;
1174 
1175 	icnss_pr_dbg("Calling driver shutdown state: 0x%lx\n", priv->state);
1176 
1177 	priv->ops->shutdown(&priv->pdev->dev);
1178 	set_bit(ICNSS_SHUTDOWN_DONE, &priv->state);
1179 
1180 out:
1181 	return 0;
1182 }
1183 
1184 static int icnss_pd_restart_complete(struct icnss_priv *priv)
1185 {
1186 	int ret = 0;
1187 
1188 	icnss_pm_relax(priv);
1189 
1190 	icnss_call_driver_shutdown(priv);
1191 
1192 	clear_bit(ICNSS_PDR, &priv->state);
1193 	clear_bit(ICNSS_REJUVENATE, &priv->state);
1194 	clear_bit(ICNSS_PD_RESTART, &priv->state);
1195 	clear_bit(ICNSS_LOW_POWER, &priv->state);
1196 	priv->early_crash_ind = false;
1197 	priv->is_ssr = false;
1198 
1199 	if (!priv->ops || !priv->ops->reinit)
1200 		goto out;
1201 
1202 	if (test_bit(ICNSS_FW_DOWN, &priv->state)) {
1203 		icnss_pr_err("FW is in bad state, state: 0x%lx\n",
1204 			     priv->state);
1205 		goto out;
1206 	}
1207 
1208 	if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
1209 		goto call_probe;
1210 
1211 	icnss_pr_dbg("Calling driver reinit state: 0x%lx\n", priv->state);
1212 
1213 	icnss_hw_power_on(priv);
1214 
1215 	icnss_block_shutdown(true);
1216 
1217 	ret = priv->ops->reinit(&priv->pdev->dev);
1218 	if (ret < 0) {
1219 		icnss_fatal_err("Driver reinit failed: %d, state: 0x%lx\n",
1220 				ret, priv->state);
1221 		if (!priv->allow_recursive_recovery)
1222 			ICNSS_ASSERT(false);
1223 		icnss_block_shutdown(false);
1224 		goto out_power_off;
1225 	}
1226 
1227 	icnss_block_shutdown(false);
1228 	clear_bit(ICNSS_SHUTDOWN_DONE, &priv->state);
1229 	return 0;
1230 
1231 call_probe:
1232 	return icnss_call_driver_probe(priv);
1233 
1234 out_power_off:
1235 	icnss_hw_power_off(priv);
1236 
1237 out:
1238 	return ret;
1239 }
1240 
1241 
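/* FW ready indication handler: stop the recovery timer, mark FW as ready
 * and either complete PD restart or probe the registered WLAN driver.
 */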
1242 static int icnss_driver_event_fw_ready_ind(struct icnss_priv *priv, void *data)
1243 {
1244 	int ret = 0;
1245 
1246 	if (!priv)
1247 		return -ENODEV;
1248 
1249 	del_timer(&priv->recovery_timer);
1250 	set_bit(ICNSS_FW_READY, &priv->state);
1251 	clear_bit(ICNSS_MODE_ON, &priv->state);
1252 	atomic_set(&priv->soc_wake_ref_count, 0);
1253 
1254 	if (priv->device_id == WCN6750_DEVICE_ID ||
1255 	    priv->device_id == WCN6450_DEVICE_ID)
1256 		icnss_free_qdss_mem(priv);
1257 
1258 	icnss_pr_info("WLAN FW is ready: 0x%lx\n", priv->state);
1259 
1260 	icnss_hw_power_off(priv);
1261 
1262 	if (!priv->pdev) {
1263 		icnss_pr_err("Device is not ready\n");
1264 		ret = -ENODEV;
1265 		goto out;
1266 	}
1267 
1268 	if (priv->is_slate_rfa && test_bit(ICNSS_SLATE_UP, &priv->state))
1269 		icnss_send_wlan_boot_complete();
1270 
1271 	if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
1272 		ret = icnss_pd_restart_complete(priv);
1273 	} else {
1274 		if (priv->wpss_supported)
1275 			icnss_setup_dms_mac(priv);
1276 		ret = icnss_call_driver_probe(priv);
1277 	}
1278 
1279 	icnss_vreg_unvote(priv);
1280 
1281 out:
1282 	return ret;
1283 }
1284 
1285 static int icnss_driver_event_fw_init_done(struct icnss_priv *priv, void *data)
1286 {
1287 	int ret = 0;
1288 
1289 	if (!priv)
1290 		return -ENODEV;
1291 
1292 	icnss_pr_info("WLAN FW Initialization done: 0x%lx\n", priv->state);
1293 
1294 	if (priv->device_id == WCN6750_DEVICE_ID) {
1295 		ret = icnss_wlfw_qdss_dnld_send_sync(priv);
1296 		if (ret < 0)
1297 			icnss_pr_info("Failed to download qdss config file for WCN6750, ret = %d\n",
1298 				      ret);
1299 	}
1300 
1301 	if (test_bit(ICNSS_COLD_BOOT_CAL, &priv->state)) {
1302 		mod_timer(&priv->recovery_timer,
1303 			  jiffies + msecs_to_jiffies(ICNSS_CAL_TIMEOUT));
1304 		ret = wlfw_wlan_mode_send_sync_msg(priv,
1305 			(enum wlfw_driver_mode_enum_v01)ICNSS_CALIBRATION);
1306 	} else {
1307 		icnss_driver_event_fw_ready_ind(priv, NULL);
1308 	}
1309 
1310 	return ret;
1311 }
1312 
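/* Allocate DMA-coherent buffers for the QDSS trace segments requested by
 * FW. Allocation is best effort: segments that cannot be allocated are
 * dropped and qdss_mem_seg_len is trimmed accordingly.
 */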
1313 int icnss_alloc_qdss_mem(struct icnss_priv *priv)
1314 {
1315 	struct platform_device *pdev = priv->pdev;
1316 	struct icnss_fw_mem *qdss_mem = priv->qdss_mem;
1317 	int i, j;
1318 
1319 	for (i = 0; i < priv->qdss_mem_seg_len; i++) {
1320 		if (!qdss_mem[i].va && qdss_mem[i].size) {
1321 			qdss_mem[i].va =
1322 				dma_alloc_coherent(&pdev->dev,
1323 						   qdss_mem[i].size,
1324 						   &qdss_mem[i].pa,
1325 						   GFP_KERNEL);
1326 			if (!qdss_mem[i].va) {
1327 				icnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chunk-ID: %d\n",
1328 					     qdss_mem[i].size,
1329 					     qdss_mem[i].type, i);
1330 				break;
1331 			}
1332 		}
1333 	}
1334 
1335 	/* Best-effort allocation for QDSS trace */
1336 	if (i < priv->qdss_mem_seg_len) {
1337 		for (j = i; j < priv->qdss_mem_seg_len; j++) {
1338 			qdss_mem[j].type = 0;
1339 			qdss_mem[j].size = 0;
1340 		}
1341 		priv->qdss_mem_seg_len = i;
1342 	}
1343 
1344 	return 0;
1345 }
1346 
1347 void icnss_free_qdss_mem(struct icnss_priv *priv)
1348 {
1349 	struct platform_device *pdev = priv->pdev;
1350 	struct icnss_fw_mem *qdss_mem = priv->qdss_mem;
1351 	int i;
1352 
1353 	for (i = 0; i < priv->qdss_mem_seg_len; i++) {
1354 		if (qdss_mem[i].va && qdss_mem[i].size) {
1355 			icnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n",
1356 				     &qdss_mem[i].pa, qdss_mem[i].size,
1357 				     qdss_mem[i].type);
1358 			dma_free_coherent(&pdev->dev,
1359 					  qdss_mem[i].size, qdss_mem[i].va,
1360 					  qdss_mem[i].pa);
1361 			qdss_mem[i].va = NULL;
1362 			qdss_mem[i].pa = 0;
1363 			qdss_mem[i].size = 0;
1364 			qdss_mem[i].type = 0;
1365 		}
1366 	}
1367 	priv->qdss_mem_seg_len = 0;
1368 }
1369 
1370 static int icnss_qdss_trace_req_mem_hdlr(struct icnss_priv *priv)
1371 {
1372 	int ret = 0;
1373 
1374 	ret = icnss_alloc_qdss_mem(priv);
1375 	if (ret < 0)
1376 		return ret;
1377 
1378 	return wlfw_qdss_trace_mem_info_send_sync(priv);
1379 }
1380 
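/* Translate a physical address reported by FW into the kernel virtual
 * address within the allocated QDSS segments, returning the matching
 * segment index through *seg_id; NULL if no segment contains the range.
 */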
1381 static void *icnss_qdss_trace_pa_to_va(struct icnss_priv *priv,
1382 				       u64 pa, u32 size, int *seg_id)
1383 {
1384 	int i = 0;
1385 	struct icnss_fw_mem *qdss_mem = priv->qdss_mem;
1386 	u64 offset = 0;
1387 	void *va = NULL;
1388 	u64 local_pa;
1389 	u32 local_size;
1390 
1391 	for (i = 0; i < priv->qdss_mem_seg_len; i++) {
1392 		local_pa = (u64)qdss_mem[i].pa;
1393 		local_size = (u32)qdss_mem[i].size;
1394 		if (pa == local_pa && size <= local_size) {
1395 			va = qdss_mem[i].va;
1396 			break;
1397 		}
1398 		if (pa > local_pa &&
1399 		    pa < local_pa + local_size &&
1400 		    pa + size <= local_pa + local_size) {
1401 			offset = pa - local_pa;
1402 			va = qdss_mem[i].va + offset;
1403 			break;
1404 		}
1405 	}
1406 
1407 	*seg_id = i;
1408 	return va;
1409 }
1410 
1411 static int icnss_qdss_trace_save_hdlr(struct icnss_priv *priv,
1412 				      void *data)
1413 {
1414 	struct icnss_qmi_event_qdss_trace_save_data *event_data = data;
1415 	struct icnss_fw_mem *qdss_mem = priv->qdss_mem;
1416 	int ret = 0;
1417 	int i;
1418 	void *va = NULL;
1419 	u64 pa;
1420 	u32 size;
1421 	int seg_id = 0;
1422 
1423 	if (!priv->qdss_mem_seg_len) {
1424 		icnss_pr_err("Memory for QDSS trace is not available\n");
1425 		return -ENOMEM;
1426 	}
1427 
1428 	if (event_data->mem_seg_len == 0) {
1429 		for (i = 0; i < priv->qdss_mem_seg_len; i++) {
1430 			ret = icnss_genl_send_msg(qdss_mem[i].va,
1431 						  ICNSS_GENL_MSG_TYPE_QDSS,
1432 						  event_data->file_name,
1433 						  qdss_mem[i].size);
1434 			if (ret < 0) {
1435 				icnss_pr_err("Fail to save QDSS data: %d\n",
1436 					     ret);
1437 				break;
1438 			}
1439 		}
1440 	} else {
1441 		for (i = 0; i < event_data->mem_seg_len; i++) {
1442 			pa = event_data->mem_seg[i].addr;
1443 			size = event_data->mem_seg[i].size;
1444 			va = icnss_qdss_trace_pa_to_va(priv, pa,
1445 						       size, &seg_id);
1446 			if (!va) {
1447 				icnss_pr_err("Fail to find matching va for pa %pa\n",
1448 					     &pa);
1449 				ret = -EINVAL;
1450 				break;
1451 			}
1452 			ret = icnss_genl_send_msg(va, ICNSS_GENL_MSG_TYPE_QDSS,
1453 						  event_data->file_name, size);
1454 			if (ret < 0) {
1455 				icnss_pr_err("Fail to save QDSS data: %d\n",
1456 					     ret);
1457 				break;
1458 			}
1459 		}
1460 	}
1461 
1462 	kfree(data);
1463 	return ret;
1464 }
1465 
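/* Atomically decrement *v only while the result stays at or above one;
 * returns the computed (possibly not stored) decremented value.
 */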
1466 static inline int icnss_atomic_dec_if_greater_one(atomic_t *v)
1467 {
1468 	int dec, c = atomic_read(v);
1469 
1470 	do {
1471 		dec = c - 1;
1472 		if (unlikely(dec < 1))
1473 			break;
1474 	} while (!atomic_try_cmpxchg(v, &c, dec));
1475 
1476 	return dec;
1477 }
1478 
1479 static int icnss_qdss_trace_req_data_hdlr(struct icnss_priv *priv,
1480 					  void *data)
1481 {
1482 	int ret = 0;
1483 	struct icnss_qmi_event_qdss_trace_save_data *event_data = data;
1484 
1485 	if (!priv)
1486 		return -ENODEV;
1487 
1488 	if (!data)
1489 		return -EINVAL;
1490 
1491 	ret = icnss_wlfw_qdss_data_send_sync(priv, event_data->file_name,
1492 					     event_data->total_size);
1493 
1494 	kfree(data);
1495 	return ret;
1496 }
1497 
1498 static int icnss_event_soc_wake_request(struct icnss_priv *priv, void *data)
1499 {
1500 	int ret = 0;
1501 
1502 	if (!priv)
1503 		return -ENODEV;
1504 
1505 	if (atomic_inc_not_zero(&priv->soc_wake_ref_count)) {
1506 		icnss_pr_soc_wake("SOC awake after posting work, Ref count: %d",
1507 				  atomic_read(&priv->soc_wake_ref_count));
1508 		return 0;
1509 	}
1510 
1511 	ret = icnss_send_smp2p(priv, ICNSS_SOC_WAKE_REQ,
1512 			       ICNSS_SMP2P_OUT_SOC_WAKE);
1513 	if (!ret)
1514 		atomic_inc(&priv->soc_wake_ref_count);
1515 
1516 	return ret;
1517 }
1518 
1519 static int icnss_event_soc_wake_release(struct icnss_priv *priv, void *data)
1520 {
1521 	int ret = 0;
1522 
1523 	if (!priv)
1524 		return -ENODEV;
1525 
1526 	if (atomic_dec_if_positive(&priv->soc_wake_ref_count)) {
1527 		icnss_pr_soc_wake("Wake release not called. Ref count: %d",
1528 				  atomic_read(&priv->soc_wake_ref_count));
1529 		return 0;
1530 	}
1531 
1532 	ret = icnss_send_smp2p(priv, ICNSS_SOC_WAKE_REL,
1533 			       ICNSS_SMP2P_OUT_SOC_WAKE);
1534 	return ret;
1535 }
1536 
1537 static int icnss_driver_event_register_driver(struct icnss_priv *priv,
1538 							 void *data)
1539 {
1540 	int ret = 0;
1541 	int probe_cnt = 0;
1542 
1543 	if (priv->ops)
1544 		return -EEXIST;
1545 
1546 	priv->ops = data;
1547 
1548 	if (test_bit(SKIP_QMI, &priv->ctrl_params.quirks))
1549 		set_bit(ICNSS_FW_READY, &priv->state);
1550 
1551 	if (test_bit(ICNSS_FW_DOWN, &priv->state)) {
1552 		icnss_pr_err("FW is in bad state, state: 0x%lx\n",
1553 			     priv->state);
1554 		return -ENODEV;
1555 	}
1556 
1557 	if (!test_bit(ICNSS_FW_READY, &priv->state)) {
1558 		icnss_pr_dbg("FW is not ready yet, state: 0x%lx\n",
1559 			     priv->state);
1560 		goto out;
1561 	}
1562 
1563 	ret = icnss_hw_power_on(priv);
1564 	if (ret)
1565 		goto out;
1566 
1567 	icnss_block_shutdown(true);
1568 	while (probe_cnt < ICNSS_MAX_PROBE_CNT) {
1569 		ret = priv->ops->probe(&priv->pdev->dev);
1570 		probe_cnt++;
1571 		if (ret != -EPROBE_DEFER)
1572 			break;
1573 	}
1574 	if (ret) {
1575 		icnss_pr_err("Driver probe failed: %d, state: 0x%lx, probe_cnt: %d\n",
1576 			     ret, priv->state, probe_cnt);
1577 		icnss_block_shutdown(false);
1578 		goto power_off;
1579 	}
1580 
1581 	icnss_block_shutdown(false);
1582 	set_bit(ICNSS_DRIVER_PROBED, &priv->state);
1583 
1584 	return 0;
1585 
1586 power_off:
1587 	icnss_hw_power_off(priv);
1588 out:
1589 	return ret;
1590 }
1591 
1592 static int icnss_driver_event_unregister_driver(struct icnss_priv *priv,
1593 							 void *data)
1594 {
1595 	if (!test_bit(ICNSS_DRIVER_PROBED, &priv->state)) {
1596 		priv->ops = NULL;
1597 		goto out;
1598 	}
1599 
1600 	set_bit(ICNSS_DRIVER_UNLOADING, &priv->state);
1601 
1602 	icnss_block_shutdown(true);
1603 
1604 	if (priv->ops)
1605 		priv->ops->remove(&priv->pdev->dev);
1606 
1607 	icnss_block_shutdown(false);
1608 
1609 	clear_bit(ICNSS_DRIVER_UNLOADING, &priv->state);
1610 	clear_bit(ICNSS_DRIVER_PROBED, &priv->state);
1611 
1612 	priv->ops = NULL;
1613 
1614 	icnss_hw_power_off(priv);
1615 
1616 out:
1617 	return 0;
1618 }
1619 
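/* Common FW crash handling: enter PD_RESTART, hold a wakeup source, send
 * the ICNSS_UEVENT_FW_DOWN uevent to the registered driver and ack a
 * rejuvenation request when the event data indicates one.
 */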
1620 static int icnss_fw_crashed(struct icnss_priv *priv,
1621 			    struct icnss_event_pd_service_down_data *event_data)
1622 {
1623 	struct icnss_uevent_fw_down_data fw_down_data = {0};
1624 
1625 	icnss_pr_dbg("FW crashed, state: 0x%lx\n", priv->state);
1626 
1627 	set_bit(ICNSS_PD_RESTART, &priv->state);
1628 
1629 	icnss_pm_stay_awake(priv);
1630 
1631 	if (test_bit(ICNSS_DRIVER_PROBED, &priv->state) &&
1632 	    test_bit(ICNSS_FW_READY, &priv->state)) {
1633 		clear_bit(ICNSS_FW_READY, &priv->state);
1634 		fw_down_data.crashed = true;
1635 		icnss_call_driver_uevent(priv,
1636 					 ICNSS_UEVENT_FW_DOWN,
1637 					 &fw_down_data);
1638 	}
1639 
1640 	if (event_data && event_data->fw_rejuvenate)
1641 		wlfw_rejuvenate_ack_send_sync_msg(priv);
1642 
1643 	return 0;
1644 }
1645 
1646 int icnss_update_hang_event_data(struct icnss_priv *priv,
1647 				 struct icnss_uevent_hang_data *hang_data)
1648 {
1649 	if (!priv->hang_event_data_va)
1650 		return -EINVAL;
1651 
1652 	priv->hang_event_data = kmemdup(priv->hang_event_data_va,
1653 					priv->hang_event_data_len,
1654 					GFP_ATOMIC);
1655 	if (!priv->hang_event_data)
1656 		return -ENOMEM;
1657 
1658 	// Update the hang event params
1659 	hang_data->hang_event_data = priv->hang_event_data;
1660 	hang_data->hang_event_data_len = priv->hang_event_data_len;
1661 
1662 	return 0;
1663 }
1664 
1665 int icnss_send_hang_event_data(struct icnss_priv *priv)
1666 {
1667 	struct icnss_uevent_hang_data hang_data = {0};
1668 	int ret = 0xFF;
1669 
1670 	if (priv->early_crash_ind) {
1671 		ret = icnss_update_hang_event_data(priv, &hang_data);
1672 		if (ret)
1673 			icnss_pr_err("Unable to allocate memory for Hang event data\n");
1674 	}
1675 	icnss_call_driver_uevent(priv, ICNSS_UEVENT_HANG_DATA,
1676 				 &hang_data);
1677 
1678 	if (!ret) {
1679 		kfree(priv->hang_event_data);
1680 		priv->hang_event_data = NULL;
1681 	}
1682 
1683 	return 0;
1684 }
1685 
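/* PD service down handler: send reset over SMP2P, report hang event data
 * and, unless an early crash indication was already processed, run the FW
 * crash path.
 */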
1686 static int icnss_driver_event_pd_service_down(struct icnss_priv *priv,
1687 					      void *data)
1688 {
1689 	struct icnss_event_pd_service_down_data *event_data = data;
1690 
1691 	if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state)) {
1692 		icnss_ignore_fw_timeout(false);
1693 		goto out;
1694 	}
1695 
1696 	if (priv->force_err_fatal)
1697 		ICNSS_ASSERT(0);
1698 
1699 	if (priv->device_id == WCN6750_DEVICE_ID ||
1700 	    priv->device_id == WCN6450_DEVICE_ID) {
1701 		icnss_send_smp2p(priv, ICNSS_RESET_MSG,
1702 				 ICNSS_SMP2P_OUT_SOC_WAKE);
1703 		icnss_send_smp2p(priv, ICNSS_RESET_MSG,
1704 				 ICNSS_SMP2P_OUT_EP_POWER_SAVE);
1705 	}
1706 
1707 	if (priv->wpss_supported)
1708 		icnss_send_smp2p(priv, ICNSS_RESET_MSG,
1709 				 ICNSS_SMP2P_OUT_POWER_SAVE);
1710 
1711 	icnss_send_hang_event_data(priv);
1712 
1713 	if (priv->early_crash_ind) {
1714 		icnss_pr_dbg("PD Down ignored as early indication is processed: %d, state: 0x%lx\n",
1715 			     event_data->crashed, priv->state);
1716 		goto out;
1717 	}
1718 
1719 	if (test_bit(ICNSS_PD_RESTART, &priv->state) && event_data->crashed) {
1720 		icnss_fatal_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n",
1721 				event_data->crashed, priv->state);
1722 		if (!priv->allow_recursive_recovery)
1723 			ICNSS_ASSERT(0);
1724 		goto out;
1725 	}
1726 
1727 	if (!test_bit(ICNSS_PD_RESTART, &priv->state))
1728 		icnss_fw_crashed(priv, event_data);
1729 
1730 out:
1731 	kfree(data);
1732 
1733 	return 0;
1734 }
1735 
1736 static int icnss_driver_event_early_crash_ind(struct icnss_priv *priv,
1737 					      void *data)
1738 {
1739 	if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state)) {
1740 		icnss_ignore_fw_timeout(false);
1741 		goto out;
1742 	}
1743 
1744 	priv->early_crash_ind = true;
1745 	icnss_fw_crashed(priv, NULL);
1746 
1747 out:
1748 	kfree(data);
1749 
1750 	return 0;
1751 }
1752 
1753 static int icnss_driver_event_idle_shutdown(struct icnss_priv *priv,
1754 					    void *data)
1755 {
1756 	int ret = 0;
1757 
1758 	if (!priv->ops || !priv->ops->idle_shutdown)
1759 		return 0;
1760 
1761 	if (priv->is_ssr || test_bit(ICNSS_PDR, &priv->state) ||
1762 	    test_bit(ICNSS_REJUVENATE, &priv->state)) {
1763 		icnss_pr_err("SSR/PDR is already in-progress during idle shutdown callback\n");
1764 		ret = -EBUSY;
1765 	} else {
1766 		icnss_pr_dbg("Calling driver idle shutdown, state: 0x%lx\n",
1767 								priv->state);
1768 		icnss_block_shutdown(true);
1769 		ret = priv->ops->idle_shutdown(&priv->pdev->dev);
1770 		icnss_block_shutdown(false);
1771 	}
1772 
1773 	return ret;
1774 }
1775 
1776 static int icnss_driver_event_idle_restart(struct icnss_priv *priv,
1777 					   void *data)
1778 {
1779 	int ret = 0;
1780 
1781 	if (!priv->ops || !priv->ops->idle_restart)
1782 		return 0;
1783 
1784 	if (priv->is_ssr || test_bit(ICNSS_PDR, &priv->state) ||
1785 	    test_bit(ICNSS_REJUVENATE, &priv->state)) {
1786 		icnss_pr_err("SSR/PDR is already in-progress during idle restart callback\n");
1787 		ret = -EBUSY;
1788 	} else {
1789 		icnss_pr_dbg("Calling driver idle restart, state: 0x%lx\n",
1790 								priv->state);
1791 		icnss_block_shutdown(true);
1792 		ret = priv->ops->idle_restart(&priv->pdev->dev);
1793 		icnss_block_shutdown(false);
1794 	}
1795 
1796 	return ret;
1797 }
1798 
1799 static int icnss_qdss_trace_free_hdlr(struct icnss_priv *priv)
1800 {
1801 	icnss_free_qdss_mem(priv);
1802 
1803 	return 0;
1804 }
1805 
1806 static int icnss_m3_dump_upload_req_hdlr(struct icnss_priv *priv,
1807 					 void *data)
1808 {
1809 	struct icnss_m3_upload_segments_req_data *event_data = data;
1810 	struct qcom_dump_segment segment;
1811 	int i, status = 0, ret = 0;
1812 	struct list_head head;
1813 
1814 	if (!dump_enabled()) {
1815 		icnss_pr_info("Dump collection is not enabled\n");
1816 		return ret;
1817 	}
1818 
1819 	if (IS_ERR_OR_NULL(priv->m3_dump_phyareg) ||
1820 	    IS_ERR_OR_NULL(priv->m3_dump_phydbg) ||
1821 	    IS_ERR_OR_NULL(priv->m3_dump_wmac0reg) ||
1822 	    IS_ERR_OR_NULL(priv->m3_dump_wcssdbg) ||
1823 	    IS_ERR_OR_NULL(priv->m3_dump_phyapdmem))
1824 		return ret;
1825 
1826 	INIT_LIST_HEAD(&head);
1827 
1828 	for (i = 0; i < event_data->no_of_valid_segments; i++) {
1829 		memset(&segment, 0, sizeof(segment));
1830 
1831 		segment.va = devm_ioremap(&priv->pdev->dev,
1832 					  event_data->m3_segment[i].addr,
1833 					  event_data->m3_segment[i].size);
1834 		if (!segment.va) {
1835 			icnss_pr_err("Failed to ioremap M3 Dump region");
1836 			ret = -ENOMEM;
1837 			goto send_resp;
1838 		}
1839 
1840 		segment.size = event_data->m3_segment[i].size;
1841 
1842 		list_add(&segment.node, &head);
1843 		icnss_pr_dbg("Started Dump collection for %s segment",
1844 			     event_data->m3_segment[i].name);
1845 
1846 		switch (event_data->m3_segment[i].type) {
1847 		case QMI_M3_SEGMENT_PHYAREG_V01:
1848 			ret = qcom_dump(&head, priv->m3_dump_phyareg->dev);
1849 			break;
1850 		case QMI_M3_SEGMENT_PHYDBG_V01:
1851 			ret = qcom_dump(&head, priv->m3_dump_phydbg->dev);
1852 			break;
1853 		case QMI_M3_SEGMENT_WMAC0_REG_V01:
1854 			ret = qcom_dump(&head, priv->m3_dump_wmac0reg->dev);
1855 			break;
1856 		case QMI_M3_SEGMENT_WCSSDBG_V01:
1857 			ret = qcom_dump(&head, priv->m3_dump_wcssdbg->dev);
1858 			break;
1859 		case QMI_M3_SEGMENT_PHYAPDMEM_V01:
1860 			ret = qcom_dump(&head, priv->m3_dump_phyapdmem->dev);
1861 			break;
1862 		default:
1863 			icnss_pr_err("Invalid Segment type: %d",
1864 				     event_data->m3_segment[i].type);
1865 		}
1866 
1867 		if (ret) {
1868 			status = ret;
1869 			icnss_pr_err("Failed to dump m3 %s segment, err = %d\n",
1870 				     event_data->m3_segment[i].name, ret);
1871 		}
1872 		list_del(&segment.node);
1873 	}
1874 send_resp:
1875 	icnss_wlfw_m3_dump_upload_done_send_sync(priv, event_data->pdev_id,
1876 						 status);
1877 
1878 	return ret;
1879 }
1880 
1881 static int icnss_subsys_restart_level(struct icnss_priv *priv, void *data)
1882 {
1883 	int ret = 0;
1884 	struct icnss_subsys_restart_level_data *event_data = data;
1885 
1886 	if (!priv)
1887 		return -ENODEV;
1888 
1889 	if (!data)
1890 		return -EINVAL;
1891 
1892 	ret = wlfw_subsys_restart_level_msg(priv, event_data->restart_level);
1893 
1894 	kfree(data);
1895 
1896 	return ret;
1897 }
1898 
1899 static void icnss_wpss_self_recovery(struct work_struct *wpss_load_work)
1900 {
1901 	int ret;
1902 	struct icnss_priv *priv = icnss_get_plat_priv();
1903 
1904 	rproc_shutdown(priv->rproc);
1905 	ret = rproc_boot(priv->rproc);
1906 	if (ret) {
1907 		icnss_pr_err("Failed to self recover wpss rproc, ret: %d", ret);
1908 		rproc_put(priv->rproc);
1909 	}
1910 }
1911 
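/*
 * Worker that drains the driver event list: events are dequeued under
 * event_lock, dispatched to their handlers with the lock dropped, and
 * either completed (sync events) or freed once processed. A wakeup
 * source is held for the duration of the drain.
 */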
1912 static void icnss_driver_event_work(struct work_struct *work)
1913 {
1914 	struct icnss_priv *priv =
1915 		container_of(work, struct icnss_priv, event_work);
1916 	struct icnss_driver_event *event;
1917 	unsigned long flags;
1918 	int ret;
1919 
1920 	icnss_pm_stay_awake(priv);
1921 
1922 	spin_lock_irqsave(&priv->event_lock, flags);
1923 
1924 	while (!list_empty(&priv->event_list)) {
1925 		event = list_first_entry(&priv->event_list,
1926 					 struct icnss_driver_event, list);
1927 		list_del(&event->list);
1928 		spin_unlock_irqrestore(&priv->event_lock, flags);
1929 
1930 		icnss_pr_dbg("Processing event: %s%s(%d), state: 0x%lx\n",
1931 			     icnss_driver_event_to_str(event->type),
1932 			     event->sync ? "-sync" : "", event->type,
1933 			     priv->state);
1934 
1935 		switch (event->type) {
1936 		case ICNSS_DRIVER_EVENT_SERVER_ARRIVE:
1937 			ret = icnss_driver_event_server_arrive(priv,
1938 								 event->data);
1939 			break;
1940 		case ICNSS_DRIVER_EVENT_SERVER_EXIT:
1941 			ret = icnss_driver_event_server_exit(priv);
1942 			break;
1943 		case ICNSS_DRIVER_EVENT_FW_READY_IND:
1944 			ret = icnss_driver_event_fw_ready_ind(priv,
1945 								 event->data);
1946 			break;
1947 		case ICNSS_DRIVER_EVENT_REGISTER_DRIVER:
1948 			ret = icnss_driver_event_register_driver(priv,
1949 								 event->data);
1950 			break;
1951 		case ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
1952 			ret = icnss_driver_event_unregister_driver(priv,
1953 								   event->data);
1954 			break;
1955 		case ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN:
1956 			ret = icnss_driver_event_pd_service_down(priv,
1957 								 event->data);
1958 			break;
1959 		case ICNSS_DRIVER_EVENT_FW_EARLY_CRASH_IND:
1960 			ret = icnss_driver_event_early_crash_ind(priv,
1961 								 event->data);
1962 			break;
1963 		case ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
1964 			ret = icnss_driver_event_idle_shutdown(priv,
1965 							       event->data);
1966 			break;
1967 		case ICNSS_DRIVER_EVENT_IDLE_RESTART:
1968 			ret = icnss_driver_event_idle_restart(priv,
1969 							      event->data);
1970 			break;
1971 		case ICNSS_DRIVER_EVENT_FW_INIT_DONE_IND:
1972 			ret = icnss_driver_event_fw_init_done(priv,
1973 							      event->data);
1974 			break;
1975 		case ICNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
1976 			ret = icnss_qdss_trace_req_mem_hdlr(priv);
1977 			break;
1978 		case ICNSS_DRIVER_EVENT_QDSS_TRACE_SAVE:
1979 			ret = icnss_qdss_trace_save_hdlr(priv,
1980 							 event->data);
1981 			break;
1982 		case ICNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
1983 			ret = icnss_qdss_trace_free_hdlr(priv);
1984 			break;
1985 		case ICNSS_DRIVER_EVENT_M3_DUMP_UPLOAD_REQ:
1986 			ret = icnss_m3_dump_upload_req_hdlr(priv, event->data);
1987 			break;
1988 		case ICNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA:
1989 			ret = icnss_qdss_trace_req_data_hdlr(priv,
1990 							     event->data);
1991 			break;
1992 		case ICNSS_DRIVER_EVENT_SUBSYS_RESTART_LEVEL:
1993 			ret = icnss_subsys_restart_level(priv, event->data);
1994 			break;
1995 		case ICNSS_DRIVER_EVENT_IMS_WFC_CALL_IND:
1996 			ret = icnss_process_wfc_call_ind_event(priv,
1997 							      event->data);
1998 			break;
1999 		case ICNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND:
2000 			ret = icnss_process_twt_cfg_ind_event(priv,
2001 							     event->data);
2002 			break;
2003 		default:
2004 			icnss_pr_err("Invalid Event type: %d", event->type);
2005 			kfree(event);
2006 			continue;
2007 		}
2008 
2009 		priv->stats.events[event->type].processed++;
2010 
2011 		icnss_pr_dbg("Event Processed: %s%s(%d), ret: %d, state: 0x%lx\n",
2012 			     icnss_driver_event_to_str(event->type),
2013 			     event->sync ? "-sync" : "", event->type, ret,
2014 			     priv->state);
2015 
2016 		spin_lock_irqsave(&priv->event_lock, flags);
2017 		if (event->sync) {
2018 			event->ret = ret;
2019 			complete(&event->complete);
2020 			continue;
2021 		}
2022 		spin_unlock_irqrestore(&priv->event_lock, flags);
2023 
2024 		kfree(event);
2025 
2026 		spin_lock_irqsave(&priv->event_lock, flags);
2027 	}
2028 	spin_unlock_irqrestore(&priv->event_lock, flags);
2029 
2030 	icnss_pm_relax(priv);
2031 }
2032 
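/*
 * Worker that drains the SOC wake message list, dispatching wake request
 * and wake release events while holding a wakeup source, mirroring the
 * driver event worker above.
 */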
2033 static void icnss_soc_wake_msg_work(struct work_struct *work)
2034 {
2035 	struct icnss_priv *priv =
2036 		container_of(work, struct icnss_priv, soc_wake_msg_work);
2037 	struct icnss_soc_wake_event *event;
2038 	unsigned long flags;
2039 	int ret;
2040 
2041 	icnss_pm_stay_awake(priv);
2042 
2043 	spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
2044 
2045 	while (!list_empty(&priv->soc_wake_msg_list)) {
2046 		event = list_first_entry(&priv->soc_wake_msg_list,
2047 					 struct icnss_soc_wake_event, list);
2048 		list_del(&event->list);
2049 		spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
2050 
2051 		icnss_pr_soc_wake("Processing event: %s%s(%d), state: 0x%lx\n",
2052 				  icnss_soc_wake_event_to_str(event->type),
2053 				  event->sync ? "-sync" : "", event->type,
2054 				  priv->state);
2055 
2056 		switch (event->type) {
2057 		case ICNSS_SOC_WAKE_REQUEST_EVENT:
2058 			ret = icnss_event_soc_wake_request(priv,
2059 							   event->data);
2060 			break;
2061 		case ICNSS_SOC_WAKE_RELEASE_EVENT:
2062 			ret = icnss_event_soc_wake_release(priv,
2063 							   event->data);
2064 			break;
2065 		default:
2066 			icnss_pr_err("Invalid Event type: %d", event->type);
2067 			kfree(event);
2068 			continue;
2069 		}
2070 
2071 		priv->stats.soc_wake_events[event->type].processed++;
2072 
2073 		icnss_pr_soc_wake("Event Processed: %s%s(%d), ret: %d, state: 0x%lx\n",
2074 				  icnss_soc_wake_event_to_str(event->type),
2075 				  event->sync ? "-sync" : "", event->type, ret,
2076 				  priv->state);
2077 
2078 		spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
2079 		if (event->sync) {
2080 			event->ret = ret;
2081 			complete(&event->complete);
2082 			continue;
2083 		}
2084 		spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
2085 
2086 		kfree(event);
2087 
2088 		spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
2089 	}
2090 	spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
2091 
2092 	icnss_pm_relax(priv);
2093 }
2094 
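/*
 * Collect the MSA0 region into its ramdump device. This is a no-op when
 * dump collection is disabled or the dump device was never created.
 */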
2095 static int icnss_msa0_ramdump(struct icnss_priv *priv)
2096 {
2097 	int ret = 0;
2098 	struct qcom_dump_segment segment;
2099 	struct icnss_ramdump_info *msa0_dump_dev = priv->msa0_dump_dev;
2100 	struct list_head head;
2101 
2102 	if (!dump_enabled()) {
2103 		icnss_pr_info("Dump collection is not enabled\n");
2104 		return ret;
2105 	}
2106 
2107 	if (IS_ERR_OR_NULL(msa0_dump_dev))
2108 		return ret;
2109 
2110 	INIT_LIST_HEAD(&head);
2111 
2112 	memset(&segment, 0, sizeof(segment));
2113 
2114 	segment.va = priv->msa_va;
2115 	segment.size = priv->msa_mem_size;
2116 
2117 	list_add(&segment.node, &head);
2118 
2119 	if (!msa0_dump_dev->dev) {
2120 		icnss_pr_err("Created Dump Device not found\n");
2121 		return 0;
2122 	}
2123 
2124 	ret = qcom_dump(&head, msa0_dump_dev->dev);
2125 	if (ret) {
2126 		icnss_pr_err("Failed to dump msa0, err = %d\n", ret);
2127 		return ret;
2128 	}
2129 
2130 	list_del(&segment.node);
2131 	return ret;
2132 }
2133 
2134 static void icnss_update_state_send_modem_shutdown(struct icnss_priv *priv,
2135 							void *data)
2136 {
2137 	struct qcom_ssr_notify_data *notif = data;
2138 	int ret = 0;
2139 
2140 	if (!notif->crashed) {
2141 		if (atomic_read(&priv->is_shutdown)) {
2142 			atomic_set(&priv->is_shutdown, false);
2143 			if (!test_bit(ICNSS_PD_RESTART, &priv->state) &&
2144 				!test_bit(ICNSS_SHUTDOWN_DONE, &priv->state) &&
2145 				!test_bit(ICNSS_BLOCK_SHUTDOWN, &priv->state)) {
2146 				clear_bit(ICNSS_FW_READY, &priv->state);
2147 				icnss_driver_event_post(priv,
2148 					  ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
2149 					  ICNSS_EVENT_SYNC_UNINTERRUPTIBLE,
2150 					  NULL);
2151 			}
2152 		}
2153 
2154 		if (test_bit(ICNSS_BLOCK_SHUTDOWN, &priv->state)) {
2155 			if (!wait_for_completion_timeout(
2156 					&priv->unblock_shutdown,
2157 					msecs_to_jiffies(PROBE_TIMEOUT)))
2158 				icnss_pr_err("modem block shutdown timeout\n");
2159 		}
2160 
2161 		ret = wlfw_send_modem_shutdown_msg(priv);
2162 		if (ret < 0)
2163 			icnss_pr_err("Fail to send modem shutdown Indication %d\n",
2164 				     ret);
2165 	}
2166 }
2167 
2168 static char *icnss_qcom_ssr_notify_state_to_str(enum qcom_ssr_notify_type code)
2169 {
2170 	switch (code) {
2171 	case QCOM_SSR_BEFORE_POWERUP:
2172 		return "BEFORE_POWERUP";
2173 	case QCOM_SSR_AFTER_POWERUP:
2174 		return "AFTER_POWERUP";
2175 	case QCOM_SSR_BEFORE_SHUTDOWN:
2176 		return "BEFORE_SHUTDOWN";
2177 	case QCOM_SSR_AFTER_SHUTDOWN:
2178 		return "AFTER_SHUTDOWN";
2179 	default:
2180 		return "UNKNOWN";
2181 	}
2182 }
2183 
2184 static int icnss_wpss_early_notifier_nb(struct notifier_block *nb,
2185 					unsigned long code,
2186 					void *data)
2187 {
2188 	struct icnss_priv *priv = container_of(nb, struct icnss_priv,
2189 					       wpss_early_ssr_nb);
2190 
2191 	icnss_pr_vdbg("WPSS-EARLY-Notify: event %s(%lu)\n",
2192 		      icnss_qcom_ssr_notify_state_to_str(code), code);
2193 
2194 	if (code == QCOM_SSR_BEFORE_SHUTDOWN) {
2195 		set_bit(ICNSS_FW_DOWN, &priv->state);
2196 		icnss_ignore_fw_timeout(true);
2197 	}
2198 
2199 	return NOTIFY_DONE;
2200 }
2201 
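/*
 * WPSS SSR notifier: on BEFORE_SHUTDOWN, mark firmware down, record crash
 * vs. clean shutdown statistics, notify the registered WLAN driver via the
 * FW_DOWN uevent and queue a PD_SERVICE_DOWN event; on AFTER_SHUTDOWN,
 * collect the msa0 ramdump if WPSS crashed.
 */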
2202 static int icnss_wpss_notifier_nb(struct notifier_block *nb,
2203 				  unsigned long code,
2204 				  void *data)
2205 {
2206 	struct icnss_event_pd_service_down_data *event_data;
2207 	struct qcom_ssr_notify_data *notif = data;
2208 	struct icnss_priv *priv = container_of(nb, struct icnss_priv,
2209 					       wpss_ssr_nb);
2210 	struct icnss_uevent_fw_down_data fw_down_data = {0};
2211 
2212 	icnss_pr_vdbg("WPSS-Notify: event %s(%lu)\n",
2213 		      icnss_qcom_ssr_notify_state_to_str(code), code);
2214 
2215 	switch (code) {
2216 	case QCOM_SSR_BEFORE_SHUTDOWN:
2217 		break;
2218 	case QCOM_SSR_AFTER_SHUTDOWN:
2219 		/* Collect ramdump only when there was a crash. */
2220 		if (notif->crashed) {
2221 			icnss_pr_info("Collecting msa0 segment dump\n");
2222 			icnss_msa0_ramdump(priv);
2223 		}
2224 		goto out;
2225 	default:
2226 		goto out;
2227 	}
2228 
2229 
2230 	if (priv->wpss_self_recovery_enabled)
2231 		del_timer(&priv->wpss_ssr_timer);
2232 
2233 	priv->is_ssr = true;
2234 
2235 	icnss_pr_info("WPSS went down, state: 0x%lx, crashed: %d\n",
2236 		      priv->state, notif->crashed);
2237 
2238 	if (priv->device_id == ADRASTEA_DEVICE_ID)
2239 		icnss_update_state_send_modem_shutdown(priv, data);
2240 
2241 	set_bit(ICNSS_FW_DOWN, &priv->state);
2242 	icnss_ignore_fw_timeout(true);
2243 
2244 	if (notif->crashed)
2245 		priv->stats.recovery.root_pd_crash++;
2246 	else
2247 		priv->stats.recovery.root_pd_shutdown++;
2248 
2249 	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
2250 
2251 	if (event_data == NULL)
2252 		return notifier_from_errno(-ENOMEM);
2253 
2254 	event_data->crashed = notif->crashed;
2255 
2256 	fw_down_data.crashed = !!notif->crashed;
2257 	if (test_bit(ICNSS_FW_READY, &priv->state)) {
2258 		clear_bit(ICNSS_FW_READY, &priv->state);
2259 		fw_down_data.crashed = !!notif->crashed;
2260 		icnss_call_driver_uevent(priv,
2261 					 ICNSS_UEVENT_FW_DOWN,
2262 					 &fw_down_data);
2263 	}
2264 	icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
2265 				ICNSS_EVENT_SYNC, event_data);
2266 
2267 	if (notif->crashed)
2268 		mod_timer(&priv->recovery_timer,
2269 			  jiffies + msecs_to_jiffies(ICNSS_RECOVERY_TIMEOUT));
2270 out:
2271 	icnss_pr_vdbg("Exit %s,state: 0x%lx\n", __func__, priv->state);
2272 	return NOTIFY_OK;
2273 }
2274 
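/*
 * Modem SSR notifier: BEFORE_SHUTDOWN handles low-power (hibernate) entry
 * and, unless PDR is registered, marks firmware down and posts a
 * PD_SERVICE_DOWN event; AFTER_SHUTDOWN collects the msa0 ramdump when
 * the modem crashed.
 */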
2275 static int icnss_modem_notifier_nb(struct notifier_block *nb,
2276 				  unsigned long code,
2277 				  void *data)
2278 {
2279 	struct icnss_event_pd_service_down_data *event_data;
2280 	struct qcom_ssr_notify_data *notif = data;
2281 	struct icnss_priv *priv = container_of(nb, struct icnss_priv,
2282 					       modem_ssr_nb);
2283 	struct icnss_uevent_fw_down_data fw_down_data = {0};
2284 
2285 	icnss_pr_vdbg("Modem-Notify: event %s(%lu)\n",
2286 		      icnss_qcom_ssr_notify_state_to_str(code), code);
2287 
2288 	switch (code) {
2289 	case QCOM_SSR_BEFORE_SHUTDOWN:
2290 		if (priv->is_slate_rfa)
2291 			complete(&priv->slate_boot_complete);
2292 
2293 		if (!notif->crashed &&
2294 		    priv->low_power_support) { /* Hibernate */
2295 			if (test_bit(ICNSS_MODE_ON, &priv->state))
2296 				icnss_driver_event_post(
2297 					priv, ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN,
2298 					ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
2299 			set_bit(ICNSS_LOW_POWER, &priv->state);
2300 		}
2301 		break;
2302 	case QCOM_SSR_AFTER_SHUTDOWN:
2303 		/* Collect ramdump only when there was a crash. */
2304 		if (notif->crashed) {
2305 			icnss_pr_info("Collecting msa0 segment dump\n");
2306 			icnss_msa0_ramdump(priv);
2307 		}
2308 
2309 		goto out;
2310 	default:
2311 		goto out;
2312 	}
2313 
2314 	priv->is_ssr = true;
2315 
2316 	if (notif->crashed) {
2317 		priv->stats.recovery.root_pd_crash++;
2318 		priv->root_pd_shutdown = false;
2319 	} else {
2320 		priv->stats.recovery.root_pd_shutdown++;
2321 		priv->root_pd_shutdown = true;
2322 	}
2323 
2324 	icnss_update_state_send_modem_shutdown(priv, data);
2325 
2326 	if (test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
2327 		set_bit(ICNSS_FW_DOWN, &priv->state);
2328 		icnss_ignore_fw_timeout(true);
2329 
2330 		if (test_bit(ICNSS_FW_READY, &priv->state)) {
2331 			clear_bit(ICNSS_FW_READY, &priv->state);
2332 			fw_down_data.crashed = !!notif->crashed;
2333 			icnss_call_driver_uevent(priv,
2334 						 ICNSS_UEVENT_FW_DOWN,
2335 						 &fw_down_data);
2336 		}
2337 		goto out;
2338 	}
2339 
2340 	icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
2341 		      priv->state, notif->crashed);
2342 
2343 	set_bit(ICNSS_FW_DOWN, &priv->state);
2344 
2345 	icnss_ignore_fw_timeout(true);
2346 
2347 	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
2348 
2349 	if (event_data == NULL)
2350 		return notifier_from_errno(-ENOMEM);
2351 
2352 	event_data->crashed = notif->crashed;
2353 
2354 	fw_down_data.crashed = !!notif->crashed;
2355 	if (test_bit(ICNSS_FW_READY, &priv->state)) {
2356 		clear_bit(ICNSS_FW_READY, &priv->state);
2357 		fw_down_data.crashed = !!notif->crashed;
2358 		icnss_call_driver_uevent(priv,
2359 					 ICNSS_UEVENT_FW_DOWN,
2360 					 &fw_down_data);
2361 	}
2362 	icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
2363 				ICNSS_EVENT_SYNC, event_data);
2364 
2365 	if (notif->crashed)
2366 		mod_timer(&priv->recovery_timer,
2367 			  jiffies + msecs_to_jiffies(ICNSS_RECOVERY_TIMEOUT));
2368 out:
2369 	icnss_pr_vdbg("Exit %s,state: 0x%lx\n", __func__, priv->state);
2370 	return NOTIFY_OK;
2371 }
2372 
2373 static int icnss_wpss_early_ssr_register_notifier(struct icnss_priv *priv)
2374 {
2375 	int ret = 0;
2376 
2377 	priv->wpss_early_ssr_nb.notifier_call = icnss_wpss_early_notifier_nb;
2378 
2379 	priv->wpss_early_notify_handler =
2380 		qcom_register_early_ssr_notifier("wpss",
2381 						 &priv->wpss_early_ssr_nb);
2382 
2383 	if (IS_ERR_OR_NULL(priv->wpss_early_notify_handler)) {
2384 		ret = PTR_ERR(priv->wpss_early_notify_handler);
2385 		icnss_pr_err("WPSS register early notifier failed: %d\n", ret);
2386 	}
2387 
2388 	return ret;
2389 }
2390 
2391 static int icnss_wpss_ssr_register_notifier(struct icnss_priv *priv)
2392 {
2393 	int ret = 0;
2394 
2395 	priv->wpss_ssr_nb.notifier_call = icnss_wpss_notifier_nb;
2396 	/*
2397 	 * Assign priority of icnss wpss notifier callback over IPA
2398 	 * modem notifier callback which is 0
2399 	 */
2400 	priv->wpss_ssr_nb.priority = 1;
2401 
2402 	priv->wpss_notify_handler =
2403 		qcom_register_ssr_notifier("wpss", &priv->wpss_ssr_nb);
2404 
2405 	if (IS_ERR_OR_NULL(priv->wpss_notify_handler)) {
2406 		ret = PTR_ERR(priv->wpss_notify_handler);
2407 		icnss_pr_err("WPSS register notifier failed: %d\n", ret);
2408 	}
2409 
2410 	set_bit(ICNSS_SSR_REGISTERED, &priv->state);
2411 
2412 	return ret;
2413 }
2414 
2415 #ifdef CONFIG_SLATE_MODULE_ENABLED
2416 static int icnss_slate_event_notifier_nb(struct notifier_block *nb,
2417 					 unsigned long event, void *data)
2418 {
2419 	icnss_pr_info("Received slate event 0x%x\n", event);
2420 
2421 	if (event == SLATE_STATUS) {
2422 		struct icnss_priv *priv = container_of(nb, struct icnss_priv,
2423 						       seb_nb);
2424 		enum boot_status status = *(enum boot_status *)data;
2425 
2426 		if (status == SLATE_READY) {
2427 			icnss_pr_dbg("Slate ready received, state: 0x%lx\n",
2428 				     priv->state);
2429 			set_bit(ICNSS_SLATE_READY, &priv->state);
2430 			set_bit(ICNSS_SLATE_UP, &priv->state);
2431 			complete(&priv->slate_boot_complete);
2432 		}
2433 	}
2434 
2435 	return NOTIFY_OK;
2436 }
2437 
2438 static int icnss_register_slate_event_notifier(struct icnss_priv *priv)
2439 {
2440 	int ret = 0;
2441 
2442 	priv->seb_nb.notifier_call = icnss_slate_event_notifier_nb;
2443 
2444 	priv->seb_handle = seb_register_for_slate_event(SLATE_STATUS,
2445 							&priv->seb_nb);
2446 	if (IS_ERR_OR_NULL(priv->seb_handle)) {
2447 		ret = priv->seb_handle ? PTR_ERR(priv->seb_handle) : -EINVAL;
2448 		icnss_pr_err("SLATE event register notifier failed: %d\n",
2449 			     ret);
2450 	}
2451 
2452 	return ret;
2453 }
2454 
2455 static int icnss_unregister_slate_event_notifier(struct icnss_priv *priv)
2456 {
2457 	int ret = 0;
2458 
2459 	ret = seb_unregister_for_slate_event(priv->seb_handle, &priv->seb_nb);
2460 	if (ret < 0)
2461 		icnss_pr_err("Slate event unregister failed: %d\n", ret);
2462 
2463 	return ret;
2464 }
2465 
2466 static int icnss_slate_notifier_nb(struct notifier_block *nb,
2467 				   unsigned long code,
2468 				   void *data)
2469 {
2470 	struct icnss_priv *priv = container_of(nb, struct icnss_priv,
2471 					       slate_ssr_nb);
2472 	int ret = 0;
2473 
2474 	icnss_pr_vdbg("Slate-subsys-notify: event %lu\n", code);
2475 
2476 	if (code == QCOM_SSR_AFTER_POWERUP &&
2477 	    test_bit(ICNSS_SLATE_READY, &priv->state)) {
2478 		set_bit(ICNSS_SLATE_UP, &priv->state);
2479 		complete(&priv->slate_boot_complete);
2480 		icnss_pr_dbg("Slate boot complete, state: 0x%lx\n",
2481 			     priv->state);
2482 	} else if (code == QCOM_SSR_BEFORE_SHUTDOWN &&
2483 		   test_bit(ICNSS_SLATE_UP, &priv->state)) {
2484 		clear_bit(ICNSS_SLATE_UP, &priv->state);
2485 		if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
2486 			icnss_pr_err("PD_RESTART in progress 0x%lx\n",
2487 				     priv->state);
2488 			goto skip_pdr;
2489 		}
2490 
2491 		icnss_pr_dbg("Initiating PDR 0x%lx\n", priv->state);
2492 		ret = icnss_trigger_recovery(&priv->pdev->dev);
2493 		if (ret < 0) {
2494 			icnss_fatal_err("Fail to trigger PDR: ret: %d, state: 0x%lx\n",
2495 					ret, priv->state);
2496 			goto skip_pdr;
2497 		}
2498 	}
2499 
2500 skip_pdr:
2501 	return NOTIFY_OK;
2502 }
2503 
2504 static int icnss_slate_ssr_register_notifier(struct icnss_priv *priv)
2505 {
2506 	int ret = 0;
2507 
2508 	priv->slate_ssr_nb.notifier_call = icnss_slate_notifier_nb;
2509 
2510 	priv->slate_notify_handler =
2511 		qcom_register_ssr_notifier("slatefw", &priv->slate_ssr_nb);
2512 
2513 	if (IS_ERR_OR_NULL(priv->slate_notify_handler)) {
2514 		ret = PTR_ERR(priv->slate_notify_handler);
2515 		icnss_pr_err("SLATE register notifier failed: %d\n", ret);
2516 	}
2517 
2518 	set_bit(ICNSS_SLATE_SSR_REGISTERED, &priv->state);
2519 
2520 	return ret;
2521 }
2522 
2523 static int icnss_slate_ssr_unregister_notifier(struct icnss_priv *priv)
2524 {
2525 	if (!test_and_clear_bit(ICNSS_SLATE_SSR_REGISTERED, &priv->state))
2526 		return 0;
2527 
2528 	qcom_unregister_ssr_notifier(priv->slate_notify_handler,
2529 				     &priv->slate_ssr_nb);
2530 	priv->slate_notify_handler = NULL;
2531 
2532 	return 0;
2533 }
2534 #else
2535 static int icnss_register_slate_event_notifier(struct icnss_priv *priv)
2536 {
2537 	return 0;
2538 }
2539 
2540 static int icnss_unregister_slate_event_notifier(struct icnss_priv *priv)
2541 {
2542 	return 0;
2543 }
2544 
2545 static int icnss_slate_ssr_register_notifier(struct icnss_priv *priv)
2546 {
2547 	return 0;
2548 }
2549 
2550 static int icnss_slate_ssr_unregister_notifier(struct icnss_priv *priv)
2551 {
2552 	return 0;
2553 }
2554 #endif
2555 
2556 static int icnss_modem_ssr_register_notifier(struct icnss_priv *priv)
2557 {
2558 	int ret = 0;
2559 
2560 	priv->modem_ssr_nb.notifier_call = icnss_modem_notifier_nb;
2561 	/*
2562 	 * Assign priority of icnss modem notifier callback over IPA
2563 	 * modem notifier callback which is 0
2564 	 */
2565 	priv->modem_ssr_nb.priority = 1;
2566 
2567 	priv->modem_notify_handler =
2568 		qcom_register_ssr_notifier("mpss", &priv->modem_ssr_nb);
2569 
2570 	if (IS_ERR_OR_NULL(priv->modem_notify_handler)) {
2571 		ret = PTR_ERR(priv->modem_notify_handler);
2572 		icnss_pr_err("Modem register notifier failed: %d\n", ret);
2573 	}
2574 
2575 	set_bit(ICNSS_SSR_REGISTERED, &priv->state);
2576 
2577 	return ret;
2578 }
2579 
2580 static void icnss_wpss_early_ssr_unregister_notifier(struct icnss_priv *priv)
2581 {
2582 	if (IS_ERR_OR_NULL(priv->wpss_early_notify_handler))
2583 		return;
2584 
2585 	qcom_unregister_early_ssr_notifier(priv->wpss_early_notify_handler,
2586 					   &priv->wpss_early_ssr_nb);
2587 	priv->wpss_early_notify_handler = NULL;
2588 }
2589 
2590 static int icnss_wpss_ssr_unregister_notifier(struct icnss_priv *priv)
2591 {
2592 	if (!test_and_clear_bit(ICNSS_SSR_REGISTERED, &priv->state))
2593 		return 0;
2594 
2595 	qcom_unregister_ssr_notifier(priv->wpss_notify_handler,
2596 				     &priv->wpss_ssr_nb);
2597 	priv->wpss_notify_handler = NULL;
2598 
2599 	return 0;
2600 }
2601 
2602 static int icnss_modem_ssr_unregister_notifier(struct icnss_priv *priv)
2603 {
2604 	if (!test_and_clear_bit(ICNSS_SSR_REGISTERED, &priv->state))
2605 		return 0;
2606 
2607 	qcom_unregister_ssr_notifier(priv->modem_notify_handler,
2608 				     &priv->modem_ssr_nb);
2609 	priv->modem_notify_handler = NULL;
2610 
2611 	return 0;
2612 }
2613 
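/*
 * Protection domain restart (PDR) notifier: on SERVREG_SERVICE_STATE_DOWN,
 * classify the cause (host-triggered, FW crash or root PD shutdown), mark
 * firmware down, notify the WLAN driver and post a PD_SERVICE_DOWN event;
 * on SERVREG_SERVICE_STATE_UP, clear the firmware-down state.
 */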
2614 static void icnss_pdr_notifier_cb(int state, char *service_path, void *priv_cb)
2615 {
2616 	struct icnss_priv *priv = priv_cb;
2617 	struct icnss_event_pd_service_down_data *event_data;
2618 	struct icnss_uevent_fw_down_data fw_down_data = {0};
2619 	enum icnss_pdr_cause_index cause = ICNSS_ROOT_PD_CRASH;
2620 
2621 	if (!priv)
2622 		return;
2623 
2624 	icnss_pr_dbg("PD service notification: 0x%lx state: 0x%lx\n",
2625 		     state, priv->state);
2626 
2627 	switch (state) {
2628 	case SERVREG_SERVICE_STATE_DOWN:
2629 		event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
2630 
2631 		if (!event_data)
2632 			return;
2633 
2634 		event_data->crashed = true;
2635 
2636 		if (!priv->is_ssr) {
2637 			set_bit(ICNSS_PDR, &priv->state);
2638 			if (test_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state)) {
2639 				cause = ICNSS_HOST_ERROR;
2640 				priv->stats.recovery.pdr_host_error++;
2641 			} else {
2642 				cause = ICNSS_FW_CRASH;
2643 				priv->stats.recovery.pdr_fw_crash++;
2644 			}
2645 		} else if (priv->root_pd_shutdown) {
2646 			cause = ICNSS_ROOT_PD_SHUTDOWN;
2647 			event_data->crashed = false;
2648 		}
2649 
2650 		icnss_pr_info("PD service down, state: 0x%lx: cause: %s\n",
2651 			      priv->state, icnss_pdr_cause[cause]);
2652 
2653 		if (!test_bit(ICNSS_FW_DOWN, &priv->state)) {
2654 			set_bit(ICNSS_FW_DOWN, &priv->state);
2655 			icnss_ignore_fw_timeout(true);
2656 
2657 			if (test_bit(ICNSS_FW_READY, &priv->state)) {
2658 				clear_bit(ICNSS_FW_READY, &priv->state);
2659 				fw_down_data.crashed = event_data->crashed;
2660 				icnss_call_driver_uevent(priv,
2661 							 ICNSS_UEVENT_FW_DOWN,
2662 							 &fw_down_data);
2663 			}
2664 		}
2665 		clear_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
2666 
2667 		if (event_data->crashed)
2668 			mod_timer(&priv->recovery_timer,
2669 				  jiffies +
2670 				  msecs_to_jiffies(ICNSS_RECOVERY_TIMEOUT));
2671 
2672 		icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
2673 					ICNSS_EVENT_SYNC, event_data);
2674 		break;
2675 	case SERVREG_SERVICE_STATE_UP:
2676 		clear_bit(ICNSS_FW_DOWN, &priv->state);
2677 		break;
2678 	default:
2679 		break;
2680 	}
2681 	return;
2682 }
2683 
2684 static int icnss_pd_restart_enable(struct icnss_priv *priv)
2685 {
2686 	struct pdr_handle *handle = NULL;
2687 	struct pdr_service *service = NULL;
2688 	int err = 0;
2689 
2690 	handle = pdr_handle_alloc(icnss_pdr_notifier_cb, priv);
2691 	if (IS_ERR_OR_NULL(handle)) {
2692 		err = PTR_ERR(handle);
2693 		icnss_pr_err("Failed to alloc pdr handle, err %d", err);
2694 		goto out;
2695 	}
2696 	service = pdr_add_lookup(handle, ICNSS_WLAN_SERVICE_NAME, ICNSS_WLANPD_NAME);
2697 	if (IS_ERR_OR_NULL(service)) {
2698 		err = PTR_ERR(service);
2699 		icnss_pr_err("Failed to add lookup, err %d", err);
2700 		goto out;
2701 	}
2702 	priv->pdr_handle = handle;
2703 	priv->pdr_service = service;
2704 	set_bit(ICNSS_PDR_REGISTERED, &priv->state);
2705 
2706 	icnss_pr_info("PDR registration happened");
2707 out:
2708 	return err;
2709 }
2710 
2711 static void icnss_pdr_unregister_notifier(struct icnss_priv *priv)
2712 {
2713 	if (!test_and_clear_bit(ICNSS_PDR_REGISTERED, &priv->state))
2714 		return;
2715 
2716 	pdr_handle_release(priv->pdr_handle);
2717 }
2718 
2719 static int icnss_ramdump_devnode_init(struct icnss_priv *priv)
2720 {
2721 	int ret = 0;
2722 
2723 #if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
2724 	priv->icnss_ramdump_class = class_create(THIS_MODULE, ICNSS_RAMDUMP_NAME);
2725 #else
2726 	priv->icnss_ramdump_class = class_create(ICNSS_RAMDUMP_NAME);
2727 #endif
2728 	if (IS_ERR_OR_NULL(priv->icnss_ramdump_class)) {
2729 		ret = PTR_ERR(priv->icnss_ramdump_class);
2730 		icnss_pr_err("%s:Class create failed for ramdump devices (%d)\n", __func__, ret);
2731 		return ret;
2732 	}
2733 
2734 	ret = alloc_chrdev_region(&priv->icnss_ramdump_dev, 0, RAMDUMP_NUM_DEVICES,
2735 				  ICNSS_RAMDUMP_NAME);
2736 	if (ret < 0) {
2737 		icnss_pr_err("%s: Unable to allocate major\n", __func__);
2738 		goto fail_alloc_major;
2739 	}
2740 	return 0;
2741 
2742 fail_alloc_major:
2743 	class_destroy(priv->icnss_ramdump_class);
2744 	return ret;
2745 }
2746 
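/*
 * Allocate an icnss ramdump descriptor, reserve a minor number and create
 * the character device under the icnss ramdump class. Returns the
 * descriptor on success, or an ERR_PTR/NULL on failure.
 */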
2747 void *icnss_create_ramdump_device(struct icnss_priv *priv, const char *dev_name)
2748 {
2749 	int ret = 0;
2750 	struct icnss_ramdump_info *ramdump_info;
2751 
2752 	if (!dev_name) {
2753 		icnss_pr_err("%s: Invalid device name.\n", __func__);
2754 		return NULL;
2755 	}
2756 
2757 	ramdump_info = kzalloc(sizeof(*ramdump_info), GFP_KERNEL);
2758 	if (!ramdump_info)
2759 		return ERR_PTR(-ENOMEM);
2760 
2761 	snprintf(ramdump_info->name, ARRAY_SIZE(ramdump_info->name), "icnss_%s", dev_name);
2762 
2763 	ramdump_info->minor = ida_simple_get(&rd_minor_id, 0, RAMDUMP_NUM_DEVICES, GFP_KERNEL);
2764 	if (ramdump_info->minor < 0) {
2765 		icnss_pr_err("%s: No more minor numbers left! rc:%d\n", __func__,
2766 			     ramdump_info->minor);
2767 		ret = -ENODEV;
2768 		goto fail_out_of_minors;
2769 	}
2770 
2771 	ramdump_info->dev = device_create(priv->icnss_ramdump_class, NULL,
2772 					  MKDEV(MAJOR(priv->icnss_ramdump_dev),
2773 					  ramdump_info->minor),
2774 					  ramdump_info, ramdump_info->name);
2775 	if (IS_ERR_OR_NULL(ramdump_info->dev)) {
2776 		ret = PTR_ERR(ramdump_info->dev);
2777 		icnss_pr_err("%s: Device create failed for %s (%d)\n", __func__,
2778 			     ramdump_info->name, ret);
2779 		goto fail_device_create;
2780 	}
2781 	return (void *)ramdump_info;
2782 
2783 fail_device_create:
2784 	ida_simple_remove(&rd_minor_id, ramdump_info->minor);
2785 fail_out_of_minors:
2786 	kfree(ramdump_info);
2787 	return ERR_PTR(ret);
2788 }
2789 
2790 static int icnss_register_ramdump_devices(struct icnss_priv *priv)
2791 {
2792 	int ret = 0;
2793 
2794 	if (!priv || !priv->pdev) {
2795 		icnss_pr_err("Platform priv or pdev is NULL\n");
2796 		return -EINVAL;
2797 	}
2798 
2799 	ret = icnss_ramdump_devnode_init(priv);
2800 	if (ret)
2801 		return ret;
2802 
2803 	priv->msa0_dump_dev = icnss_create_ramdump_device(priv, "wcss_msa0");
2804 
2805 	if (IS_ERR_OR_NULL(priv->msa0_dump_dev) || !priv->msa0_dump_dev->dev) {
2806 		icnss_pr_err("Failed to create msa0 dump device!");
2807 		return -ENOMEM;
2808 	}
2809 
2810 	if (priv->device_id == WCN6750_DEVICE_ID ||
2811 	    priv->device_id == WCN6450_DEVICE_ID) {
2812 		priv->m3_dump_phyareg = icnss_create_ramdump_device(priv,
2813 						ICNSS_M3_SEGMENT(
2814 						ICNSS_M3_SEGMENT_PHYAREG));
2815 
2816 		if (IS_ERR_OR_NULL(priv->m3_dump_phyareg) ||
2817 		    !priv->m3_dump_phyareg->dev) {
2818 			icnss_pr_err("Failed to create m3 dump for Phyareg segment device!");
2819 			return -ENOMEM;
2820 		}
2821 
2822 		priv->m3_dump_phydbg = icnss_create_ramdump_device(priv,
2823 						ICNSS_M3_SEGMENT(
2824 						ICNSS_M3_SEGMENT_PHYA));
2825 
2826 		if (IS_ERR_OR_NULL(priv->m3_dump_phydbg) ||
2827 		    !priv->m3_dump_phydbg->dev) {
2828 			icnss_pr_err("Failed to create m3 dump for Phydbg segment device!");
2829 			return -ENOMEM;
2830 		}
2831 
2832 		priv->m3_dump_wmac0reg = icnss_create_ramdump_device(priv,
2833 						ICNSS_M3_SEGMENT(
2834 						ICNSS_M3_SEGMENT_WMACREG));
2835 
2836 		if (IS_ERR_OR_NULL(priv->m3_dump_wmac0reg) ||
2837 		    !priv->m3_dump_wmac0reg->dev) {
2838 			icnss_pr_err("Failed to create m3 dump for Wmac0reg segment device!");
2839 			return -ENOMEM;
2840 		}
2841 
2842 		priv->m3_dump_wcssdbg = icnss_create_ramdump_device(priv,
2843 						ICNSS_M3_SEGMENT(
2844 						ICNSS_M3_SEGMENT_WCSSDBG));
2845 
2846 		if (IS_ERR_OR_NULL(priv->m3_dump_wcssdbg) ||
2847 		    !priv->m3_dump_wcssdbg->dev) {
2848 			icnss_pr_err("Failed to create m3 dump for Wcssdbg segment device!");
2849 			return -ENOMEM;
2850 		}
2851 
2852 		priv->m3_dump_phyapdmem = icnss_create_ramdump_device(priv,
2853 						ICNSS_M3_SEGMENT(
2854 						ICNSS_M3_SEGMENT_PHYAM3));
2855 
2856 		if (IS_ERR_OR_NULL(priv->m3_dump_phyapdmem) ||
2857 		    !priv->m3_dump_phyapdmem->dev) {
2858 			icnss_pr_err("Failed to create m3 dump for Phyapdmem segment device!");
2859 			return -ENOMEM;
2860 		}
2861 	}
2862 
2863 	return 0;
2864 }
2865 
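/*
 * Enable recovery paths according to the quirk bits: register ramdump
 * devices and the WPSS/modem/slate SSR notifiers, then enable PDR unless
 * it is disabled via module parameter.
 */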
2866 static int icnss_enable_recovery(struct icnss_priv *priv)
2867 {
2868 	int ret;
2869 
2870 	if (test_bit(RECOVERY_DISABLE, &priv->ctrl_params.quirks)) {
2871 		icnss_pr_dbg("Recovery disabled through module parameter\n");
2872 		return 0;
2873 	}
2874 
2875 	if (test_bit(PDR_ONLY, &priv->ctrl_params.quirks)) {
2876 		icnss_pr_dbg("SSR disabled through module parameter\n");
2877 		goto enable_pdr;
2878 	}
2879 
2880 	ret = icnss_register_ramdump_devices(priv);
2881 	if (ret)
2882 		return ret;
2883 
2884 	if (priv->wpss_supported) {
2885 		icnss_wpss_early_ssr_register_notifier(priv);
2886 		icnss_wpss_ssr_register_notifier(priv);
2887 		return 0;
2888 	}
2889 
2890 	if (!(priv->rproc_fw_download))
2891 		icnss_modem_ssr_register_notifier(priv);
2892 
2893 	if (priv->is_slate_rfa) {
2894 		icnss_slate_ssr_register_notifier(priv);
2895 		icnss_register_slate_event_notifier(priv);
2896 	}
2897 
2898 	if (test_bit(SSR_ONLY, &priv->ctrl_params.quirks)) {
2899 		icnss_pr_dbg("PDR disabled through module parameter\n");
2900 		return 0;
2901 	}
2902 
2903 enable_pdr:
2904 	ret = icnss_pd_restart_enable(priv);
2905 
2906 	if (ret)
2907 		return ret;
2908 
2909 	return 0;
2910 }
2911 
2912 static int icnss_dev_id_match(struct icnss_priv *priv,
2913 			      struct device_info *dev_info)
2914 {
2915 	while (dev_info->device_id) {
2916 		if (priv->device_id == dev_info->device_id)
2917 			return 1;
2918 		dev_info++;
2919 	}
2920 	return 0;
2921 }
2922 
2923 static int icnss_tcdev_get_max_state(struct thermal_cooling_device *tcdev,
2924 					unsigned long *thermal_state)
2925 {
2926 	struct icnss_thermal_cdev *icnss_tcdev = tcdev->devdata;
2927 
2928 	*thermal_state = icnss_tcdev->max_thermal_state;
2929 
2930 	return 0;
2931 }
2932 
2933 static int icnss_tcdev_get_cur_state(struct thermal_cooling_device *tcdev,
2934 					unsigned long *thermal_state)
2935 {
2936 	struct icnss_thermal_cdev *icnss_tcdev = tcdev->devdata;
2937 
2938 	*thermal_state = icnss_tcdev->curr_thermal_state;
2939 
2940 	return 0;
2941 }
2942 
2943 static int icnss_tcdev_set_cur_state(struct thermal_cooling_device *tcdev,
2944 					unsigned long thermal_state)
2945 {
2946 	struct icnss_thermal_cdev *icnss_tcdev = tcdev->devdata;
2947 	struct device *dev = &penv->pdev->dev;
2948 	int ret = 0;
2949 
2950 
2951 	if (!penv->ops || !penv->ops->set_therm_cdev_state)
2952 		return 0;
2953 
2954 	if (thermal_state > icnss_tcdev->max_thermal_state)
2955 		return -EINVAL;
2956 
2957 	icnss_pr_vdbg("Cooling device set current state: %ld,for cdev id %d",
2958 		      thermal_state, icnss_tcdev->tcdev_id);
2959 
2960 	mutex_lock(&penv->tcdev_lock);
2961 	ret = penv->ops->set_therm_cdev_state(dev, thermal_state,
2962 					      icnss_tcdev->tcdev_id);
2963 	if (!ret)
2964 		icnss_tcdev->curr_thermal_state = thermal_state;
2965 	mutex_unlock(&penv->tcdev_lock);
2966 	if (ret) {
2967 		icnss_pr_err("Setting Current Thermal State Failed: %d,for cdev id %d",
2968 			     ret, icnss_tcdev->tcdev_id);
2969 		return ret;
2970 	}
2971 
2972 	return 0;
2973 }
2974 
2975 static struct thermal_cooling_device_ops icnss_cooling_ops = {
2976 	.get_max_state = icnss_tcdev_get_max_state,
2977 	.get_cur_state = icnss_tcdev_get_cur_state,
2978 	.set_cur_state = icnss_tcdev_set_cur_state,
2979 };
2980 
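/*
 * Register a cooling device for the given tcdev_id. The matching
 * "qcom,icnss_cdev<N>" device tree node must exist and advertise
 * #cooling-cells; the new entry is tracked on icnss_tcdev_list.
 */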
2981 int icnss_thermal_cdev_register(struct device *dev, unsigned long max_state,
2982 			   int tcdev_id)
2983 {
2984 	struct icnss_priv *priv = dev_get_drvdata(dev);
2985 	struct icnss_thermal_cdev *icnss_tcdev = NULL;
2986 	char cdev_node_name[THERMAL_NAME_LENGTH] = "";
2987 	struct device_node *dev_node;
2988 	int ret = 0;
2989 
2990 	icnss_tcdev = kzalloc(sizeof(*icnss_tcdev), GFP_KERNEL);
2991 	if (!icnss_tcdev)
2992 		return -ENOMEM;
2993 
2994 	icnss_tcdev->tcdev_id = tcdev_id;
2995 	icnss_tcdev->max_thermal_state = max_state;
2996 
2997 	snprintf(cdev_node_name, THERMAL_NAME_LENGTH,
2998 		 "qcom,icnss_cdev%d", tcdev_id);
2999 
3000 	dev_node = of_find_node_by_name(NULL, cdev_node_name);
3001 	if (!dev_node) {
3002 		icnss_pr_err("Failed to get cooling device node\n");
3003 		return -EINVAL;
3004 	}
3005 
3006 	icnss_pr_dbg("tcdev node->name=%s\n", dev_node->name);
3007 
3008 	if (of_find_property(dev_node, "#cooling-cells", NULL)) {
3009 		icnss_tcdev->tcdev = thermal_of_cooling_device_register(
3010 						dev_node,
3011 						cdev_node_name, icnss_tcdev,
3012 						&icnss_cooling_ops);
3013 		if (IS_ERR_OR_NULL(icnss_tcdev->tcdev)) {
3014 			ret = PTR_ERR(icnss_tcdev->tcdev);
3015 			icnss_pr_err("Cooling device register failed: %d, for cdev id %d\n",
3016 				     ret, icnss_tcdev->tcdev_id);
3017 		} else {
3018 			icnss_pr_dbg("Cooling device registered for cdev id %d",
3019 				     icnss_tcdev->tcdev_id);
3020 			list_add(&icnss_tcdev->tcdev_list,
3021 				 &priv->icnss_tcdev_list);
3022 		}
3023 	} else {
3024 		icnss_pr_dbg("Cooling device registration not supported");
3025 		ret = -EOPNOTSUPP;
3026 	}
3027 
3028 	return ret;
3029 }
3030 EXPORT_SYMBOL(icnss_thermal_cdev_register);
3031 
3032 void icnss_thermal_cdev_unregister(struct device *dev, int tcdev_id)
3033 {
3034 	struct icnss_priv *priv = dev_get_drvdata(dev);
3035 	struct icnss_thermal_cdev *icnss_tcdev = NULL;
3036 
3037 	while (!list_empty(&priv->icnss_tcdev_list)) {
3038 		icnss_tcdev = list_first_entry(&priv->icnss_tcdev_list,
3039 					       struct icnss_thermal_cdev,
3040 					       tcdev_list);
3041 		thermal_cooling_device_unregister(icnss_tcdev->tcdev);
3042 		list_del(&icnss_tcdev->tcdev_list);
3043 		kfree(icnss_tcdev);
3044 	}
3045 }
3046 EXPORT_SYMBOL(icnss_thermal_cdev_unregister);
3047 
3048 int icnss_get_curr_therm_cdev_state(struct device *dev,
3049 				    unsigned long *thermal_state,
3050 				    int tcdev_id)
3051 {
3052 	struct icnss_priv *priv = dev_get_drvdata(dev);
3053 	struct icnss_thermal_cdev *icnss_tcdev = NULL;
3054 
3055 	mutex_lock(&priv->tcdev_lock);
3056 	list_for_each_entry(icnss_tcdev, &priv->icnss_tcdev_list, tcdev_list) {
3057 		if (icnss_tcdev->tcdev_id != tcdev_id)
3058 			continue;
3059 
3060 		*thermal_state = icnss_tcdev->curr_thermal_state;
3061 		mutex_unlock(&priv->tcdev_lock);
3062 		icnss_pr_dbg("Cooling device current state: %ld, for cdev id %d",
3063 			     icnss_tcdev->curr_thermal_state, tcdev_id);
3064 		return 0;
3065 	}
3066 	mutex_unlock(&priv->tcdev_lock);
3067 	icnss_pr_dbg("Cooling device ID not found: %d", tcdev_id);
3068 	return -EINVAL;
3069 }
3070 EXPORT_SYMBOL(icnss_get_curr_therm_cdev_state);
3071 
3072 int icnss_qmi_send(struct device *dev, int type, void *cmd,
3073 		  int cmd_len, void *cb_ctx,
3074 		  int (*cb)(void *ctx, void *event, int event_len))
3075 {
3076 	struct icnss_priv *priv = icnss_get_plat_priv();
3077 	int ret;
3078 
3079 	if (!priv)
3080 		return -ENODEV;
3081 
3082 	if (!test_bit(ICNSS_WLFW_CONNECTED, &priv->state))
3083 		return -EINVAL;
3084 
3085 	priv->get_info_cb = cb;
3086 	priv->get_info_cb_ctx = cb_ctx;
3087 
3088 	ret = icnss_wlfw_get_info_send_sync(priv, type, cmd, cmd_len);
3089 	if (ret) {
3090 		priv->get_info_cb = NULL;
3091 		priv->get_info_cb_ctx = NULL;
3092 	}
3093 
3094 	return ret;
3095 }
3096 EXPORT_SYMBOL(icnss_qmi_send);
3097 
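/*
 * Entry point for the WLAN host driver to register with the platform
 * driver: validates the ops and device ID table, then queues a
 * REGISTER_DRIVER event so the probe runs from the event worker.
 */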
3098 int __icnss_register_driver(struct icnss_driver_ops *ops,
3099 			    struct module *owner, const char *mod_name)
3100 {
3101 	int ret = 0;
3102 	struct icnss_priv *priv = icnss_get_plat_priv();
3103 
3104 	if (!priv || !priv->pdev) {
3105 		ret = -ENODEV;
3106 		goto out;
3107 	}
3108 
3109 	icnss_pr_dbg("Registering driver, state: 0x%lx\n", priv->state);
3110 
3111 	if (priv->ops) {
3112 		icnss_pr_err("Driver already registered\n");
3113 		ret = -EEXIST;
3114 		goto out;
3115 	}
3116 
3117 	if (!ops->dev_info) {
3118 		icnss_pr_err("WLAN driver devinfo is null, Reject wlan driver loading");
3119 		return -EINVAL;
3120 	}
3121 
3122 	if (!icnss_dev_id_match(priv, ops->dev_info)) {
3123 		icnss_pr_err("WLAN driver dev name is %s, not supported by platform driver\n",
3124 			     ops->dev_info->name);
3125 		return -ENODEV;
3126 	}
3127 
3128 	if (!ops->probe || !ops->remove) {
3129 		ret = -EINVAL;
3130 		goto out;
3131 	}
3132 
3133 	ret = icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_REGISTER_DRIVER,
3134 				      0, ops);
3135 
3136 	if (ret == -EINTR)
3137 		ret = 0;
3138 
3139 out:
3140 	return ret;
3141 }
3142 EXPORT_SYMBOL(__icnss_register_driver);
3143 
3144 int icnss_unregister_driver(struct icnss_driver_ops *ops)
3145 {
3146 	int ret;
3147 	struct icnss_priv *priv = icnss_get_plat_priv();
3148 
3149 	if (!priv || !priv->pdev) {
3150 		ret = -ENODEV;
3151 		goto out;
3152 	}
3153 
3154 	icnss_pr_dbg("Unregistering driver, state: 0x%lx\n", priv->state);
3155 
3156 	if (!priv->ops) {
3157 		icnss_pr_err("Driver not registered\n");
3158 		ret = -ENOENT;
3159 		goto out;
3160 	}
3161 
3162 	ret = icnss_driver_event_post(priv,
3163 					 ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
3164 				      ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
3165 out:
3166 	return ret;
3167 }
3168 EXPORT_SYMBOL(icnss_unregister_driver);
3169 
3170 static struct icnss_msi_config msi_config_wcn6750 = {
3171 	.total_vectors = 28,
3172 	.total_users = 2,
3173 	.users = (struct icnss_msi_user[]) {
3174 		{ .name = "CE", .num_vectors = 10, .base_vector = 0 },
3175 		{ .name = "DP", .num_vectors = 18, .base_vector = 10 },
3176 	},
3177 };
3178 
3179 static struct icnss_msi_config msi_config_wcn6450 = {
3180 	.total_vectors = 14,
3181 	.total_users = 2,
3182 	.users = (struct icnss_msi_user[]) {
3183 		{ .name = "CE", .num_vectors = 12, .base_vector = 0 },
3184 		{ .name = "DP", .num_vectors = 2, .base_vector = 12 },
3185 	},
3186 };
3187 
3188 static int icnss_get_msi_assignment(struct icnss_priv *priv)
3189 {
3190 	if (priv->device_id == WCN6750_DEVICE_ID)
3191 		priv->msi_config = &msi_config_wcn6750;
3192 	else
3193 		priv->msi_config = &msi_config_wcn6450;
3194 
3195 	return 0;
3196 }
3197 
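/*
 * Look up the MSI block assigned to a named user ("CE" or "DP") and return
 * its vector count, user base data (base vector plus the platform MSI base)
 * and base vector.
 */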
3198 int icnss_get_user_msi_assignment(struct device *dev, char *user_name,
3199 				 int *num_vectors, u32 *user_base_data,
3200 				 u32 *base_vector)
3201 {
3202 	struct icnss_priv *priv = dev_get_drvdata(dev);
3203 	struct icnss_msi_config *msi_config;
3204 	int idx;
3205 
3206 	if (!priv)
3207 		return -ENODEV;
3208 
3209 	msi_config = priv->msi_config;
3210 	if (!msi_config) {
3211 		icnss_pr_err("MSI is not supported.\n");
3212 		return -EINVAL;
3213 	}
3214 
3215 	for (idx = 0; idx < msi_config->total_users; idx++) {
3216 		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
3217 			*num_vectors = msi_config->users[idx].num_vectors;
3218 			*user_base_data = msi_config->users[idx].base_vector
3219 				+ priv->msi_base_data;
3220 			*base_vector = msi_config->users[idx].base_vector;
3221 
3222 			icnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
3223 				    user_name, *num_vectors, *user_base_data,
3224 				    *base_vector);
3225 
3226 			return 0;
3227 		}
3228 	}
3229 
3230 	icnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);
3231 
3232 	return -EINVAL;
3233 }
3234 EXPORT_SYMBOL(icnss_get_user_msi_assignment);
3235 
3236 int icnss_get_msi_irq(struct device *dev, unsigned int vector)
3237 {
3238 	struct icnss_priv *priv = dev_get_drvdata(dev);
3239 	int irq_num;
3240 
3241 	irq_num = priv->srng_irqs[vector];
3242 	icnss_pr_dbg("Get IRQ number %d for vector index %d\n",
3243 		     irq_num, vector);
3244 
3245 	return irq_num;
3246 }
3247 EXPORT_SYMBOL(icnss_get_msi_irq);
3248 
3249 void icnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
3250 			   u32 *msi_addr_high)
3251 {
3252 	struct icnss_priv *priv = dev_get_drvdata(dev);
3253 
3254 	*msi_addr_low = lower_32_bits(priv->msi_addr_iova);
3255 	*msi_addr_high = upper_32_bits(priv->msi_addr_iova);
3256 
3257 }
3258 EXPORT_SYMBOL(icnss_get_msi_address);
3259 
3260 int icnss_ce_request_irq(struct device *dev, unsigned int ce_id,
3261 	irqreturn_t (*handler)(int, void *),
3262 		unsigned long flags, const char *name, void *ctx)
3263 {
3264 	int ret = 0;
3265 	unsigned int irq;
3266 	struct ce_irq_list *irq_entry;
3267 	struct icnss_priv *priv = dev_get_drvdata(dev);
3268 
3269 	if (!priv || !priv->pdev) {
3270 		ret = -ENODEV;
3271 		goto out;
3272 	}
3273 
3274 	icnss_pr_vdbg("CE request IRQ: %d, state: 0x%lx\n", ce_id, priv->state);
3275 
3276 	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
3277 		icnss_pr_err("Invalid CE ID, ce_id: %d\n", ce_id);
3278 		ret = -EINVAL;
3279 		goto out;
3280 	}
3281 	irq = priv->ce_irqs[ce_id];
3282 	irq_entry = &priv->ce_irq_list[ce_id];
3283 
3284 	if (irq_entry->handler || irq_entry->irq) {
3285 		icnss_pr_err("IRQ already requested: %d, ce_id: %d\n",
3286 			     irq, ce_id);
3287 		ret = -EEXIST;
3288 		goto out;
3289 	}
3290 
3291 	ret = request_irq(irq, handler, flags, name, ctx);
3292 	if (ret) {
3293 		icnss_pr_err("IRQ request failed: %d, ce_id: %d, ret: %d\n",
3294 			     irq, ce_id, ret);
3295 		goto out;
3296 	}
3297 	irq_entry->irq = irq;
3298 	irq_entry->handler = handler;
3299 
3300 	icnss_pr_vdbg("IRQ requested: %d, ce_id: %d\n", irq, ce_id);
3301 
3302 	penv->stats.ce_irqs[ce_id].request++;
3303 out:
3304 	return ret;
3305 }
3306 EXPORT_SYMBOL(icnss_ce_request_irq);
3307 
3308 int icnss_ce_free_irq(struct device *dev, unsigned int ce_id, void *ctx)
3309 {
3310 	int ret = 0;
3311 	unsigned int irq;
3312 	struct ce_irq_list *irq_entry;
3313 
3314 	if (!penv || !penv->pdev || !dev) {
3315 		ret = -ENODEV;
3316 		goto out;
3317 	}
3318 
3319 	icnss_pr_vdbg("CE free IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
3320 
3321 	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
3322 		icnss_pr_err("Invalid CE ID to free, ce_id: %d\n", ce_id);
3323 		ret = -EINVAL;
3324 		goto out;
3325 	}
3326 
3327 	irq = penv->ce_irqs[ce_id];
3328 	irq_entry = &penv->ce_irq_list[ce_id];
3329 	if (!irq_entry->handler || !irq_entry->irq) {
3330 		icnss_pr_err("IRQ not requested: %d, ce_id: %d\n", irq, ce_id);
3331 		ret = -EEXIST;
3332 		goto out;
3333 	}
3334 	free_irq(irq, ctx);
3335 	irq_entry->irq = 0;
3336 	irq_entry->handler = NULL;
3337 
3338 	penv->stats.ce_irqs[ce_id].free++;
3339 out:
3340 	return ret;
3341 }
3342 EXPORT_SYMBOL(icnss_ce_free_irq);
3343 
3344 void icnss_enable_irq(struct device *dev, unsigned int ce_id)
3345 {
3346 	unsigned int irq;
3347 
3348 	if (!penv || !penv->pdev || !dev) {
3349 		icnss_pr_err("Platform driver not initialized\n");
3350 		return;
3351 	}
3352 
3353 	icnss_pr_vdbg("Enable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
3354 		     penv->state);
3355 
3356 	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
3357 		icnss_pr_err("Invalid CE ID to enable IRQ, ce_id: %d\n", ce_id);
3358 		return;
3359 	}
3360 
3361 	penv->stats.ce_irqs[ce_id].enable++;
3362 
3363 	irq = penv->ce_irqs[ce_id];
3364 	enable_irq(irq);
3365 }
3366 EXPORT_SYMBOL(icnss_enable_irq);
3367 
3368 void icnss_disable_irq(struct device *dev, unsigned int ce_id)
3369 {
3370 	unsigned int irq;
3371 
3372 	if (!penv || !penv->pdev || !dev) {
3373 		icnss_pr_err("Platform driver not initialized\n");
3374 		return;
3375 	}
3376 
3377 	icnss_pr_vdbg("Disable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
3378 		     penv->state);
3379 
3380 	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
3381 		icnss_pr_err("Invalid CE ID to disable IRQ, ce_id: %d\n",
3382 			     ce_id);
3383 		return;
3384 	}
3385 
3386 	irq = penv->ce_irqs[ce_id];
3387 	disable_irq(irq);
3388 
3389 	penv->stats.ce_irqs[ce_id].disable++;
3390 }
3391 EXPORT_SYMBOL(icnss_disable_irq);
3392 
3393 int icnss_get_soc_info(struct device *dev, struct icnss_soc_info *info)
3394 {
3395 	char *fw_build_timestamp = NULL;
3396 	struct icnss_priv *priv = dev_get_drvdata(dev);
3397 
3398 	if (!priv) {
3399 		icnss_pr_err("Platform driver not initialized\n");
3400 		return -EINVAL;
3401 	}
3402 
3403 	info->v_addr = priv->mem_base_va;
3404 	info->p_addr = priv->mem_base_pa;
3405 	info->chip_id = priv->chip_info.chip_id;
3406 	info->chip_family = priv->chip_info.chip_family;
3407 	info->board_id = priv->board_id;
3408 	info->soc_id = priv->soc_id;
3409 	info->fw_version = priv->fw_version_info.fw_version;
3410 	fw_build_timestamp = priv->fw_version_info.fw_build_timestamp;
3411 	fw_build_timestamp[WLFW_MAX_TIMESTAMP_LEN] = '\0';
3412 	strlcpy(info->fw_build_timestamp,
3413 		priv->fw_version_info.fw_build_timestamp,
3414 		WLFW_MAX_TIMESTAMP_LEN + 1);
3415 	strlcpy(info->fw_build_id, priv->fw_build_id,
3416 	        ICNSS_WLFW_MAX_BUILD_ID_LEN + 1);
3417 	info->rd_card_chain_cap = priv->rd_card_chain_cap;
3418 	info->phy_he_channel_width_cap = priv->phy_he_channel_width_cap;
3419 	info->phy_qam_cap = priv->phy_qam_cap;
3420 	memcpy(&info->dev_mem_info, &priv->dev_mem_info,
3421 	       sizeof(info->dev_mem_info));
3422 
3423 	return 0;
3424 }
3425 EXPORT_SYMBOL(icnss_get_soc_info);
3426 
3427 int icnss_get_mhi_state(struct device *dev)
3428 {
3429 	struct icnss_priv *priv = dev_get_drvdata(dev);
3430 
3431 	if (!priv) {
3432 		icnss_pr_err("Platform driver not initialized\n");
3433 		return -EINVAL;
3434 	}
3435 
3436 	if (!priv->mhi_state_info_va)
3437 		return -ENOMEM;
3438 
3439 	return ioread32(priv->mhi_state_info_va);
3440 }
3441 EXPORT_SYMBOL(icnss_get_mhi_state);
3442 
3443 int icnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode)
3444 {
3445 	int ret;
3446 	struct icnss_priv *priv;
3447 
3448 	if (!dev)
3449 		return -ENODEV;
3450 
3451 	priv = dev_get_drvdata(dev);
3452 
3453 	if (!priv) {
3454 		icnss_pr_err("Platform driver not initialized\n");
3455 		return -EINVAL;
3456 	}
3457 
3458 	if (test_bit(ICNSS_FW_DOWN, &priv->state) ||
3459 	    !test_bit(ICNSS_FW_READY, &priv->state)) {
3460 		icnss_pr_err("FW down, ignoring fw_log_mode state: 0x%lx\n",
3461 			     priv->state);
3462 		return -EINVAL;
3463 	}
3464 
3465 	icnss_pr_dbg("FW log mode: %u\n", fw_log_mode);
3466 
3467 	ret = wlfw_ini_send_sync_msg(priv, fw_log_mode);
3468 	if (ret)
3469 		icnss_pr_err("Fail to send ini, ret = %d, fw_log_mode: %u\n",
3470 			     ret, fw_log_mode);
3471 	return ret;
3472 }
3473 EXPORT_SYMBOL(icnss_set_fw_log_mode);
3474 
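/*
 * Request that the SOC stay awake. If the wake reference count is already
 * non-zero only the count is bumped; otherwise a SOC wake request event is
 * posted to the wake message worker.
 */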
3475 int icnss_force_wake_request(struct device *dev)
3476 {
3477 	struct icnss_priv *priv;
3478 
3479 	if (!dev)
3480 		return -ENODEV;
3481 
3482 	priv = dev_get_drvdata(dev);
3483 
3484 	if (!priv) {
3485 		icnss_pr_err("Platform driver not initialized\n");
3486 		return -EINVAL;
3487 	}
3488 
3489 	if (test_bit(ICNSS_FW_DOWN, &priv->state) ||
3490 	    !test_bit(ICNSS_FW_READY, &priv->state)) {
3491 		icnss_pr_soc_wake("FW down, ignoring SOC Wake request state: 0x%lx\n",
3492 				  priv->state);
3493 		return -EINVAL;
3494 	}
3495 
3496 	if (atomic_inc_not_zero(&priv->soc_wake_ref_count)) {
3497 		icnss_pr_soc_wake("SOC already awake, Ref count: %d",
3498 				  atomic_read(&priv->soc_wake_ref_count));
3499 		return 0;
3500 	}
3501 
3502 	icnss_pr_soc_wake("Calling SOC Wake request");
3503 
3504 	icnss_soc_wake_event_post(priv, ICNSS_SOC_WAKE_REQUEST_EVENT,
3505 				  0, NULL);
3506 
3507 	return 0;
3508 }
3509 EXPORT_SYMBOL(icnss_force_wake_request);
3510 
3511 int icnss_force_wake_release(struct device *dev)
3512 {
3513 	struct icnss_priv *priv;
3514 
3515 	if (!dev)
3516 		return -ENODEV;
3517 
3518 	priv = dev_get_drvdata(dev);
3519 
3520 	if (!priv) {
3521 		icnss_pr_err("Platform driver not initialized\n");
3522 		return -EINVAL;
3523 	}
3524 
3525 	if (test_bit(ICNSS_FW_DOWN, &priv->state) ||
3526 	    !test_bit(ICNSS_FW_READY, &priv->state)) {
3527 		icnss_pr_soc_wake("FW down, ignoring SOC Wake release state: 0x%lx\n",
3528 				  priv->state);
3529 		return -EINVAL;
3530 	}
3531 
3532 	icnss_pr_soc_wake("Calling SOC Wake response");
3533 
3534 	if (atomic_read(&priv->soc_wake_ref_count) &&
3535 	    icnss_atomic_dec_if_greater_one(&priv->soc_wake_ref_count)) {
3536 		icnss_pr_soc_wake("SOC previous release pending, Ref count: %d",
3537 				  atomic_read(&priv->soc_wake_ref_count));
3538 		return 0;
3539 	}
3540 
3541 	icnss_soc_wake_event_post(priv, ICNSS_SOC_WAKE_RELEASE_EVENT,
3542 				  0, NULL);
3543 
3544 	return 0;
3545 }
3546 EXPORT_SYMBOL(icnss_force_wake_release);
3547 
3548 int icnss_is_device_awake(struct device *dev)
3549 {
3550 	struct icnss_priv *priv = dev_get_drvdata(dev);
3551 
3552 	if (!priv) {
3553 		icnss_pr_err("Platform driver not initialized\n");
3554 		return -EINVAL;
3555 	}
3556 
3557 	return atomic_read(&priv->soc_wake_ref_count);
3558 }
3559 EXPORT_SYMBOL(icnss_is_device_awake);
3560 
3561 int icnss_is_pci_ep_awake(struct device *dev)
3562 {
3563 	struct icnss_priv *priv = dev_get_drvdata(dev);
3564 
3565 	if (!priv) {
3566 		icnss_pr_err("Platform driver not initialized\n");
3567 		return -EINVAL;
3568 	}
3569 
3570 	if (!priv->mhi_state_info_va)
3571 		return -ENOMEM;
3572 
3573 	return ioread32(priv->mhi_state_info_va + ICNSS_PCI_EP_WAKE_OFFSET);
3574 }
3575 EXPORT_SYMBOL(icnss_is_pci_ep_awake);
3576 
3577 int icnss_athdiag_read(struct device *dev, uint32_t offset,
3578 		       uint32_t mem_type, uint32_t data_len,
3579 		       uint8_t *output)
3580 {
3581 	int ret = 0;
3582 	struct icnss_priv *priv = dev_get_drvdata(dev);
3583 
3584 	if (priv->magic != ICNSS_MAGIC) {
3585 		icnss_pr_err("Invalid drvdata for diag read: dev %pK, data %pK, magic 0x%x\n",
3586 			     dev, priv, priv->magic);
3587 		return -EINVAL;
3588 	}
3589 
3590 	if (!output || data_len == 0
3591 	    || data_len > WLFW_MAX_DATA_SIZE) {
3592 		icnss_pr_err("Invalid parameters for diag read: output %pK, data_len %u\n",
3593 			     output, data_len);
3594 		ret = -EINVAL;
3595 		goto out;
3596 	}
3597 
3598 	if (!test_bit(ICNSS_FW_READY, &priv->state) ||
3599 	    !test_bit(ICNSS_POWER_ON, &priv->state)) {
3600 		icnss_pr_err("Invalid state for diag read: 0x%lx\n",
3601 			     priv->state);
3602 		ret = -EINVAL;
3603 		goto out;
3604 	}
3605 
3606 	ret = wlfw_athdiag_read_send_sync_msg(priv, offset, mem_type,
3607 					      data_len, output);
3608 out:
3609 	return ret;
3610 }
3611 EXPORT_SYMBOL(icnss_athdiag_read);
3612 
3613 int icnss_athdiag_write(struct device *dev, uint32_t offset,
3614 			uint32_t mem_type, uint32_t data_len,
3615 			uint8_t *input)
3616 {
3617 	int ret = 0;
3618 	struct icnss_priv *priv = dev_get_drvdata(dev);
3619 
3620 	if (priv->magic != ICNSS_MAGIC) {
3621 		icnss_pr_err("Invalid drvdata for diag write: dev %pK, data %pK, magic 0x%x\n",
3622 			     dev, priv, priv->magic);
3623 		return -EINVAL;
3624 	}
3625 
3626 	if (!input || data_len == 0
3627 	    || data_len > WLFW_MAX_DATA_SIZE) {
3628 		icnss_pr_err("Invalid parameters for diag write: input %pK, data_len %u\n",
3629 			     input, data_len);
3630 		ret = -EINVAL;
3631 		goto out;
3632 	}
3633 
3634 	if (!test_bit(ICNSS_FW_READY, &priv->state) ||
3635 	    !test_bit(ICNSS_POWER_ON, &priv->state)) {
3636 		icnss_pr_err("Invalid state for diag write: 0x%lx\n",
3637 			     priv->state);
3638 		ret = -EINVAL;
3639 		goto out;
3640 	}
3641 
3642 	ret = wlfw_athdiag_write_send_sync_msg(priv, offset, mem_type,
3643 					       data_len, input);
3644 out:
3645 	return ret;
3646 }
3647 EXPORT_SYMBOL(icnss_athdiag_write);
3648 
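/*
 * Bring up WLAN firmware in the requested driver mode. Before the enable
 * message is sent this provisions the DMS NV MAC address when applicable;
 * on WCN6750 the WLAN_EN delay is stretched when the reported SoC
 * temperature is below WLAN_EN_TEMP_THRESHOLD, and on WCN6450 the hardware
 * is powered off across the enable message and powered back on afterwards.
 */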
3649 int icnss_wlan_enable(struct device *dev, struct icnss_wlan_enable_cfg *config,
3650 		      enum icnss_driver_mode mode,
3651 		      const char *host_version)
3652 {
3653 	struct icnss_priv *priv = dev_get_drvdata(dev);
3654 	int temp = 0, ret = 0;
3655 
3656 	if (test_bit(ICNSS_FW_DOWN, &priv->state) ||
3657 	    !test_bit(ICNSS_FW_READY, &priv->state)) {
3658 		icnss_pr_err("FW down, ignoring wlan_enable state: 0x%lx\n",
3659 			     priv->state);
3660 		return -EINVAL;
3661 	}
3662 
3663 	if (test_bit(ICNSS_MODE_ON, &priv->state)) {
3664 		icnss_pr_err("Already Mode on, ignoring wlan_enable state: 0x%lx\n",
3665 			     priv->state);
3666 		return -EINVAL;
3667 	}
3668 
3669 	if (priv->wpss_supported &&
3670 	    !priv->dms.nv_mac_not_prov && !priv->dms.mac_valid)
3671 		icnss_setup_dms_mac(priv);
3672 
3673 	if (priv->device_id == WCN6750_DEVICE_ID) {
3674 		if (!icnss_get_temperature(priv, &temp)) {
3675 			icnss_pr_dbg("Temperature: %d\n", temp);
3676 			if (temp < WLAN_EN_TEMP_THRESHOLD)
3677 				icnss_set_wlan_en_delay(priv);
3678 		}
3679 	}
3680 
3681 	if (priv->device_id == WCN6450_DEVICE_ID)
3682 		icnss_hw_power_off(priv);
3683 
3684 	ret = icnss_send_wlan_enable_to_fw(priv, config, mode, host_version);
3685 
3686 	if (priv->device_id == WCN6450_DEVICE_ID)
3687 		icnss_hw_power_on(priv);
3688 
3689 	return ret;
3690 }
3691 EXPORT_SYMBOL(icnss_wlan_enable);
3692 
3693 int icnss_wlan_disable(struct device *dev, enum icnss_driver_mode mode)
3694 {
3695 	struct icnss_priv *priv = dev_get_drvdata(dev);
3696 
3697 	if (test_bit(ICNSS_FW_DOWN, &priv->state)) {
3698 		icnss_pr_dbg("FW down, ignoring wlan_disable state: 0x%lx\n",
3699 			     priv->state);
3700 		return 0;
3701 	}
3702 
3703 	return icnss_send_wlan_disable_to_fw(priv);
3704 }
3705 EXPORT_SYMBOL(icnss_wlan_disable);
3706 
3707 bool icnss_is_qmi_disable(struct device *dev)
3708 {
3709 	return test_bit(SKIP_QMI, &penv->ctrl_params.quirks) ? true : false;
3710 }
3711 EXPORT_SYMBOL(icnss_is_qmi_disable);
3712 
3713 int icnss_get_ce_id(struct device *dev, int irq)
3714 {
3715 	int i;
3716 
3717 	if (!penv || !penv->pdev || !dev)
3718 		return -ENODEV;
3719 
3720 	for (i = 0; i < ICNSS_MAX_IRQ_REGISTRATIONS; i++) {
3721 		if (penv->ce_irqs[i] == irq)
3722 			return i;
3723 	}
3724 
3725 	icnss_pr_err("No matching CE id for irq %d\n", irq);
3726 
3727 	return -EINVAL;
3728 }
3729 EXPORT_SYMBOL(icnss_get_ce_id);
3730 
3731 int icnss_get_irq(struct device *dev, int ce_id)
3732 {
3733 	int irq;
3734 
3735 	if (!penv || !penv->pdev || !dev)
3736 		return -ENODEV;
3737 
3738 	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS)
3739 		return -EINVAL;
3740 
3741 	irq = penv->ce_irqs[ce_id];
3742 
3743 	return irq;
3744 }
3745 EXPORT_SYMBOL(icnss_get_irq);
3746 
3747 struct iommu_domain *icnss_smmu_get_domain(struct device *dev)
3748 {
3749 	struct icnss_priv *priv = dev_get_drvdata(dev);
3750 
3751 	if (!priv) {
3752 		icnss_pr_err("Invalid drvdata: dev %pK\n", dev);
3753 		return NULL;
3754 	}
3755 	return priv->iommu_domain;
3756 }
3757 EXPORT_SYMBOL(icnss_smmu_get_domain);
3758 
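/*
 * Wrapper around iommu_map(): newer kernels add a gfp_t argument to the
 * call, so the correct signature is selected at the 6.2 boundary used by
 * this driver.
 */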
3759 #if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
3760 int icnss_iommu_map(struct iommu_domain *domain,
3761 		    unsigned long iova, phys_addr_t paddr, size_t size, int prot)
3762 {
3763 	return iommu_map(domain, iova, paddr, size, prot);
3764 }
3765 #else
3766 int icnss_iommu_map(struct iommu_domain *domain,
3767 		    unsigned long iova, phys_addr_t paddr, size_t size, int prot)
3768 {
3769 	return iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
3770 }
3771 #endif
3772 
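/*
 * Map a physical region into the device's IPA IOVA window. The mapping is
 * page aligned, carved out of the range parsed from the "smmu_iova_ipa"
 * resource, and marked IOMMU_CACHE when the node is dma-coherent. On
 * success *iova_addr carries the IOVA corresponding to the original
 * (unaligned) paddr and the smmu_iova_ipa_current cursor is advanced.
 */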
3773 int icnss_smmu_map(struct device *dev,
3774 		   phys_addr_t paddr, uint32_t *iova_addr, size_t size)
3775 {
3776 	struct icnss_priv *priv = dev_get_drvdata(dev);
3777 	int flag = IOMMU_READ | IOMMU_WRITE;
3778 	bool dma_coherent = false;
3779 	unsigned long iova;
3780 	int prop_len = 0;
3781 	size_t len;
3782 	int ret = 0;
3783 
3784 	if (!priv) {
3785 		icnss_pr_err("Invalid drvdata: dev %pK, data %pK\n",
3786 			     dev, priv);
3787 		return -EINVAL;
3788 	}
3789 
3790 	if (!iova_addr) {
3791 		icnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n",
3792 			     &paddr, size);
3793 		return -EINVAL;
3794 	}
3795 
3796 	len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
3797 	iova = roundup(priv->smmu_iova_ipa_current, PAGE_SIZE);
3798 
3799 	if (of_get_property(dev->of_node, "qcom,iommu-geometry", &prop_len) &&
3800 	    iova >= priv->smmu_iova_ipa_start + priv->smmu_iova_ipa_len) {
3801 		icnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
3802 			     iova,
3803 			     &priv->smmu_iova_ipa_start,
3804 			     priv->smmu_iova_ipa_len);
3805 		return -ENOMEM;
3806 	}
3807 
3808 	dma_coherent = of_property_read_bool(dev->of_node, "dma-coherent");
3809 	icnss_pr_dbg("dma-coherent is %s\n",
3810 		     dma_coherent ? "enabled" : "disabled");
3811 	if (dma_coherent)
3812 		flag |= IOMMU_CACHE;
3813 
3814 	icnss_pr_dbg("IOMMU Map: iova %lx, len %zu\n", iova, len);
3815 
3816 	ret = icnss_iommu_map(priv->iommu_domain, iova,
3817 			rounddown(paddr, PAGE_SIZE), len,
3818 			flag);
3819 	if (ret) {
3820 		icnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
3821 		return ret;
3822 	}
3823 
3824 	priv->smmu_iova_ipa_current = iova + len;
3825 	*iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE));
3826 
3827 	icnss_pr_dbg("IOVA addr mapped to physical addr %x\n", *iova_addr);
3828 	return 0;
3829 }
3830 EXPORT_SYMBOL(icnss_smmu_map);
3831 
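/*
 * Undo a mapping created by icnss_smmu_map(): the IOVA is rounded back to
 * its page boundary, the full page-aligned length is unmapped, and the
 * allocation cursor is rewound to the freed IOVA.
 */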
3832 int icnss_smmu_unmap(struct device *dev,
3833 		     uint32_t iova_addr, size_t size)
3834 {
3835 	struct icnss_priv *priv = dev_get_drvdata(dev);
3836 	unsigned long iova;
3837 	size_t len, unmapped_len;
3838 
3839 	if (!priv) {
3840 		icnss_pr_err("Invalid drvdata: dev %pK, data %pK\n",
3841 			     dev, priv);
3842 		return -EINVAL;
3843 	}
3844 
3845 	if (!iova_addr) {
3846 		icnss_pr_err("iova_addr is NULL, size %zu\n",
3847 			     size);
3848 		return -EINVAL;
3849 	}
3850 
3851 	len = roundup(size + iova_addr - rounddown(iova_addr, PAGE_SIZE),
3852 		      PAGE_SIZE);
3853 	iova = rounddown(iova_addr, PAGE_SIZE);
3854 
3855 	if (iova >= priv->smmu_iova_ipa_start + priv->smmu_iova_ipa_len) {
3856 		icnss_pr_err("Out of IOVA space during unmap, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
3857 			     iova,
3858 			     &priv->smmu_iova_ipa_start,
3859 			     priv->smmu_iova_ipa_len);
3860 		return -ENOMEM;
3861 	}
3862 
3863 	icnss_pr_dbg("IOMMU Unmap: iova %lx, len %zu\n",
3864 		     iova, len);
3865 
3866 	unmapped_len = iommu_unmap(priv->iommu_domain, iova, len);
3867 	if (unmapped_len != len) {
3868 		icnss_pr_err("Failed to unmap, %zu\n", unmapped_len);
3869 		return -EINVAL;
3870 	}
3871 
3872 	priv->smmu_iova_ipa_current = iova;
3873 	return 0;
3874 }
3875 EXPORT_SYMBOL(icnss_smmu_unmap);
3876 
3877 unsigned int icnss_socinfo_get_serial_number(struct device *dev)
3878 {
3879 	return socinfo_get_serial_number();
3880 }
3881 EXPORT_SYMBOL(icnss_socinfo_get_serial_number);
3882 
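/*
 * Let the WLAN host driver force a firmware recovery. On WPSS based
 * targets this requests a root PD restart over SMP2P; otherwise it goes
 * through the PDR framework, provided PD restart is registered and no
 * recovery is already in progress.
 */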
3883 int icnss_trigger_recovery(struct device *dev)
3884 {
3885 	int ret = 0;
3886 	struct icnss_priv *priv = dev_get_drvdata(dev);
3887 
3888 	if (priv->magic != ICNSS_MAGIC) {
3889 		icnss_pr_err("Invalid drvdata: magic 0x%x\n", priv->magic);
3890 		ret = -EINVAL;
3891 		goto out;
3892 	}
3893 
3894 	if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
3895 		icnss_pr_err("PD recovery already in progress: state: 0x%lx\n",
3896 			     priv->state);
3897 		ret = -EPERM;
3898 		goto out;
3899 	}
3900 
3901 	if (priv->wpss_supported) {
3902 		icnss_pr_vdbg("Initiate Root PD restart");
3903 		ret = icnss_send_smp2p(priv, ICNSS_TRIGGER_SSR,
3904 				       ICNSS_SMP2P_OUT_POWER_SAVE);
3905 		if (!ret)
3906 			set_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
3907 		return ret;
3908 	}
3909 
3910 	if (!test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
3911 		icnss_pr_err("PD restart not enabled to trigger recovery: state: 0x%lx\n",
3912 			     priv->state);
3913 		ret = -EOPNOTSUPP;
3914 		goto out;
3915 	}
3916 
3917 	icnss_pr_warn("Initiate PD restart at WLAN FW, state: 0x%lx\n",
3918 		      priv->state);
3919 
3920 	ret = pdr_restart_pd(priv->pdr_handle, priv->pdr_service);
3921 
3922 	if (!ret)
3923 		set_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
3924 
3925 out:
3926 	return ret;
3927 }
3928 EXPORT_SYMBOL(icnss_trigger_recovery);
3929 
3930 int icnss_idle_shutdown(struct device *dev)
3931 {
3932 	struct icnss_priv *priv = dev_get_drvdata(dev);
3933 
3934 	if (!priv) {
3935 		icnss_pr_err("Invalid drvdata: dev %pK", dev);
3936 		return -EINVAL;
3937 	}
3938 
3939 	if (priv->is_ssr || test_bit(ICNSS_PDR, &priv->state) ||
3940 	    test_bit(ICNSS_REJUVENATE, &priv->state)) {
3941 		icnss_pr_err("SSR/PDR is already in-progress during idle shutdown\n");
3942 		return -EBUSY;
3943 	}
3944 
3945 	return icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_IDLE_SHUTDOWN,
3946 					ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
3947 }
3948 EXPORT_SYMBOL(icnss_idle_shutdown);
3949 
3950 int icnss_idle_restart(struct device *dev)
3951 {
3952 	struct icnss_priv *priv = dev_get_drvdata(dev);
3953 
3954 	if (!priv) {
3955 		icnss_pr_err("Invalid drvdata: dev %pK", dev);
3956 		return -EINVAL;
3957 	}
3958 
3959 	if (priv->is_ssr || test_bit(ICNSS_PDR, &priv->state) ||
3960 	    test_bit(ICNSS_REJUVENATE, &priv->state)) {
3961 		icnss_pr_err("SSR/PDR is already in-progress during idle restart\n");
3962 		return -EBUSY;
3963 	}
3964 
3965 	return icnss_driver_event_post(priv, ICNSS_DRIVER_EVENT_IDLE_RESTART,
3966 					ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
3967 }
3968 EXPORT_SYMBOL(icnss_idle_restart);
3969 
3970 int icnss_exit_power_save(struct device *dev)
3971 {
3972 	struct icnss_priv *priv = dev_get_drvdata(dev);
3973 
3974 	icnss_pr_vdbg("Calling Exit Power Save\n");
3975 
3976 	if (test_bit(ICNSS_PD_RESTART, &priv->state) ||
3977 	    !test_bit(ICNSS_MODE_ON, &priv->state))
3978 		return 0;
3979 
3980 	return icnss_send_smp2p(priv, ICNSS_POWER_SAVE_EXIT,
3981 				ICNSS_SMP2P_OUT_POWER_SAVE);
3982 }
3983 EXPORT_SYMBOL(icnss_exit_power_save);
3984 
3985 int icnss_prevent_l1(struct device *dev)
3986 {
3987 	struct icnss_priv *priv = dev_get_drvdata(dev);
3988 
3989 	if (test_bit(ICNSS_PD_RESTART, &priv->state) ||
3990 	    !test_bit(ICNSS_MODE_ON, &priv->state))
3991 		return 0;
3992 
3993 	return icnss_send_smp2p(priv, ICNSS_PCI_EP_POWER_SAVE_EXIT,
3994 				ICNSS_SMP2P_OUT_EP_POWER_SAVE);
3995 }
3996 EXPORT_SYMBOL(icnss_prevent_l1);
3997 
3998 void icnss_allow_l1(struct device *dev)
3999 {
4000 	struct icnss_priv *priv = dev_get_drvdata(dev);
4001 
4002 	if (test_bit(ICNSS_PD_RESTART, &priv->state) ||
4003 	    !test_bit(ICNSS_MODE_ON, &priv->state))
4004 		return;
4005 
4006 	icnss_send_smp2p(priv, ICNSS_PCI_EP_POWER_SAVE_ENTER,
4007 			 ICNSS_SMP2P_OUT_EP_POWER_SAVE);
4008 }
4009 EXPORT_SYMBOL(icnss_allow_l1);
4010 
4011 void icnss_allow_recursive_recovery(struct device *dev)
4012 {
4013 	struct icnss_priv *priv = dev_get_drvdata(dev);
4014 
4015 	priv->allow_recursive_recovery = true;
4016 
4017 	icnss_pr_info("Recursive recovery allowed for WLAN\n");
4018 }
4019 
4020 void icnss_disallow_recursive_recovery(struct device *dev)
4021 {
4022 	struct icnss_priv *priv = dev_get_drvdata(dev);
4023 
4024 	priv->allow_recursive_recovery = false;
4025 
4026 	icnss_pr_info("Recursive recovery disallowed for WLAN\n");
4027 }
4028 
4029 static int icnss_create_shutdown_sysfs(struct icnss_priv *priv)
4030 {
4031 	struct kobject *icnss_kobject;
4032 	int ret = 0;
4033 
4034 	atomic_set(&priv->is_shutdown, false);
4035 
4036 	icnss_kobject = kobject_create_and_add("shutdown_wlan", kernel_kobj);
4037 	if (!icnss_kobject) {
4038 		icnss_pr_err("Unable to create shutdown_wlan kernel object");
4039 		return -EINVAL;
4040 	}
4041 
4042 	priv->icnss_kobject = icnss_kobject;
4043 
4044 	ret = sysfs_create_file(icnss_kobject, &icnss_sysfs_attribute.attr);
4045 	if (ret) {
4046 		icnss_pr_err("Unable to create icnss sysfs file err:%d", ret);
4047 		return ret;
4048 	}
4049 
4050 	return ret;
4051 }
4052 
4053 static void icnss_destroy_shutdown_sysfs(struct icnss_priv *priv)
4054 {
4055 	struct kobject *icnss_kobject;
4056 
4057 	icnss_kobject = priv->icnss_kobject;
4058 	if (icnss_kobject)
4059 		kobject_put(icnss_kobject);
4060 }
4061 
4062 static ssize_t qdss_tr_start_store(struct device *dev,
4063 				   struct device_attribute *attr,
4064 				   const char *buf, size_t count)
4065 {
4066 	struct icnss_priv *priv = dev_get_drvdata(dev);
4067 
4068 	wlfw_qdss_trace_start(priv);
4069 	icnss_pr_dbg("Received QDSS start command\n");
4070 	return count;
4071 }
4072 
4073 static ssize_t qdss_tr_stop_store(struct device *dev,
4074 				  struct device_attribute *attr,
4075 				  const char *user_buf, size_t count)
4076 {
4077 	struct icnss_priv *priv = dev_get_drvdata(dev);
4078 	u32 option = 0;
4079 
4080 	if (sscanf(user_buf, "%du", &option) != 1)
4081 		return -EINVAL;
4082 
4083 	wlfw_qdss_trace_stop(priv, option);
4084 	icnss_pr_dbg("Received QDSS stop command\n");
4085 	return count;
4086 }
4087 
4088 static ssize_t qdss_conf_download_store(struct device *dev,
4089 					struct device_attribute *attr,
4090 					const char *buf, size_t count)
4091 {
4092 	struct icnss_priv *priv = dev_get_drvdata(dev);
4093 
4094 	icnss_wlfw_qdss_dnld_send_sync(priv);
4095 	icnss_pr_dbg("Received QDSS download config command\n");
4096 	return count;
4097 }
4098 
4099 static ssize_t hw_trc_override_store(struct device *dev,
4100 				     struct device_attribute *attr,
4101 				     const char *buf, size_t count)
4102 {
4103 	struct icnss_priv *priv = dev_get_drvdata(dev);
4104 	int tmp = 0;
4105 
4106 	if (sscanf(buf, "%du", &tmp) != 1)
4107 		return -EINVAL;
4108 
4109 	priv->hw_trc_override = tmp;
4110 	icnss_pr_dbg("Received QDSS hw_trc_override indication\n");
4111 	return count;
4112 }
4113 
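/*
 * Worker behind the wpss_boot sysfs node: resolve the "qcom,rproc-handle"
 * phandle and boot the WPSS remoteproc, dropping the rproc reference again
 * if the boot fails.
 */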
4114 static void icnss_wpss_load(struct work_struct *wpss_load_work)
4115 {
4116 	struct icnss_priv *priv = icnss_get_plat_priv();
4117 	phandle rproc_phandle;
4118 	int ret;
4119 
4120 	if (of_property_read_u32(priv->pdev->dev.of_node, "qcom,rproc-handle",
4121 				 &rproc_phandle)) {
4122 		icnss_pr_err("error reading rproc phandle\n");
4123 		return;
4124 	}
4125 
4126 	priv->rproc = rproc_get_by_phandle(rproc_phandle);
4127 	if (IS_ERR_OR_NULL(priv->rproc)) {
4128 		icnss_pr_err("rproc not found");
4129 		return;
4130 	}
4131 
4132 	ret = rproc_boot(priv->rproc);
4133 	if (ret) {
4134 		icnss_pr_err("Failed to boot wpss rproc, ret: %d", ret);
4135 		rproc_put(priv->rproc);
4136 	}
4137 }
4138 
4139 static ssize_t wpss_boot_store(struct device *dev,
4140 			       struct device_attribute *attr,
4141 			       const char *buf, size_t count)
4142 {
4143 	struct icnss_priv *priv = dev_get_drvdata(dev);
4144 	int wpss_rproc = 0;
4145 
4146 	if (!priv->wpss_supported && !priv->rproc_fw_download)
4147 		return count;
4148 
4149 	if (sscanf(buf, "%du", &wpss_rproc) != 1) {
4150 		icnss_pr_err("Failed to read wpss rproc info");
4151 		return -EINVAL;
4152 	}
4153 
4154 	icnss_pr_dbg("WPSS Remote Processor: %s", wpss_rproc ? "GET" : "PUT");
4155 
4156 	if (wpss_rproc == 1)
4157 		schedule_work(&wpss_loader);
4158 	else if (wpss_rproc == 0)
4159 		icnss_wpss_unload(priv);
4160 
4161 	return count;
4162 }
4163 
4164 static ssize_t wlan_en_delay_store(struct device *dev,
4165 			       struct device_attribute *attr,
4166 			       const char *buf, size_t count)
4167 {
4168 	struct icnss_priv *priv = dev_get_drvdata(dev);
4169 	uint32_t wlan_en_delay  = 0;
4170 
4171 	if (priv->device_id == ADRASTEA_DEVICE_ID)
4172 		return count;
4173 
4174 	if (sscanf(buf, "%du", &wlan_en_delay) != 1) {
4175 		icnss_pr_err("Failed to read wlan_en_delay");
4176 		return -EINVAL;
4177 	}
4178 
4179 	icnss_pr_dbg("WLAN_EN delay: %dms", wlan_en_delay);
4180 	priv->wlan_en_delay_ms_user = wlan_en_delay;
4181 
4182 	return count;
4183 }
4184 
4185 static DEVICE_ATTR_WO(qdss_tr_start);
4186 static DEVICE_ATTR_WO(qdss_tr_stop);
4187 static DEVICE_ATTR_WO(qdss_conf_download);
4188 static DEVICE_ATTR_WO(hw_trc_override);
4189 static DEVICE_ATTR_WO(wpss_boot);
4190 static DEVICE_ATTR_WO(wlan_en_delay);
4191 
4192 static struct attribute *icnss_attrs[] = {
4193 	&dev_attr_qdss_tr_start.attr,
4194 	&dev_attr_qdss_tr_stop.attr,
4195 	&dev_attr_qdss_conf_download.attr,
4196 	&dev_attr_hw_trc_override.attr,
4197 	&dev_attr_wpss_boot.attr,
4198 	&dev_attr_wlan_en_delay.attr,
4199 	NULL,
4200 };
4201 
4202 static struct attribute_group icnss_attr_group = {
4203 	.attrs = icnss_attrs,
4204 };
4205 
4206 static int icnss_create_sysfs_link(struct icnss_priv *priv)
4207 {
4208 	struct device *dev = &priv->pdev->dev;
4209 	int ret;
4210 
4211 	ret = sysfs_create_link(kernel_kobj, &dev->kobj, "icnss");
4212 	if (ret) {
4213 		icnss_pr_err("Failed to create icnss link, err = %d\n",
4214 			     ret);
4215 		goto out;
4216 	}
4217 
4218 	return 0;
4219 out:
4220 	return ret;
4221 }
4222 
4223 static void icnss_remove_sysfs_link(struct icnss_priv *priv)
4224 {
4225 	sysfs_remove_link(kernel_kobj, "icnss");
4226 }
4227 
4228 
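/*
 * Manual counterpart of devm_device_remove_group() used on kernels >= 6.2:
 * the attribute-group devres is matched and released directly through
 * devres_release(). (Presumably the helper changed or went away at that
 * boundary, hence the version split; the older path below simply calls it.)
 */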
4229 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 2, 0))
4230 union icnss_device_group_devres {
4231 	const struct attribute_group *group;
4232 };
4233 
4234 static void devm_icnss_group_remove(struct device *dev, void *res)
4235 {
4236 	union icnss_device_group_devres *devres = res;
4237 	const struct attribute_group *group = devres->group;
4238 
4239 	icnss_pr_dbg("%s: removing group %p\n", __func__, group);
4240 	sysfs_remove_group(&dev->kobj, group);
4241 }
4242 
4243 static int devm_icnss_group_match(struct device *dev, void *res, void *data)
4244 {
4245 	return ((union icnss_device_group_devres *)res)->group == data;
4246 }
4247 
4248 static void icnss_devm_device_remove_group(struct icnss_priv *priv)
4249 {
4250 	WARN_ON(devres_release(&priv->pdev->dev,
4251 			       devm_icnss_group_remove, devm_icnss_group_match,
4252 			       (void *)&icnss_attr_group));
4253 }
4254 #else
4255 static void icnss_devm_device_remove_group(struct icnss_priv *priv)
4256 {
4257 	devm_device_remove_group(&priv->pdev->dev, &icnss_attr_group);
4258 }
4259 #endif
4260 
4261 static int icnss_sysfs_create(struct icnss_priv *priv)
4262 {
4263 	int ret = 0;
4264 
4265 	ret = devm_device_add_group(&priv->pdev->dev,
4266 				    &icnss_attr_group);
4267 	if (ret) {
4268 		icnss_pr_err("Failed to create icnss device group, err = %d\n",
4269 			     ret);
4270 		goto out;
4271 	}
4272 
4273 	icnss_create_sysfs_link(priv);
4274 
4275 	ret = icnss_create_shutdown_sysfs(priv);
4276 	if (ret)
4277 		goto remove_icnss_group;
4278 
4279 	return 0;
4280 remove_icnss_group:
4281 	icnss_devm_device_remove_group(priv);
4282 out:
4283 	return ret;
4284 }
4285 
4286 static void icnss_sysfs_destroy(struct icnss_priv *priv)
4287 {
4288 	icnss_destroy_shutdown_sysfs(priv);
4289 	icnss_remove_sysfs_link(priv);
4290 	icnss_devm_device_remove_group(priv);
4291 }
4292 
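/*
 * Acquire the platform resources described in DT: regulators, clocks and,
 * depending on the device, either the Adrastea register space plus CE
 * interrupts or the WCN6750/WCN6450 MSI address, MSI base data and SRNG
 * interrupt vectors. Resources acquired so far are released on error.
 */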
4293 static int icnss_resource_parse(struct icnss_priv *priv)
4294 {
4295 	int ret = 0, i = 0, irq = 0;
4296 	struct platform_device *pdev = priv->pdev;
4297 	struct device *dev = &pdev->dev;
4298 	struct resource *res;
4299 	u32 int_prop;
4300 
4301 	ret = icnss_get_vreg(priv);
4302 	if (ret) {
4303 		icnss_pr_err("Failed to get vreg, err = %d\n", ret);
4304 		goto out;
4305 	}
4306 
4307 	ret = icnss_get_clk(priv);
4308 	if (ret) {
4309 		icnss_pr_err("Failed to get clocks, err = %d\n", ret);
4310 		goto put_vreg;
4311 	}
4312 
4313 	if (of_property_read_bool(pdev->dev.of_node, "qcom,psf-supported")) {
4314 		ret = icnss_get_psf_info(priv);
4315 		if (ret < 0)
4316 			goto put_clk;
4317 		priv->psf_supported = true;
4318 	}
4319 
4320 	if (priv->device_id == ADRASTEA_DEVICE_ID) {
4321 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
4322 						   "membase");
4323 		if (!res) {
4324 			icnss_pr_err("Memory base not found in DT\n");
4325 			ret = -EINVAL;
4326 			goto put_clk;
4327 		}
4328 
4329 		priv->mem_base_pa = res->start;
4330 		priv->mem_base_va = devm_ioremap(dev, priv->mem_base_pa,
4331 						 resource_size(res));
4332 		if (!priv->mem_base_va) {
4333 			icnss_pr_err("Memory base ioremap failed: phy addr: %pa\n",
4334 				     &priv->mem_base_pa);
4335 			ret = -EINVAL;
4336 			goto put_clk;
4337 		}
4338 		icnss_pr_dbg("MEM_BASE pa: %pa, va: 0x%pK\n",
4339 			     &priv->mem_base_pa,
4340 			     priv->mem_base_va);
4341 
4342 		for (i = 0; i < ICNSS_MAX_IRQ_REGISTRATIONS; i++) {
4343 			irq = platform_get_irq(pdev, i);
4344 			if (irq < 0) {
4345 				icnss_pr_err("Fail to get IRQ-%d\n", i);
4346 				ret = -ENODEV;
4347 				goto put_clk;
4348 			} else {
4349 				priv->ce_irqs[i] = irq;
4350 			}
4351 		}
4352 
4353 		if (of_property_read_bool(pdev->dev.of_node,
4354 					  "qcom,is_low_power")) {
4355 			priv->low_power_support = true;
4356 			icnss_pr_dbg("Deep Sleep/Hibernate mode supported\n");
4357 		}
4358 
4359 		if (of_property_read_u32(pdev->dev.of_node, "qcom,rf_subtype",
4360 					 &priv->rf_subtype) == 0) {
4361 			priv->is_rf_subtype_valid = true;
4362 			icnss_pr_dbg("RF subtype 0x%x\n", priv->rf_subtype);
4363 		}
4364 
4365 		if (of_property_read_bool(pdev->dev.of_node,
4366 					  "qcom,is_slate_rfa")) {
4367 			priv->is_slate_rfa = true;
4368 			icnss_pr_err("SLATE rfa is enabled\n");
4369 		}
4370 	} else if (priv->device_id == WCN6750_DEVICE_ID ||
4371 		   priv->device_id == WCN6450_DEVICE_ID) {
4372 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
4373 						   "msi_addr");
4374 		if (!res) {
4375 			icnss_pr_err("MSI address not found in DT\n");
4376 			ret = -EINVAL;
4377 			goto put_clk;
4378 		}
4379 
4380 		priv->msi_addr_pa = res->start;
4381 		priv->msi_addr_iova = dma_map_resource(dev, priv->msi_addr_pa,
4382 						       PAGE_SIZE,
4383 						       DMA_FROM_DEVICE, 0);
4384 		if (dma_mapping_error(dev, priv->msi_addr_iova)) {
4385 			icnss_pr_err("MSI: failed to map msi address\n");
4386 			priv->msi_addr_iova = 0;
4387 			ret = -ENOMEM;
4388 			goto put_clk;
4389 		}
4390 		icnss_pr_dbg("MSI Addr pa: %pa, iova: 0x%pK\n",
4391 			     &priv->msi_addr_pa,
4392 			     priv->msi_addr_iova);
4393 
4394 		ret = of_property_read_u32_index(dev->of_node,
4395 						 "interrupts",
4396 						 1,
4397 						 &int_prop);
4398 		if (ret) {
4399 			icnss_pr_dbg("Read interrupt prop failed");
4400 			goto put_clk;
4401 		}
4402 
4403 		priv->msi_base_data = int_prop + 32;
4404 		icnss_pr_dbg("MSI Base Data: %d, IRQ Index: %d\n",
4405 			     priv->msi_base_data, int_prop);
4406 
4407 		icnss_get_msi_assignment(priv);
4408 		for (i = 0; i < priv->msi_config->total_vectors; i++) {
4409 			irq = platform_get_irq(priv->pdev, i);
4410 			if (irq < 0) {
4411 				icnss_pr_err("Fail to get IRQ-%d\n", i);
4412 				ret = -ENODEV;
4413 				goto put_clk;
4414 			} else {
4415 				priv->srng_irqs[i] = irq;
4416 			}
4417 		}
4418 	}
4419 
4420 	return 0;
4421 
4422 put_clk:
4423 	icnss_put_clk(priv);
4424 put_vreg:
4425 	icnss_put_vreg(priv);
4426 out:
4427 	return ret;
4428 }
4429 
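/*
 * Set up the MSA region: either remap the fixed carve-out referenced by
 * "qcom,wlan-msa-fixed-region", or fall back to a DMA-coherent allocation
 * of "qcom,wlan-msa-memory" bytes.
 */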
4430 static int icnss_msa_dt_parse(struct icnss_priv *priv)
4431 {
4432 	int ret = 0;
4433 	struct platform_device *pdev = priv->pdev;
4434 	struct device *dev = &pdev->dev;
4435 	struct device_node *np = NULL;
4436 	u64 prop_size = 0;
4437 	const __be32 *addrp = NULL;
4438 
4439 	np = of_parse_phandle(dev->of_node,
4440 			      "qcom,wlan-msa-fixed-region", 0);
4441 	if (np) {
4442 		addrp = of_get_address(np, 0, &prop_size, NULL);
4443 		if (!addrp) {
4444 			icnss_pr_err("Failed to get assigned-addresses or property\n");
4445 			ret = -EINVAL;
4446 			of_node_put(np);
4447 			goto out;
4448 		}
4449 
4450 		priv->msa_pa = of_translate_address(np, addrp);
4451 		if (priv->msa_pa == OF_BAD_ADDR) {
4452 			icnss_pr_err("Failed to translate MSA PA from device-tree\n");
4453 			ret = -EINVAL;
4454 			of_node_put(np);
4455 			goto out;
4456 		}
4457 
4458 		of_node_put(np);
4459 
4460 		priv->msa_va = memremap(priv->msa_pa,
4461 					(unsigned long)prop_size, MEMREMAP_WT);
4462 		if (!priv->msa_va) {
4463 			icnss_pr_err("MSA PA ioremap failed: phy addr: %pa\n",
4464 				     &priv->msa_pa);
4465 			ret = -EINVAL;
4466 			goto out;
4467 		}
4468 		priv->msa_mem_size = prop_size;
4469 	} else {
4470 		ret = of_property_read_u32(dev->of_node, "qcom,wlan-msa-memory",
4471 					   &priv->msa_mem_size);
4472 		if (ret || priv->msa_mem_size == 0) {
4473 			icnss_pr_err("Fail to get MSA Memory Size: %u ret: %d\n",
4474 				     priv->msa_mem_size, ret);
4475 			goto out;
4476 		}
4477 
4478 		priv->msa_va = dmam_alloc_coherent(&pdev->dev,
4479 				priv->msa_mem_size, &priv->msa_pa, GFP_KERNEL);
4480 
4481 		if (!priv->msa_va) {
4482 			icnss_pr_err("DMA alloc failed for MSA\n");
4483 			ret = -ENOMEM;
4484 			goto out;
4485 		}
4486 	}
4487 
4488 	icnss_pr_dbg("MSA pa: %pa, MSA va: 0x%pK MSA Memory Size: 0x%x\n",
4489 		     &priv->msa_pa, (void *)priv->msa_va, priv->msa_mem_size);
4490 
4491 	priv->use_prefix_path = of_property_read_bool(priv->pdev->dev.of_node,
4492 						      "qcom,fw-prefix");
4493 	return 0;
4494 
4495 out:
4496 	return ret;
4497 }
4498 
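/*
 * SMMU fault handler registered for WCN6750/WCN6450: report the faulting
 * IOVA, notify the host driver that firmware is effectively down, kick off
 * recovery, and return -ENOSYS so the IOMMU layer still prints its default
 * fault report.
 */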
4499 static int icnss_smmu_fault_handler(struct iommu_domain *domain,
4500 				    struct device *dev, unsigned long iova,
4501 				    int flags, void *handler_token)
4502 {
4503 	struct icnss_priv *priv = handler_token;
4504 	struct icnss_uevent_fw_down_data fw_down_data = {0};
4505 
4506 	icnss_fatal_err("SMMU fault happened with IOVA 0x%lx\n", iova);
4507 
4508 	if (!priv) {
4509 		icnss_pr_err("priv is NULL\n");
4510 		return -ENODEV;
4511 	}
4512 
4513 	if (test_bit(ICNSS_FW_READY, &priv->state)) {
4514 		fw_down_data.crashed = true;
4515 		icnss_call_driver_uevent(priv, ICNSS_UEVENT_SMMU_FAULT,
4516 					 &fw_down_data);
4517 		icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN,
4518 					 &fw_down_data);
4519 	}
4520 
4521 	icnss_trigger_recovery(&priv->pdev->dev);
4522 
4523 	/* IOMMU driver requires -ENOSYS return value to print debug info. */
4524 	return -ENOSYS;
4525 }
4526 
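/*
 * Parse the SMMU configuration: the "qcom,iommu-dma-addr-pool" IOVA window,
 * whether S1 translation ("fastmap") is in use, and the separate
 * "smmu_iova_ipa" window handed out by icnss_smmu_map(). The fault handler
 * above is installed only for WCN6750/WCN6450 with S1 enabled.
 */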
4527 static int icnss_smmu_dt_parse(struct icnss_priv *priv)
4528 {
4529 	int ret = 0;
4530 	struct platform_device *pdev = priv->pdev;
4531 	struct device *dev = &pdev->dev;
4532 	const char *iommu_dma_type;
4533 	struct resource *res;
4534 	u32 addr_win[2];
4535 
4536 	ret = of_property_read_u32_array(dev->of_node,
4537 					 "qcom,iommu-dma-addr-pool",
4538 					 addr_win,
4539 					 ARRAY_SIZE(addr_win));
4540 
4541 	if (ret) {
4542 		icnss_pr_err("SMMU IOVA base not found\n");
4543 	} else {
4544 		priv->smmu_iova_start = addr_win[0];
4545 		priv->smmu_iova_len = addr_win[1];
4546 		icnss_pr_dbg("SMMU IOVA start: %pa, len: %zx\n",
4547 			     &priv->smmu_iova_start,
4548 			     priv->smmu_iova_len);
4549 
4550 		priv->iommu_domain =
4551 			iommu_get_domain_for_dev(&pdev->dev);
4552 
4553 		ret = of_property_read_string(dev->of_node, "qcom,iommu-dma",
4554 					      &iommu_dma_type);
4555 		if (!ret && !strcmp("fastmap", iommu_dma_type)) {
4556 			icnss_pr_dbg("SMMU S1 stage enabled\n");
4557 			priv->smmu_s1_enable = true;
4558 			if (priv->device_id == WCN6750_DEVICE_ID ||
4559 			    priv->device_id == WCN6450_DEVICE_ID)
4560 				iommu_set_fault_handler(priv->iommu_domain,
4561 						icnss_smmu_fault_handler,
4562 						priv);
4563 		}
4564 
4565 		res = platform_get_resource_byname(pdev,
4566 						   IORESOURCE_MEM,
4567 						   "smmu_iova_ipa");
4568 		if (!res) {
4569 			icnss_pr_err("SMMU IOVA IPA not found\n");
4570 		} else {
4571 			priv->smmu_iova_ipa_start = res->start;
4572 			priv->smmu_iova_ipa_current = res->start;
4573 			priv->smmu_iova_ipa_len = resource_size(res);
4574 			icnss_pr_dbg("SMMU IOVA IPA start: %pa, len: %zx\n",
4575 				     &priv->smmu_iova_ipa_start,
4576 				     priv->smmu_iova_ipa_len);
4577 		}
4578 	}
4579 
4580 	return 0;
4581 }
4582 
4583 int icnss_get_iova(struct icnss_priv *priv, u64 *addr, u64 *size)
4584 {
4585 	if (!priv)
4586 		return -ENODEV;
4587 
4588 	if (!priv->smmu_iova_len)
4589 		return -EINVAL;
4590 
4591 	*addr = priv->smmu_iova_start;
4592 	*size = priv->smmu_iova_len;
4593 
4594 	return 0;
4595 }
4596 
4597 int icnss_get_iova_ipa(struct icnss_priv *priv, u64 *addr, u64 *size)
4598 {
4599 	if (!priv)
4600 		return -ENODEV;
4601 
4602 	if (!priv->smmu_iova_ipa_len)
4603 		return -EINVAL;
4604 
4605 	*addr = priv->smmu_iova_ipa_start;
4606 	*size = priv->smmu_iova_ipa_len;
4607 
4608 	return 0;
4609 }
4610 
4611 void icnss_add_fw_prefix_name(struct icnss_priv *priv, char *prefix_name,
4612 			      char *name)
4613 {
4614 	if (!priv)
4615 		return;
4616 
4617 	if (!priv->use_prefix_path) {
4618 		scnprintf(prefix_name, ICNSS_MAX_FILE_NAME, "%s", name);
4619 		return;
4620 	}
4621 
4622 	if (priv->device_id == ADRASTEA_DEVICE_ID)
4623 		scnprintf(prefix_name, ICNSS_MAX_FILE_NAME,
4624 			  ADRASTEA_PATH_PREFIX "%s", name);
4625 	else if (priv->device_id == WCN6750_DEVICE_ID)
4626 		scnprintf(prefix_name, ICNSS_MAX_FILE_NAME,
4627 			  QCA6750_PATH_PREFIX "%s", name);
4628 	else if (priv->device_id == WCN6450_DEVICE_ID)
4629 		scnprintf(prefix_name, ICNSS_MAX_FILE_NAME,
4630 			  WCN6450_PATH_PREFIX "%s", name);
4631 	icnss_pr_dbg("File added with prefix: %s\n", prefix_name);
4632 }
4633 
4634 static const struct platform_device_id icnss_platform_id_table[] = {
4635 	{ .name = "wcn6750", .driver_data = WCN6750_DEVICE_ID, },
4636 	{ .name = "adrastea", .driver_data = ADRASTEA_DEVICE_ID, },
4637 	{ .name = "wcn6450", .driver_data = WCN6450_DEVICE_ID, },
4638 	{ },
4639 };
4640 
4641 static const struct of_device_id icnss_dt_match[] = {
4642 	{
4643 		.compatible = "qcom,wcn6750",
4644 		.data = (void *)&icnss_platform_id_table[0]},
4645 	{
4646 		.compatible = "qcom,icnss",
4647 		.data = (void *)&icnss_platform_id_table[1]},
4648 	{
4649 		.compatible = "qcom,wcn6450",
4650 		.data = (void *)&icnss_platform_id_table[2]},
4651 	{ },
4652 };
4653 
4654 MODULE_DEVICE_TABLE(of, icnss_dt_match);
4655 
4656 static void icnss_init_control_params(struct icnss_priv *priv)
4657 {
4658 	priv->ctrl_params.qmi_timeout = WLFW_TIMEOUT;
4659 	priv->ctrl_params.quirks = ICNSS_QUIRKS_DEFAULT;
4660 	priv->ctrl_params.bdf_type = ICNSS_BDF_TYPE_DEFAULT;
4661 
4662 	if (priv->device_id == WCN6750_DEVICE_ID ||
4663 	    priv->device_id == WCN6450_DEVICE_ID ||
4664 	    of_property_read_bool(priv->pdev->dev.of_node,
4665 				  "wpss-support-enable"))
4666 		priv->wpss_supported = true;
4667 
4668 	if (of_property_read_bool(priv->pdev->dev.of_node,
4669 				  "bdf-download-support"))
4670 		priv->bdf_download_support = true;
4671 
4672 	if (of_property_read_bool(priv->pdev->dev.of_node,
4673 				  "rproc-fw-download"))
4674 		priv->rproc_fw_download = true;
4675 
4676 	if (priv->bdf_download_support && priv->device_id == ADRASTEA_DEVICE_ID)
4677 		priv->ctrl_params.bdf_type = ICNSS_BDF_BIN;
4678 }
4679 
4680 static void icnss_read_device_configs(struct icnss_priv *priv)
4681 {
4682 	if (of_property_read_bool(priv->pdev->dev.of_node,
4683 				  "wlan-ipa-disabled")) {
4684 		set_bit(ICNSS_IPA_DISABLED, &priv->device_config);
4685 	}
4686 
4687 	if (of_property_read_bool(priv->pdev->dev.of_node,
4688 				  "qcom,wpss-self-recovery"))
4689 		priv->wpss_self_recovery_enabled = true;
4690 }
4691 
4692 static inline void icnss_runtime_pm_init(struct icnss_priv *priv)
4693 {
4694 	pm_runtime_get_sync(&priv->pdev->dev);
4695 	pm_runtime_forbid(&priv->pdev->dev);
4696 	pm_runtime_set_active(&priv->pdev->dev);
4697 	pm_runtime_enable(&priv->pdev->dev);
4698 }
4699 
4700 static inline void icnss_runtime_pm_deinit(struct icnss_priv *priv)
4701 {
4702 	pm_runtime_disable(&priv->pdev->dev);
4703 	pm_runtime_allow(&priv->pdev->dev);
4704 	pm_runtime_put_sync(&priv->pdev->dev);
4705 }
4706 
4707 static inline bool icnss_use_nv_mac(struct icnss_priv *priv)
4708 {
4709 	return of_property_read_bool(priv->pdev->dev.of_node,
4710 				     "use-nv-mac");
4711 }
4712 
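/*
 * Android vendor-hook callback invoked when a remoteproc's recovery policy
 * changes. For the WPSS rproc the new setting is translated into a
 * SUBSYS_RESTART_LEVEL driver event so the driver can switch between
 * ICNSS_ENABLE_M3_SSR and ICNSS_DISABLE_M3_SSR handling.
 */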
4713 static void rproc_restart_level_notifier(void *data, struct rproc *rproc)
4714 {
4715 	struct icnss_subsys_restart_level_data *restart_level_data;
4716 
4717 	icnss_pr_info("rproc name: %s recovery disable: %d",
4718 		      rproc->name, rproc->recovery_disabled);
4719 
4720 	restart_level_data = kzalloc(sizeof(*restart_level_data), GFP_ATOMIC);
4721 	if (!restart_level_data)
4722 		return;
4723 
4724 	if (strnstr(rproc->name, "wpss", ICNSS_RPROC_LEN)) {
4725 		if (rproc->recovery_disabled)
4726 			restart_level_data->restart_level = ICNSS_DISABLE_M3_SSR;
4727 		else
4728 			restart_level_data->restart_level = ICNSS_ENABLE_M3_SSR;
4729 
4730 		icnss_driver_event_post(penv, ICNSS_DRIVER_EVENT_SUBSYS_RESTART_LEVEL,
4731 					0, restart_level_data);
4732 	}
4733 }
4734 
4735 #if IS_ENABLED(CONFIG_WCNSS_MEM_PRE_ALLOC)
4736 static void icnss_initialize_mem_pool(unsigned long device_id)
4737 {
4738 	cnss_initialize_prealloc_pool(device_id);
4739 }
4740 static void icnss_deinitialize_mem_pool(void)
4741 {
4742 	cnss_deinitialize_prealloc_pool();
4743 }
4744 #else
4745 static void icnss_initialize_mem_pool(unsigned long device_id)
4746 {
4747 }
4748 static void icnss_deinitialize_mem_pool(void)
4749 {
4750 }
4751 #endif
4752 
4753 static void register_rproc_restart_level_notifier(void)
4754 {
4755 	register_trace_android_vh_rproc_recovery_set(rproc_restart_level_notifier, NULL);
4756 }
4757 
4758 static void unregister_rproc_restart_level_notifier(void)
4759 {
4760 	unregister_trace_android_vh_rproc_recovery_set(rproc_restart_level_notifier, NULL);
4761 }
4762 
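/*
 * Platform probe: match the device variant, parse DT resources (regulators,
 * clocks, MSA, SMMU), create the event workqueue and sysfs nodes, register
 * the FW QMI service and, for WCN6750/WCN6450, set up the SOC-wake
 * workqueue, runtime PM, AOP interface and cold-boot calibration state.
 * Failures unwind in reverse order of setup.
 */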
4763 static int icnss_probe(struct platform_device *pdev)
4764 {
4765 	int ret = 0;
4766 	struct device *dev = &pdev->dev;
4767 	struct icnss_priv *priv;
4768 	const struct of_device_id *of_id;
4769 	const struct platform_device_id *device_id;
4770 
4771 	if (dev_get_drvdata(dev)) {
4772 		icnss_pr_err("Driver is already initialized\n");
4773 		return -EEXIST;
4774 	}
4775 
4776 	of_id = of_match_device(icnss_dt_match, &pdev->dev);
4777 	if (!of_id || !of_id->data) {
4778 		icnss_pr_err("Failed to find of match device!\n");
4779 		ret = -ENODEV;
4780 		goto out_reset_drvdata;
4781 	}
4782 
4783 	device_id = of_id->data;
4784 
4785 	icnss_pr_dbg("Platform driver probe\n");
4786 
4787 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
4788 	if (!priv)
4789 		return -ENOMEM;
4790 
4791 	priv->magic = ICNSS_MAGIC;
4792 	dev_set_drvdata(dev, priv);
4793 
4794 	priv->pdev = pdev;
4795 	priv->device_id = device_id->driver_data;
4796 	priv->is_chain1_supported = true;
4797 	INIT_LIST_HEAD(&priv->vreg_list);
4798 	INIT_LIST_HEAD(&priv->clk_list);
4799 	icnss_allow_recursive_recovery(dev);
4800 
4801 	icnss_initialize_mem_pool(priv->device_id);
4802 
4803 	icnss_init_control_params(priv);
4804 
4805 	icnss_read_device_configs(priv);
4806 
4807 	ret = icnss_resource_parse(priv);
4808 	if (ret)
4809 		goto out_reset_drvdata;
4810 
4811 	ret = icnss_msa_dt_parse(priv);
4812 	if (ret)
4813 		goto out_free_resources;
4814 
4815 	ret = icnss_smmu_dt_parse(priv);
4816 	if (ret)
4817 		goto out_free_resources;
4818 
4819 	spin_lock_init(&priv->event_lock);
4820 	spin_lock_init(&priv->on_off_lock);
4821 	spin_lock_init(&priv->soc_wake_msg_lock);
4822 	mutex_init(&priv->dev_lock);
4823 	mutex_init(&priv->tcdev_lock);
4824 
4825 	priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
4826 	if (!priv->event_wq) {
4827 		icnss_pr_err("Workqueue creation failed\n");
4828 		ret = -EFAULT;
4829 		goto smmu_cleanup;
4830 	}
4831 
4832 	INIT_WORK(&priv->event_work, icnss_driver_event_work);
4833 	INIT_LIST_HEAD(&priv->event_list);
4834 
4835 	if (priv->is_slate_rfa)
4836 		init_completion(&priv->slate_boot_complete);
4837 
4838 	ret = icnss_register_fw_service(priv);
4839 	if (ret < 0) {
4840 		icnss_pr_err("fw service registration failed: %d\n", ret);
4841 		goto out_destroy_wq;
4842 	}
4843 	icnss_power_misc_params_init(priv);
4844 
4845 	icnss_enable_recovery(priv);
4846 
4847 	icnss_debugfs_create(priv);
4848 
4849 	icnss_sysfs_create(priv);
4850 
4851 	ret = device_init_wakeup(&priv->pdev->dev, true);
4852 	if (ret)
4853 		icnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
4854 			     ret);
4855 
4856 	icnss_set_plat_priv(priv);
4857 
4858 	init_completion(&priv->unblock_shutdown);
4859 
4860 	if (priv->device_id == WCN6750_DEVICE_ID ||
4861 	    priv->device_id == WCN6450_DEVICE_ID) {
4862 		priv->soc_wake_wq = alloc_workqueue("icnss_soc_wake_event",
4863 						    WQ_UNBOUND|WQ_HIGHPRI, 1);
4864 		if (!priv->soc_wake_wq) {
4865 			icnss_pr_err("Soc wake Workqueue creation failed\n");
4866 			ret = -EFAULT;
4867 			goto out_unregister_fw_service;
4868 		}
4869 
4870 		INIT_WORK(&priv->soc_wake_msg_work, icnss_soc_wake_msg_work);
4871 		INIT_LIST_HEAD(&priv->soc_wake_msg_list);
4872 
4873 		ret = icnss_genl_init();
4874 		if (ret < 0)
4875 			icnss_pr_err("ICNSS genl init failed %d\n", ret);
4876 
4877 		init_completion(&priv->smp2p_soc_wake_wait);
4878 		icnss_runtime_pm_init(priv);
4879 		icnss_aop_interface_init(priv);
4880 		set_bit(ICNSS_COLD_BOOT_CAL, &priv->state);
4881 		priv->bdf_download_support = true;
4882 		register_rproc_restart_level_notifier();
4883 	}
4884 
4885 	if (priv->wpss_supported) {
4886 		ret = icnss_dms_init(priv);
4887 		if (ret)
4888 			icnss_pr_err("ICNSS DMS init failed %d\n", ret);
4889 		priv->use_nv_mac = icnss_use_nv_mac(priv);
4890 		icnss_pr_dbg("NV MAC feature is %s\n",
4891 			     priv->use_nv_mac ? "Mandatory":"Not Mandatory");
4892 	}
4893 
4894 	if (priv->wpss_supported || priv->rproc_fw_download)
4895 		INIT_WORK(&wpss_loader, icnss_wpss_load);
4896 
4897 	timer_setup(&priv->recovery_timer,
4898 		    icnss_recovery_timeout_hdlr, 0);
4899 
4900 	if (priv->wpss_self_recovery_enabled) {
4901 		INIT_WORK(&wpss_ssr_work, icnss_wpss_self_recovery);
4902 		timer_setup(&priv->wpss_ssr_timer,
4903 			    icnss_wpss_ssr_timeout_hdlr, 0);
4904 	}
4905 
4906 	icnss_register_ims_service(priv);
4907 	INIT_LIST_HEAD(&priv->icnss_tcdev_list);
4908 
4909 	icnss_pr_info("Platform driver probed successfully\n");
4910 
4911 	return 0;
4912 
4913 out_unregister_fw_service:
4914 	icnss_unregister_fw_service(priv);
4915 out_destroy_wq:
4916 	destroy_workqueue(priv->event_wq);
4917 smmu_cleanup:
4918 	priv->iommu_domain = NULL;
4919 out_free_resources:
4920 	icnss_put_resources(priv);
4921 out_reset_drvdata:
4922 	icnss_deinitialize_mem_pool();
4923 	dev_set_drvdata(dev, NULL);
4924 	return ret;
4925 }
4926 
4927 void icnss_destroy_ramdump_device(struct icnss_ramdump_info *ramdump_info)
4928 {
4929 
4930 	if (IS_ERR_OR_NULL(ramdump_info))
4931 		return;
4932 
4933 	device_unregister(ramdump_info->dev);
4934 
4935 	ida_simple_remove(&rd_minor_id, ramdump_info->minor);
4936 
4937 	kfree(ramdump_info);
4938 }
4939 
4940 static void icnss_unregister_power_supply_notifier(struct icnss_priv *priv)
4941 {
4942 	if (priv->batt_psy)
4943 		power_supply_put(priv->batt_psy);
4944 
4945 	if (priv->psf_supported) {
4946 		flush_workqueue(priv->soc_update_wq);
4947 		destroy_workqueue(priv->soc_update_wq);
4948 		power_supply_unreg_notifier(&priv->psf_nb);
4949 	}
4950 }
4951 
4952 static int icnss_remove(struct platform_device *pdev)
4953 {
4954 	struct icnss_priv *priv = dev_get_drvdata(&pdev->dev);
4955 
4956 	icnss_pr_info("Removing driver: state: 0x%lx\n", priv->state);
4957 
4958 	del_timer(&priv->recovery_timer);
4959 
4960 	if (priv->wpss_self_recovery_enabled)
4961 		del_timer(&priv->wpss_ssr_timer);
4962 
4963 	device_init_wakeup(&priv->pdev->dev, false);
4964 
4965 	icnss_unregister_ims_service(priv);
4966 
4967 	icnss_debugfs_destroy(priv);
4968 
4969 	icnss_unregister_power_supply_notifier(priv);
4970 
4971 	icnss_sysfs_destroy(priv);
4972 
4973 	complete_all(&priv->unblock_shutdown);
4974 
4975 	if (priv->is_slate_rfa) {
4976 		complete(&priv->slate_boot_complete);
4977 		icnss_slate_ssr_unregister_notifier(priv);
4978 		icnss_unregister_slate_event_notifier(priv);
4979 	}
4980 
4981 	icnss_destroy_ramdump_device(priv->msa0_dump_dev);
4982 
4983 	if (priv->wpss_supported) {
4984 		icnss_dms_deinit(priv);
4985 		icnss_wpss_early_ssr_unregister_notifier(priv);
4986 		icnss_wpss_ssr_unregister_notifier(priv);
4987 	} else {
4988 		icnss_modem_ssr_unregister_notifier(priv);
4989 		icnss_pdr_unregister_notifier(priv);
4990 	}
4991 
4992 	if (priv->device_id == WCN6750_DEVICE_ID ||
4993 	    priv->device_id == WCN6450_DEVICE_ID) {
4994 		icnss_genl_exit();
4995 		icnss_runtime_pm_deinit(priv);
4996 		unregister_rproc_restart_level_notifier();
4997 		complete_all(&priv->smp2p_soc_wake_wait);
4998 		icnss_destroy_ramdump_device(priv->m3_dump_phyareg);
4999 		icnss_destroy_ramdump_device(priv->m3_dump_phydbg);
5000 		icnss_destroy_ramdump_device(priv->m3_dump_wmac0reg);
5001 		icnss_destroy_ramdump_device(priv->m3_dump_wcssdbg);
5002 		icnss_destroy_ramdump_device(priv->m3_dump_phyapdmem);
5003 		if (priv->soc_wake_wq)
5004 			destroy_workqueue(priv->soc_wake_wq);
5005 		icnss_aop_interface_deinit(priv);
5006 	}
5007 
5008 	class_destroy(priv->icnss_ramdump_class);
5009 	unregister_chrdev_region(priv->icnss_ramdump_dev, RAMDUMP_NUM_DEVICES);
5010 
5011 	icnss_unregister_fw_service(priv);
5012 	if (priv->event_wq)
5013 		destroy_workqueue(priv->event_wq);
5014 
5015 	priv->iommu_domain = NULL;
5016 
5017 	icnss_hw_power_off(priv);
5018 
5019 	icnss_put_resources(priv);
5020 
5021 	icnss_deinitialize_mem_pool();
5022 
5023 	dev_set_drvdata(&pdev->dev, NULL);
5024 
5025 	return 0;
5026 }
5027 
5028 void icnss_recovery_timeout_hdlr(struct timer_list *t)
5029 {
5030 	struct icnss_priv *priv = from_timer(priv, t, recovery_timer);
5031 
5032 	/* This is to handle if slate is not up and modem SSR is triggered */
5033 	if (priv->is_slate_rfa && !test_bit(ICNSS_SLATE_UP, &priv->state))
5034 		return;
5035 
5036 	icnss_pr_err("Timeout waiting for FW Ready 0x%lx\n", priv->state);
5037 	ICNSS_ASSERT(0);
5038 }
5039 
5040 void icnss_wpss_ssr_timeout_hdlr(struct timer_list *t)
5041 {
5042 	struct icnss_priv *priv = from_timer(priv, t, wpss_ssr_timer);
5043 
5044 	icnss_pr_err("Timeout waiting for WPSS SSR notification 0x%lx\n",
5045 		      priv->state);
5046 	schedule_work(&wpss_ssr_work);
5047 }
5048 
5049 #ifdef CONFIG_PM_SLEEP
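/*
 * System suspend: forward to the registered driver ops and, for
 * WCN6750/WCN6450, tell firmware to enter power save over SMP2P once the
 * driver callback has succeeded.
 */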
5050 static int icnss_pm_suspend(struct device *dev)
5051 {
5052 	struct icnss_priv *priv = dev_get_drvdata(dev);
5053 	int ret = 0;
5054 
5055 	if (priv->magic != ICNSS_MAGIC) {
5056 		icnss_pr_err("Invalid drvdata for pm suspend: dev %pK, data %pK, magic 0x%x\n",
5057 			     dev, priv, priv->magic);
5058 		return -EINVAL;
5059 	}
5060 
5061 	icnss_pr_vdbg("PM Suspend, state: 0x%lx\n", priv->state);
5062 
5063 	if (!priv->ops || !priv->ops->pm_suspend ||
5064 	    icnss_is_smp2p_valid(priv, ICNSS_SMP2P_OUT_POWER_SAVE) ||
5065 	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
5066 		return 0;
5067 
5068 	ret = priv->ops->pm_suspend(dev);
5069 
5070 	if (ret == 0) {
5071 		if (priv->device_id == WCN6750_DEVICE_ID ||
5072 		    priv->device_id == WCN6450_DEVICE_ID) {
5073 			if (test_bit(ICNSS_PD_RESTART, &priv->state) ||
5074 			    !test_bit(ICNSS_MODE_ON, &priv->state))
5075 				return 0;
5076 
5077 			ret = icnss_send_smp2p(priv, ICNSS_POWER_SAVE_ENTER,
5078 					       ICNSS_SMP2P_OUT_POWER_SAVE);
5079 		}
5080 		priv->stats.pm_suspend++;
5081 		set_bit(ICNSS_PM_SUSPEND, &priv->state);
5082 	} else {
5083 		priv->stats.pm_suspend_err++;
5084 	}
5085 	return ret;
5086 }
5087 
5088 static int icnss_pm_resume(struct device *dev)
5089 {
5090 	struct icnss_priv *priv = dev_get_drvdata(dev);
5091 	int ret = 0;
5092 
5093 	if (priv->magic != ICNSS_MAGIC) {
5094 		icnss_pr_err("Invalid drvdata for pm resume: dev %pK, data %pK, magic 0x%x\n",
5095 			     dev, priv, priv->magic);
5096 		return -EINVAL;
5097 	}
5098 
5099 	icnss_pr_vdbg("PM resume, state: 0x%lx\n", priv->state);
5100 
5101 	if (!priv->ops || !priv->ops->pm_resume ||
5102 	    icnss_is_smp2p_valid(priv, ICNSS_SMP2P_OUT_POWER_SAVE) ||
5103 	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
5104 		goto out;
5105 
5106 	ret = priv->ops->pm_resume(dev);
5107 
5108 out:
5109 	if (ret == 0) {
5110 		priv->stats.pm_resume++;
5111 		clear_bit(ICNSS_PM_SUSPEND, &priv->state);
5112 	} else {
5113 		priv->stats.pm_resume_err++;
5114 	}
5115 	return ret;
5116 }
5117 
5118 static int icnss_pm_suspend_noirq(struct device *dev)
5119 {
5120 	struct icnss_priv *priv = dev_get_drvdata(dev);
5121 	int ret = 0;
5122 
5123 	if (priv->magic != ICNSS_MAGIC) {
5124 		icnss_pr_err("Invalid drvdata for pm suspend_noirq: dev %pK, data %pK, magic 0x%x\n",
5125 			     dev, priv, priv->magic);
5126 		return -EINVAL;
5127 	}
5128 
5129 	icnss_pr_vdbg("PM suspend_noirq, state: 0x%lx\n", priv->state);
5130 
5131 	if (!priv->ops || !priv->ops->suspend_noirq ||
5132 	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
5133 		goto out;
5134 
5135 	ret = priv->ops->suspend_noirq(dev);
5136 
5137 out:
5138 	if (ret == 0) {
5139 		priv->stats.pm_suspend_noirq++;
5140 		set_bit(ICNSS_PM_SUSPEND_NOIRQ, &priv->state);
5141 	} else {
5142 		priv->stats.pm_suspend_noirq_err++;
5143 	}
5144 	return ret;
5145 }
5146 
5147 static int icnss_pm_resume_noirq(struct device *dev)
5148 {
5149 	struct icnss_priv *priv = dev_get_drvdata(dev);
5150 	int ret = 0;
5151 
5152 	if (priv->magic != ICNSS_MAGIC) {
5153 		icnss_pr_err("Invalid drvdata for pm resume_noirq: dev %pK, data %pK, magic 0x%x\n",
5154 			     dev, priv, priv->magic);
5155 		return -EINVAL;
5156 	}
5157 
5158 	icnss_pr_vdbg("PM resume_noirq, state: 0x%lx\n", priv->state);
5159 
5160 	if (!priv->ops || !priv->ops->resume_noirq ||
5161 	    !test_bit(ICNSS_DRIVER_PROBED, &priv->state))
5162 		goto out;
5163 
5164 	ret = priv->ops->resume_noirq(dev);
5165 
5166 out:
5167 	if (ret == 0) {
5168 		priv->stats.pm_resume_noirq++;
5169 		clear_bit(ICNSS_PM_SUSPEND_NOIRQ, &priv->state);
5170 	} else {
5171 		priv->stats.pm_resume_noirq_err++;
5172 	}
5173 	return ret;
5174 }
5175 
5176 static int icnss_pm_runtime_suspend(struct device *dev)
5177 {
5178 	struct icnss_priv *priv = dev_get_drvdata(dev);
5179 	int ret = 0;
5180 
5181 	if (priv->device_id == ADRASTEA_DEVICE_ID) {
5182 		icnss_pr_err("Ignore runtime suspend\n");
5183 		goto out;
5184 	}
5185 
5186 	if (priv->magic != ICNSS_MAGIC) {
5187 		icnss_pr_err("Invalid drvdata for runtime suspend: dev %pK, data %pK, magic 0x%x\n",
5188 			     dev, priv, priv->magic);
5189 		return -EINVAL;
5190 	}
5191 
5192 	if (!priv->ops || !priv->ops->runtime_suspend ||
5193 	    icnss_is_smp2p_valid(priv, ICNSS_SMP2P_OUT_POWER_SAVE))
5194 		goto out;
5195 
5196 	icnss_pr_vdbg("Runtime suspend\n");
5197 	ret = priv->ops->runtime_suspend(dev);
5198 	if (!ret) {
5199 		if (test_bit(ICNSS_PD_RESTART, &priv->state) ||
5200 		    !test_bit(ICNSS_MODE_ON, &priv->state))
5201 			return 0;
5202 
5203 		ret = icnss_send_smp2p(priv, ICNSS_POWER_SAVE_ENTER,
5204 				       ICNSS_SMP2P_OUT_POWER_SAVE);
5205 	}
5206 out:
5207 	return ret;
5208 }
5209 
5210 static int icnss_pm_runtime_resume(struct device *dev)
5211 {
5212 	struct icnss_priv *priv = dev_get_drvdata(dev);
5213 	int ret = 0;
5214 
5215 	if (priv->device_id == ADRASTEA_DEVICE_ID) {
5216 		icnss_pr_err("Ignore runtime resume\n");
5217 		goto out;
5218 	}
5219 
5220 	if (priv->magic != ICNSS_MAGIC) {
5221 		icnss_pr_err("Invalid drvdata for runtime resume: dev %pK, data %pK, magic 0x%x\n",
5222 			     dev, priv, priv->magic);
5223 		return -EINVAL;
5224 	}
5225 
5226 	if (!priv->ops || !priv->ops->runtime_resume ||
5227 	    icnss_is_smp2p_valid(priv, ICNSS_SMP2P_OUT_POWER_SAVE))
5228 		goto out;
5229 
5230 	icnss_pr_vdbg("Runtime resume, state: 0x%lx\n", priv->state);
5231 
5232 	ret = priv->ops->runtime_resume(dev);
5233 
5234 out:
5235 	return ret;
5236 }
5237 
5238 static int icnss_pm_runtime_idle(struct device *dev)
5239 {
5240 	struct icnss_priv *priv = dev_get_drvdata(dev);
5241 
5242 	if (priv->device_id == ADRASTEA_DEVICE_ID) {
5243 		icnss_pr_err("Ignore runtime idle\n");
5244 		goto out;
5245 	}
5246 
5247 	icnss_pr_vdbg("Runtime idle\n");
5248 
5249 	pm_request_autosuspend(dev);
5250 
5251 out:
5252 	return -EBUSY;
5253 }
5254 #endif
5255 
5256 static const struct dev_pm_ops icnss_pm_ops = {
5257 	SET_SYSTEM_SLEEP_PM_OPS(icnss_pm_suspend,
5258 				icnss_pm_resume)
5259 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(icnss_pm_suspend_noirq,
5260 				      icnss_pm_resume_noirq)
5261 	SET_RUNTIME_PM_OPS(icnss_pm_runtime_suspend, icnss_pm_runtime_resume,
5262 			   icnss_pm_runtime_idle)
5263 };
5264 
5265 static struct platform_driver icnss_driver = {
5266 	.probe  = icnss_probe,
5267 	.remove = icnss_remove,
5268 	.driver = {
5269 		.name = "icnss2",
5270 		.pm = &icnss_pm_ops,
5271 		.of_match_table = icnss_dt_match,
5272 	},
5273 };
5274 
5275 static int __init icnss_initialize(void)
5276 {
5277 	icnss_debug_init();
5278 	return platform_driver_register(&icnss_driver);
5279 }
5280 
5281 static void __exit icnss_exit(void)
5282 {
5283 	platform_driver_unregister(&icnss_driver);
5284 	icnss_debug_deinit();
5285 }
5286 
5287 
5288 module_init(icnss_initialize);
5289 module_exit(icnss_exit);
5290 
5291 MODULE_LICENSE("GPL v2");
5292 MODULE_DESCRIPTION("iWCN CORE platform driver");
5293