xref: /wlan-dirver/platform/cnss2/main.c (revision 5a888e90a2d595c929907f9f11821ed298ecde6c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/delay.h>
8 #include <linux/devcoredump.h>
9 #include <linux/elf.h>
10 #include <linux/jiffies.h>
11 #include <linux/module.h>
12 #include <linux/of.h>
13 #include <linux/of_device.h>
14 #include <linux/of_gpio.h>
15 #include <linux/pm_wakeup.h>
16 #include <linux/reboot.h>
17 #include <linux/rwsem.h>
18 #include <linux/suspend.h>
19 #include <linux/timer.h>
20 #include <linux/thermal.h>
21 #include <linux/version.h>
22 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0))
23 #include <linux/panic_notifier.h>
24 #endif
25 #if IS_ENABLED(CONFIG_QCOM_MINIDUMP)
26 #include <soc/qcom/minidump.h>
27 #endif
28 
29 #include "cnss_plat_ipc_qmi.h"
30 #include "cnss_utils.h"
31 #include "main.h"
32 #include "bus.h"
33 #include "debug.h"
34 #include "genl.h"
35 #include "reg.h"
36 
37 #ifdef CONFIG_CNSS_HW_SECURE_DISABLE
38 #ifdef CONFIG_CNSS_HW_SECURE_SMEM
39 #include <linux/soc/qcom/smem.h>
40 #define PERISEC_SMEM_ID 651
41 #define HW_WIFI_UID 0x508
42 #else
43 #include "smcinvoke.h"
44 #include "smcinvoke_object.h"
45 #include "IClientEnv.h"
46 #define HW_STATE_UID 0x108
47 #define HW_OP_GET_STATE 1
48 #define HW_WIFI_UID 0x508
49 #define FEATURE_NOT_SUPPORTED 12
50 #define PERIPHERAL_NOT_FOUND 10
51 #endif
52 #endif
53 
54 #define CNSS_DUMP_FORMAT_VER		0x11
55 #define CNSS_DUMP_FORMAT_VER_V2		0x22
56 #define CNSS_DUMP_MAGIC_VER_V2		0x42445953
57 #define CNSS_DUMP_NAME			"CNSS_WLAN"
58 #define CNSS_DUMP_DESC_SIZE		0x1000
59 #define CNSS_DUMP_SEG_VER		0x1
60 #define FILE_SYSTEM_READY		1
61 #define FW_READY_TIMEOUT		20000
62 #define FW_ASSERT_TIMEOUT		5000
63 #define CNSS_EVENT_PENDING		2989
64 #define POWER_RESET_MIN_DELAY_MS	100
65 
66 #define CNSS_QUIRKS_DEFAULT		0
67 #ifdef CONFIG_CNSS_EMULATION
68 #define CNSS_MHI_TIMEOUT_DEFAULT	90000
69 #define CNSS_MHI_M2_TIMEOUT_DEFAULT	2000
70 #define CNSS_QMI_TIMEOUT_DEFAULT	90000
71 #else
72 #define CNSS_MHI_TIMEOUT_DEFAULT	0
73 #define CNSS_MHI_M2_TIMEOUT_DEFAULT	25
74 #define CNSS_QMI_TIMEOUT_DEFAULT	10000
75 #endif
76 #define CNSS_BDF_TYPE_DEFAULT		CNSS_BDF_ELF
77 #define CNSS_TIME_SYNC_PERIOD_DEFAULT	900000
78 #define CNSS_MIN_TIME_SYNC_PERIOD	2000
79 #define CNSS_DMS_QMI_CONNECTION_WAIT_MS 50
80 #define CNSS_DMS_QMI_CONNECTION_WAIT_RETRY 200
81 #define CNSS_DAEMON_CONNECT_TIMEOUT_MS  30000
82 #define CNSS_CAL_DB_FILE_NAME "wlfw_cal_db.bin"
83 #define CNSS_CAL_START_PROBE_WAIT_RETRY_MAX 100
84 #define CNSS_CAL_START_PROBE_WAIT_MS	500
85 #define CNSS_TIME_SYNC_PERIOD_INVALID	0xFFFFFFFF
86 
87 enum cnss_cal_db_op {
88 	CNSS_CAL_DB_UPLOAD,
89 	CNSS_CAL_DB_DOWNLOAD,
90 	CNSS_CAL_DB_INVALID_OP,
91 };
92 
93 enum cnss_recovery_type {
94 	CNSS_WLAN_RECOVERY = 0x1,
95 	CNSS_PCSS_RECOVERY = 0x2,
96 };
97 
98 #ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
99 #define CNSS_MAX_DEV_NUM		2
100 static struct cnss_plat_data *plat_env[CNSS_MAX_DEV_NUM];
101 static int plat_env_count;
102 #else
103 static struct cnss_plat_data *plat_env;
104 #endif
105 
106 static bool cnss_allow_driver_loading;
107 
108 static struct cnss_fw_files FW_FILES_QCA6174_FW_3_0 = {
109 	"qwlan30.bin", "bdwlan30.bin", "otp30.bin", "utf30.bin",
110 	"utfbd30.bin", "epping30.bin", "evicted30.bin"
111 };
112 
113 static struct cnss_fw_files FW_FILES_DEFAULT = {
114 	"qwlan.bin", "bdwlan.bin", "otp.bin", "utf.bin",
115 	"utfbd.bin", "epping.bin", "evicted.bin"
116 };
117 
118 struct cnss_driver_event {
119 	struct list_head list;
120 	enum cnss_driver_event_type type;
121 	bool sync;
122 	struct completion complete;
123 	int ret;
124 	void *data;
125 };
126 
127 bool cnss_check_driver_loading_allowed(void)
128 {
129 	return cnss_allow_driver_loading;
130 }
131 
132 #ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
133 static void cnss_set_plat_priv(struct platform_device *plat_dev,
134 			       struct cnss_plat_data *plat_priv)
135 {
136 	cnss_pr_dbg("Set plat_priv at %d", plat_env_count);
137 	if (plat_priv) {
138 		plat_priv->plat_idx = plat_env_count;
139 		plat_env[plat_priv->plat_idx] = plat_priv;
140 		plat_env_count++;
141 	}
142 }
143 
144 struct cnss_plat_data *cnss_get_plat_priv(struct platform_device
145 						 *plat_dev)
146 {
147 	int i;
148 
149 	if (!plat_dev)
150 		return NULL;
151 
152 	for (i = 0; i < plat_env_count; i++) {
153 		if (plat_env[i]->plat_dev == plat_dev)
154 			return plat_env[i];
155 	}
156 	return NULL;
157 }
158 
159 struct cnss_plat_data *cnss_get_first_plat_priv(struct platform_device
160 						 *plat_dev)
161 {
162 	int i;
163 
164 	if (!plat_dev) {
165 		for (i = 0; i < plat_env_count; i++) {
166 			if (plat_env[i])
167 				return plat_env[i];
168 		}
169 	}
170 	return NULL;
171 }
172 
173 static void cnss_clear_plat_priv(struct cnss_plat_data *plat_priv)
174 {
175 	cnss_pr_dbg("Clear plat_priv at %d", plat_priv->plat_idx);
176 	plat_env[plat_priv->plat_idx] = NULL;
177 	plat_env_count--;
178 }
179 
180 static int cnss_set_device_name(struct cnss_plat_data *plat_priv)
181 {
182 	snprintf(plat_priv->device_name, sizeof(plat_priv->device_name),
183 		 "wlan_%d", plat_priv->plat_idx);
184 
185 	return 0;
186 }
187 
188 static int cnss_plat_env_available(void)
189 {
190 	int ret = 0;
191 
192 	if (plat_env_count >= CNSS_MAX_DEV_NUM) {
193 		cnss_pr_err("ERROR: No space to store plat_priv\n");
194 		ret = -ENOMEM;
195 	}
196 	return ret;
197 }
198 
199 int cnss_get_plat_env_count(void)
200 {
201 	return plat_env_count;
202 }
203 
204 struct cnss_plat_data *cnss_get_plat_env(int index)
205 {
206 	return plat_env[index];
207 }
208 
209 struct cnss_plat_data *cnss_get_plat_priv_by_rc_num(int rc_num)
210 {
211 	int i;
212 
213 	for (i = 0; i < plat_env_count; i++) {
214 		if (plat_env[i]->rc_num == rc_num)
215 			return plat_env[i];
216 	}
217 	return NULL;
218 }
219 
220 static inline int
221 cnss_get_qrtr_node_id(struct cnss_plat_data *plat_priv)
222 {
223 	return of_property_read_u32(plat_priv->dev_node,
224 		"qcom,qrtr_node_id", &plat_priv->qrtr_node_id);
225 }
226 
227 void cnss_get_qrtr_info(struct cnss_plat_data *plat_priv)
228 {
229 	int ret = 0;
230 
231 	ret = cnss_get_qrtr_node_id(plat_priv);
232 	if (ret) {
233 		cnss_pr_warn("Failed to find qrtr_node_id err=%d\n", ret);
234 		plat_priv->qrtr_node_id = 0;
235 		plat_priv->wlfw_service_instance_id = 0;
236 	} else {
237 		plat_priv->wlfw_service_instance_id = plat_priv->qrtr_node_id +
238 						      QRTR_NODE_FW_ID_BASE;
239 		cnss_pr_dbg("service_instance_id=0x%x\n",
240 			    plat_priv->wlfw_service_instance_id);
241 	}
242 }
243 
244 static inline int
245 cnss_get_pld_bus_ops_name(struct cnss_plat_data *plat_priv)
246 {
247 	return of_property_read_string(plat_priv->plat_dev->dev.of_node,
248 				       "qcom,pld_bus_ops_name",
249 				       &plat_priv->pld_bus_ops_name);
250 }
251 
252 #else
253 static void cnss_set_plat_priv(struct platform_device *plat_dev,
254 			       struct cnss_plat_data *plat_priv)
255 {
256 	plat_env = plat_priv;
257 }
258 
259 struct cnss_plat_data *cnss_get_plat_priv(struct platform_device *plat_dev)
260 {
261 	return plat_env;
262 }
263 
264 static void cnss_clear_plat_priv(struct cnss_plat_data *plat_priv)
265 {
266 	plat_env = NULL;
267 }
268 
269 static int cnss_set_device_name(struct cnss_plat_data *plat_priv)
270 {
271 	snprintf(plat_priv->device_name, sizeof(plat_priv->device_name),
272 		 "wlan");
273 	return 0;
274 }
275 
276 static int cnss_plat_env_available(void)
277 {
278 	return 0;
279 }
280 
281 struct cnss_plat_data *cnss_get_plat_priv_by_rc_num(int rc_num)
282 {
283 	return cnss_bus_dev_to_plat_priv(NULL);
284 }
285 
286 void cnss_get_qrtr_info(struct cnss_plat_data *plat_priv)
287 {
288 }
289 
290 static int
291 cnss_get_pld_bus_ops_name(struct cnss_plat_data *plat_priv)
292 {
293 	return 0;
294 }
295 #endif
296 
297 void cnss_get_sleep_clk_supported(struct cnss_plat_data *plat_priv)
298 {
299 	plat_priv->sleep_clk = of_property_read_bool(plat_priv->dev_node,
300 						     "qcom,sleep-clk-support");
301 	cnss_pr_dbg("qcom,sleep-clk-support is %d\n",
302 		    plat_priv->sleep_clk);
303 }
304 
305 void cnss_get_bwscal_info(struct cnss_plat_data *plat_priv)
306 {
307 	plat_priv->no_bwscale = of_property_read_bool(plat_priv->dev_node,
308 						      "qcom,no-bwscale");
309 }
310 
311 static inline int
312 cnss_get_rc_num(struct cnss_plat_data *plat_priv)
313 {
314 	return of_property_read_u32(plat_priv->plat_dev->dev.of_node,
315 		"qcom,wlan-rc-num", &plat_priv->rc_num);
316 }
317 
318 bool cnss_is_dual_wlan_enabled(void)
319 {
320 	return IS_ENABLED(CONFIG_CNSS_SUPPORT_DUAL_DEV);
321 }
322 
323 /**
324  * cnss_get_mem_seg_count - Get segment count of memory
325  * @type: memory type
326  * @seg: segment count
327  *
328  * Return: 0 on success, negative value on failure
329  */
330 int cnss_get_mem_seg_count(enum cnss_remote_mem_type type, u32 *seg)
331 {
332 	struct cnss_plat_data *plat_priv;
333 
334 	plat_priv = cnss_get_plat_priv(NULL);
335 	if (!plat_priv)
336 		return -ENODEV;
337 
338 	switch (type) {
339 	case CNSS_REMOTE_MEM_TYPE_FW:
340 		*seg = plat_priv->fw_mem_seg_len;
341 		break;
342 	case CNSS_REMOTE_MEM_TYPE_QDSS:
343 		*seg = plat_priv->qdss_mem_seg_len;
344 		break;
345 	default:
346 		return -EINVAL;
347 	}
348 
349 	return 0;
350 }
351 EXPORT_SYMBOL(cnss_get_mem_seg_count);
352 
353 /**
354  * cnss_get_wifi_kobj - Return wifi kobject
355  * Return: wifi kobject pointer, or NULL to maintain driver compatibility
356  */
357 struct kobject *cnss_get_wifi_kobj(struct device *dev)
358 {
359 	struct cnss_plat_data *plat_priv;
360 
361 	plat_priv = cnss_get_plat_priv(NULL);
362 	if (!plat_priv)
363 		return NULL;
364 
365 	return plat_priv->wifi_kobj;
366 }
367 EXPORT_SYMBOL(cnss_get_wifi_kobj);
368 
369 /**
370  * cnss_get_mem_segment_info - Get memory info of different type
371  * @type: memory type
372  * @segment: array to save the segment info
373  * @segment_count: segment count
374  *
375  * Return: 0 on success, negative value on failure
376  */
377 int cnss_get_mem_segment_info(enum cnss_remote_mem_type type,
378 			      struct cnss_mem_segment segment[],
379 			      u32 segment_count)
380 {
381 	struct cnss_plat_data *plat_priv;
382 	u32 i;
383 
384 	plat_priv = cnss_get_plat_priv(NULL);
385 	if (!plat_priv)
386 		return -ENODEV;
387 
388 	switch (type) {
389 	case CNSS_REMOTE_MEM_TYPE_FW:
390 		if (segment_count > plat_priv->fw_mem_seg_len)
391 			segment_count = plat_priv->fw_mem_seg_len;
392 		for (i = 0; i < segment_count; i++) {
393 			segment[i].size = plat_priv->fw_mem[i].size;
394 			segment[i].va = plat_priv->fw_mem[i].va;
395 			segment[i].pa = plat_priv->fw_mem[i].pa;
396 		}
397 		break;
398 	case CNSS_REMOTE_MEM_TYPE_QDSS:
399 		if (segment_count > plat_priv->qdss_mem_seg_len)
400 			segment_count = plat_priv->qdss_mem_seg_len;
401 		for (i = 0; i < segment_count; i++) {
402 			segment[i].size = plat_priv->qdss_mem[i].size;
403 			segment[i].va = plat_priv->qdss_mem[i].va;
404 			segment[i].pa = plat_priv->qdss_mem[i].pa;
405 		}
406 		break;
407 	default:
408 		return -EINVAL;
409 	}
410 
411 	return 0;
412 }
413 EXPORT_SYMBOL(cnss_get_mem_segment_info);
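/*
 * Illustrative usage sketch (not part of the original driver): a remote
 * client can discover the FW remote memory layout by first reading the
 * segment count and then fetching per-segment info. The local array size
 * below is a hypothetical caller-side choice.
 *
 *	struct cnss_mem_segment segs[8];
 *	u32 count = 0;
 *
 *	if (!cnss_get_mem_seg_count(CNSS_REMOTE_MEM_TYPE_FW, &count) &&
 *	    !cnss_get_mem_segment_info(CNSS_REMOTE_MEM_TYPE_FW, segs,
 *				       min_t(u32, count, ARRAY_SIZE(segs))))
 *		pr_debug("FW remote memory has %u segment(s)\n", count);
 */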
414 
415 static int cnss_get_audio_iommu_domain(struct cnss_plat_data *plat_priv)
416 {
417 	struct device_node *audio_ion_node;
418 	struct platform_device *audio_ion_pdev;
419 
420 	audio_ion_node = of_find_compatible_node(NULL, NULL,
421 						 "qcom,msm-audio-ion");
422 	if (!audio_ion_node) {
423 		cnss_pr_err("Unable to get Audio ion node\n");
424 		return -EINVAL;
425 	}
426 
427 	audio_ion_pdev = of_find_device_by_node(audio_ion_node);
428 	of_node_put(audio_ion_node);
429 	if (!audio_ion_pdev) {
430 		cnss_pr_err("Unable to get Audio ion platform device\n");
431 		return -EINVAL;
432 	}
433 
434 	plat_priv->audio_iommu_domain =
435 				iommu_get_domain_for_dev(&audio_ion_pdev->dev);
436 	put_device(&audio_ion_pdev->dev);
437 	if (!plat_priv->audio_iommu_domain) {
438 		cnss_pr_err("Unable to get Audio ion iommu domain\n");
439 		return -EINVAL;
440 	}
441 
442 	return 0;
443 }
444 
445 int cnss_set_feature_list(struct cnss_plat_data *plat_priv,
446 			  enum cnss_feature_v01 feature)
447 {
448 	if (unlikely(!plat_priv || feature >= CNSS_MAX_FEATURE_V01))
449 		return -EINVAL;
450 
451 	plat_priv->feature_list |= 1 << feature;
452 	return 0;
453 }
454 
455 int cnss_clear_feature_list(struct cnss_plat_data *plat_priv,
456 			    enum cnss_feature_v01 feature)
457 {
458 	if (unlikely(!plat_priv || feature >= CNSS_MAX_FEATURE_V01))
459 		return -EINVAL;
460 
461 	plat_priv->feature_list &= ~(1 << feature);
462 	return 0;
463 }
464 
465 int cnss_get_feature_list(struct cnss_plat_data *plat_priv,
466 			  u64 *feature_list)
467 {
468 	if (unlikely(!plat_priv))
469 		return -EINVAL;
470 
471 	*feature_list = plat_priv->feature_list;
472 	return 0;
473 }
474 
475 size_t cnss_get_platform_name(struct cnss_plat_data *plat_priv,
476 			      char *buf, const size_t buf_len)
477 {
478 	if (unlikely(!plat_priv || !buf || !buf_len))
479 		return 0;
480 
481 	if (of_property_read_bool(plat_priv->plat_dev->dev.of_node,
482 				  "platform-name-required")) {
483 		struct device_node *root;
484 
485 		root = of_find_node_by_path("/");
486 		if (root) {
487 			const char *model;
488 			size_t model_len;
489 
490 			model = of_get_property(root, "model", NULL);
491 			if (model) {
492 				model_len = strlcpy(buf, model, buf_len);
493 				cnss_pr_dbg("Platform name: %s (%zu)\n",
494 					    buf, model_len);
495 
496 				return model_len;
497 			}
498 		}
499 	}
500 
501 	return 0;
502 }
503 
504 void cnss_pm_stay_awake(struct cnss_plat_data *plat_priv)
505 {
506 	if (atomic_inc_return(&plat_priv->pm_count) != 1)
507 		return;
508 
509 	cnss_pr_dbg("PM stay awake, state: 0x%lx, count: %d\n",
510 		    plat_priv->driver_state,
511 		    atomic_read(&plat_priv->pm_count));
512 	pm_stay_awake(&plat_priv->plat_dev->dev);
513 }
514 
515 void cnss_pm_relax(struct cnss_plat_data *plat_priv)
516 {
517 	int r = atomic_dec_return(&plat_priv->pm_count);
518 
519 	WARN_ON(r < 0);
520 
521 	if (r != 0)
522 		return;
523 
524 	cnss_pr_dbg("PM relax, state: 0x%lx, count: %d\n",
525 		    plat_priv->driver_state,
526 		    atomic_read(&plat_priv->pm_count));
527 	pm_relax(&plat_priv->plat_dev->dev);
528 }
529 
530 int cnss_get_fw_files_for_target(struct device *dev,
531 				 struct cnss_fw_files *pfw_files,
532 				 u32 target_type, u32 target_version)
533 {
534 	if (!pfw_files)
535 		return -ENODEV;
536 
537 	switch (target_version) {
538 	case QCA6174_REV3_VERSION:
539 	case QCA6174_REV3_2_VERSION:
540 		memcpy(pfw_files, &FW_FILES_QCA6174_FW_3_0, sizeof(*pfw_files));
541 		break;
542 	default:
543 		memcpy(pfw_files, &FW_FILES_DEFAULT, sizeof(*pfw_files));
544 		cnss_pr_err("Unknown target version, type: 0x%X, version: 0x%X\n",
545 			    target_type, target_version);
546 		break;
547 	}
548 
549 	return 0;
550 }
551 EXPORT_SYMBOL(cnss_get_fw_files_for_target);
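/*
 * Illustrative usage sketch (not part of the original driver): a host
 * driver can resolve the per-target firmware file names before loading
 * them; target_type/target_version and the helper below are hypothetical.
 *
 *	struct cnss_fw_files fw_files;
 *
 *	if (!cnss_get_fw_files_for_target(dev, &fw_files,
 *					  target_type, target_version))
 *		request_target_firmware(&fw_files);
 */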
552 
553 int cnss_get_platform_cap(struct device *dev, struct cnss_platform_cap *cap)
554 {
555 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
556 
557 	if (!plat_priv)
558 		return -ENODEV;
559 
560 	if (!cap)
561 		return -EINVAL;
562 
563 	*cap = plat_priv->cap;
564 	cnss_pr_dbg("Platform cap_flag is 0x%x\n", cap->cap_flag);
565 
566 	return 0;
567 }
568 EXPORT_SYMBOL(cnss_get_platform_cap);
569 
570 /**
571  * cnss_get_fw_cap - Check whether FW supports specific capability or not
572  * @dev: Device
573  * @fw_cap: FW Capability which needs to be checked
574  *
575  * Return: TRUE if supported, FALSE on failure or if not supported
576  */
577 bool cnss_get_fw_cap(struct device *dev, enum cnss_fw_caps fw_cap)
578 {
579 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
580 	bool is_supported = false;
581 
582 	if (!plat_priv)
583 		return is_supported;
584 
585 	if (!plat_priv->fw_caps)
586 		return is_supported;
587 
588 	switch (fw_cap) {
589 	case CNSS_FW_CAP_DIRECT_LINK_SUPPORT:
590 		is_supported = !!(plat_priv->fw_caps &
591 				  QMI_WLFW_DIRECT_LINK_SUPPORT_V01);
592 		if (is_supported && cnss_get_audio_iommu_domain(plat_priv))
593 			is_supported = false;
594 		break;
595 	case CNSS_FW_CAP_CALDB_SEG_DDR_SUPPORT:
596 		is_supported = !!(plat_priv->fw_caps &
597 				  QMI_WLFW_CALDB_SEG_DDR_SUPPORT_V01);
598 		break;
599 	default:
600 		cnss_pr_err("Invalid FW Capability: 0x%x\n", fw_cap);
601 	}
602 
603 	cnss_pr_dbg("FW Capability 0x%x is %s\n", fw_cap,
604 		    is_supported ? "supported" : "not supported");
605 	return is_supported;
606 }
607 EXPORT_SYMBOL(cnss_get_fw_cap);
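/*
 * Illustrative usage sketch (not part of the original driver): a WLAN
 * host driver holding the bus device pointer can gate an optional feature
 * on the corresponding FW capability; the helper below is hypothetical.
 *
 *	if (cnss_get_fw_cap(dev, CNSS_FW_CAP_DIRECT_LINK_SUPPORT))
 *		enable_direct_link_datapath(dev);
 */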
608 
609 void cnss_request_pm_qos(struct device *dev, u32 qos_val)
610 {
611 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
612 
613 	if (!plat_priv)
614 		return;
615 
616 	cpu_latency_qos_add_request(&plat_priv->qos_request, qos_val);
617 }
618 EXPORT_SYMBOL(cnss_request_pm_qos);
619 
620 void cnss_remove_pm_qos(struct device *dev)
621 {
622 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
623 
624 	if (!plat_priv)
625 		return;
626 
627 	cpu_latency_qos_remove_request(&plat_priv->qos_request);
628 }
629 EXPORT_SYMBOL(cnss_remove_pm_qos);
630 
631 int cnss_wlan_enable(struct device *dev,
632 		     struct cnss_wlan_enable_cfg *config,
633 		     enum cnss_driver_mode mode,
634 		     const char *host_version)
635 {
636 	int ret = 0;
637 	struct cnss_plat_data *plat_priv;
638 
639 	if (!dev) {
640 		cnss_pr_err("Invalid dev pointer\n");
641 		return -EINVAL;
642 	}
643 
644 	plat_priv = cnss_bus_dev_to_plat_priv(dev);
645 	if (!plat_priv)
646 		return -ENODEV;
647 
648 	if (plat_priv->device_id == QCA6174_DEVICE_ID)
649 		return 0;
650 
651 	if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks))
652 		return 0;
653 
654 	if (!config || !host_version) {
655 		cnss_pr_err("Invalid config or host_version pointer\n");
656 		return -EINVAL;
657 	}
658 
659 	cnss_pr_dbg("Mode: %d, config: %pK, host_version: %s\n",
660 		    mode, config, host_version);
661 
662 	if (mode == CNSS_WALTEST || mode == CNSS_CCPM)
663 		goto skip_cfg;
664 
665 	if (plat_priv->device_id == QCN7605_DEVICE_ID)
666 		config->send_msi_ce = true;
667 
668 	ret = cnss_wlfw_wlan_cfg_send_sync(plat_priv, config, host_version);
669 	if (ret)
670 		goto out;
671 
672 skip_cfg:
673 	ret = cnss_wlfw_wlan_mode_send_sync(plat_priv, mode);
674 out:
675 	return ret;
676 }
677 EXPORT_SYMBOL(cnss_wlan_enable);
678 
679 int cnss_wlan_disable(struct device *dev, enum cnss_driver_mode mode)
680 {
681 	int ret = 0;
682 	struct cnss_plat_data *plat_priv;
683 
684 	if (!dev) {
685 		cnss_pr_err("Invalid dev pointer\n");
686 		return -EINVAL;
687 	}
688 
689 	plat_priv = cnss_bus_dev_to_plat_priv(dev);
690 	if (!plat_priv)
691 		return -ENODEV;
692 
693 	if (plat_priv->device_id == QCA6174_DEVICE_ID)
694 		return 0;
695 
696 	if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks))
697 		return 0;
698 
699 	ret = cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
700 	cnss_bus_free_qdss_mem(plat_priv);
701 
702 	return ret;
703 }
704 EXPORT_SYMBOL(cnss_wlan_disable);
705 
706 #if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
707 int cnss_iommu_map(struct iommu_domain *domain,
708 		   unsigned long iova, phys_addr_t paddr, size_t size, int prot)
709 {
710 	return iommu_map(domain, iova, paddr, size, prot);
711 }
712 #else
713 int cnss_iommu_map(struct iommu_domain *domain,
714 		   unsigned long iova, phys_addr_t paddr, size_t size, int prot)
715 {
716 	return iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
717 }
718 #endif
719 
720 int cnss_audio_smmu_map(struct device *dev, phys_addr_t paddr,
721 			dma_addr_t iova, size_t size)
722 {
723 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
724 	uint32_t page_offset;
725 
726 	if (!plat_priv)
727 		return -ENODEV;
728 
729 	if (!plat_priv->audio_iommu_domain)
730 		return -EINVAL;
731 
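	/*
	 * The requested range may start mid-page and may also spill into
	 * the following page. Align iova/paddr down to a page boundary
	 * and, when the range crosses a page, grow the size by one page
	 * so the page-granular IOMMU mapping still covers the whole range.
	 */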
732 	page_offset = iova & (PAGE_SIZE - 1);
733 	if (page_offset + size > PAGE_SIZE)
734 		size += PAGE_SIZE;
735 
736 	iova -= page_offset;
737 	paddr -= page_offset;
738 
739 	return cnss_iommu_map(plat_priv->audio_iommu_domain, iova, paddr,
740 			      roundup(size, PAGE_SIZE), IOMMU_READ |
741 			      IOMMU_WRITE | IOMMU_CACHE);
742 }
743 EXPORT_SYMBOL(cnss_audio_smmu_map);
744 
745 void cnss_audio_smmu_unmap(struct device *dev, dma_addr_t iova, size_t size)
746 {
747 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
748 	uint32_t page_offset;
749 
750 	if (!plat_priv)
751 		return;
752 
753 	if (!plat_priv->audio_iommu_domain)
754 		return;
755 
756 	page_offset = iova & (PAGE_SIZE - 1);
757 	if (page_offset + size > PAGE_SIZE)
758 		size += PAGE_SIZE;
759 
760 	iova -= page_offset;
761 
762 	iommu_unmap(plat_priv->audio_iommu_domain, iova,
763 		    roundup(size, PAGE_SIZE));
764 }
765 EXPORT_SYMBOL(cnss_audio_smmu_unmap);
766 
767 int cnss_get_fw_lpass_shared_mem(struct device *dev, dma_addr_t *iova,
768 				 size_t *size)
769 {
770 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
771 	uint8_t i;
772 
773 	if (!plat_priv)
774 		return -EINVAL;
775 
776 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
777 		if (plat_priv->fw_mem[i].type ==
778 		    QMI_WLFW_MEM_LPASS_SHARED_V01) {
779 			*iova = plat_priv->fw_mem[i].pa;
780 			*size = plat_priv->fw_mem[i].size;
781 			return 0;
782 		}
783 	}
784 
785 	return -EINVAL;
786 }
787 EXPORT_SYMBOL(cnss_get_fw_lpass_shared_mem);
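/*
 * Illustrative usage sketch (not part of the original driver): an audio
 * (LPASS) client can look up the FW shared region reserved for it:
 *
 *	dma_addr_t iova;
 *	size_t size;
 *
 *	if (!cnss_get_fw_lpass_shared_mem(dev, &iova, &size))
 *		pr_debug("LPASS shared region: addr 0x%llx, size %zu\n",
 *			 (u64)iova, size);
 */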
788 
789 int cnss_athdiag_read(struct device *dev, u32 offset, u32 mem_type,
790 		      u32 data_len, u8 *output)
791 {
792 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
793 	int ret = 0;
794 
795 	if (!plat_priv) {
796 		cnss_pr_err("plat_priv is NULL!\n");
797 		return -EINVAL;
798 	}
799 
800 	if (plat_priv->device_id == QCA6174_DEVICE_ID)
801 		return 0;
802 
803 	if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
804 		cnss_pr_err("Invalid state for athdiag read: 0x%lx\n",
805 			    plat_priv->driver_state);
806 		ret = -EINVAL;
807 		goto out;
808 	}
809 
810 	ret = cnss_wlfw_athdiag_read_send_sync(plat_priv, offset, mem_type,
811 					       data_len, output);
812 
813 out:
814 	return ret;
815 }
816 EXPORT_SYMBOL(cnss_athdiag_read);
817 
818 int cnss_athdiag_write(struct device *dev, u32 offset, u32 mem_type,
819 		       u32 data_len, u8 *input)
820 {
821 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
822 	int ret = 0;
823 
824 	if (!plat_priv) {
825 		cnss_pr_err("plat_priv is NULL!\n");
826 		return -EINVAL;
827 	}
828 
829 	if (plat_priv->device_id == QCA6174_DEVICE_ID)
830 		return 0;
831 
832 	if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
833 		cnss_pr_err("Invalid state for athdiag write: 0x%lx\n",
834 			    plat_priv->driver_state);
835 		ret = -EINVAL;
836 		goto out;
837 	}
838 
839 	ret = cnss_wlfw_athdiag_write_send_sync(plat_priv, offset, mem_type,
840 						data_len, input);
841 
842 out:
843 	return ret;
844 }
845 EXPORT_SYMBOL(cnss_athdiag_write);
846 
847 int cnss_set_fw_log_mode(struct device *dev, u8 fw_log_mode)
848 {
849 	struct cnss_plat_data *plat_priv;
850 
851 	if (!dev) {
852 		cnss_pr_err("Invalid dev pointer\n");
853 		return -EINVAL;
854 	}
855 
856 	plat_priv = cnss_bus_dev_to_plat_priv(dev);
857 	if (!plat_priv)
858 		return -ENODEV;
859 
860 	if (plat_priv->device_id == QCA6174_DEVICE_ID)
861 		return 0;
862 
863 	return cnss_wlfw_ini_send_sync(plat_priv, fw_log_mode);
864 }
865 EXPORT_SYMBOL(cnss_set_fw_log_mode);
866 
867 int cnss_set_pcie_gen_speed(struct device *dev, u8 pcie_gen_speed)
868 {
869 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
870 
871 	if (!plat_priv)
872 		return -EINVAL;
873 
874 	if (!plat_priv->fw_pcie_gen_switch) {
875 		cnss_pr_err("Firmware does not support PCIe gen switch\n");
876 		return -EOPNOTSUPP;
877 	}
878 
879 	if (pcie_gen_speed < QMI_PCIE_GEN_SPEED_1_V01 ||
880 	    pcie_gen_speed > QMI_PCIE_GEN_SPEED_3_V01)
881 		return -EINVAL;
882 
883 	cnss_pr_dbg("WLAN provided PCIE gen speed: %d\n", pcie_gen_speed);
884 	plat_priv->pcie_gen_speed = pcie_gen_speed;
885 	return 0;
886 }
887 EXPORT_SYMBOL(cnss_set_pcie_gen_speed);
888 
889 static bool cnss_is_aux_support_enabled(struct cnss_plat_data *plat_priv)
890 {
891 	switch (plat_priv->device_id) {
892 	case PEACH_DEVICE_ID:
893 		if (!plat_priv->fw_aux_uc_support) {
894 			cnss_pr_dbg("FW does not support aux uc capability\n");
895 			return false;
896 		}
897 		break;
898 	default:
899 		cnss_pr_dbg("Host does not support aux uc capability\n");
900 		return false;
901 	}
902 
903 	return true;
904 }
905 
906 static int cnss_fw_mem_ready_hdlr(struct cnss_plat_data *plat_priv)
907 {
908 	int ret = 0;
909 
910 	if (!plat_priv)
911 		return -ENODEV;
912 
913 	set_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
914 
915 	ret = cnss_wlfw_tgt_cap_send_sync(plat_priv);
916 	if (ret)
917 		goto out;
918 
919 	cnss_bus_load_tme_patch(plat_priv);
920 
921 	cnss_wlfw_tme_patch_dnld_send_sync(plat_priv,
922 					   WLFW_TME_LITE_PATCH_FILE_V01);
923 
924 	if (plat_priv->hds_enabled)
925 		cnss_wlfw_bdf_dnld_send_sync(plat_priv, CNSS_BDF_HDS);
926 
927 	cnss_wlfw_bdf_dnld_send_sync(plat_priv, CNSS_BDF_REGDB);
928 
929 	if (plat_priv->device_id == QCN7605_DEVICE_ID)
930 		plat_priv->ctrl_params.bdf_type = CNSS_BDF_BIN;
931 
932 	ret = cnss_wlfw_bdf_dnld_send_sync(plat_priv,
933 					   plat_priv->ctrl_params.bdf_type);
934 	if (ret)
935 		goto out;
936 
937 	if (plat_priv->device_id == QCN7605_DEVICE_ID)
938 		return 0;
939 
940 	ret = cnss_bus_load_m3(plat_priv);
941 	if (ret)
942 		goto out;
943 
944 	ret = cnss_wlfw_m3_dnld_send_sync(plat_priv);
945 	if (ret)
946 		goto out;
947 
948 	if (cnss_is_aux_support_enabled(plat_priv)) {
949 		ret = cnss_bus_load_aux(plat_priv);
950 		if (ret)
951 			goto out;
952 
953 		ret = cnss_wlfw_aux_dnld_send_sync(plat_priv);
954 		if (ret)
955 			goto out;
956 	}
957 
958 	cnss_wlfw_qdss_dnld_send_sync(plat_priv);
959 
960 	return 0;
961 out:
962 	return ret;
963 }
964 
965 static int cnss_request_antenna_sharing(struct cnss_plat_data *plat_priv)
966 {
967 	int ret = 0;
968 
969 	if (!plat_priv->antenna) {
970 		ret = cnss_wlfw_antenna_switch_send_sync(plat_priv);
971 		if (ret)
972 			goto out;
973 	}
974 
975 	if (test_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state)) {
976 		ret = coex_antenna_switch_to_wlan_send_sync_msg(plat_priv);
977 		if (ret)
978 			goto out;
979 	}
980 
981 	ret = cnss_wlfw_antenna_grant_send_sync(plat_priv);
982 	if (ret)
983 		goto out;
984 
985 	return 0;
986 
987 out:
988 	return ret;
989 }
990 
991 static void cnss_release_antenna_sharing(struct cnss_plat_data *plat_priv)
992 {
993 	if (test_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state))
994 		coex_antenna_switch_to_mdm_send_sync_msg(plat_priv);
995 }
996 
997 static int cnss_setup_dms_mac(struct cnss_plat_data *plat_priv)
998 {
999 	u32 i;
1000 	int ret = 0;
1001 	struct cnss_plat_ipc_daemon_config *cfg;
1002 
1003 	ret = cnss_qmi_get_dms_mac(plat_priv);
1004 	if (ret == 0 && plat_priv->dms.mac_valid)
1005 		goto qmi_send;
1006 
1007 	/* DTSI property use-nv-mac is used to force DMS MAC address for WLAN.
1008 	 * Thus assert on failure to get MAC from DMS even after retries
1009 	 */
1010 	if (plat_priv->use_nv_mac) {
1011 		/* Check if Daemon says the platform supports DMS MAC provisioning */
1012 		cfg = cnss_plat_ipc_qmi_daemon_config();
1013 		if (cfg) {
1014 			if (!cfg->dms_mac_addr_supported) {
1015 				cnss_pr_err("DMS MAC address not supported\n");
1016 				CNSS_ASSERT(0);
1017 				return -EINVAL;
1018 			}
1019 		}
1020 		for (i = 0; i < CNSS_DMS_QMI_CONNECTION_WAIT_RETRY; i++) {
1021 			if (plat_priv->dms.mac_valid)
1022 				break;
1023 
1024 			ret = cnss_qmi_get_dms_mac(plat_priv);
1025 			if (ret == 0)
1026 				break;
1027 			msleep(CNSS_DMS_QMI_CONNECTION_WAIT_MS);
1028 		}
1029 		if (!plat_priv->dms.mac_valid) {
1030 			cnss_pr_err("Unable to get MAC from DMS after retries\n");
1031 			CNSS_ASSERT(0);
1032 			return -EINVAL;
1033 		}
1034 	}
1035 qmi_send:
1036 	if (plat_priv->dms.mac_valid)
1037 		ret =
1038 		cnss_wlfw_wlan_mac_req_send_sync(plat_priv, plat_priv->dms.mac,
1039 						 ARRAY_SIZE(plat_priv->dms.mac));
1040 
1041 	return ret;
1042 }
1043 
1044 static int cnss_cal_db_mem_update(struct cnss_plat_data *plat_priv,
1045 				  enum cnss_cal_db_op op, u32 *size)
1046 {
1047 	int ret = 0;
1048 	u32 timeout = cnss_get_timeout(plat_priv,
1049 				       CNSS_TIMEOUT_DAEMON_CONNECTION);
1050 	enum cnss_plat_ipc_qmi_client_id_v01 client_id =
1051 					CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01;
1052 
1053 	if (op >= CNSS_CAL_DB_INVALID_OP)
1054 		return -EINVAL;
1055 
1056 	if (!plat_priv->cbc_file_download) {
1057 		cnss_pr_info("CAL DB file not required as per BDF\n");
1058 		return 0;
1059 	}
1060 	if (*size == 0) {
1061 		cnss_pr_err("Invalid cal file size\n");
1062 		return -EINVAL;
1063 	}
1064 	if (!test_bit(CNSS_DAEMON_CONNECTED, &plat_priv->driver_state)) {
1065 		cnss_pr_info("Waiting for CNSS Daemon connection\n");
1066 		ret = wait_for_completion_timeout(&plat_priv->daemon_connected,
1067 						  msecs_to_jiffies(timeout));
1068 		if (!ret) {
1069 			cnss_pr_err("Daemon not yet connected\n");
1070 			CNSS_ASSERT(0);
1071 			return ret;
1072 		}
1073 	}
1074 	if (!plat_priv->cal_mem->va) {
1075 		cnss_pr_err("CAL DB Memory not setup for FW\n");
1076 		return -EINVAL;
1077 	}
1078 
1079 	/* Copy CAL DB file contents to/from CAL_TYPE_DDR mem allocated to FW */
1080 	if (op == CNSS_CAL_DB_DOWNLOAD) {
1081 		cnss_pr_dbg("Initiating Calibration file download to mem\n");
1082 		ret = cnss_plat_ipc_qmi_file_download(client_id,
1083 						      CNSS_CAL_DB_FILE_NAME,
1084 						      plat_priv->cal_mem->va,
1085 						      size);
1086 	} else {
1087 		cnss_pr_dbg("Initiating Calibration mem upload to file\n");
1088 		ret = cnss_plat_ipc_qmi_file_upload(client_id,
1089 						    CNSS_CAL_DB_FILE_NAME,
1090 						    plat_priv->cal_mem->va,
1091 						    *size);
1092 	}
1093 
1094 	if (ret)
1095 		cnss_pr_err("Cal DB file %s %s failure\n",
1096 			    CNSS_CAL_DB_FILE_NAME,
1097 			    op == CNSS_CAL_DB_DOWNLOAD ? "download" : "upload");
1098 	else
1099 		cnss_pr_dbg("Cal DB file %s %s size %d done\n",
1100 			    CNSS_CAL_DB_FILE_NAME,
1101 			    op == CNSS_CAL_DB_DOWNLOAD ? "download" : "upload",
1102 			    *size);
1103 
1104 	return ret;
1105 }
1106 
1107 static int cnss_cal_mem_upload_to_file(struct cnss_plat_data *plat_priv)
1108 {
1109 	if (plat_priv->cal_file_size > plat_priv->cal_mem->size) {
1110 		cnss_pr_err("Cal file size is larger than Cal DB Mem size\n");
1111 		return -EINVAL;
1112 	}
1113 	return cnss_cal_db_mem_update(plat_priv, CNSS_CAL_DB_UPLOAD,
1114 				      &plat_priv->cal_file_size);
1115 }
1116 
1117 static int cnss_cal_file_download_to_mem(struct cnss_plat_data *plat_priv,
1118 					 u32 *cal_file_size)
1119 {
1120 	/* For download, pass the total size of the allocated CAL DB memory.
1121 	 * After the cal file is downloaded to memory, its size is updated
1122 	 * in the return pointer.
1123 	 */
1124 	*cal_file_size = plat_priv->cal_mem->size;
1125 	return cnss_cal_db_mem_update(plat_priv, CNSS_CAL_DB_DOWNLOAD,
1126 				      cal_file_size);
1127 }
1128 
1129 static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv)
1130 {
1131 	int ret = 0;
1132 	u32 cal_file_size = 0;
1133 
1134 	if (!plat_priv)
1135 		return -ENODEV;
1136 
1137 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
1138 		cnss_pr_err("Reboot is in progress, ignore FW ready\n");
1139 		return -EINVAL;
1140 	}
1141 
1142 	cnss_pr_dbg("Processing FW Init Done..\n");
1143 	del_timer(&plat_priv->fw_boot_timer);
1144 	set_bit(CNSS_FW_READY, &plat_priv->driver_state);
1145 	clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
1146 
1147 	cnss_wlfw_send_pcie_gen_speed_sync(plat_priv);
1148 	cnss_send_subsys_restart_level_msg(plat_priv);
1149 
1150 	if (test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state)) {
1151 		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
1152 		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
1153 	}
1154 
1155 	if (test_bit(ENABLE_WALTEST, &plat_priv->ctrl_params.quirks)) {
1156 		ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
1157 						    CNSS_WALTEST);
1158 	} else if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
1159 		cnss_request_antenna_sharing(plat_priv);
1160 		cnss_cal_file_download_to_mem(plat_priv, &cal_file_size);
1161 		cnss_wlfw_cal_report_req_send_sync(plat_priv, cal_file_size);
1162 		plat_priv->cal_time = jiffies;
1163 		ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
1164 						    CNSS_CALIBRATION);
1165 	} else {
1166 		ret = cnss_setup_dms_mac(plat_priv);
1167 		ret = cnss_bus_call_driver_probe(plat_priv);
1168 	}
1169 
1170 	if (ret && test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
1171 		goto out;
1172 	else if (ret)
1173 		goto shutdown;
1174 
1175 	cnss_vreg_unvote_type(plat_priv, CNSS_VREG_PRIM);
1176 
1177 	return 0;
1178 
1179 shutdown:
1180 	cnss_bus_dev_shutdown(plat_priv);
1181 
1182 	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
1183 	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
1184 
1185 out:
1186 	return ret;
1187 }
1188 
1189 static char *cnss_driver_event_to_str(enum cnss_driver_event_type type)
1190 {
1191 	switch (type) {
1192 	case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
1193 		return "SERVER_ARRIVE";
1194 	case CNSS_DRIVER_EVENT_SERVER_EXIT:
1195 		return "SERVER_EXIT";
1196 	case CNSS_DRIVER_EVENT_REQUEST_MEM:
1197 		return "REQUEST_MEM";
1198 	case CNSS_DRIVER_EVENT_FW_MEM_READY:
1199 		return "FW_MEM_READY";
1200 	case CNSS_DRIVER_EVENT_FW_READY:
1201 		return "FW_READY";
1202 	case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
1203 		return "COLD_BOOT_CAL_START";
1204 	case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
1205 		return "COLD_BOOT_CAL_DONE";
1206 	case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
1207 		return "REGISTER_DRIVER";
1208 	case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
1209 		return "UNREGISTER_DRIVER";
1210 	case CNSS_DRIVER_EVENT_RECOVERY:
1211 		return "RECOVERY";
1212 	case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
1213 		return "FORCE_FW_ASSERT";
1214 	case CNSS_DRIVER_EVENT_POWER_UP:
1215 		return "POWER_UP";
1216 	case CNSS_DRIVER_EVENT_POWER_DOWN:
1217 		return "POWER_DOWN";
1218 	case CNSS_DRIVER_EVENT_IDLE_RESTART:
1219 		return "IDLE_RESTART";
1220 	case CNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
1221 		return "IDLE_SHUTDOWN";
1222 	case CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND:
1223 		return "IMS_WFC_CALL_IND";
1224 	case CNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND:
1225 		return "WLFW_TWT_CFG_IND";
1226 	case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
1227 		return "QDSS_TRACE_REQ_MEM";
1228 	case CNSS_DRIVER_EVENT_FW_MEM_FILE_SAVE:
1229 		return "FW_MEM_FILE_SAVE";
1230 	case CNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
1231 		return "QDSS_TRACE_FREE";
1232 	case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA:
1233 		return "QDSS_TRACE_REQ_DATA";
1234 	case CNSS_DRIVER_EVENT_MAX:
1235 		return "EVENT_MAX";
1236 	}
1237 
1238 	return "UNKNOWN";
1239 };
1240 
1241 int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
1242 			   enum cnss_driver_event_type type,
1243 			   u32 flags, void *data)
1244 {
1245 	struct cnss_driver_event *event;
1246 	unsigned long irq_flags;
1247 	int gfp = GFP_KERNEL;
1248 	int ret = 0;
1249 
1250 	if (!plat_priv)
1251 		return -ENODEV;
1252 
1253 	cnss_pr_dbg("Posting event: %s(%d)%s, state: 0x%lx flags: 0x%0x\n",
1254 		    cnss_driver_event_to_str(type), type,
1255 		    flags ? "-sync" : "", plat_priv->driver_state, flags);
1256 
1257 	if (type >= CNSS_DRIVER_EVENT_MAX) {
1258 		cnss_pr_err("Invalid Event type: %d, can't post\n", type);
1259 		return -EINVAL;
1260 	}
1261 
1262 	if (in_interrupt() || irqs_disabled())
1263 		gfp = GFP_ATOMIC;
1264 
1265 	event = kzalloc(sizeof(*event), gfp);
1266 	if (!event)
1267 		return -ENOMEM;
1268 
1269 	cnss_pm_stay_awake(plat_priv);
1270 
1271 	event->type = type;
1272 	event->data = data;
1273 	init_completion(&event->complete);
1274 	event->ret = CNSS_EVENT_PENDING;
1275 	event->sync = !!(flags & CNSS_EVENT_SYNC);
1276 
1277 	spin_lock_irqsave(&plat_priv->event_lock, irq_flags);
1278 	list_add_tail(&event->list, &plat_priv->event_list);
1279 	spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
1280 
1281 	queue_work(plat_priv->event_wq, &plat_priv->event_work);
1282 
1283 	if (!(flags & CNSS_EVENT_SYNC))
1284 		goto out;
1285 
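	/*
	 * The strictness of the sync wait is selected by flags:
	 * UNKILLABLE waits unconditionally, UNINTERRUPTIBLE still allows
	 * fatal signals (killable wait), and otherwise any signal can
	 * interrupt the wait (handled via -ERESTARTSYS below).
	 */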
1286 	if (flags & CNSS_EVENT_UNKILLABLE)
1287 		wait_for_completion(&event->complete);
1288 	else if (flags & CNSS_EVENT_UNINTERRUPTIBLE)
1289 		ret = wait_for_completion_killable(&event->complete);
1290 	else
1291 		ret = wait_for_completion_interruptible(&event->complete);
1292 
1293 	cnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
1294 		    cnss_driver_event_to_str(type), type,
1295 		    plat_priv->driver_state, ret, event->ret);
1296 	spin_lock_irqsave(&plat_priv->event_lock, irq_flags);
1297 	if (ret == -ERESTARTSYS && event->ret == CNSS_EVENT_PENDING) {
1298 		event->sync = false;
1299 		spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
1300 		ret = -EINTR;
1301 		goto out;
1302 	}
1303 	spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
1304 
1305 	ret = event->ret;
1306 	kfree(event);
1307 
1308 out:
1309 	cnss_pm_relax(plat_priv);
1310 	return ret;
1311 }
1312 
1313 /**
1314  * cnss_get_timeout - Get timeout for corresponding type.
1315  * @plat_priv: Pointer to platform driver context.
1316  * @timeout_type: Timeout type.
1317  *
1318  * Return: Timeout in milliseconds.
1319  */
1320 unsigned int cnss_get_timeout(struct cnss_plat_data *plat_priv,
1321 			      enum cnss_timeout_type timeout_type)
1322 {
1323 	unsigned int qmi_timeout = cnss_get_qmi_timeout(plat_priv);
1324 
1325 	switch (timeout_type) {
1326 	case CNSS_TIMEOUT_QMI:
1327 		return qmi_timeout;
1328 	case CNSS_TIMEOUT_POWER_UP:
1329 		return (qmi_timeout << 2);
1330 	case CNSS_TIMEOUT_IDLE_RESTART:
1331 		/* In idle restart power up sequence, we have fw_boot_timer to
1332 		 * handle FW initialization failure.
1333 		 * It uses WLAN_MISSION_MODE_TIMEOUT, so setup 3x that time to
1334 		 * account for FW dump collection and FW re-initialization on
1335 		 * retry.
1336 		 */
1337 		return (qmi_timeout + WLAN_MISSION_MODE_TIMEOUT * 3);
1338 	case CNSS_TIMEOUT_CALIBRATION:
1339 		/* Similar to mission mode, FW recovery is attempted in CBC
1340 		 * if FW init fails. Thus return 2x the CBC timeout.
1341 		 */
1342 		return (qmi_timeout + WLAN_COLD_BOOT_CAL_TIMEOUT * 2);
1343 	case CNSS_TIMEOUT_WLAN_WATCHDOG:
1344 		return ((qmi_timeout << 1) + WLAN_WD_TIMEOUT_MS);
1345 	case CNSS_TIMEOUT_RDDM:
1346 		return CNSS_RDDM_TIMEOUT_MS;
1347 	case CNSS_TIMEOUT_RECOVERY:
1348 		return RECOVERY_TIMEOUT;
1349 	case CNSS_TIMEOUT_DAEMON_CONNECTION:
1350 		return qmi_timeout + CNSS_DAEMON_CONNECT_TIMEOUT_MS;
1351 	default:
1352 		return qmi_timeout;
1353 	}
1354 }
1355 
1356 unsigned int cnss_get_boot_timeout(struct device *dev)
1357 {
1358 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1359 
1360 	if (!plat_priv) {
1361 		cnss_pr_err("plat_priv is NULL\n");
1362 		return 0;
1363 	}
1364 
1365 	return cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI);
1366 }
1367 EXPORT_SYMBOL(cnss_get_boot_timeout);
1368 
1369 int cnss_power_up(struct device *dev)
1370 {
1371 	int ret = 0;
1372 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1373 	unsigned int timeout;
1374 
1375 	if (!plat_priv) {
1376 		cnss_pr_err("plat_priv is NULL\n");
1377 		return -ENODEV;
1378 	}
1379 
1380 	cnss_pr_dbg("Powering up device\n");
1381 
1382 	ret = cnss_driver_event_post(plat_priv,
1383 				     CNSS_DRIVER_EVENT_POWER_UP,
1384 				     CNSS_EVENT_SYNC, NULL);
1385 	if (ret)
1386 		goto out;
1387 
1388 	if (plat_priv->device_id == QCA6174_DEVICE_ID)
1389 		goto out;
1390 
1391 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_POWER_UP);
1392 
1393 	reinit_completion(&plat_priv->power_up_complete);
1394 	ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
1395 					  msecs_to_jiffies(timeout));
1396 	if (!ret) {
1397 		cnss_pr_err("Timeout (%ums) waiting for power up to complete\n",
1398 			    timeout);
1399 		ret = -EAGAIN;
1400 		goto out;
1401 	}
1402 
1403 	return 0;
1404 
1405 out:
1406 	return ret;
1407 }
1408 EXPORT_SYMBOL(cnss_power_up);
1409 
1410 int cnss_power_down(struct device *dev)
1411 {
1412 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1413 
1414 	if (!plat_priv) {
1415 		cnss_pr_err("plat_priv is NULL\n");
1416 		return -ENODEV;
1417 	}
1418 
1419 	cnss_pr_dbg("Powering down device\n");
1420 
1421 	return cnss_driver_event_post(plat_priv,
1422 				      CNSS_DRIVER_EVENT_POWER_DOWN,
1423 				      CNSS_EVENT_SYNC, NULL);
1424 }
1425 EXPORT_SYMBOL(cnss_power_down);
1426 
1427 int cnss_idle_restart(struct device *dev)
1428 {
1429 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1430 	unsigned int timeout;
1431 	int ret = 0;
1432 
1433 	if (!plat_priv) {
1434 		cnss_pr_err("plat_priv is NULL\n");
1435 		return -ENODEV;
1436 	}
1437 
1438 	if (!mutex_trylock(&plat_priv->driver_ops_lock)) {
1439 		cnss_pr_dbg("Another driver operation is in progress, ignore idle restart\n");
1440 		return -EBUSY;
1441 	}
1442 
1443 	cnss_pr_dbg("Doing idle restart\n");
1444 
1445 	reinit_completion(&plat_priv->power_up_complete);
1446 
1447 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
1448 		cnss_pr_dbg("Reboot or shutdown is in progress, ignore idle restart\n");
1449 		ret = -EINVAL;
1450 		goto out;
1451 	}
1452 
1453 	ret = cnss_driver_event_post(plat_priv,
1454 				     CNSS_DRIVER_EVENT_IDLE_RESTART,
1455 				     CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
1456 	if (ret == -EINTR && plat_priv->device_id != QCA6174_DEVICE_ID)
1457 		cnss_pr_err("Idle restart has been interrupted but device power up is still in progress\n");
1458 	else if (ret)
1459 		goto out;
1460 
1461 	if (plat_priv->device_id == QCA6174_DEVICE_ID) {
1462 		ret = cnss_bus_call_driver_probe(plat_priv);
1463 		goto out;
1464 	}
1465 
1466 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_IDLE_RESTART);
1467 	ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
1468 					  msecs_to_jiffies(timeout));
1469 	if (plat_priv->power_up_error) {
1470 		ret = plat_priv->power_up_error;
1471 		clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
1472 		cnss_pr_dbg("Power up error:%d, exiting\n",
1473 			    plat_priv->power_up_error);
1474 		goto out;
1475 	}
1476 
1477 	if (!ret) {
1478 		/* This exception occurs after attempting retry of FW recovery.
1479 		 * Thus we can safely power off the device.
1480 		 */
1481 		cnss_fatal_err("Timeout (%ums) waiting for idle restart to complete\n",
1482 			       timeout);
1483 		ret = -ETIMEDOUT;
1484 		cnss_power_down(dev);
1485 		CNSS_ASSERT(0);
1486 		goto out;
1487 	}
1488 
1489 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
1490 		cnss_pr_dbg("Reboot or shutdown is in progress, ignore idle restart\n");
1491 		del_timer(&plat_priv->fw_boot_timer);
1492 		ret = -EINVAL;
1493 		goto out;
1494 	}
1495 
1496 	/* In non-DRV mode, remove MHI satellite configuration. Switching to
1497 	 * non-DRV is supported only once after device reboots and before wifi
1498 	 * is turned on. We do not allow switching back to DRV.
1499 	 * To bring device back into DRV, user needs to reboot device.
1500 	 */
1501 	if (test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks)) {
1502 		cnss_pr_dbg("DRV is disabled\n");
1503 		cnss_bus_disable_mhi_satellite_cfg(plat_priv);
1504 	}
1505 
1506 	mutex_unlock(&plat_priv->driver_ops_lock);
1507 	return 0;
1508 
1509 out:
1510 	mutex_unlock(&plat_priv->driver_ops_lock);
1511 	return ret;
1512 }
1513 EXPORT_SYMBOL(cnss_idle_restart);
1514 
1515 int cnss_idle_shutdown(struct device *dev)
1516 {
1517 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1518 
1519 	if (!plat_priv) {
1520 		cnss_pr_err("plat_priv is NULL\n");
1521 		return -ENODEV;
1522 	}
1523 
1524 	if (test_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state)) {
1525 		cnss_pr_dbg("System suspend or resume in progress, ignore idle shutdown\n");
1526 		return -EAGAIN;
1527 	}
1528 
1529 	cnss_pr_dbg("Doing idle shutdown\n");
1530 
1531 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) ||
1532 	    test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
1533 		cnss_pr_dbg("Recovery in progress. Ignore IDLE Shutdown\n");
1534 		return -EBUSY;
1535 	}
1536 
1537 	return cnss_driver_event_post(plat_priv,
1538 				      CNSS_DRIVER_EVENT_IDLE_SHUTDOWN,
1539 				      CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
1540 }
1541 EXPORT_SYMBOL(cnss_idle_shutdown);
1542 
1543 static int cnss_get_resources(struct cnss_plat_data *plat_priv)
1544 {
1545 	int ret = 0;
1546 
1547 	ret = cnss_get_vreg_type(plat_priv, CNSS_VREG_PRIM);
1548 	if (ret < 0) {
1549 		cnss_pr_err("Failed to get vreg, err = %d\n", ret);
1550 		goto out;
1551 	}
1552 
1553 	ret = cnss_get_clk(plat_priv);
1554 	if (ret) {
1555 		cnss_pr_err("Failed to get clocks, err = %d\n", ret);
1556 		goto put_vreg;
1557 	}
1558 
1559 	ret = cnss_get_pinctrl(plat_priv);
1560 	if (ret) {
1561 		cnss_pr_err("Failed to get pinctrl, err = %d\n", ret);
1562 		goto put_clk;
1563 	}
1564 
1565 	return 0;
1566 
1567 put_clk:
1568 	cnss_put_clk(plat_priv);
1569 put_vreg:
1570 	cnss_put_vreg_type(plat_priv, CNSS_VREG_PRIM);
1571 out:
1572 	return ret;
1573 }
1574 
1575 static void cnss_put_resources(struct cnss_plat_data *plat_priv)
1576 {
1577 	cnss_put_clk(plat_priv);
1578 	cnss_put_vreg_type(plat_priv, CNSS_VREG_PRIM);
1579 }
1580 
1581 #if IS_ENABLED(CONFIG_ESOC) && IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
1582 static int cnss_modem_notifier_nb(struct notifier_block *nb,
1583 				  unsigned long code,
1584 				  void *ss_handle)
1585 {
1586 	struct cnss_plat_data *plat_priv =
1587 		container_of(nb, struct cnss_plat_data, modem_nb);
1588 	struct cnss_esoc_info *esoc_info;
1589 
1590 	cnss_pr_dbg("Modem notifier: event %lu\n", code);
1591 
1592 	if (!plat_priv)
1593 		return NOTIFY_DONE;
1594 
1595 	esoc_info = &plat_priv->esoc_info;
1596 
1597 	if (code == SUBSYS_AFTER_POWERUP)
1598 		esoc_info->modem_current_status = 1;
1599 	else if (code == SUBSYS_BEFORE_SHUTDOWN)
1600 		esoc_info->modem_current_status = 0;
1601 	else
1602 		return NOTIFY_DONE;
1603 
1604 	if (!cnss_bus_call_driver_modem_status(plat_priv,
1605 					       esoc_info->modem_current_status))
1606 		return NOTIFY_DONE;
1607 
1608 	return NOTIFY_OK;
1609 }
1610 
1611 static int cnss_register_esoc(struct cnss_plat_data *plat_priv)
1612 {
1613 	int ret = 0;
1614 	struct device *dev;
1615 	struct cnss_esoc_info *esoc_info;
1616 	struct esoc_desc *esoc_desc;
1617 	const char *client_desc;
1618 
1619 	dev = &plat_priv->plat_dev->dev;
1620 	esoc_info = &plat_priv->esoc_info;
1621 
1622 	esoc_info->notify_modem_status =
1623 		of_property_read_bool(dev->of_node,
1624 				      "qcom,notify-modem-status");
1625 
1626 	if (!esoc_info->notify_modem_status)
1627 		goto out;
1628 
1629 	ret = of_property_read_string_index(dev->of_node, "esoc-names", 0,
1630 					    &client_desc);
1631 	if (ret) {
1632 		cnss_pr_dbg("esoc-names is not defined in DT, skip!\n");
1633 	} else {
1634 		esoc_desc = devm_register_esoc_client(dev, client_desc);
1635 		if (IS_ERR_OR_NULL(esoc_desc)) {
1636 			ret = PTR_ERR_OR_ZERO(esoc_desc);
1637 			cnss_pr_err("Failed to register esoc_desc, err = %d\n",
1638 				    ret);
1639 			goto out;
1640 		}
1641 		esoc_info->esoc_desc = esoc_desc;
1642 	}
1643 
1644 	plat_priv->modem_nb.notifier_call = cnss_modem_notifier_nb;
1645 	esoc_info->modem_current_status = 0;
1646 	esoc_info->modem_notify_handler =
1647 		subsys_notif_register_notifier(esoc_info->esoc_desc ?
1648 					       esoc_info->esoc_desc->name :
1649 					       "modem", &plat_priv->modem_nb);
1650 	if (IS_ERR(esoc_info->modem_notify_handler)) {
1651 		ret = PTR_ERR(esoc_info->modem_notify_handler);
1652 		cnss_pr_err("Failed to register esoc notifier, err = %d\n",
1653 			    ret);
1654 		goto unreg_esoc;
1655 	}
1656 
1657 	return 0;
1658 unreg_esoc:
1659 	if (esoc_info->esoc_desc)
1660 		devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
1661 out:
1662 	return ret;
1663 }
1664 
1665 static void cnss_unregister_esoc(struct cnss_plat_data *plat_priv)
1666 {
1667 	struct device *dev;
1668 	struct cnss_esoc_info *esoc_info;
1669 
1670 	dev = &plat_priv->plat_dev->dev;
1671 	esoc_info = &plat_priv->esoc_info;
1672 
1673 	if (esoc_info->notify_modem_status)
1674 		subsys_notif_unregister_notifier
1675 		(esoc_info->modem_notify_handler,
1676 		 &plat_priv->modem_nb);
1677 	if (esoc_info->esoc_desc)
1678 		devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
1679 }
1680 #else
1681 static inline int cnss_register_esoc(struct cnss_plat_data *plat_priv)
1682 {
1683 	return 0;
1684 }
1685 
1686 static inline void cnss_unregister_esoc(struct cnss_plat_data *plat_priv) {}
1687 #endif
1688 
1689 int cnss_enable_dev_sol_irq(struct cnss_plat_data *plat_priv)
1690 {
1691 	struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1692 	int ret = 0;
1693 
1694 	if (sol_gpio->dev_sol_gpio < 0 || sol_gpio->dev_sol_irq <= 0)
1695 		return 0;
1696 
1697 	enable_irq(sol_gpio->dev_sol_irq);
1698 	ret = enable_irq_wake(sol_gpio->dev_sol_irq);
1699 	if (ret)
1700 		cnss_pr_err("Failed to enable device SOL as wake IRQ, err = %d\n",
1701 			    ret);
1702 
1703 	return ret;
1704 }
1705 
1706 int cnss_disable_dev_sol_irq(struct cnss_plat_data *plat_priv)
1707 {
1708 	struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1709 	int ret = 0;
1710 
1711 	if (sol_gpio->dev_sol_gpio < 0 || sol_gpio->dev_sol_irq <= 0)
1712 		return 0;
1713 
1714 	ret = disable_irq_wake(sol_gpio->dev_sol_irq);
1715 	if (ret)
1716 		cnss_pr_err("Failed to disable device SOL as wake IRQ, err = %d\n",
1717 			    ret);
1718 	disable_irq(sol_gpio->dev_sol_irq);
1719 
1720 	return ret;
1721 }
1722 
1723 int cnss_get_dev_sol_value(struct cnss_plat_data *plat_priv)
1724 {
1725 	struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1726 
1727 	if (sol_gpio->dev_sol_gpio < 0)
1728 		return -EINVAL;
1729 
1730 	return gpio_get_value(sol_gpio->dev_sol_gpio);
1731 }
1732 
1733 static irqreturn_t cnss_dev_sol_handler(int irq, void *data)
1734 {
1735 	struct cnss_plat_data *plat_priv = data;
1736 	struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1737 
1738 	sol_gpio->dev_sol_counter++;
1739 	cnss_pr_dbg("WLAN device SOL IRQ (%u) is asserted #%u\n",
1740 		    irq, sol_gpio->dev_sol_counter);
1741 
1742 	/* Make sure any ongoing suspend is aborted */
1743 	cnss_pm_stay_awake(plat_priv);
1744 	cnss_pm_relax(plat_priv);
1745 	pm_system_wakeup();
1746 
1747 	cnss_bus_handle_dev_sol_irq(plat_priv);
1748 
1749 	return IRQ_HANDLED;
1750 }
1751 
1752 static int cnss_init_dev_sol_gpio(struct cnss_plat_data *plat_priv)
1753 {
1754 	struct device *dev = &plat_priv->plat_dev->dev;
1755 	struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1756 	int ret = 0;
1757 
1758 	sol_gpio->dev_sol_gpio = of_get_named_gpio(dev->of_node,
1759 						   "wlan-dev-sol-gpio", 0);
1760 	if (sol_gpio->dev_sol_gpio < 0)
1761 		goto out;
1762 
1763 	cnss_pr_dbg("Get device SOL GPIO (%d) from device node\n",
1764 		    sol_gpio->dev_sol_gpio);
1765 
1766 	ret = gpio_request(sol_gpio->dev_sol_gpio, "wlan_dev_sol_gpio");
1767 	if (ret) {
1768 		cnss_pr_err("Failed to request device SOL GPIO, err = %d\n",
1769 			    ret);
1770 		goto out;
1771 	}
1772 
1773 	gpio_direction_input(sol_gpio->dev_sol_gpio);
1774 	sol_gpio->dev_sol_irq = gpio_to_irq(sol_gpio->dev_sol_gpio);
1775 
1776 	ret = request_irq(sol_gpio->dev_sol_irq, cnss_dev_sol_handler,
1777 			  IRQF_TRIGGER_FALLING, "wlan_dev_sol_irq", plat_priv);
1778 	if (ret) {
1779 		cnss_pr_err("Failed to request device SOL IRQ, err = %d\n", ret);
1780 		goto free_gpio;
1781 	}
1782 
1783 	return 0;
1784 
1785 free_gpio:
1786 	gpio_free(sol_gpio->dev_sol_gpio);
1787 out:
1788 	return ret;
1789 }
1790 
1791 static void cnss_deinit_dev_sol_gpio(struct cnss_plat_data *plat_priv)
1792 {
1793 	struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1794 
1795 	if (sol_gpio->dev_sol_gpio < 0)
1796 		return;
1797 
1798 	free_irq(sol_gpio->dev_sol_irq, plat_priv);
1799 	gpio_free(sol_gpio->dev_sol_gpio);
1800 }
1801 
1802 int cnss_set_host_sol_value(struct cnss_plat_data *plat_priv, int value)
1803 {
1804 	struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1805 
1806 	if (sol_gpio->host_sol_gpio < 0)
1807 		return -EINVAL;
1808 
1809 	if (value)
1810 		cnss_pr_dbg("Assert host SOL GPIO\n");
1811 	gpio_set_value(sol_gpio->host_sol_gpio, value);
1812 
1813 	return 0;
1814 }
1815 
1816 int cnss_get_host_sol_value(struct cnss_plat_data *plat_priv)
1817 {
1818 	struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1819 
1820 	if (sol_gpio->host_sol_gpio < 0)
1821 		return -EINVAL;
1822 
1823 	return gpio_get_value(sol_gpio->host_sol_gpio);
1824 }
1825 
1826 static int cnss_init_host_sol_gpio(struct cnss_plat_data *plat_priv)
1827 {
1828 	struct device *dev = &plat_priv->plat_dev->dev;
1829 	struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1830 	int ret = 0;
1831 
1832 	sol_gpio->host_sol_gpio = of_get_named_gpio(dev->of_node,
1833 						    "wlan-host-sol-gpio", 0);
1834 	if (sol_gpio->host_sol_gpio < 0)
1835 		goto out;
1836 
1837 	cnss_pr_dbg("Get host SOL GPIO (%d) from device node\n",
1838 		    sol_gpio->host_sol_gpio);
1839 
1840 	ret = gpio_request(sol_gpio->host_sol_gpio, "wlan_host_sol_gpio");
1841 	if (ret) {
1842 		cnss_pr_err("Failed to request host SOL GPIO, err = %d\n",
1843 			    ret);
1844 		goto out;
1845 	}
1846 
1847 	gpio_direction_output(sol_gpio->host_sol_gpio, 0);
1848 
1849 	return 0;
1850 
1851 out:
1852 	return ret;
1853 }
1854 
1855 static void cnss_deinit_host_sol_gpio(struct cnss_plat_data *plat_priv)
1856 {
1857 	struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1858 
1859 	if (sol_gpio->host_sol_gpio < 0)
1860 		return;
1861 
1862 	gpio_free(sol_gpio->host_sol_gpio);
1863 }
1864 
1865 static int cnss_init_sol_gpio(struct cnss_plat_data *plat_priv)
1866 {
1867 	int ret;
1868 
1869 	ret = cnss_init_dev_sol_gpio(plat_priv);
1870 	if (ret)
1871 		goto out;
1872 
1873 	ret = cnss_init_host_sol_gpio(plat_priv);
1874 	if (ret)
1875 		goto deinit_dev_sol;
1876 
1877 	return 0;
1878 
1879 deinit_dev_sol:
1880 	cnss_deinit_dev_sol_gpio(plat_priv);
1881 out:
1882 	return ret;
1883 }
1884 
1885 static void cnss_deinit_sol_gpio(struct cnss_plat_data *plat_priv)
1886 {
1887 	cnss_deinit_host_sol_gpio(plat_priv);
1888 	cnss_deinit_dev_sol_gpio(plat_priv);
1889 }
1890 
1891 #if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
1892 static int cnss_subsys_powerup(const struct subsys_desc *subsys_desc)
1893 {
1894 	struct cnss_plat_data *plat_priv;
1895 	int ret = 0;
1896 
1897 	if (!subsys_desc->dev) {
1898 		cnss_pr_err("dev from subsys_desc is NULL\n");
1899 		return -ENODEV;
1900 	}
1901 
1902 	plat_priv = dev_get_drvdata(subsys_desc->dev);
1903 	if (!plat_priv) {
1904 		cnss_pr_err("plat_priv is NULL\n");
1905 		return -ENODEV;
1906 	}
1907 
1908 	if (!plat_priv->driver_state) {
1909 		cnss_pr_dbg("subsys powerup is ignored\n");
1910 		return 0;
1911 	}
1912 
1913 	ret = cnss_bus_dev_powerup(plat_priv);
1914 	if (ret)
1915 		__pm_relax(plat_priv->recovery_ws);
1916 	return ret;
1917 }
1918 
1919 static int cnss_subsys_shutdown(const struct subsys_desc *subsys_desc,
1920 				bool force_stop)
1921 {
1922 	struct cnss_plat_data *plat_priv;
1923 
1924 	if (!subsys_desc->dev) {
1925 		cnss_pr_err("dev from subsys_desc is NULL\n");
1926 		return -ENODEV;
1927 	}
1928 
1929 	plat_priv = dev_get_drvdata(subsys_desc->dev);
1930 	if (!plat_priv) {
1931 		cnss_pr_err("plat_priv is NULL\n");
1932 		return -ENODEV;
1933 	}
1934 
1935 	if (!plat_priv->driver_state) {
1936 		cnss_pr_dbg("subsys shutdown is ignored\n");
1937 		return 0;
1938 	}
1939 
1940 	return cnss_bus_dev_shutdown(plat_priv);
1941 }
1942 
1943 void cnss_device_crashed(struct device *dev)
1944 {
1945 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1946 	struct cnss_subsys_info *subsys_info;
1947 
1948 	if (!plat_priv)
1949 		return;
1950 
1951 	subsys_info = &plat_priv->subsys_info;
1952 	if (subsys_info->subsys_device) {
1953 		set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
1954 		subsys_set_crash_status(subsys_info->subsys_device, true);
1955 		subsystem_restart_dev(subsys_info->subsys_device);
1956 	}
1957 }
1958 EXPORT_SYMBOL(cnss_device_crashed);
1959 
1960 static void cnss_subsys_crash_shutdown(const struct subsys_desc *subsys_desc)
1961 {
1962 	struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
1963 
1964 	if (!plat_priv) {
1965 		cnss_pr_err("plat_priv is NULL\n");
1966 		return;
1967 	}
1968 
1969 	cnss_bus_dev_crash_shutdown(plat_priv);
1970 }
1971 
1972 static int cnss_subsys_ramdump(int enable,
1973 			       const struct subsys_desc *subsys_desc)
1974 {
1975 	struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
1976 
1977 	if (!plat_priv) {
1978 		cnss_pr_err("plat_priv is NULL\n");
1979 		return -ENODEV;
1980 	}
1981 
1982 	if (!enable)
1983 		return 0;
1984 
1985 	return cnss_bus_dev_ramdump(plat_priv);
1986 }
1987 
1988 static void cnss_recovery_work_handler(struct work_struct *work)
1989 {
1990 }
1991 #else
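/* Without CONFIG_MSM_SUBSYSTEM_RESTART, recovery is driven directly from the
 * recovery work: shut the bus device down, collect the ramdump and, once the
 * host driver is registered, power the device back up after a short delay.
 * If recovery is not enabled, trigger a kernel panic instead.
 */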
1992 void cnss_recovery_handler(struct cnss_plat_data *plat_priv)
1993 {
1994 	int ret;
1995 
1996 	set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
1997 
1998 	if (!plat_priv->recovery_enabled)
1999 		panic("subsys-restart: Resetting the SoC wlan crashed\n");
2000 
2001 	cnss_bus_dev_shutdown(plat_priv);
2002 	cnss_bus_dev_ramdump(plat_priv);
2003 
2004 	/* If recovery is triggered before host driver registration,
2005 	 * avoid device power up because the device will eventually be
2006 	 * powered up as part of driver registration.
2007 	 */
2008 	if (!test_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state) ||
2009 	    !test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state)) {
2010 		cnss_pr_dbg("Host driver not registered yet, ignore Device Power Up, 0x%lx\n",
2011 			    plat_priv->driver_state);
2012 		return;
2013 	}
2014 
2015 	msleep(POWER_RESET_MIN_DELAY_MS);
2016 
2017 	ret = cnss_bus_dev_powerup(plat_priv);
2018 	if (ret) {
2019 		__pm_relax(plat_priv->recovery_ws);
2020 		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2021 	}
2022 
2023 	return;
2024 }
2025 
2026 static void cnss_recovery_work_handler(struct work_struct *work)
2027 {
2028 	struct cnss_plat_data *plat_priv =
2029 		container_of(work, struct cnss_plat_data, recovery_work);
2030 
2031 	cnss_recovery_handler(plat_priv);
2032 }
2033 
2034 void cnss_device_crashed(struct device *dev)
2035 {
2036 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2037 
2038 	if (!plat_priv)
2039 		return;
2040 
2041 	set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2042 	schedule_work(&plat_priv->recovery_work);
2043 }
2044 EXPORT_SYMBOL(cnss_device_crashed);
2045 #endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
2046 
2047 void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size)
2048 {
2049 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2050 	struct cnss_ramdump_info *ramdump_info;
2051 
2052 	if (!plat_priv)
2053 		return NULL;
2054 
2055 	ramdump_info = &plat_priv->ramdump_info;
2056 	*size = ramdump_info->ramdump_size;
2057 
2058 	return ramdump_info->ramdump_va;
2059 }
2060 EXPORT_SYMBOL(cnss_get_virt_ramdump_mem);
2061 
2062 static const char *cnss_recovery_reason_to_str(enum cnss_recovery_reason reason)
2063 {
2064 	switch (reason) {
2065 	case CNSS_REASON_DEFAULT:
2066 		return "DEFAULT";
2067 	case CNSS_REASON_LINK_DOWN:
2068 		return "LINK_DOWN";
2069 	case CNSS_REASON_RDDM:
2070 		return "RDDM";
2071 	case CNSS_REASON_TIMEOUT:
2072 		return "TIMEOUT";
2073 	}
2074 
2075 	return "UNKNOWN";
2076 }
2077 
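/* Core recovery routine. QCA6174 always takes the self recovery path; other
 * targets hold the recovery wake source, handle the specific reason (link
 * down, RDDM collection, default/timeout) and then report the device as
 * crashed to the bus layer so the restart/recovery path takes over.
 */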
2078 static int cnss_do_recovery(struct cnss_plat_data *plat_priv,
2079 			    enum cnss_recovery_reason reason)
2080 {
2081 	int ret;
2082 
2083 	plat_priv->recovery_count++;
2084 
2085 	if (plat_priv->device_id == QCA6174_DEVICE_ID)
2086 		goto self_recovery;
2087 
2088 	if (test_bit(SKIP_RECOVERY, &plat_priv->ctrl_params.quirks)) {
2089 		cnss_pr_dbg("Skip device recovery\n");
2090 		return 0;
2091 	}
2092 
2093 	/* The FW recovery sequence has multiple steps and firmware load
2094 	 * requires Linux PM to be in the awake state. Thus hold the cnss wake
2095 	 * source until WLAN MISSION mode is enabled. The CNSS_TIMEOUT_RECOVERY
2096 	 * option should cover all of the time taken in this process.
2097 	 */
2098 	pm_wakeup_ws_event(plat_priv->recovery_ws,
2099 			   cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY),
2100 			   true);
2101 
2102 	switch (reason) {
2103 	case CNSS_REASON_LINK_DOWN:
2104 		if (!cnss_bus_check_link_status(plat_priv)) {
2105 			cnss_pr_dbg("Skip link down recovery as link is already up\n");
2106 			return 0;
2107 		}
2108 		if (test_bit(LINK_DOWN_SELF_RECOVERY,
2109 			     &plat_priv->ctrl_params.quirks))
2110 			goto self_recovery;
2111 		if (!cnss_bus_recover_link_down(plat_priv)) {
2112 			/* clear recovery bit here to avoid skipping
2113 			 * the recovery work for RDDM later
2114 			 */
2115 			clear_bit(CNSS_DRIVER_RECOVERY,
2116 				  &plat_priv->driver_state);
2117 			return 0;
2118 		}
2119 		break;
2120 	case CNSS_REASON_RDDM:
2121 		cnss_bus_collect_dump_info(plat_priv, false);
2122 		break;
2123 	case CNSS_REASON_DEFAULT:
2124 	case CNSS_REASON_TIMEOUT:
2125 		break;
2126 	default:
2127 		cnss_pr_err("Unsupported recovery reason: %s(%d)\n",
2128 			    cnss_recovery_reason_to_str(reason), reason);
2129 		break;
2130 	}
2131 	cnss_bus_device_crashed(plat_priv);
2132 
2133 	return 0;
2134 
2135 self_recovery:
2136 	cnss_pr_dbg("Going for self recovery\n");
2137 	cnss_bus_dev_shutdown(plat_priv);
2138 
2139 	if (test_bit(LINK_DOWN_SELF_RECOVERY, &plat_priv->ctrl_params.quirks))
2140 		clear_bit(LINK_DOWN_SELF_RECOVERY,
2141 			  &plat_priv->ctrl_params.quirks);
2142 
2143 	/* If link down self recovery is triggered before host driver
2144 	 * registration, avoid device power up because the device will
2145 	 * eventually be powered up as part of driver registration.
2146 	 */
2147 
2148 	if (!test_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state) ||
2149 	    !test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state)) {
2150 		cnss_pr_dbg("Host driver not registered yet, ignore Device Power Up, 0x%lx\n",
2151 			    plat_priv->driver_state);
2152 		return 0;
2153 	}
2154 
2155 	ret = cnss_bus_dev_powerup(plat_priv);
2156 	if (ret)
2157 		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2158 
2159 	return 0;
2160 }
2161 
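/* Handler for CNSS_DRIVER_EVENT_RECOVERY. Validates the current driver state
 * (not rebooting, no recovery already in progress, no unload or idle shutdown
 * under way), marks the recovery bit and hands off to cnss_do_recovery().
 * The event data is always freed here.
 */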
2162 static int cnss_driver_recovery_hdlr(struct cnss_plat_data *plat_priv,
2163 				     void *data)
2164 {
2165 	struct cnss_recovery_data *recovery_data = data;
2166 	int ret = 0;
2167 
2168 	cnss_pr_dbg("Driver recovery is triggered with reason: %s(%d)\n",
2169 		    cnss_recovery_reason_to_str(recovery_data->reason),
2170 		    recovery_data->reason);
2171 
2172 	if (!plat_priv->driver_state) {
2173 		cnss_pr_err("Improper driver state, ignore recovery\n");
2174 		ret = -EINVAL;
2175 		goto out;
2176 	}
2177 
2178 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
2179 		cnss_pr_err("Reboot is in progress, ignore recovery\n");
2180 		ret = -EINVAL;
2181 		goto out;
2182 	}
2183 
2184 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
2185 		cnss_pr_err("Recovery is already in progress\n");
2186 		CNSS_ASSERT(0);
2187 		ret = -EINVAL;
2188 		goto out;
2189 	}
2190 
2191 	if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
2192 	    test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
2193 		cnss_pr_err("Driver unload or idle shutdown is in progress, ignore recovery\n");
2194 		ret = -EINVAL;
2195 		goto out;
2196 	}
2197 
2198 	switch (plat_priv->device_id) {
2199 	case QCA6174_DEVICE_ID:
2200 		if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
2201 		    test_bit(CNSS_DRIVER_IDLE_RESTART,
2202 			     &plat_priv->driver_state)) {
2203 			cnss_pr_err("Driver load or idle restart is in progress, ignore recovery\n");
2204 			ret = -EINVAL;
2205 			goto out;
2206 		}
2207 		break;
2208 	default:
2209 		if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
2210 			set_bit(CNSS_FW_BOOT_RECOVERY,
2211 				&plat_priv->driver_state);
2212 		}
2213 		break;
2214 	}
2215 
2216 	set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2217 	ret = cnss_do_recovery(plat_priv, recovery_data->reason);
2218 
2219 out:
2220 	kfree(data);
2221 	return ret;
2222 }
2223 
2224 int cnss_self_recovery(struct device *dev,
2225 		       enum cnss_recovery_reason reason)
2226 {
2227 	cnss_schedule_recovery(dev, reason);
2228 	return 0;
2229 }
2230 EXPORT_SYMBOL(cnss_self_recovery);
2231 
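/**
 * cnss_schedule_recovery() - Schedule WLAN recovery from driver context
 * @dev: device for which recovery is being scheduled
 * @reason: reason code describing why recovery is needed
 *
 * Marks the firmware as down (unless a device error was already notified),
 * skips scheduling while a driver unload or idle shutdown is in progress,
 * then allocates a recovery request (atomically when called from interrupt
 * context or with IRQs disabled) and posts it as a CNSS_DRIVER_EVENT_RECOVERY
 * event.
 */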
2232 void cnss_schedule_recovery(struct device *dev,
2233 			    enum cnss_recovery_reason reason)
2234 {
2235 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2236 	struct cnss_recovery_data *data;
2237 	gfp_t gfp = GFP_KERNEL;
2238 
2239 	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
2240 		cnss_bus_update_status(plat_priv, CNSS_FW_DOWN);
2241 
2242 	if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
2243 	    test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
2244 		cnss_pr_dbg("Driver unload or idle shutdown is in progress, ignore schedule recovery\n");
2245 		return;
2246 	}
2247 
2248 	if (in_interrupt() || irqs_disabled())
2249 		gfp = GFP_ATOMIC;
2250 
2251 	data = kzalloc(sizeof(*data), gfp);
2252 	if (!data)
2253 		return;
2254 
2255 	data->reason = reason;
2256 	cnss_driver_event_post(plat_priv,
2257 			       CNSS_DRIVER_EVENT_RECOVERY,
2258 			       0, data);
2259 }
2260 EXPORT_SYMBOL(cnss_schedule_recovery);
2261 
2262 int cnss_force_fw_assert(struct device *dev)
2263 {
2264 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2265 
2266 	if (!plat_priv) {
2267 		cnss_pr_err("plat_priv is NULL\n");
2268 		return -ENODEV;
2269 	}
2270 
2271 	if (plat_priv->device_id == QCA6174_DEVICE_ID) {
2272 		cnss_pr_info("Forced FW assert is not supported\n");
2273 		return -EOPNOTSUPP;
2274 	}
2275 
2276 	if (cnss_bus_is_device_down(plat_priv)) {
2277 		cnss_pr_info("Device is already in bad state, ignore force assert\n");
2278 		return 0;
2279 	}
2280 
2281 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
2282 		cnss_pr_info("Recovery is already in progress, ignore forced FW assert\n");
2283 		return 0;
2284 	}
2285 
2286 	if (in_interrupt() || irqs_disabled())
2287 		cnss_driver_event_post(plat_priv,
2288 				       CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
2289 				       0, NULL);
2290 	else
2291 		cnss_bus_force_fw_assert_hdlr(plat_priv);
2292 
2293 	return 0;
2294 }
2295 EXPORT_SYMBOL(cnss_force_fw_assert);
2296 
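/**
 * cnss_force_collect_rddm() - Force an RDDM collection and wait for it
 * @dev: device for which the RAM dump should be collected
 *
 * If the device is not already down or in recovery, a forced FW assert is
 * issued first. The call then blocks until the RDDM completion is signalled
 * or the CNSS_TIMEOUT_RDDM timeout expires.
 *
 * Return: 0 on success, -ETIMEDOUT on timeout, other negative errno on error
 */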
2297 int cnss_force_collect_rddm(struct device *dev)
2298 {
2299 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2300 	unsigned int timeout;
2301 	int ret = 0;
2302 
2303 	if (!plat_priv) {
2304 		cnss_pr_err("plat_priv is NULL\n");
2305 		return -ENODEV;
2306 	}
2307 
2308 	if (plat_priv->device_id == QCA6174_DEVICE_ID) {
2309 		cnss_pr_info("Force collect rddm is not supported\n");
2310 		return -EOPNOTSUPP;
2311 	}
2312 
2313 	if (cnss_bus_is_device_down(plat_priv)) {
2314 		cnss_pr_info("Device is already in bad state, wait to collect rddm\n");
2315 		goto wait_rddm;
2316 	}
2317 
2318 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
2319 		cnss_pr_info("Recovery is already in progress, wait to collect rddm\n");
2320 		goto wait_rddm;
2321 	}
2322 
2323 	if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
2324 	    test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
2325 	    test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) ||
2326 	    test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
2327 		cnss_pr_info("Loading/Unloading/idle restart/shutdown is in progress, ignore forced collect rddm\n");
2328 		return 0;
2329 	}
2330 
2331 	ret = cnss_bus_force_fw_assert_hdlr(plat_priv);
2332 	if (ret)
2333 		return ret;
2334 
2335 wait_rddm:
2336 	reinit_completion(&plat_priv->rddm_complete);
2337 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RDDM);
2338 	ret = wait_for_completion_timeout(&plat_priv->rddm_complete,
2339 					  msecs_to_jiffies(timeout));
2340 	if (!ret) {
2341 		cnss_pr_err("Timeout (%ums) waiting for RDDM to complete\n",
2342 			    timeout);
2343 		ret = -ETIMEDOUT;
2344 	} else if (ret > 0) {
2345 		ret = 0;
2346 	}
2347 
2348 	return ret;
2349 }
2350 EXPORT_SYMBOL(cnss_force_collect_rddm);
2351 
2352 int cnss_qmi_send_get(struct device *dev)
2353 {
2354 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2355 
2356 	if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
2357 		return 0;
2358 
2359 	return cnss_bus_qmi_send_get(plat_priv);
2360 }
2361 EXPORT_SYMBOL(cnss_qmi_send_get);
2362 
2363 int cnss_qmi_send_put(struct device *dev)
2364 {
2365 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2366 
2367 	if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
2368 		return 0;
2369 
2370 	return cnss_bus_qmi_send_put(plat_priv);
2371 }
2372 EXPORT_SYMBOL(cnss_qmi_send_put);
2373 
2374 int cnss_qmi_send(struct device *dev, int type, void *cmd,
2375 		  int cmd_len, void *cb_ctx,
2376 		  int (*cb)(void *ctx, void *event, int event_len))
2377 {
2378 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2379 	int ret;
2380 
2381 	if (!plat_priv)
2382 		return -ENODEV;
2383 
2384 	if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
2385 		return -EINVAL;
2386 
2387 	plat_priv->get_info_cb = cb;
2388 	plat_priv->get_info_cb_ctx = cb_ctx;
2389 
2390 	ret = cnss_wlfw_get_info_send_sync(plat_priv, type, cmd, cmd_len);
2391 	if (ret) {
2392 		plat_priv->get_info_cb = NULL;
2393 		plat_priv->get_info_cb_ctx = NULL;
2394 	}
2395 
2396 	return ret;
2397 }
2398 EXPORT_SYMBOL(cnss_qmi_send);
2399 
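/* Handler for the cold boot calibration start event. Bails out if calibration
 * has already completed, is in progress, or the WLAN hardware is disabled;
 * waits for PCI probe to complete, then powers the device up with the
 * calibration completion re-armed. Any failure is recorded as a calibration
 * failure since cold boot calibration is not retried.
 */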
2400 static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
2401 {
2402 	int ret = 0;
2403 	u32 retry = 0, timeout;
2404 
2405 	if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) {
2406 		cnss_pr_dbg("Calibration complete. Ignore calibration req\n");
2407 		goto out;
2408 	} else if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
2409 		cnss_pr_dbg("Calibration in progress. Ignore new calibration req\n");
2410 		goto out;
2411 	} else if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state)) {
2412 		cnss_pr_dbg("Calibration deferred as WLAN device disabled\n");
2413 		goto out;
2414 	}
2415 
2416 	if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
2417 	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state) ||
2418 	    test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
2419 		cnss_pr_err("WLAN in mission mode before cold boot calibration\n");
2420 		CNSS_ASSERT(0);
2421 		return -EINVAL;
2422 	}
2423 
2424 	while (retry++ < CNSS_CAL_START_PROBE_WAIT_RETRY_MAX) {
2425 		if (test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state))
2426 			break;
2427 		msleep(CNSS_CAL_START_PROBE_WAIT_MS);
2428 
2429 		if (retry == CNSS_CAL_START_PROBE_WAIT_RETRY_MAX) {
2430 			cnss_pr_err("Calibration start failed as PCI probe not complete\n");
2431 			CNSS_ASSERT(0);
2432 			ret = -EINVAL;
2433 			goto mark_cal_fail;
2434 		}
2435 	}
2436 
2437 	switch (plat_priv->device_id) {
2438 	case QCA6290_DEVICE_ID:
2439 	case QCA6390_DEVICE_ID:
2440 	case QCA6490_DEVICE_ID:
2441 	case KIWI_DEVICE_ID:
2442 	case MANGO_DEVICE_ID:
2443 	case PEACH_DEVICE_ID:
2444 		break;
2445 	default:
2446 		cnss_pr_err("Not supported for device ID 0x%lx\n",
2447 			    plat_priv->device_id);
2448 		ret = -EINVAL;
2449 		goto mark_cal_fail;
2450 	}
2451 
2452 	set_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state);
2453 	if (test_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state)) {
2454 		timeout = cnss_get_timeout(plat_priv,
2455 					   CNSS_TIMEOUT_CALIBRATION);
2456 		cnss_pr_dbg("Restarting calibration with %ds timeout\n",
2457 			    timeout / 1000);
2458 		if (cancel_delayed_work_sync(&plat_priv->wlan_reg_driver_work))
2459 			schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
2460 					      msecs_to_jiffies(timeout));
2461 	}
2462 	reinit_completion(&plat_priv->cal_complete);
2463 	ret = cnss_bus_dev_powerup(plat_priv);
2464 mark_cal_fail:
2465 	if (ret) {
2466 		complete(&plat_priv->cal_complete);
2467 		clear_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state);
2468 		/* Set CBC done in the driver state to mark the attempt and note
2469 		 * the error, since calibration cannot be retried at boot.
2470 		 */
2471 		plat_priv->cal_done = CNSS_CAL_FAILURE;
2472 		set_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state);
2473 
2474 		if (plat_priv->device_id == QCA6174_DEVICE_ID ||
2475 		    plat_priv->device_id == QCN7605_DEVICE_ID) {
2476 			if (!test_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state))
2477 				goto out;
2478 
2479 			cnss_pr_info("Schedule WLAN driver load\n");
2480 
2481 			if (cancel_delayed_work_sync(&plat_priv->wlan_reg_driver_work))
2482 				schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
2483 						      0);
2484 		}
2485 	}
2486 
2487 out:
2488 	return ret;
2489 }
2490 
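/* Handler for the cold boot calibration done event. Records the calibration
 * result, sends CNSS_OFF to firmware, releases QDSS memory and antenna
 * sharing, shuts the device down (except for QCN7605), and on success uploads
 * the calibration data and schedules the deferred WLAN driver load.
 */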
2491 static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv,
2492 					void *data)
2493 {
2494 	struct cnss_cal_info *cal_info = data;
2495 
2496 	if (!test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) ||
2497 	    test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state))
2498 		goto out;
2499 
2500 	switch (cal_info->cal_status) {
2501 	case CNSS_CAL_DONE:
2502 		cnss_pr_dbg("Calibration completed successfully\n");
2503 		plat_priv->cal_done = true;
2504 		break;
2505 	case CNSS_CAL_TIMEOUT:
2506 	case CNSS_CAL_FAILURE:
2507 		cnss_pr_dbg("Calibration failed. Status: %d, force shutdown\n",
2508 			    cal_info->cal_status);
2509 		break;
2510 	default:
2511 		cnss_pr_err("Unknown calibration status: %u\n",
2512 			    cal_info->cal_status);
2513 		break;
2514 	}
2515 
2516 	cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
2517 	cnss_bus_free_qdss_mem(plat_priv);
2518 	cnss_release_antenna_sharing(plat_priv);
2519 
2520 	if (plat_priv->device_id == QCN7605_DEVICE_ID)
2521 		goto skip_shutdown;
2522 
2523 	cnss_bus_dev_shutdown(plat_priv);
2524 	msleep(POWER_RESET_MIN_DELAY_MS);
2525 
2526 skip_shutdown:
2527 	complete(&plat_priv->cal_complete);
2528 	clear_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state);
2529 	set_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state);
2530 
2531 	if (cal_info->cal_status == CNSS_CAL_DONE) {
2532 		cnss_cal_mem_upload_to_file(plat_priv);
2533 		if (!test_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state))
2534 			goto out;
2535 
2536 		cnss_pr_dbg("Schedule WLAN driver load\n");
2537 		if (cancel_delayed_work_sync(&plat_priv->wlan_reg_driver_work))
2538 			schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
2539 					      0);
2540 	}
2541 out:
2542 	kfree(data);
2543 	return 0;
2544 }
2545 
2546 static int cnss_power_up_hdlr(struct cnss_plat_data *plat_priv)
2547 {
2548 	int ret;
2549 
2550 	ret = cnss_bus_dev_powerup(plat_priv);
2551 	if (ret)
2552 		clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
2553 
2554 	return ret;
2555 }
2556 
2557 static int cnss_power_down_hdlr(struct cnss_plat_data *plat_priv)
2558 {
2559 	cnss_bus_dev_shutdown(plat_priv);
2560 
2561 	return 0;
2562 }
2563 
2564 static int cnss_qdss_trace_req_mem_hdlr(struct cnss_plat_data *plat_priv)
2565 {
2566 	int ret = 0;
2567 
2568 	ret = cnss_bus_alloc_qdss_mem(plat_priv);
2569 	if (ret < 0)
2570 		return ret;
2571 
2572 	return cnss_wlfw_qdss_trace_mem_info_send_sync(plat_priv);
2573 }
2574 
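/* Translate a firmware physical address range to its kernel virtual address
 * by walking the given FW memory segments. The range must either start at a
 * segment base or fall entirely inside a segment, in which case the offset
 * into that segment is applied. Returns NULL if no segment covers the range.
 */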
2575 static void *cnss_get_fw_mem_pa_to_va(struct cnss_fw_mem *fw_mem,
2576 				      u32 mem_seg_len, u64 pa, u32 size)
2577 {
2578 	int i = 0;
2579 	u64 offset = 0;
2580 	void *va = NULL;
2581 	u64 local_pa;
2582 	u32 local_size;
2583 
2584 	for (i = 0; i < mem_seg_len; i++) {
2585 		if (i == QMI_WLFW_MEM_LPASS_SHARED_V01)
2586 			continue;
2587 
2588 		local_pa = (u64)fw_mem[i].pa;
2589 		local_size = (u32)fw_mem[i].size;
2590 		if (pa == local_pa && size <= local_size) {
2591 			va = fw_mem[i].va;
2592 			break;
2593 		}
2594 		if (pa > local_pa &&
2595 		    pa < local_pa + local_size &&
2596 		    pa + size <= local_pa + local_size) {
2597 			offset = pa - local_pa;
2598 			va = fw_mem[i].va + offset;
2599 			break;
2600 		}
2601 	}
2602 	return va;
2603 }
2604 
2605 static int cnss_fw_mem_file_save_hdlr(struct cnss_plat_data *plat_priv,
2606 				      void *data)
2607 {
2608 	struct cnss_qmi_event_fw_mem_file_save_data *event_data = data;
2609 	struct cnss_fw_mem *fw_mem_seg;
2610 	int ret = 0;
2611 	void *va = NULL;
2612 	u32 i, fw_mem_seg_len;
2613 
2614 	switch (event_data->mem_type) {
2615 	case QMI_WLFW_MEM_TYPE_DDR_V01:
2616 		if (!plat_priv->fw_mem_seg_len)
2617 			goto invalid_mem_save;
2618 
2619 		fw_mem_seg = plat_priv->fw_mem;
2620 		fw_mem_seg_len = plat_priv->fw_mem_seg_len;
2621 		break;
2622 	case QMI_WLFW_MEM_QDSS_V01:
2623 		if (!plat_priv->qdss_mem_seg_len)
2624 			goto invalid_mem_save;
2625 
2626 		fw_mem_seg = plat_priv->qdss_mem;
2627 		fw_mem_seg_len = plat_priv->qdss_mem_seg_len;
2628 		break;
2629 	default:
2630 		goto invalid_mem_save;
2631 	}
2632 
2633 	for (i = 0; i < event_data->mem_seg_len; i++) {
2634 		va = cnss_get_fw_mem_pa_to_va(fw_mem_seg, fw_mem_seg_len,
2635 					      event_data->mem_seg[i].addr,
2636 					      event_data->mem_seg[i].size);
2637 		if (!va) {
2638 			cnss_pr_err("Failed to find matching va of pa %pa for mem type: %d\n",
2639 				    &event_data->mem_seg[i].addr,
2640 				    event_data->mem_type);
2641 			ret = -EINVAL;
2642 			break;
2643 		}
2644 		ret = cnss_genl_send_msg(va, CNSS_GENL_MSG_TYPE_QDSS,
2645 					 event_data->file_name,
2646 					 event_data->mem_seg[i].size);
2647 		if (ret < 0) {
2648 			cnss_pr_err("Failed to save fw mem data: %d\n",
2649 				    ret);
2650 			break;
2651 		}
2652 	}
2653 	kfree(data);
2654 	return ret;
2655 
2656 invalid_mem_save:
2657 	cnss_pr_err("FW Mem type %d not allocated. Invalid save request\n",
2658 		    event_data->mem_type);
2659 	kfree(data);
2660 	return -EINVAL;
2661 }
2662 
2663 static int cnss_qdss_trace_free_hdlr(struct cnss_plat_data *plat_priv)
2664 {
2665 	cnss_bus_free_qdss_mem(plat_priv);
2666 
2667 	return 0;
2668 }
2669 
2670 static int cnss_qdss_trace_req_data_hdlr(struct cnss_plat_data *plat_priv,
2671 					 void *data)
2672 {
2673 	int ret = 0;
2674 	struct cnss_qmi_event_fw_mem_file_save_data *event_data = data;
2675 
2676 	if (!plat_priv)
2677 		return -ENODEV;
2678 
2679 	ret = cnss_wlfw_qdss_data_send_sync(plat_priv, event_data->file_name,
2680 					    event_data->total_size);
2681 
2682 	kfree(data);
2683 	return ret;
2684 }
2685 
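/* Work function that drains the driver event list. Events are dequeued under
 * the event lock, dispatched to their handlers with the lock dropped, and
 * then either marked complete (for synchronous posters) or freed here. A wake
 * source is held for the duration of the run.
 */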
2686 static void cnss_driver_event_work(struct work_struct *work)
2687 {
2688 	struct cnss_plat_data *plat_priv =
2689 		container_of(work, struct cnss_plat_data, event_work);
2690 	struct cnss_driver_event *event;
2691 	unsigned long flags;
2692 	int ret = 0;
2693 
2694 	if (!plat_priv) {
2695 		cnss_pr_err("plat_priv is NULL!\n");
2696 		return;
2697 	}
2698 
2699 	cnss_pm_stay_awake(plat_priv);
2700 
2701 	spin_lock_irqsave(&plat_priv->event_lock, flags);
2702 
2703 	while (!list_empty(&plat_priv->event_list)) {
2704 		event = list_first_entry(&plat_priv->event_list,
2705 					 struct cnss_driver_event, list);
2706 		list_del(&event->list);
2707 		spin_unlock_irqrestore(&plat_priv->event_lock, flags);
2708 
2709 		cnss_pr_dbg("Processing driver event: %s%s(%d), state: 0x%lx\n",
2710 			    cnss_driver_event_to_str(event->type),
2711 			    event->sync ? "-sync" : "", event->type,
2712 			    plat_priv->driver_state);
2713 
2714 		switch (event->type) {
2715 		case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
2716 			ret = cnss_wlfw_server_arrive(plat_priv, event->data);
2717 			break;
2718 		case CNSS_DRIVER_EVENT_SERVER_EXIT:
2719 			ret = cnss_wlfw_server_exit(plat_priv);
2720 			break;
2721 		case CNSS_DRIVER_EVENT_REQUEST_MEM:
2722 			ret = cnss_bus_alloc_fw_mem(plat_priv);
2723 			if (ret)
2724 				break;
2725 			ret = cnss_wlfw_respond_mem_send_sync(plat_priv);
2726 			break;
2727 		case CNSS_DRIVER_EVENT_FW_MEM_READY:
2728 			ret = cnss_fw_mem_ready_hdlr(plat_priv);
2729 			break;
2730 		case CNSS_DRIVER_EVENT_FW_READY:
2731 			ret = cnss_fw_ready_hdlr(plat_priv);
2732 			break;
2733 		case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
2734 			ret = cnss_cold_boot_cal_start_hdlr(plat_priv);
2735 			break;
2736 		case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
2737 			ret = cnss_cold_boot_cal_done_hdlr(plat_priv,
2738 							   event->data);
2739 			break;
2740 		case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
2741 			ret = cnss_bus_register_driver_hdlr(plat_priv,
2742 							    event->data);
2743 			break;
2744 		case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
2745 			ret = cnss_bus_unregister_driver_hdlr(plat_priv);
2746 			break;
2747 		case CNSS_DRIVER_EVENT_RECOVERY:
2748 			ret = cnss_driver_recovery_hdlr(plat_priv,
2749 							event->data);
2750 			break;
2751 		case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
2752 			ret = cnss_bus_force_fw_assert_hdlr(plat_priv);
2753 			break;
2754 		case CNSS_DRIVER_EVENT_IDLE_RESTART:
2755 			set_bit(CNSS_DRIVER_IDLE_RESTART,
2756 				&plat_priv->driver_state);
2757 			fallthrough;
2758 		case CNSS_DRIVER_EVENT_POWER_UP:
2759 			ret = cnss_power_up_hdlr(plat_priv);
2760 			break;
2761 		case CNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
2762 			set_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
2763 				&plat_priv->driver_state);
2764 			fallthrough;
2765 		case CNSS_DRIVER_EVENT_POWER_DOWN:
2766 			ret = cnss_power_down_hdlr(plat_priv);
2767 			break;
2768 		case CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND:
2769 			ret = cnss_process_wfc_call_ind_event(plat_priv,
2770 							      event->data);
2771 			break;
2772 		case CNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND:
2773 			ret = cnss_process_twt_cfg_ind_event(plat_priv,
2774 							     event->data);
2775 			break;
2776 		case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
2777 			ret = cnss_qdss_trace_req_mem_hdlr(plat_priv);
2778 			break;
2779 		case CNSS_DRIVER_EVENT_FW_MEM_FILE_SAVE:
2780 			ret = cnss_fw_mem_file_save_hdlr(plat_priv,
2781 							 event->data);
2782 			break;
2783 		case CNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
2784 			ret = cnss_qdss_trace_free_hdlr(plat_priv);
2785 			break;
2786 		case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA:
2787 			ret = cnss_qdss_trace_req_data_hdlr(plat_priv,
2788 							    event->data);
2789 			break;
2790 		default:
2791 			cnss_pr_err("Invalid driver event type: %d\n",
2792 				    event->type);
2793 			kfree(event);
2794 			spin_lock_irqsave(&plat_priv->event_lock, flags);
2795 			continue;
2796 		}
2797 
2798 		spin_lock_irqsave(&plat_priv->event_lock, flags);
2799 		if (event->sync) {
2800 			event->ret = ret;
2801 			complete(&event->complete);
2802 			continue;
2803 		}
2804 		spin_unlock_irqrestore(&plat_priv->event_lock, flags);
2805 
2806 		kfree(event);
2807 
2808 		spin_lock_irqsave(&plat_priv->event_lock, flags);
2809 	}
2810 	spin_unlock_irqrestore(&plat_priv->event_lock, flags);
2811 
2812 	cnss_pm_relax(plat_priv);
2813 }
2814 
2815 #if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
2816 int cnss_register_subsys(struct cnss_plat_data *plat_priv)
2817 {
2818 	int ret = 0;
2819 	struct cnss_subsys_info *subsys_info;
2820 
2821 	subsys_info = &plat_priv->subsys_info;
2822 
2823 	subsys_info->subsys_desc.name = plat_priv->device_name;
2824 	subsys_info->subsys_desc.owner = THIS_MODULE;
2825 	subsys_info->subsys_desc.powerup = cnss_subsys_powerup;
2826 	subsys_info->subsys_desc.shutdown = cnss_subsys_shutdown;
2827 	subsys_info->subsys_desc.ramdump = cnss_subsys_ramdump;
2828 	subsys_info->subsys_desc.crash_shutdown = cnss_subsys_crash_shutdown;
2829 	subsys_info->subsys_desc.dev = &plat_priv->plat_dev->dev;
2830 
2831 	subsys_info->subsys_device = subsys_register(&subsys_info->subsys_desc);
2832 	if (IS_ERR(subsys_info->subsys_device)) {
2833 		ret = PTR_ERR(subsys_info->subsys_device);
2834 		cnss_pr_err("Failed to register subsys, err = %d\n", ret);
2835 		goto out;
2836 	}
2837 
2838 	subsys_info->subsys_handle =
2839 		subsystem_get(subsys_info->subsys_desc.name);
2840 	if (!subsys_info->subsys_handle) {
2841 		cnss_pr_err("Failed to get subsys_handle!\n");
2842 		ret = -EINVAL;
2843 		goto unregister_subsys;
2844 	} else if (IS_ERR(subsys_info->subsys_handle)) {
2845 		ret = PTR_ERR(subsys_info->subsys_handle);
2846 		cnss_pr_err("Failed to do subsystem_get, err = %d\n", ret);
2847 		goto unregister_subsys;
2848 	}
2849 
2850 	return 0;
2851 
2852 unregister_subsys:
2853 	subsys_unregister(subsys_info->subsys_device);
2854 out:
2855 	return ret;
2856 }
2857 
2858 void cnss_unregister_subsys(struct cnss_plat_data *plat_priv)
2859 {
2860 	struct cnss_subsys_info *subsys_info;
2861 
2862 	subsys_info = &plat_priv->subsys_info;
2863 	subsystem_put(subsys_info->subsys_handle);
2864 	subsys_unregister(subsys_info->subsys_device);
2865 }
2866 
2867 static void *cnss_create_ramdump_device(struct cnss_plat_data *plat_priv)
2868 {
2869 	struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
2870 
2871 	return create_ramdump_device(subsys_info->subsys_desc.name,
2872 				     subsys_info->subsys_desc.dev);
2873 }
2874 
2875 static void cnss_destroy_ramdump_device(struct cnss_plat_data *plat_priv,
2876 					void *ramdump_dev)
2877 {
2878 	destroy_ramdump_device(ramdump_dev);
2879 }
2880 
2881 int cnss_do_ramdump(struct cnss_plat_data *plat_priv)
2882 {
2883 	struct cnss_ramdump_info *ramdump_info = &plat_priv->ramdump_info;
2884 	struct ramdump_segment segment;
2885 
2886 	memset(&segment, 0, sizeof(segment));
2887 	segment.v_address = (void __iomem *)ramdump_info->ramdump_va;
2888 	segment.size = ramdump_info->ramdump_size;
2889 
2890 	return qcom_ramdump(ramdump_info->ramdump_dev, &segment, 1);
2891 }
2892 
2893 int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
2894 {
2895 	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
2896 	struct cnss_dump_data *dump_data = &info_v2->dump_data;
2897 	struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
2898 	struct ramdump_segment *ramdump_segs, *s;
2899 	struct cnss_dump_meta_info meta_info = {0};
2900 	int i, ret = 0;
2901 
2902 	ramdump_segs = kcalloc(dump_data->nentries + 1,
2903 			       sizeof(*ramdump_segs),
2904 			       GFP_KERNEL);
2905 	if (!ramdump_segs)
2906 		return -ENOMEM;
2907 
2908 	s = ramdump_segs + 1;
2909 	for (i = 0; i < dump_data->nentries; i++) {
2910 		if (dump_seg->type >= CNSS_FW_DUMP_TYPE_MAX) {
2911 			cnss_pr_err("Unsupported dump type: %d\n",
2912 				    dump_seg->type);
2913 			continue;
2914 		}
2915 
2916 		if (meta_info.entry[dump_seg->type].entry_start == 0) {
2917 			meta_info.entry[dump_seg->type].type = dump_seg->type;
2918 			meta_info.entry[dump_seg->type].entry_start = i + 1;
2919 		}
2920 		meta_info.entry[dump_seg->type].entry_num++;
2921 
2922 		s->address = dump_seg->address;
2923 		s->v_address = (void __iomem *)dump_seg->v_address;
2924 		s->size = dump_seg->size;
2925 		s++;
2926 		dump_seg++;
2927 	}
2928 
2929 	meta_info.magic = CNSS_RAMDUMP_MAGIC;
2930 	meta_info.version = CNSS_RAMDUMP_VERSION;
2931 	meta_info.chipset = plat_priv->device_id;
2932 	meta_info.total_entries = CNSS_FW_DUMP_TYPE_MAX;
2933 
2934 	ramdump_segs->v_address = (void __iomem *)(&meta_info);
2935 	ramdump_segs->size = sizeof(meta_info);
2936 
2937 	ret = qcom_elf_ramdump(info_v2->ramdump_dev, ramdump_segs,
2938 			       dump_data->nentries + 1);
2939 	kfree(ramdump_segs);
2940 
2941 	return ret;
2942 }
2943 #else
2944 static int cnss_panic_handler(struct notifier_block *nb, unsigned long action,
2945 			      void *data)
2946 {
2947 	struct cnss_plat_data *plat_priv =
2948 		container_of(nb, struct cnss_plat_data, panic_nb);
2949 
2950 	cnss_bus_dev_crash_shutdown(plat_priv);
2951 
2952 	return NOTIFY_DONE;
2953 }
2954 
2955 int cnss_register_subsys(struct cnss_plat_data *plat_priv)
2956 {
2957 	int ret;
2958 
2959 	if (!plat_priv)
2960 		return -ENODEV;
2961 
2962 	plat_priv->panic_nb.notifier_call = cnss_panic_handler;
2963 	ret = atomic_notifier_chain_register(&panic_notifier_list,
2964 					     &plat_priv->panic_nb);
2965 	if (ret) {
2966 		cnss_pr_err("Failed to register panic handler\n");
2967 		return -EINVAL;
2968 	}
2969 
2970 	return 0;
2971 }
2972 
2973 void cnss_unregister_subsys(struct cnss_plat_data *plat_priv)
2974 {
2975 	int ret;
2976 
2977 	ret = atomic_notifier_chain_unregister(&panic_notifier_list,
2978 					       &plat_priv->panic_nb);
2979 	if (ret)
2980 		cnss_pr_err("Failed to unregister panic handler\n");
2981 }
2982 
2983 #if IS_ENABLED(CONFIG_QCOM_MEMORY_DUMP_V2)
2984 static void *cnss_create_ramdump_device(struct cnss_plat_data *plat_priv)
2985 {
2986 	return &plat_priv->plat_dev->dev;
2987 }
2988 
2989 static void cnss_destroy_ramdump_device(struct cnss_plat_data *plat_priv,
2990 					void *ramdump_dev)
2991 {
2992 }
2993 #endif
2994 
2995 #if IS_ENABLED(CONFIG_QCOM_RAMDUMP)
2996 int cnss_do_ramdump(struct cnss_plat_data *plat_priv)
2997 {
2998 	struct cnss_ramdump_info *ramdump_info = &plat_priv->ramdump_info;
2999 	struct qcom_dump_segment segment;
3000 	struct list_head head;
3001 
3002 	INIT_LIST_HEAD(&head);
3003 	memset(&segment, 0, sizeof(segment));
3004 	segment.va = ramdump_info->ramdump_va;
3005 	segment.size = ramdump_info->ramdump_size;
3006 	list_add(&segment.node, &head);
3007 
3008 	return qcom_dump(&head, ramdump_info->ramdump_dev);
3009 }
3010 #else
3011 int cnss_do_ramdump(struct cnss_plat_data *plat_priv)
3012 {
3013 	return 0;
3014 }
3015 
3016 /* Using a completion event inside the dynamically allocated ramdump_desc
3017  * may result in a race between freeing the event (after setting it to
3018  * complete inside the dev coredump free callback) and the thread that is
3019  * waiting for the completion, hence declare it statically here instead.
3020  */
3021 DECLARE_COMPLETION(dump_done);
3022 #define TIMEOUT_SAVE_DUMP_MS 30000
3023 
3024 #define SIZEOF_ELF_STRUCT(__xhdr)					\
3025 static inline size_t sizeof_elf_##__xhdr(unsigned char class)		\
3026 {									\
3027 	if (class == ELFCLASS32)					\
3028 		return sizeof(struct elf32_##__xhdr);			\
3029 	else								\
3030 		return sizeof(struct elf64_##__xhdr);			\
3031 }
3032 
3033 SIZEOF_ELF_STRUCT(phdr)
3034 SIZEOF_ELF_STRUCT(hdr)
3035 
3036 #define set_xhdr_property(__xhdr, arg, class, member, value)		\
3037 do {									\
3038 	if (class == ELFCLASS32)					\
3039 		((struct elf32_##__xhdr *)arg)->member = value;		\
3040 	else								\
3041 		((struct elf64_##__xhdr *)arg)->member = value;		\
3042 } while (0)
3043 
3044 #define set_ehdr_property(arg, class, member, value) \
3045 	set_xhdr_property(hdr, arg, class, member, value)
3046 #define set_phdr_property(arg, class, member, value) \
3047 	set_xhdr_property(phdr, arg, class, member, value)
3048 
3049 /* These replace the qcom_ramdump driver APIs called from the common API
3050  * cnss_do_elf_ramdump() with the ones defined here.
3051  */
3052 #define qcom_dump_segment cnss_qcom_dump_segment
3053 #define qcom_elf_dump cnss_qcom_elf_dump
3054 #define dump_enabled cnss_dump_enabled
3055 
3056 struct cnss_qcom_dump_segment {
3057 	struct list_head node;
3058 	dma_addr_t da;
3059 	void *va;
3060 	size_t size;
3061 };
3062 
3063 struct cnss_qcom_ramdump_desc {
3064 	void *data;
3065 	struct completion dump_done;
3066 };
3067 
3068 static ssize_t cnss_qcom_devcd_readv(char *buffer, loff_t offset, size_t count,
3069 				     void *data, size_t datalen)
3070 {
3071 	struct cnss_qcom_ramdump_desc *desc = data;
3072 
3073 	return memory_read_from_buffer(buffer, count, &offset, desc->data,
3074 				       datalen);
3075 }
3076 
3077 static void cnss_qcom_devcd_freev(void *data)
3078 {
3079 	struct cnss_qcom_ramdump_desc *desc = data;
3080 
3081 	cnss_pr_dbg("Free dump data for dev coredump\n");
3082 
3083 	complete(&dump_done);
3084 	vfree(desc->data);
3085 	kfree(desc);
3086 }
3087 
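/* Hand the assembled dump buffer over to the dev coredump framework and wait
 * (up to TIMEOUT_SAVE_DUMP_MS) for it to be consumed; dump_done is completed
 * from the free callback once the framework releases the dump data.
 */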
3088 static int cnss_qcom_devcd_dump(struct device *dev, void *data, size_t datalen,
3089 				gfp_t gfp)
3090 {
3091 	struct cnss_qcom_ramdump_desc *desc;
3092 	unsigned int timeout = TIMEOUT_SAVE_DUMP_MS;
3093 	int ret;
3094 
3095 	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
3096 	if (!desc)
3097 		return -ENOMEM;
3098 
3099 	desc->data = data;
3100 	reinit_completion(&dump_done);
3101 
3102 	dev_coredumpm(dev, NULL, desc, datalen, gfp,
3103 		      cnss_qcom_devcd_readv, cnss_qcom_devcd_freev);
3104 
3105 	ret = wait_for_completion_timeout(&dump_done,
3106 					  msecs_to_jiffies(timeout));
3107 	if (!ret)
3108 		cnss_pr_err("Timeout (%ums) waiting for dump to be saved to file system\n",
3109 			    timeout);
3110 
3111 	return ret ? 0 : -ETIMEDOUT;
3112 }
3113 
3114 /* Since the elf32 and elf64 identification is identical apart from
3115  * the class, use elf32 by default.
3116  */
3117 static void init_elf_identification(struct elf32_hdr *ehdr, unsigned char class)
3118 {
3119 	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
3120 	ehdr->e_ident[EI_CLASS] = class;
3121 	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
3122 	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
3123 	ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
3124 }
3125 
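/* Build an ELF core image from the given dump segments: one ELF header plus
 * one PT_LOAD program header and data blob per segment. Segments without a
 * virtual address are ioremapped from their device address; unmappable ones
 * are filled with 0xff. The finished image is handed to dev coredump.
 */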
3126 int cnss_qcom_elf_dump(struct list_head *segs, struct device *dev,
3127 		       unsigned char class)
3128 {
3129 	struct cnss_qcom_dump_segment *segment;
3130 	void *phdr, *ehdr;
3131 	size_t data_size, offset;
3132 	int phnum = 0;
3133 	void *data;
3134 	void __iomem *ptr;
3135 
3136 	if (!segs || list_empty(segs))
3137 		return -EINVAL;
3138 
3139 	data_size = sizeof_elf_hdr(class);
3140 	list_for_each_entry(segment, segs, node) {
3141 		data_size += sizeof_elf_phdr(class) + segment->size;
3142 		phnum++;
3143 	}
3144 
3145 	data = vmalloc(data_size);
3146 	if (!data)
3147 		return -ENOMEM;
3148 
3149 	cnss_pr_dbg("Creating ELF file with size %zu\n", data_size);
3150 
3151 	ehdr = data;
3152 	memset(ehdr, 0, sizeof_elf_hdr(class));
3153 	init_elf_identification(ehdr, class);
3154 	set_ehdr_property(ehdr, class, e_type, ET_CORE);
3155 	set_ehdr_property(ehdr, class, e_machine, EM_NONE);
3156 	set_ehdr_property(ehdr, class, e_version, EV_CURRENT);
3157 	set_ehdr_property(ehdr, class, e_phoff, sizeof_elf_hdr(class));
3158 	set_ehdr_property(ehdr, class, e_ehsize, sizeof_elf_hdr(class));
3159 	set_ehdr_property(ehdr, class, e_phentsize, sizeof_elf_phdr(class));
3160 	set_ehdr_property(ehdr, class, e_phnum, phnum);
3161 
3162 	phdr = data + sizeof_elf_hdr(class);
3163 	offset = sizeof_elf_hdr(class) + sizeof_elf_phdr(class) * phnum;
3164 	list_for_each_entry(segment, segs, node) {
3165 		memset(phdr, 0, sizeof_elf_phdr(class));
3166 		set_phdr_property(phdr, class, p_type, PT_LOAD);
3167 		set_phdr_property(phdr, class, p_offset, offset);
3168 		set_phdr_property(phdr, class, p_vaddr, segment->da);
3169 		set_phdr_property(phdr, class, p_paddr, segment->da);
3170 		set_phdr_property(phdr, class, p_filesz, segment->size);
3171 		set_phdr_property(phdr, class, p_memsz, segment->size);
3172 		set_phdr_property(phdr, class, p_flags, PF_R | PF_W | PF_X);
3173 		set_phdr_property(phdr, class, p_align, 0);
3174 
3175 		if (segment->va) {
3176 			memcpy(data + offset, segment->va, segment->size);
3177 		} else {
3178 			ptr = devm_ioremap(dev, segment->da, segment->size);
3179 			if (!ptr) {
3180 				cnss_pr_err("Invalid coredump segment (%pad, %zu)\n",
3181 					    &segment->da, segment->size);
3182 				memset(data + offset, 0xff, segment->size);
3183 			} else {
3184 				memcpy_fromio(data + offset, ptr,
3185 					      segment->size);
3186 			}
3187 		}
3188 
3189 		offset += segment->size;
3190 		phdr += sizeof_elf_phdr(class);
3191 	}
3192 
3193 	return cnss_qcom_devcd_dump(dev, data, data_size, GFP_KERNEL);
3194 }
3195 
3196 /* Saving dump to file system is always needed in this case. */
3197 static bool cnss_dump_enabled(void)
3198 {
3199 	return true;
3200 }
3201 #endif /* CONFIG_QCOM_RAMDUMP */
3202 
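/* ELF ramdump for the non-MSM-subsystem-restart case: convert each CNSS dump
 * segment into a qcom_dump_segment, prepend a meta info segment describing
 * the entry layout per dump type, and feed the list to qcom_elf_dump() (or
 * its local replacement when CONFIG_QCOM_RAMDUMP is disabled).
 */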
3203 int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
3204 {
3205 	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
3206 	struct cnss_dump_data *dump_data = &info_v2->dump_data;
3207 	struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
3208 	struct qcom_dump_segment *seg;
3209 	struct cnss_dump_meta_info meta_info = {0};
3210 	struct list_head head;
3211 	int i, ret = 0;
3212 
3213 	if (!dump_enabled()) {
3214 		cnss_pr_info("Dump collection is not enabled\n");
3215 		return ret;
3216 	}
3217 
3218 	INIT_LIST_HEAD(&head);
3219 	for (i = 0; i < dump_data->nentries; i++) {
3220 		if (dump_seg->type >= CNSS_FW_DUMP_TYPE_MAX) {
3221 			cnss_pr_err("Unsupported dump type: %d\n",
3222 				    dump_seg->type);
3223 			continue;
3224 		}
3225 
3226 		seg = kcalloc(1, sizeof(*seg), GFP_KERNEL);
3227 		if (!seg) {
3228 			cnss_pr_err("%s: Failed to allocate mem for seg %d\n",
3229 				    __func__, i);
3230 			continue;
3231 		}
3232 
3233 		if (meta_info.entry[dump_seg->type].entry_start == 0) {
3234 			meta_info.entry[dump_seg->type].type = dump_seg->type;
3235 			meta_info.entry[dump_seg->type].entry_start = i + 1;
3236 		}
3237 		meta_info.entry[dump_seg->type].entry_num++;
3238 		seg->da = dump_seg->address;
3239 		seg->va = dump_seg->v_address;
3240 		seg->size = dump_seg->size;
3241 		list_add_tail(&seg->node, &head);
3242 		dump_seg++;
3243 	}
3244 
3245 	seg = kcalloc(1, sizeof(*seg), GFP_KERNEL);
3246 	if (!seg) {
3247 		cnss_pr_err("%s: Failed to allocate mem for elf ramdump seg\n",
3248 			    __func__);
3249 		goto skip_elf_dump;
3250 	}
3251 
3252 	meta_info.magic = CNSS_RAMDUMP_MAGIC;
3253 	meta_info.version = CNSS_RAMDUMP_VERSION;
3254 	meta_info.chipset = plat_priv->device_id;
3255 	meta_info.total_entries = CNSS_FW_DUMP_TYPE_MAX;
3256 	seg->va = &meta_info;
3257 	seg->size = sizeof(meta_info);
3258 	list_add(&seg->node, &head);
3259 
3260 	ret = qcom_elf_dump(&head, info_v2->ramdump_dev, ELF_CLASS);
3261 
3262 skip_elf_dump:
3263 	while (!list_empty(&head)) {
3264 		seg = list_first_entry(&head, struct qcom_dump_segment, node);
3265 		list_del(&seg->node);
3266 		kfree(seg);
3267 	}
3268 
3269 	return ret;
3270 }
3271 
3272 #ifdef CONFIG_CNSS2_SSR_DRIVER_DUMP
3273 /**
3274  * cnss_host_ramdump_dev_release() - callback function for device release
3275  * @dev: device to be released
3276  *
3277  * Return: None
3278  */
3279 static void cnss_host_ramdump_dev_release(struct device *dev)
3280 {
3281 	cnss_pr_dbg("free host ramdump device\n");
3282 	kfree(dev);
3283 }
3284 
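/**
 * cnss_do_host_ramdump() - Collect host driver memory regions via dev coredump
 * @plat_priv: platform driver context
 * @ssr_entry: array of host memory regions registered by the WLAN driver
 * @num_entries_loaded: number of valid entries in @ssr_entry
 *
 * Creates a transient "wlan_driver" device, maps each registered region name
 * to its dump type via wlan_str, builds the segment list with a meta info
 * header and triggers an ELF dump on the new device.
 *
 * Return: 0 on success or a negative error code
 */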
3285 int cnss_do_host_ramdump(struct cnss_plat_data *plat_priv,
3286 			 struct cnss_ssr_driver_dump_entry *ssr_entry,
3287 			 size_t num_entries_loaded)
3288 {
3289 	struct qcom_dump_segment *seg;
3290 	struct cnss_host_dump_meta_info meta_info = {0};
3291 	struct list_head head;
3292 	int dev_ret = 0;
3293 	struct device *new_device;
3294 	static const char * const wlan_str[] = {
3295 		[CNSS_HOST_WLAN_LOGS] = "wlan_logs",
3296 		[CNSS_HOST_HTC_CREDIT] = "htc_credit",
3297 		[CNSS_HOST_WMI_TX_CMP] = "wmi_tx_cmp",
3298 		[CNSS_HOST_WMI_COMMAND_LOG] = "wmi_command_log",
3299 		[CNSS_HOST_WMI_EVENT_LOG] = "wmi_event_log",
3300 		[CNSS_HOST_WMI_RX_EVENT] = "wmi_rx_event",
3301 		[CNSS_HOST_HAL_SOC] = "hal_soc",
3302 		[CNSS_HOST_GWLAN_LOGGING] = "gwlan_logging",
3303 		[CNSS_HOST_WMI_DEBUG_LOG_INFO] = "wmi_debug_log_info",
3304 		[CNSS_HOST_HTC_CREDIT_IDX] = "htc_credit_history_idx",
3305 		[CNSS_HOST_HTC_CREDIT_LEN] = "htc_credit_history_length",
3306 		[CNSS_HOST_WMI_TX_CMP_IDX] = "wmi_tx_cmp_idx",
3307 		[CNSS_HOST_WMI_COMMAND_LOG_IDX] = "wmi_command_log_idx",
3308 		[CNSS_HOST_WMI_EVENT_LOG_IDX] = "wmi_event_log_idx",
3309 		[CNSS_HOST_WMI_RX_EVENT_IDX] = "wmi_rx_event_idx",
3310 		[CNSS_HOST_HIF_CE_DESC_HISTORY_BUFF] = "hif_ce_desc_history_buff",
3311 		[CNSS_HOST_HANG_EVENT_DATA] = "hang_event_data",
3312 		[CNSS_HOST_CE_DESC_HIST] = "hif_ce_desc_hist",
3313 		[CNSS_HOST_CE_COUNT_MAX] = "hif_ce_count_max",
3314 		[CNSS_HOST_CE_HISTORY_MAX] = "hif_ce_history_max",
3315 		[CNSS_HOST_ONLY_FOR_CRIT_CE] = "hif_ce_only_for_crit",
3316 		[CNSS_HOST_HIF_EVENT_HISTORY] = "hif_event_history",
3317 		[CNSS_HOST_HIF_EVENT_HIST_MAX] = "hif_event_hist_max",
3318 		[CNSS_HOST_DP_WBM_DESC_REL] = "wbm_desc_rel_ring",
3319 		[CNSS_HOST_DP_WBM_DESC_REL_HANDLE] = "wbm_desc_rel_ring_handle",
3320 		[CNSS_HOST_DP_TCL_CMD] = "tcl_cmd_ring",
3321 		[CNSS_HOST_DP_TCL_CMD_HANDLE] = "tcl_cmd_ring_handle",
3322 		[CNSS_HOST_DP_TCL_STATUS] = "tcl_status_ring",
3323 		[CNSS_HOST_DP_TCL_STATUS_HANDLE] = "tcl_status_ring_handle",
3324 		[CNSS_HOST_DP_REO_REINJ] = "reo_reinject_ring",
3325 		[CNSS_HOST_DP_REO_REINJ_HANDLE] = "reo_reinject_ring_handle",
3326 		[CNSS_HOST_DP_RX_REL] = "rx_rel_ring",
3327 		[CNSS_HOST_DP_RX_REL_HANDLE] = "rx_rel_ring_handle",
3328 		[CNSS_HOST_DP_REO_EXP] = "reo_exception_ring",
3329 		[CNSS_HOST_DP_REO_EXP_HANDLE] = "reo_exception_ring_handle",
3330 		[CNSS_HOST_DP_REO_CMD] = "reo_cmd_ring",
3331 		[CNSS_HOST_DP_REO_CMD_HANDLE] = "reo_cmd_ring_handle",
3332 		[CNSS_HOST_DP_REO_STATUS] = "reo_status_ring",
3333 		[CNSS_HOST_DP_REO_STATUS_HANDLE] = "reo_status_ring_handle",
3334 		[CNSS_HOST_DP_TCL_DATA_0] = "tcl_data_ring_0",
3335 		[CNSS_HOST_DP_TCL_DATA_0_HANDLE] = "tcl_data_ring_0_handle",
3336 		[CNSS_HOST_DP_TX_COMP_0] = "tx_comp_ring_0",
3337 		[CNSS_HOST_DP_TX_COMP_0_HANDLE] = "tx_comp_ring_0_handle",
3338 		[CNSS_HOST_DP_TCL_DATA_1] = "tcl_data_ring_1",
3339 		[CNSS_HOST_DP_TCL_DATA_1_HANDLE] = "tcl_data_ring_1_handle",
3340 		[CNSS_HOST_DP_TX_COMP_1] = "tx_comp_ring_1",
3341 		[CNSS_HOST_DP_TX_COMP_1_HANDLE] = "tx_comp_ring_1_handle",
3342 		[CNSS_HOST_DP_TCL_DATA_2] = "tcl_data_ring_2",
3343 		[CNSS_HOST_DP_TCL_DATA_2_HANDLE] = "tcl_data_ring_2_handle",
3344 		[CNSS_HOST_DP_TX_COMP_2] = "tx_comp_ring_2",
3345 		[CNSS_HOST_DP_TX_COMP_2_HANDLE] = "tx_comp_ring_2_handle",
3346 		[CNSS_HOST_DP_REO_DST_0] = "reo_dest_ring_0",
3347 		[CNSS_HOST_DP_REO_DST_0_HANDLE] = "reo_dest_ring_0_handle",
3348 		[CNSS_HOST_DP_REO_DST_1] = "reo_dest_ring_1",
3349 		[CNSS_HOST_DP_REO_DST_1_HANDLE] = "reo_dest_ring_1_handle",
3350 		[CNSS_HOST_DP_REO_DST_2] = "reo_dest_ring_2",
3351 		[CNSS_HOST_DP_REO_DST_2_HANDLE] = "reo_dest_ring_2_handle",
3352 		[CNSS_HOST_DP_REO_DST_3] = "reo_dest_ring_3",
3353 		[CNSS_HOST_DP_REO_DST_3_HANDLE] = "reo_dest_ring_3_handle",
3354 		[CNSS_HOST_DP_REO_DST_4] = "reo_dest_ring_4",
3355 		[CNSS_HOST_DP_REO_DST_4_HANDLE] = "reo_dest_ring_4_handle",
3356 		[CNSS_HOST_DP_REO_DST_5] = "reo_dest_ring_5",
3357 		[CNSS_HOST_DP_REO_DST_5_HANDLE] = "reo_dest_ring_5_handle",
3358 		[CNSS_HOST_DP_REO_DST_6] = "reo_dest_ring_6",
3359 		[CNSS_HOST_DP_REO_DST_6_HANDLE] = "reo_dest_ring_6_handle",
3360 		[CNSS_HOST_DP_REO_DST_7] = "reo_dest_ring_7",
3361 		[CNSS_HOST_DP_REO_DST_7_HANDLE] = "reo_dest_ring_7_handle",
3362 		[CNSS_HOST_DP_PDEV_0] = "dp_pdev_0",
3363 		[CNSS_HOST_DP_WLAN_CFG_CTX] = "wlan_cfg_ctx",
3364 		[CNSS_HOST_DP_SOC] = "dp_soc",
3365 		[CNSS_HOST_HAL_RX_FST] = "hal_rx_fst",
3366 		[CNSS_HOST_DP_FISA] = "dp_fisa",
3367 		[CNSS_HOST_DP_FISA_HW_FSE_TABLE] = "dp_fisa_hw_fse_table",
3368 		[CNSS_HOST_DP_FISA_SW_FSE_TABLE] = "dp_fisa_sw_fse_table",
3369 		[CNSS_HOST_HIF] = "hif",
3370 		[CNSS_HOST_QDF_NBUF_HIST] = "qdf_nbuf_history",
3371 		[CNSS_HOST_TCL_WBM_MAP] = "tcl_wbm_map_array",
3372 		[CNSS_HOST_RX_MAC_BUF_RING_0] = "rx_mac_buf_ring_0",
3373 		[CNSS_HOST_RX_MAC_BUF_RING_0_HANDLE] = "rx_mac_buf_ring_0_handle",
3374 		[CNSS_HOST_RX_MAC_BUF_RING_1] = "rx_mac_buf_ring_1",
3375 		[CNSS_HOST_RX_MAC_BUF_RING_1_HANDLE] = "rx_mac_buf_ring_1_handle",
3376 		[CNSS_HOST_RX_REFILL_0] = "rx_refill_buf_ring_0",
3377 		[CNSS_HOST_RX_REFILL_0_HANDLE] = "rx_refill_buf_ring_0_handle",
3378 		[CNSS_HOST_CE_0] = "ce_0",
3379 		[CNSS_HOST_CE_0_SRC_RING] = "ce_0_src_ring",
3380 		[CNSS_HOST_CE_0_SRC_RING_CTX] = "ce_0_src_ring_ctx",
3381 		[CNSS_HOST_CE_1] = "ce_1",
3382 		[CNSS_HOST_CE_1_STATUS_RING] = "ce_1_status_ring",
3383 		[CNSS_HOST_CE_1_STATUS_RING_CTX] = "ce_1_status_ring_ctx",
3384 		[CNSS_HOST_CE_1_DEST_RING] = "ce_1_dest_ring",
3385 		[CNSS_HOST_CE_1_DEST_RING_CTX] = "ce_1_dest_ring_ctx",
3386 		[CNSS_HOST_CE_2] = "ce_2",
3387 		[CNSS_HOST_CE_2_STATUS_RING] = "ce_2_status_ring",
3388 		[CNSS_HOST_CE_2_STATUS_RING_CTX] = "ce_2_status_ring_ctx",
3389 		[CNSS_HOST_CE_2_DEST_RING] = "ce_2_dest_ring",
3390 		[CNSS_HOST_CE_2_DEST_RING_CTX] = "ce_2_dest_ring_ctx",
3391 		[CNSS_HOST_CE_3] = "ce_3",
3392 		[CNSS_HOST_CE_3_SRC_RING] = "ce_3_src_ring",
3393 		[CNSS_HOST_CE_3_SRC_RING_CTX] = "ce_3_src_ring_ctx",
3394 		[CNSS_HOST_CE_4] = "ce_4",
3395 		[CNSS_HOST_CE_4_SRC_RING] = "ce_4_src_ring",
3396 		[CNSS_HOST_CE_4_SRC_RING_CTX] = "ce_4_src_ring_ctx",
3397 		[CNSS_HOST_CE_5] = "ce_5",
3398 		[CNSS_HOST_CE_6] = "ce_6",
3399 		[CNSS_HOST_CE_7] = "ce_7",
3400 		[CNSS_HOST_CE_7_STATUS_RING] = "ce_7_status_ring",
3401 		[CNSS_HOST_CE_7_STATUS_RING_CTX] = "ce_7_status_ring_ctx",
3402 		[CNSS_HOST_CE_7_DEST_RING] = "ce_7_dest_ring",
3403 		[CNSS_HOST_CE_7_DEST_RING_CTX] = "ce_7_dest_ring_ctx",
3404 		[CNSS_HOST_CE_8] = "ce_8",
3405 		[CNSS_HOST_DP_TCL_DATA_3] = "tcl_data_ring_3",
3406 		[CNSS_HOST_DP_TCL_DATA_3_HANDLE] = "tcl_data_ring_3_handle",
3407 		[CNSS_HOST_DP_TX_COMP_3] = "tx_comp_ring_3",
3408 		[CNSS_HOST_DP_TX_COMP_3_HANDLE] = "tx_comp_ring_3_handle"
3409 	};
3410 	int i;
3411 	int ret = 0;
3412 	enum cnss_host_dump_type j;
3413 
3414 	if (!dump_enabled()) {
3415 		cnss_pr_info("Dump collection is not enabled\n");
3416 		return ret;
3417 	}
3418 
3419 	new_device = kcalloc(1, sizeof(*new_device), GFP_KERNEL);
3420 	if (!new_device) {
3421 		cnss_pr_err("Failed to alloc device mem\n");
3422 		return -ENOMEM;
3423 	}
3424 
3425 	new_device->release = cnss_host_ramdump_dev_release;
3426 	device_initialize(new_device);
3427 	dev_set_name(new_device, "wlan_driver");
3428 	dev_ret = device_add(new_device);
3429 	if (dev_ret) {
3430 		cnss_pr_err("Failed to add new device\n");
3431 		goto put_device;
3432 	}
3433 
3434 	INIT_LIST_HEAD(&head);
3435 	for (i = 0; i < num_entries_loaded; i++) {
3436 		/* If the region name registered by the driver is not present in
3437 		 * wlan_str, the type for that entry will not be set, but the
3438 		 * entry will still be added, leaving its type as 0. Since
3439 		 * entry type 0 is currently wlan_logs, that would cause a
3440 		 * parsing issue for wlan_logs, as parsing is done based on the
3441 		 * type field. So initialize type to -1 (invalid) to avoid it.
3442 		 */
3443 		meta_info.entry[i].type = -1;
3444 		seg = kcalloc(1, sizeof(*seg), GFP_KERNEL);
3445 		if (!seg) {
3446 			cnss_pr_err("Failed to alloc seg entry %d\n", i);
3447 			continue;
3448 		}
3449 
3450 		seg->va = ssr_entry[i].buffer_pointer;
3451 		seg->da = (dma_addr_t)ssr_entry[i].buffer_pointer;
3452 		seg->size = ssr_entry[i].buffer_size;
3453 
3454 		for (j = 0; j < CNSS_HOST_DUMP_TYPE_MAX; j++) {
3455 			if (strcmp(ssr_entry[i].region_name, wlan_str[j]) == 0) {
3456 				meta_info.entry[i].type = j;
3457 			}
3458 		}
3459 		meta_info.entry[i].entry_start = i + 1;
3460 		meta_info.entry[i].entry_num++;
3461 
3462 		list_add_tail(&seg->node, &head);
3463 	}
3464 
3465 	seg = kcalloc(1, sizeof(*seg), GFP_KERNEL);
3466 
3467 	if (!seg) {
3468 		cnss_pr_err("%s: Failed to allocate mem for host dump seg\n",
3469 			    __func__);
3470 		goto skip_host_dump;
3471 	}
3472 
3473 	meta_info.magic = CNSS_RAMDUMP_MAGIC;
3474 	meta_info.version = CNSS_RAMDUMP_VERSION;
3475 	meta_info.chipset = plat_priv->device_id;
3476 	meta_info.total_entries = num_entries_loaded;
3477 	seg->va = &meta_info;
3478 	seg->da = (dma_addr_t)&meta_info;
3479 	seg->size = sizeof(meta_info);
3480 	list_add(&seg->node, &head);
3481 
3482 	ret = qcom_elf_dump(&head, new_device, ELF_CLASS);
3483 
3484 skip_host_dump:
3485 	while (!list_empty(&head)) {
3486 		seg = list_first_entry(&head, struct qcom_dump_segment, node);
3487 		list_del(&seg->node);
3488 		kfree(seg);
3489 	}
3490 	device_del(new_device);
3491 put_device:
3492 	put_device(new_device);
3493 	cnss_pr_dbg("host ramdump result %d\n", ret);
3494 	return ret;
3495 }
3496 #endif
3497 #endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
3498 
3499 #if IS_ENABLED(CONFIG_QCOM_MEMORY_DUMP_V2)
3500 static int cnss_init_dump_entry(struct cnss_plat_data *plat_priv)
3501 {
3502 	struct cnss_ramdump_info *ramdump_info;
3503 	struct msm_dump_entry dump_entry;
3504 
3505 	ramdump_info = &plat_priv->ramdump_info;
3506 	ramdump_info->dump_data.addr = ramdump_info->ramdump_pa;
3507 	ramdump_info->dump_data.len = ramdump_info->ramdump_size;
3508 	ramdump_info->dump_data.version = CNSS_DUMP_FORMAT_VER;
3509 	ramdump_info->dump_data.magic = CNSS_DUMP_MAGIC_VER_V2;
3510 	strlcpy(ramdump_info->dump_data.name, CNSS_DUMP_NAME,
3511 		sizeof(ramdump_info->dump_data.name));
3512 	dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
3513 	dump_entry.addr = virt_to_phys(&ramdump_info->dump_data);
3514 
3515 	return msm_dump_data_register_nominidump(MSM_DUMP_TABLE_APPS,
3516 						&dump_entry);
3517 }
3518 
3519 static int cnss_register_ramdump_v1(struct cnss_plat_data *plat_priv)
3520 {
3521 	int ret = 0;
3522 	struct device *dev;
3523 	struct cnss_ramdump_info *ramdump_info;
3524 	u32 ramdump_size = 0;
3525 
3526 	dev = &plat_priv->plat_dev->dev;
3527 	ramdump_info = &plat_priv->ramdump_info;
3528 
3529 	if (plat_priv->dt_type != CNSS_DTT_MULTIEXCHG) {
3530 		/* dt type: legacy or converged */
3531 		ret = of_property_read_u32(dev->of_node,
3532 					   "qcom,wlan-ramdump-dynamic",
3533 					   &ramdump_size);
3534 	} else {
3535 		ret = of_property_read_u32(plat_priv->dev_node,
3536 					   "qcom,wlan-ramdump-dynamic",
3537 					   &ramdump_size);
3538 	}
3539 	if (ret == 0) {
3540 		ramdump_info->ramdump_va =
3541 			dma_alloc_coherent(dev, ramdump_size,
3542 					   &ramdump_info->ramdump_pa,
3543 					   GFP_KERNEL);
3544 
3545 		if (ramdump_info->ramdump_va)
3546 			ramdump_info->ramdump_size = ramdump_size;
3547 	}
3548 
3549 	cnss_pr_dbg("ramdump va: %pK, pa: %pa\n",
3550 		    ramdump_info->ramdump_va, &ramdump_info->ramdump_pa);
3551 
3552 	if (ramdump_info->ramdump_size == 0) {
3553 		cnss_pr_info("Ramdump will not be collected\n");
3554 		goto out;
3555 	}
3556 
3557 	ret = cnss_init_dump_entry(plat_priv);
3558 	if (ret) {
3559 		cnss_pr_err("Failed to setup dump table, err = %d\n", ret);
3560 		goto free_ramdump;
3561 	}
3562 
3563 	ramdump_info->ramdump_dev = cnss_create_ramdump_device(plat_priv);
3564 	if (!ramdump_info->ramdump_dev) {
3565 		cnss_pr_err("Failed to create ramdump device!\n");
3566 		ret = -ENOMEM;
3567 		goto free_ramdump;
3568 	}
3569 
3570 	return 0;
3571 free_ramdump:
3572 	dma_free_coherent(dev, ramdump_info->ramdump_size,
3573 			  ramdump_info->ramdump_va, ramdump_info->ramdump_pa);
3574 out:
3575 	return ret;
3576 }
3577 
3578 static void cnss_unregister_ramdump_v1(struct cnss_plat_data *plat_priv)
3579 {
3580 	struct device *dev;
3581 	struct cnss_ramdump_info *ramdump_info;
3582 
3583 	dev = &plat_priv->plat_dev->dev;
3584 	ramdump_info = &plat_priv->ramdump_info;
3585 
3586 	if (ramdump_info->ramdump_dev)
3587 		cnss_destroy_ramdump_device(plat_priv,
3588 					    ramdump_info->ramdump_dev);
3589 
3590 	if (ramdump_info->ramdump_va)
3591 		dma_free_coherent(dev, ramdump_info->ramdump_size,
3592 				  ramdump_info->ramdump_va,
3593 				  ramdump_info->ramdump_pa);
3594 }
3595 
3596 /**
3597  * cnss_ignore_dump_data_reg_fail() - Ignore ramdump table register failure
3598  * @ret: Error returned by msm_dump_data_register_nominidump
3599  *
3600  * For Lahaina GKI boot, the mem dump feature is not supported, so ignore
3601  * the failure.
3602  *
3603  * Return: Same given error code if mem dump feature enabled, 0 otherwise
3604  */
3605 static int cnss_ignore_dump_data_reg_fail(int ret)
3606 {
3607 	return ret;
3608 }
3609 
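/* Register the v2 ramdump descriptor used by targets other than QCA6174:
 * read the dynamic ramdump size from device tree, allocate the dump
 * descriptor region, register it with the MSM dump table and create the
 * ramdump device.
 */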
3610 static int cnss_register_ramdump_v2(struct cnss_plat_data *plat_priv)
3611 {
3612 	int ret = 0;
3613 	struct cnss_ramdump_info_v2 *info_v2;
3614 	struct cnss_dump_data *dump_data;
3615 	struct msm_dump_entry dump_entry;
3616 	struct device *dev = &plat_priv->plat_dev->dev;
3617 	u32 ramdump_size = 0;
3618 
3619 	info_v2 = &plat_priv->ramdump_info_v2;
3620 	dump_data = &info_v2->dump_data;
3621 
3622 	if (plat_priv->dt_type != CNSS_DTT_MULTIEXCHG) {
3623 		/* dt type: legacy or converged */
3624 		ret = of_property_read_u32(dev->of_node,
3625 					   "qcom,wlan-ramdump-dynamic",
3626 					   &ramdump_size);
3627 	} else {
3628 		ret = of_property_read_u32(plat_priv->dev_node,
3629 					   "qcom,wlan-ramdump-dynamic",
3630 					   &ramdump_size);
3631 	}
3632 	if (ret == 0)
3633 		info_v2->ramdump_size = ramdump_size;
3634 
3635 	cnss_pr_dbg("Ramdump size 0x%lx\n", info_v2->ramdump_size);
3636 
3637 	info_v2->dump_data_vaddr = kzalloc(CNSS_DUMP_DESC_SIZE, GFP_KERNEL);
3638 	if (!info_v2->dump_data_vaddr)
3639 		return -ENOMEM;
3640 
3641 	dump_data->paddr = virt_to_phys(info_v2->dump_data_vaddr);
3642 	dump_data->version = CNSS_DUMP_FORMAT_VER_V2;
3643 	dump_data->magic = CNSS_DUMP_MAGIC_VER_V2;
3644 	dump_data->seg_version = CNSS_DUMP_SEG_VER;
3645 	strlcpy(dump_data->name, CNSS_DUMP_NAME,
3646 		sizeof(dump_data->name));
3647 	dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
3648 	dump_entry.addr = virt_to_phys(dump_data);
3649 
3650 	ret = msm_dump_data_register_nominidump(MSM_DUMP_TABLE_APPS,
3651 						&dump_entry);
3652 	if (ret) {
3653 		ret = cnss_ignore_dump_data_reg_fail(ret);
3654 		cnss_pr_err("Failed to setup dump table, %s (%d)\n",
3655 			    ret ? "Error" : "Ignoring", ret);
3656 		goto free_ramdump;
3657 	}
3658 
3659 	info_v2->ramdump_dev = cnss_create_ramdump_device(plat_priv);
3660 	if (!info_v2->ramdump_dev) {
3661 		cnss_pr_err("Failed to create ramdump device!\n");
3662 		ret = -ENOMEM;
3663 		goto free_ramdump;
3664 	}
3665 
3666 	return 0;
3667 
3668 free_ramdump:
3669 	kfree(info_v2->dump_data_vaddr);
3670 	info_v2->dump_data_vaddr = NULL;
3671 	return ret;
3672 }
3673 
3674 static void cnss_unregister_ramdump_v2(struct cnss_plat_data *plat_priv)
3675 {
3676 	struct cnss_ramdump_info_v2 *info_v2;
3677 
3678 	info_v2 = &plat_priv->ramdump_info_v2;
3679 
3680 	if (info_v2->ramdump_dev)
3681 		cnss_destroy_ramdump_device(plat_priv, info_v2->ramdump_dev);
3682 
3683 	kfree(info_v2->dump_data_vaddr);
3684 	info_v2->dump_data_vaddr = NULL;
3685 	info_v2->dump_data_valid = false;
3686 }
3687 
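/* Dispatch ramdump setup to the v1 path (QCA6174) or the v2 path (MHI based
 * targets) based on the device ID.
 */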
3688 int cnss_register_ramdump(struct cnss_plat_data *plat_priv)
3689 {
3690 	int ret = 0;
3691 
3692 	switch (plat_priv->device_id) {
3693 	case QCA6174_DEVICE_ID:
3694 		ret = cnss_register_ramdump_v1(plat_priv);
3695 		break;
3696 	case QCA6290_DEVICE_ID:
3697 	case QCA6390_DEVICE_ID:
3698 	case QCN7605_DEVICE_ID:
3699 	case QCA6490_DEVICE_ID:
3700 	case KIWI_DEVICE_ID:
3701 	case MANGO_DEVICE_ID:
3702 	case PEACH_DEVICE_ID:
3703 		ret = cnss_register_ramdump_v2(plat_priv);
3704 		break;
3705 	default:
3706 		cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
3707 		ret = -ENODEV;
3708 		break;
3709 	}
3710 	return ret;
3711 }
3712 
3713 void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv)
3714 {
3715 	switch (plat_priv->device_id) {
3716 	case QCA6174_DEVICE_ID:
3717 		cnss_unregister_ramdump_v1(plat_priv);
3718 		break;
3719 	case QCA6290_DEVICE_ID:
3720 	case QCA6390_DEVICE_ID:
3721 	case QCN7605_DEVICE_ID:
3722 	case QCA6490_DEVICE_ID:
3723 	case KIWI_DEVICE_ID:
3724 	case MANGO_DEVICE_ID:
3725 	case PEACH_DEVICE_ID:
3726 		cnss_unregister_ramdump_v2(plat_priv);
3727 		break;
3728 	default:
3729 		cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
3730 		break;
3731 	}
3732 }
3733 #else
3734 int cnss_register_ramdump(struct cnss_plat_data *plat_priv)
3735 {
3736 	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
3737 	struct cnss_dump_data *dump_data = &info_v2->dump_data;
3738 	struct device *dev = &plat_priv->plat_dev->dev;
3739 	u32 ramdump_size = 0;
3740 
3741 	if (of_property_read_u32(dev->of_node, "qcom,wlan-ramdump-dynamic",
3742 				 &ramdump_size) == 0)
3743 		info_v2->ramdump_size = ramdump_size;
3744 
3745 	cnss_pr_dbg("Ramdump size 0x%lx\n", info_v2->ramdump_size);
3746 
3747 	info_v2->dump_data_vaddr = kzalloc(CNSS_DUMP_DESC_SIZE, GFP_KERNEL);
3748 	if (!info_v2->dump_data_vaddr)
3749 		return -ENOMEM;
3750 
3751 	dump_data->paddr = virt_to_phys(info_v2->dump_data_vaddr);
3752 	dump_data->version = CNSS_DUMP_FORMAT_VER_V2;
3753 	dump_data->magic = CNSS_DUMP_MAGIC_VER_V2;
3754 	dump_data->seg_version = CNSS_DUMP_SEG_VER;
3755 	strlcpy(dump_data->name, CNSS_DUMP_NAME,
3756 		sizeof(dump_data->name));
3757 
3758 	info_v2->ramdump_dev = dev;
3759 
3760 	return 0;
3761 }
3762 
3763 void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv)
3764 {
3765 	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
3766 
3767 	info_v2->ramdump_dev = NULL;
3768 	kfree(info_v2->dump_data_vaddr);
3769 	info_v2->dump_data_vaddr = NULL;
3770 	info_v2->dump_data_valid = false;
3771 }
3772 #endif /* CONFIG_QCOM_MEMORY_DUMP_V2 */
3773 
3774 #if IS_ENABLED(CONFIG_QCOM_MINIDUMP)
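/* Translate a DMA-coherent virtual address to its physical address by
 * retrieving the scatter-gather table for the mapping and taking the
 * physical address of its first page.
 */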
3775 int cnss_va_to_pa(struct device *dev, size_t size, void *va, dma_addr_t dma,
3776 		  phys_addr_t *pa, unsigned long attrs)
3777 {
3778 	struct sg_table sgt;
3779 	int ret;
3780 
3781 	ret = dma_get_sgtable_attrs(dev, &sgt, va, dma, size, attrs);
3782 	if (ret) {
3783 		cnss_pr_err("Failed to get sgtable for va: 0x%pK, dma: %pa, size: 0x%zx, attrs: 0x%lx\n",
3784 			    va, &dma, size, attrs);
3785 		return -EINVAL;
3786 	}
3787 
3788 	*pa = page_to_phys(sg_page(sgt.sgl));
3789 	sg_free_table(&sgt);
3790 
3791 	return 0;
3792 }
3793 
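/* Build a minidump region named "FBC_<n>", "RDDM_<n>" or "RHEAP_<n>" for the
 * given dump type and segment number, then register it with the QCOM
 * minidump driver.
 */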
3794 int cnss_minidump_add_region(struct cnss_plat_data *plat_priv,
3795 			     enum cnss_fw_dump_type type, int seg_no,
3796 			     void *va, phys_addr_t pa, size_t size)
3797 {
3798 	struct md_region md_entry;
3799 	int ret;
3800 
3801 	switch (type) {
3802 	case CNSS_FW_IMAGE:
3803 		snprintf(md_entry.name, sizeof(md_entry.name), "FBC_%X",
3804 			 seg_no);
3805 		break;
3806 	case CNSS_FW_RDDM:
3807 		snprintf(md_entry.name, sizeof(md_entry.name), "RDDM_%X",
3808 			 seg_no);
3809 		break;
3810 	case CNSS_FW_REMOTE_HEAP:
3811 		snprintf(md_entry.name, sizeof(md_entry.name), "RHEAP_%X",
3812 			 seg_no);
3813 		break;
3814 	default:
3815 		cnss_pr_err("Unknown dump type ID: %d\n", type);
3816 		return -EINVAL;
3817 	}
3818 
3819 	md_entry.phys_addr = pa;
3820 	md_entry.virt_addr = (uintptr_t)va;
3821 	md_entry.size = size;
3822 	md_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
3823 
3824 	cnss_pr_dbg("Mini dump region: %s, va: %pK, pa: %pa, size: 0x%zx\n",
3825 		    md_entry.name, va, &pa, size);
3826 
3827 	ret = msm_minidump_add_region(&md_entry);
3828 	if (ret < 0)
3829 		cnss_pr_err("Failed to add mini dump region, err = %d\n", ret);
3830 
3831 	return ret;
3832 }
3833 
3834 int cnss_minidump_remove_region(struct cnss_plat_data *plat_priv,
3835 				enum cnss_fw_dump_type type, int seg_no,
3836 				void *va, phys_addr_t pa, size_t size)
3837 {
3838 	struct md_region md_entry;
3839 	int ret;
3840 
3841 	switch (type) {
3842 	case CNSS_FW_IMAGE:
3843 		snprintf(md_entry.name, sizeof(md_entry.name), "FBC_%X",
3844 			 seg_no);
3845 		break;
3846 	case CNSS_FW_RDDM:
3847 		snprintf(md_entry.name, sizeof(md_entry.name), "RDDM_%X",
3848 			 seg_no);
3849 		break;
3850 	case CNSS_FW_REMOTE_HEAP:
3851 		snprintf(md_entry.name, sizeof(md_entry.name), "RHEAP_%X",
3852 			 seg_no);
3853 		break;
3854 	default:
3855 		cnss_pr_err("Unknown dump type ID: %d\n", type);
3856 		return -EINVAL;
3857 	}
3858 
3859 	md_entry.phys_addr = pa;
3860 	md_entry.virt_addr = (uintptr_t)va;
3861 	md_entry.size = size;
3862 	md_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
3863 
3864 	cnss_pr_vdbg("Remove mini dump region: %s, va: %pK, pa: %pa, size: 0x%zx\n",
3865 		     md_entry.name, va, &pa, size);
3866 
3867 	ret = msm_minidump_remove_region(&md_entry);
3868 	if (ret)
3869 		cnss_pr_err("Failed to remove mini dump region, err = %d\n",
3870 			    ret);
3871 
3872 	return ret;
3873 }
3874 #else
3875 int cnss_va_to_pa(struct device *dev, size_t size, void *va, dma_addr_t dma,
3876 		  phys_addr_t *pa, unsigned long attrs)
3877 {
3878 	return 0;
3879 }
3880 
3881 int cnss_minidump_add_region(struct cnss_plat_data *plat_priv,
3882 			     enum cnss_fw_dump_type type, int seg_no,
3883 			     void *va, phys_addr_t pa, size_t size)
3884 {
3885 	return 0;
3886 }
3887 
3888 int cnss_minidump_remove_region(struct cnss_plat_data *plat_priv,
3889 				enum cnss_fw_dump_type type, int seg_no,
3890 				void *va, phys_addr_t pa, size_t size)
3891 {
3892 	return 0;
3893 }
3894 #endif /* CONFIG_QCOM_MINIDUMP */
3895 
3896 int cnss_request_firmware_direct(struct cnss_plat_data *plat_priv,
3897 				 const struct firmware **fw_entry,
3898 				 const char *filename)
3899 {
3900 	if (IS_ENABLED(CONFIG_CNSS_REQ_FW_DIRECT))
3901 		return request_firmware_direct(fw_entry, filename,
3902 					       &plat_priv->plat_dev->dev);
3903 	else
3904 		return firmware_request_nowarn(fw_entry, filename,
3905 					       &plat_priv->plat_dev->dev);
3906 }
3907 
3908 #if IS_ENABLED(CONFIG_INTERCONNECT)
3909 /**
3910  * cnss_register_bus_scale() - Set up interconnect voting data
3911  * @plat_priv: Platform data structure
3912  *
3913  * For each interconnect path configured in the device tree, set up the
3914  * voting data for the list of bandwidth requirements.
3915  *
3916  * Return: 0 on success or if not configured, negative error code on failure
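 *
 * Illustrative DT fragment (hypothetical values, assuming CNSS_ICC_VOTE_MAX
 * of 2 for average/peak bandwidth; real path names and bandwidth numbers are
 * board specific):
 *
 *	qcom,icc-path-count = <1>;
 *	qcom,bus-bw-cfg-count = <2>;
 *	interconnect-names = "pcie_to_memnoc";
 *	qcom,bus-bw-cfg = <0 0>, <600000 800000>;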
3917  */
3918 static int cnss_register_bus_scale(struct cnss_plat_data *plat_priv)
3919 {
3920 	int ret = -EINVAL;
3921 	u32 idx, i, j, cfg_arr_size, *cfg_arr = NULL;
3922 	struct cnss_bus_bw_info *bus_bw_info, *tmp;
3923 	struct device *dev = &plat_priv->plat_dev->dev;
3924 
3925 	INIT_LIST_HEAD(&plat_priv->icc.list_head);
3926 	ret = of_property_read_u32(dev->of_node,
3927 				   "qcom,icc-path-count",
3928 				   &plat_priv->icc.path_count);
3929 	if (ret) {
3930 		cnss_pr_dbg("Platform Bus Interconnect path not configured\n");
3931 		return 0;
3932 	}
3933 	ret = of_property_read_u32(plat_priv->plat_dev->dev.of_node,
3934 				   "qcom,bus-bw-cfg-count",
3935 				   &plat_priv->icc.bus_bw_cfg_count);
3936 	if (ret) {
3937 		cnss_pr_err("Failed to get Bus BW Config table size\n");
3938 		goto cleanup;
3939 	}
3940 	cfg_arr_size = plat_priv->icc.path_count *
3941 			 plat_priv->icc.bus_bw_cfg_count * CNSS_ICC_VOTE_MAX;
3942 	cfg_arr = kcalloc(cfg_arr_size, sizeof(*cfg_arr), GFP_KERNEL);
3943 	if (!cfg_arr) {
3944 		cnss_pr_err("Failed to alloc cfg table mem\n");
3945 		ret = -ENOMEM;
3946 		goto cleanup;
3947 	}
3948 
3949 	ret = of_property_read_u32_array(plat_priv->plat_dev->dev.of_node,
3950 					 "qcom,bus-bw-cfg", cfg_arr,
3951 					 cfg_arr_size);
3952 	if (ret) {
3953 		cnss_pr_err("Invalid Bus BW Config Table\n");
3954 		goto cleanup;
3955 	}
3956 
3957 	cnss_pr_dbg("ICC Path_Count: %d BW_CFG_Count: %d\n",
3958 		    plat_priv->icc.path_count, plat_priv->icc.bus_bw_cfg_count);
3959 
3960 	for (idx = 0; idx < plat_priv->icc.path_count; idx++) {
3961 		bus_bw_info = devm_kzalloc(dev, sizeof(*bus_bw_info),
3962 					   GFP_KERNEL);
3963 		if (!bus_bw_info) {
3964 			ret = -ENOMEM;
3965 			goto out;
3966 		}
3967 		ret = of_property_read_string_index(dev->of_node,
3968 						    "interconnect-names", idx,
3969 						    &bus_bw_info->icc_name);
3970 		if (ret)
3971 			goto out;
3972 
3973 		bus_bw_info->icc_path =
3974 			of_icc_get(&plat_priv->plat_dev->dev,
3975 				   bus_bw_info->icc_name);
3976 
3977 		if (IS_ERR(bus_bw_info->icc_path))  {
3978 			ret = PTR_ERR(bus_bw_info->icc_path);
3979 			if (ret != -EPROBE_DEFER) {
3980 				cnss_pr_err("Failed to get Interconnect path for %s. Err: %d\n",
3981 					    bus_bw_info->icc_name, ret);
3982 				goto out;
3983 			}
3984 		}
3985 
3986 		bus_bw_info->cfg_table =
3987 			devm_kcalloc(dev, plat_priv->icc.bus_bw_cfg_count,
3988 				     sizeof(*bus_bw_info->cfg_table),
3989 				     GFP_KERNEL);
3990 		if (!bus_bw_info->cfg_table) {
3991 			ret = -ENOMEM;
3992 			goto out;
3993 		}
3994 		cnss_pr_dbg("ICC Vote CFG for path: %s\n",
3995 			    bus_bw_info->icc_name);
3996 		for (i = 0, j = (idx * plat_priv->icc.bus_bw_cfg_count *
3997 		     CNSS_ICC_VOTE_MAX);
3998 		     i < plat_priv->icc.bus_bw_cfg_count;
3999 		     i++, j += 2) {
4000 			bus_bw_info->cfg_table[i].avg_bw = cfg_arr[j];
4001 			bus_bw_info->cfg_table[i].peak_bw = cfg_arr[j + 1];
4002 			cnss_pr_dbg("ICC Vote BW: %d avg: %d peak: %d\n",
4003 				    i, bus_bw_info->cfg_table[i].avg_bw,
4004 				    bus_bw_info->cfg_table[i].peak_bw);
4005 		}
4006 		list_add_tail(&bus_bw_info->list,
4007 			      &plat_priv->icc.list_head);
4008 	}
4009 	kfree(cfg_arr);
4010 	return 0;
4011 out:
4012 	list_for_each_entry_safe(bus_bw_info, tmp,
4013 				 &plat_priv->icc.list_head, list) {
4014 		list_del(&bus_bw_info->list);
4015 	}
4016 cleanup:
4017 	kfree(cfg_arr);
4018 	memset(&plat_priv->icc, 0, sizeof(plat_priv->icc));
4019 	return ret;
4020 }
4021 
4022 static void cnss_unregister_bus_scale(struct cnss_plat_data *plat_priv)
4023 {
4024 	struct cnss_bus_bw_info *bus_bw_info, *tmp;
4025 
4026 	list_for_each_entry_safe(bus_bw_info, tmp,
4027 				 &plat_priv->icc.list_head, list) {
4028 		list_del(&bus_bw_info->list);
4029 		if (bus_bw_info->icc_path)
4030 			icc_put(bus_bw_info->icc_path);
4031 	}
4032 	memset(&plat_priv->icc, 0, sizeof(plat_priv->icc));
4033 }
4034 #else
4035 static int cnss_register_bus_scale(struct cnss_plat_data *plat_priv)
4036 {
4037 	return 0;
4038 }
4039 
4040 static void cnss_unregister_bus_scale(struct cnss_plat_data *plat_priv) {}
4041 #endif /* CONFIG_INTERCONNECT */
4042 
4043 void cnss_daemon_connection_update_cb(void *cb_ctx, bool status)
4044 {
4045 	struct cnss_plat_data *plat_priv = cb_ctx;
4046 
4047 	if (!plat_priv) {
4048 		cnss_pr_err("%s: Invalid context\n", __func__);
4049 		return;
4050 	}
4051 	if (status) {
4052 		cnss_pr_info("CNSS Daemon connected\n");
4053 		set_bit(CNSS_DAEMON_CONNECTED, &plat_priv->driver_state);
4054 		complete(&plat_priv->daemon_connected);
4055 	} else {
4056 		cnss_pr_info("CNSS Daemon disconnected\n");
4057 		reinit_completion(&plat_priv->daemon_connected);
4058 		clear_bit(CNSS_DAEMON_CONNECTED, &plat_priv->driver_state);
4059 	}
4060 }
4061 
4062 static ssize_t enable_hds_store(struct device *dev,
4063 				struct device_attribute *attr,
4064 				const char *buf, size_t count)
4065 {
4066 	struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4067 	unsigned int enable_hds = 0;
4068 
4069 	if (!plat_priv)
4070 		return -ENODEV;
4071 
4072 	if (sscanf(buf, "%du", &enable_hds) != 1) {
4073 		cnss_pr_err("Invalid enable_hds sysfs command\n");
4074 		return -EINVAL;
4075 	}
4076 
4077 	if (enable_hds)
4078 		plat_priv->hds_enabled = true;
4079 	else
4080 		plat_priv->hds_enabled = false;
4081 
4082 	cnss_pr_dbg("%s HDS file download, count is %zu\n",
4083 		    plat_priv->hds_enabled ? "Enable" : "Disable", count);
4084 
4085 	return count;
4086 }
4087 
4088 static ssize_t recovery_show(struct device *dev,
4089 			     struct device_attribute *attr,
4090 			     char *buf)
4091 {
4092 	struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4093 	u32 buf_size = PAGE_SIZE;
4094 	u32 curr_len = 0;
4095 	u32 buf_written = 0;
4096 
4097 	if (!plat_priv)
4098 		return -ENODEV;
4099 
4100 	buf_written = scnprintf(buf, buf_size,
4101 				"Usage: echo [recovery_bitmap] > /sys/kernel/cnss/recovery\n"
4102 				"BIT0 -- wlan fw recovery\n"
4103 				"BIT1 -- wlan pcss recovery\n"
4104 				"---------------------------------\n");
4105 	curr_len += buf_written;
4106 
4107 	buf_written = scnprintf(buf + curr_len, buf_size - curr_len,
4108 				"WLAN recovery %s[%d]\n",
4109 				plat_priv->recovery_enabled ? "Enabled" : "Disabled",
4110 				plat_priv->recovery_enabled);
4111 	curr_len += buf_written;
4112 
4113 	buf_written = scnprintf(buf + curr_len, buf_size - curr_len,
4114 				"WLAN PCSS recovery %s[%d]\n",
4115 				plat_priv->recovery_pcss_enabled ? "Enabled" : "Disabled",
4116 				plat_priv->recovery_pcss_enabled);
4117 	curr_len += buf_written;
4118 
4119 	/*
4120 	 * curr_len is currently guaranteed to stay within PAGE_SIZE. If a
4121 	 * new item or a non-fixed-size item is added later, add a check to
4122 	 * make sure curr_len does not exceed PAGE_SIZE.
4123 	 */
4124 	return curr_len;
4125 }
4126 
4127 static ssize_t tme_opt_file_download_show(struct device *dev,
4128 			     struct device_attribute *attr, char *buf)
4129 {
4130 	u32 buf_size = PAGE_SIZE;
4131 	u32 curr_len = 0;
4132 	u32 buf_written = 0;
4133 
4134 	buf_written = scnprintf(buf, buf_size,
4135 				"Usage: echo [file_type] > /sys/kernel/cnss/tme_opt_file_download\n"
4136 				"file_type = sec -- For OEM_FUSE file\n"
4137 				"file_type = rpr -- For RPR file\n"
4138 				"file_type = dpr -- For DPR file\n");
4139 
4140 	curr_len += buf_written;
4141 	return curr_len;
4142 }
4143 
4144 static ssize_t time_sync_period_show(struct device *dev,
4145 				     struct device_attribute *attr,
4146 				     char *buf)
4147 {
4148 	struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4149 
4150 	return scnprintf(buf, PAGE_SIZE, "%u ms\n",
4151 			plat_priv->ctrl_params.time_sync_period);
4152 }
4153 
4154 /**
4155  * cnss_get_min_time_sync_period_by_vote() - Get minimum time sync period
4156  * @plat_priv: Platform data structure
4157  *
4158  * Return: the minimum time sync period among the WLAN driver and sysfs votes
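 *         (for example, votes of {5000, CNSS_TIME_SYNC_PERIOD_INVALID}
 *         yield 5000)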
4159  */
4160 uint32_t cnss_get_min_time_sync_period_by_vote(struct cnss_plat_data *plat_priv)
4161 {
4162 	unsigned int i, min_time_sync_period = CNSS_TIME_SYNC_PERIOD_INVALID;
4163 	unsigned int time_sync_period;
4164 
4165 	for (i = 0; i < TIME_SYNC_VOTE_MAX; i++) {
4166 		time_sync_period = plat_priv->ctrl_params.time_sync_period_vote[i];
4167 		if (min_time_sync_period > time_sync_period)
4168 			min_time_sync_period = time_sync_period;
4169 	}
4170 
4171 	return min_time_sync_period;
4172 }
4173 
4174 static ssize_t time_sync_period_store(struct device *dev,
4175 				      struct device_attribute *attr,
4176 				      const char *buf, size_t count)
4177 {
4178 	struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4179 	unsigned int time_sync_period = 0;
4180 
4181 	if (!plat_priv)
4182 		return -ENODEV;
4183 
4184 	if (sscanf(buf, "%du", &time_sync_period) != 1) {
4185 		cnss_pr_err("Invalid time sync sysfs command\n");
4186 		return -EINVAL;
4187 	}
4188 
4189 	if (time_sync_period < CNSS_MIN_TIME_SYNC_PERIOD) {
4190 		cnss_pr_err("Invalid time sync value\n");
4191 		return -EINVAL;
4192 	}
4193 	plat_priv->ctrl_params.time_sync_period_vote[TIME_SYNC_VOTE_CNSS] =
4194 		time_sync_period;
4195 	time_sync_period = cnss_get_min_time_sync_period_by_vote(plat_priv);
4196 
4197 	if (time_sync_period == CNSS_TIME_SYNC_PERIOD_INVALID) {
4198 		cnss_pr_err("Invalid min time sync value\n");
4199 		return -EINVAL;
4200 	}
4201 
4202 	cnss_bus_update_time_sync_period(plat_priv, time_sync_period);
4203 
4204 	return count;
4205 }
4206 
4207 /**
4208  * cnss_update_time_sync_period() - Set time sync period given by driver
4209  * @dev: device structure
4210  * @time_sync_period: time sync period value
4211  *
4212  * Update the WLAN driver's time sync period vote and apply the minimum of
4213  * the stored votes from the WLAN driver and the sysfs configuration.
4214  * Return: 0 for success, negative error code for an invalid value or no device
4215  */
4216 int cnss_update_time_sync_period(struct device *dev, uint32_t time_sync_period)
4217 {
4218 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4219 
4220 	if (!plat_priv)
4221 		return -ENODEV;
4222 
4223 	if (time_sync_period < CNSS_MIN_TIME_SYNC_PERIOD) {
4224 		cnss_pr_err("Invalid time sync value\n");
4225 		return -EINVAL;
4226 	}
4227 
4228 	plat_priv->ctrl_params.time_sync_period_vote[TIME_SYNC_VOTE_WLAN] =
4229 		time_sync_period;
4230 	time_sync_period = cnss_get_min_time_sync_period_by_vote(plat_priv);
4231 
4232 	if (time_sync_period == CNSS_TIME_SYNC_PERIOD_INVALID) {
4233 		cnss_pr_err("Invalid min time sync value\n");
4234 		return -EINVAL;
4235 	}
4236 
4237 	cnss_bus_update_time_sync_period(plat_priv, time_sync_period);
4238 	return 0;
4239 }
4240 EXPORT_SYMBOL(cnss_update_time_sync_period);
4241 
4242 /**
4243  * cnss_reset_time_sync_period() - Reset time sync period
4244  * @dev: device structure
4245  *
4246  * Mark the WLAN driver's time sync period vote as invalid and re-apply
4247  * the minimum of the remaining stored votes from the WLAN driver and
4248  * the sysfs configuration.
4249  * Return: 0 for success, negative error code if there is no device
4250  */
4251 int cnss_reset_time_sync_period(struct device *dev)
4252 {
4253 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4254 	unsigned int time_sync_period = 0;
4255 
4256 	if (!plat_priv)
4257 		return -ENODEV;
4258 
4259 	/* The driver vote is set to invalid on reset, so the only
4260 	 * remaining valid vote to check is the sysfs configuration.
4261 	 */
4262 	plat_priv->ctrl_params.time_sync_period_vote[TIME_SYNC_VOTE_WLAN] =
4263 		CNSS_TIME_SYNC_PERIOD_INVALID;
4264 	time_sync_period = cnss_get_min_time_sync_period_by_vote(plat_priv);
4265 
4266 	if (time_sync_period == CNSS_TIME_SYNC_PERIOD_INVALID) {
4267 		cnss_pr_err("Invalid min time sync value\n");
4268 		return -EINVAL;
4269 	}
4270 
4271 	cnss_bus_update_time_sync_period(plat_priv, time_sync_period);
4272 
4273 	return 0;
4274 }
4275 EXPORT_SYMBOL(cnss_reset_time_sync_period);
4276 
4277 static ssize_t recovery_store(struct device *dev,
4278 			      struct device_attribute *attr,
4279 			      const char *buf, size_t count)
4280 {
4281 	struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4282 	unsigned int recovery = 0;
4283 
4284 	if (!plat_priv)
4285 		return -ENODEV;
4286 
4287 	if (sscanf(buf, "%du", &recovery) != 1) {
4288 		cnss_pr_err("Invalid recovery sysfs command\n");
4289 		return -EINVAL;
4290 	}
4291 
4292 	plat_priv->recovery_enabled = !!(recovery & CNSS_WLAN_RECOVERY);
4293 	plat_priv->recovery_pcss_enabled = !!(recovery & CNSS_PCSS_RECOVERY);
4294 
4295 	cnss_pr_dbg("%s WLAN recovery, count is %zu\n",
4296 		    plat_priv->recovery_enabled ? "Enable" : "Disable", count);
4297 	cnss_pr_dbg("%s PCSS recovery, count is %zu\n",
4298 		    plat_priv->recovery_pcss_enabled ? "Enable" : "Disable", count);
4299 
4300 	cnss_send_subsys_restart_level_msg(plat_priv);
4301 	return count;
4302 }
4303 
4304 static ssize_t shutdown_store(struct device *dev,
4305 			      struct device_attribute *attr,
4306 			      const char *buf, size_t count)
4307 {
4308 	struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4309 
4310 	cnss_pr_dbg("Received shutdown notification\n");
4311 	if (plat_priv) {
4312 		set_bit(CNSS_IN_REBOOT, &plat_priv->driver_state);
4313 		cnss_bus_update_status(plat_priv, CNSS_SYS_REBOOT);
4314 		del_timer(&plat_priv->fw_boot_timer);
4315 		complete_all(&plat_priv->power_up_complete);
4316 		complete_all(&plat_priv->cal_complete);
4317 		cnss_pr_dbg("Shutdown notification handled\n");
4318 	}
4319 
4320 	return count;
4321 }
4322 
4323 static ssize_t fs_ready_store(struct device *dev,
4324 			      struct device_attribute *attr,
4325 			      const char *buf, size_t count)
4326 {
4327 	int fs_ready = 0;
4328 	struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4329 
4330 	if (sscanf(buf, "%du", &fs_ready) != 1)
4331 		return -EINVAL;
4332 
4333 	cnss_pr_dbg("File system is ready, fs_ready is %d, count is %zu\n",
4334 		    fs_ready, count);
4335 
4336 	if (!plat_priv) {
4337 		cnss_pr_err("plat_priv is NULL\n");
4338 		return count;
4339 	}
4340 
4341 	if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks)) {
4342 		cnss_pr_dbg("QMI is bypassed\n");
4343 		return count;
4344 	}
4345 
4346 	set_bit(CNSS_FS_READY, &plat_priv->driver_state);
4347 	if (fs_ready == FILE_SYSTEM_READY && plat_priv->cbc_enabled) {
4348 		cnss_driver_event_post(plat_priv,
4349 				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START,
4350 				       0, NULL);
4351 	}
4352 
4353 	return count;
4354 }
4355 
4356 static ssize_t qdss_trace_start_store(struct device *dev,
4357 				      struct device_attribute *attr,
4358 				      const char *buf, size_t count)
4359 {
4360 	struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4361 
4362 	wlfw_qdss_trace_start(plat_priv);
4363 	cnss_pr_dbg("Received QDSS start command\n");
4364 	return count;
4365 }
4366 
4367 static ssize_t qdss_trace_stop_store(struct device *dev,
4368 				     struct device_attribute *attr,
4369 				     const char *buf, size_t count)
4370 {
4371 	struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4372 	u32 option = 0;
4373 
4374 	if (sscanf(buf, "%du", &option) != 1)
4375 		return -EINVAL;
4376 
4377 	wlfw_qdss_trace_stop(plat_priv, option);
4378 	cnss_pr_dbg("Received QDSS stop command\n");
4379 	return count;
4380 }
4381 
4382 static ssize_t qdss_conf_download_store(struct device *dev,
4383 					struct device_attribute *attr,
4384 					const char *buf, size_t count)
4385 {
4386 	struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4387 
4388 	cnss_wlfw_qdss_dnld_send_sync(plat_priv);
4389 	cnss_pr_dbg("Received QDSS download config command\n");
4390 	return count;
4391 }
4392 
4393 static ssize_t tme_opt_file_download_store(struct device *dev,
4394 					struct device_attribute *attr,
4395 					const char *buf, size_t count)
4396 {
4397 	struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4398 	char cmd[5];
4399 
4400 	if (sscanf(buf, "%4s", cmd) != 1)
4401 		return -EINVAL;
4402 
4403 	if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
4404 		cnss_pr_err("Firmware is not ready yet\n");
4405 		return 0;
4406 	}
4407 
4408 	if (plat_priv->device_id == PEACH_DEVICE_ID &&
4409 	    cnss_bus_runtime_pm_get_sync(plat_priv) < 0)
4410 		goto runtime_pm_put;
4411 
4412 	if (strcmp(cmd, "sec") == 0) {
4413 		cnss_bus_load_tme_opt_file(plat_priv, WLFW_TME_LITE_OEM_FUSE_FILE_V01);
4414 		cnss_wlfw_tme_opt_file_dnld_send_sync(plat_priv, WLFW_TME_LITE_OEM_FUSE_FILE_V01);
4415 	} else if (strcmp(cmd, "rpr") == 0) {
4416 		cnss_bus_load_tme_opt_file(plat_priv, WLFW_TME_LITE_RPR_FILE_V01);
4417 		cnss_wlfw_tme_opt_file_dnld_send_sync(plat_priv, WLFW_TME_LITE_RPR_FILE_V01);
4418 	} else if (strcmp(cmd, "dpr") == 0) {
4419 		cnss_bus_load_tme_opt_file(plat_priv, WLFW_TME_LITE_DPR_FILE_V01);
4420 		cnss_wlfw_tme_opt_file_dnld_send_sync(plat_priv, WLFW_TME_LITE_DPR_FILE_V01);
4421 	}
4422 
4423 	cnss_pr_dbg("Received tme_opt_file_download indication cmd: %s\n", cmd);
4424 
4425 runtime_pm_put:
4426 	if (plat_priv->device_id == PEACH_DEVICE_ID)
4427 		cnss_bus_runtime_pm_put(plat_priv);
4428 	return count;
4429 }
4430 
4431 static ssize_t hw_trace_override_store(struct device *dev,
4432 				       struct device_attribute *attr,
4433 				       const char *buf, size_t count)
4434 {
4435 	struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4436 	int tmp = 0;
4437 
4438 	if (sscanf(buf, "%du", &tmp) != 1)
4439 		return -EINVAL;
4440 
4441 	plat_priv->hw_trc_override = tmp;
4442 	cnss_pr_dbg("Received QDSS hw_trc_override indication\n");
4443 	return count;
4444 }
4445 
4446 static ssize_t charger_mode_store(struct device *dev,
4447 				  struct device_attribute *attr,
4448 				  const char *buf, size_t count)
4449 {
4450 	struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4451 	int tmp = 0;
4452 
4453 	if (sscanf(buf, "%du", &tmp) != 1)
4454 		return -EINVAL;
4455 
4456 	plat_priv->charger_mode = tmp;
4457 	cnss_pr_dbg("Received Charger Mode: %d\n", tmp);
4458 	return count;
4459 }
4460 
4461 static DEVICE_ATTR_WO(fs_ready);
4462 static DEVICE_ATTR_WO(shutdown);
4463 static DEVICE_ATTR_RW(recovery);
4464 static DEVICE_ATTR_WO(enable_hds);
4465 static DEVICE_ATTR_WO(qdss_trace_start);
4466 static DEVICE_ATTR_WO(qdss_trace_stop);
4467 static DEVICE_ATTR_WO(qdss_conf_download);
4468 static DEVICE_ATTR_RW(tme_opt_file_download);
4469 static DEVICE_ATTR_WO(hw_trace_override);
4470 static DEVICE_ATTR_WO(charger_mode);
4471 static DEVICE_ATTR_RW(time_sync_period);
4472 
4473 static struct attribute *cnss_attrs[] = {
4474 	&dev_attr_fs_ready.attr,
4475 	&dev_attr_shutdown.attr,
4476 	&dev_attr_recovery.attr,
4477 	&dev_attr_enable_hds.attr,
4478 	&dev_attr_qdss_trace_start.attr,
4479 	&dev_attr_qdss_trace_stop.attr,
4480 	&dev_attr_qdss_conf_download.attr,
4481 	&dev_attr_tme_opt_file_download.attr,
4482 	&dev_attr_hw_trace_override.attr,
4483 	&dev_attr_charger_mode.attr,
4484 	&dev_attr_time_sync_period.attr,
4485 	NULL,
4486 };
4487 
4488 static struct attribute_group cnss_attr_group = {
4489 	.attrs = cnss_attrs,
4490 };
4491 
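/* Expose the platform device under /sys/kernel/ through a "cnss[_<idx>]"
 * symlink plus a legacy "shutdown_wlan[_<idx>]" symlink to the same kobject.
 */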
4492 static int cnss_create_sysfs_link(struct cnss_plat_data *plat_priv)
4493 {
4494 	struct device *dev = &plat_priv->plat_dev->dev;
4495 	int ret;
4496 	char cnss_name[CNSS_FS_NAME_SIZE];
4497 	char shutdown_name[32];
4498 
4499 	if (cnss_is_dual_wlan_enabled()) {
4500 		snprintf(cnss_name, CNSS_FS_NAME_SIZE,
4501 			 CNSS_FS_NAME "_%d", plat_priv->plat_idx);
4502 		snprintf(shutdown_name, sizeof(shutdown_name),
4503 			 "shutdown_wlan_%d", plat_priv->plat_idx);
4504 	} else {
4505 		snprintf(cnss_name, CNSS_FS_NAME_SIZE, CNSS_FS_NAME);
4506 		snprintf(shutdown_name, sizeof(shutdown_name),
4507 			 "shutdown_wlan");
4508 	}
4509 
4510 	ret = sysfs_create_link(kernel_kobj, &dev->kobj, cnss_name);
4511 	if (ret) {
4512 		cnss_pr_err("Failed to create cnss link, err = %d\n",
4513 			    ret);
4514 		goto out;
4515 	}
4516 
4517 	/* This is only for backward compatibility. */
4518 	ret = sysfs_create_link(kernel_kobj, &dev->kobj, shutdown_name);
4519 	if (ret) {
4520 		cnss_pr_err("Failed to create shutdown_wlan link, err = %d\n",
4521 			    ret);
4522 		goto rm_cnss_link;
4523 	}
4524 
4525 	return 0;
4526 
4527 rm_cnss_link:
4528 	sysfs_remove_link(kernel_kobj, cnss_name);
4529 out:
4530 	return ret;
4531 }
4532 
4533 static void cnss_remove_sysfs_link(struct cnss_plat_data *plat_priv)
4534 {
4535 	char cnss_name[CNSS_FS_NAME_SIZE];
4536 	char shutdown_name[32];
4537 
4538 	if (cnss_is_dual_wlan_enabled()) {
4539 		snprintf(cnss_name, CNSS_FS_NAME_SIZE,
4540 			 CNSS_FS_NAME "_%d", plat_priv->plat_idx);
4541 		snprintf(shutdown_name, sizeof(shutdown_name),
4542 			 "shutdown_wlan_%d", plat_priv->plat_idx);
4543 	} else {
4544 		snprintf(cnss_name, CNSS_FS_NAME_SIZE, CNSS_FS_NAME);
4545 		snprintf(shutdown_name, sizeof(shutdown_name),
4546 			 "shutdown_wlan");
4547 	}
4548 
4549 	sysfs_remove_link(kernel_kobj, shutdown_name);
4550 	sysfs_remove_link(kernel_kobj, cnss_name);
4551 }
4552 
4553 static int cnss_create_sysfs(struct cnss_plat_data *plat_priv)
4554 {
4555 	int ret = 0;
4556 
4557 	ret = devm_device_add_group(&plat_priv->plat_dev->dev,
4558 				    &cnss_attr_group);
4559 	if (ret) {
4560 		cnss_pr_err("Failed to create cnss device group, err = %d\n",
4561 			    ret);
4562 		goto out;
4563 	}
4564 
4565 	cnss_create_sysfs_link(plat_priv);
4566 
4567 	return 0;
4568 out:
4569 	return ret;
4570 }
4571 
4572 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 2, 0))
4573 union cnss_device_group_devres {
4574 	const struct attribute_group *group;
4575 };
4576 
4577 static void devm_cnss_group_remove(struct device *dev, void *res)
4578 {
4579 	union cnss_device_group_devres *devres = res;
4580 	const struct attribute_group *group = devres->group;
4581 
4582 	cnss_pr_dbg("%s: removing group %p\n", __func__, group);
4583 	sysfs_remove_group(&dev->kobj, group);
4584 }
4585 
4586 static int devm_cnss_group_match(struct device *dev, void *res, void *data)
4587 {
4588 	return ((union cnss_device_group_devres *)res)->group == data;
4589 }
4590 
4591 static void cnss_remove_sysfs(struct cnss_plat_data *plat_priv)
4592 {
4593 	cnss_remove_sysfs_link(plat_priv);
4594 	WARN_ON(devres_release(&plat_priv->plat_dev->dev,
4595 			       devm_cnss_group_remove, devm_cnss_group_match,
4596 			       (void *)&cnss_attr_group));
4597 }
4598 #else
4599 static void cnss_remove_sysfs(struct cnss_plat_data *plat_priv)
4600 {
4601 	cnss_remove_sysfs_link(plat_priv);
4602 	devm_device_remove_group(&plat_priv->plat_dev->dev, &cnss_attr_group);
4603 }
4604 #endif
4605 
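/* Create the single-threaded, unbound workqueue that serializes driver
 * events posted via cnss_driver_event_post().
 */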
4606 static int cnss_event_work_init(struct cnss_plat_data *plat_priv)
4607 {
4608 	spin_lock_init(&plat_priv->event_lock);
4609 	plat_priv->event_wq = alloc_workqueue("cnss_driver_event",
4610 					      WQ_UNBOUND, 1);
4611 	if (!plat_priv->event_wq) {
4612 		cnss_pr_err("Failed to create event workqueue!\n");
4613 		return -EFAULT;
4614 	}
4615 
4616 	INIT_WORK(&plat_priv->event_work, cnss_driver_event_work);
4617 	INIT_LIST_HEAD(&plat_priv->event_list);
4618 
4619 	return 0;
4620 }
4621 
4622 static void cnss_event_work_deinit(struct cnss_plat_data *plat_priv)
4623 {
4624 	destroy_workqueue(plat_priv->event_wq);
4625 }
4626 
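/* On reboot, mark the driver as rebooting, notify the bus layer, stop the
 * FW boot timer and wake up anyone waiting for power-up or calibration.
 */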
4627 static int cnss_reboot_notifier(struct notifier_block *nb,
4628 				unsigned long action,
4629 				void *data)
4630 {
4631 	struct cnss_plat_data *plat_priv =
4632 		container_of(nb, struct cnss_plat_data, reboot_nb);
4633 
4634 	set_bit(CNSS_IN_REBOOT, &plat_priv->driver_state);
4635 	cnss_bus_update_status(plat_priv, CNSS_SYS_REBOOT);
4636 	del_timer(&plat_priv->fw_boot_timer);
4637 	complete_all(&plat_priv->power_up_complete);
4638 	complete_all(&plat_priv->cal_complete);
4639 	cnss_pr_dbg("Reboot is in progress with action %lu\n", action);
4640 
4641 	return NOTIFY_DONE;
4642 }
4643 
4644 #ifdef CONFIG_CNSS_HW_SECURE_DISABLE
4645 #ifdef CONFIG_CNSS_HW_SECURE_SMEM
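/* Read the peripheral security state word from SMEM and set or clear
 * CNSS_WLAN_HW_DISABLED based on the WLAN bit. If the SMEM item is absent,
 * the secure peripheral feature is treated as unsupported until reboot.
 */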
4646 int cnss_wlan_hw_disable_check(struct cnss_plat_data *plat_priv)
4647 {
4648 	uint32_t *peripheralStateInfo = NULL;
4649 	size_t size = 0;
4650 
4651 	/* Once this flag is set, secure peripheral feature
4652 	 * will not be supported till next reboot
4653 	 */
4654 	if (plat_priv->sec_peri_feature_disable)
4655 		return 0;
4656 
4657 	peripheralStateInfo = qcom_smem_get(QCOM_SMEM_HOST_ANY, PERISEC_SMEM_ID, &size);
4658 	if (IS_ERR_OR_NULL(peripheralStateInfo)) {
4659 		if (PTR_ERR(peripheralStateInfo) != -ENOENT)
4660 			CNSS_ASSERT(0);
4661 
4662 		cnss_pr_dbg("Secure HW feature not enabled. ret = %ld\n",
4663 			    PTR_ERR(peripheralStateInfo));
4664 		plat_priv->sec_peri_feature_disable = true;
4665 		return 0;
4666 	}
4667 
4668 	cnss_pr_dbg("Secure HW state: %d\n", *peripheralStateInfo);
4669 	if ((*peripheralStateInfo >> (HW_WIFI_UID - 0x500)) & 0x1)
4670 		set_bit(CNSS_WLAN_HW_DISABLED,
4671 			&plat_priv->driver_state);
4672 	else
4673 		clear_bit(CNSS_WLAN_HW_DISABLED,
4674 			  &plat_priv->driver_state);
4675 
4676 	return 0;
4677 }
4678 #else
4679 int cnss_wlan_hw_disable_check(struct cnss_plat_data *plat_priv)
4680 {
4681 	struct Object client_env;
4682 	struct Object app_object;
4683 	u32 wifi_uid = HW_WIFI_UID;
4684 	union ObjectArg obj_arg[2] = {{{0, 0}}};
4685 	int ret;
4686 	u8 state = 0;
4687 
4688 	/* Once this flag is set, secure peripheral feature
4689 	 * will not be supported till next reboot
4690 	 */
4691 	if (plat_priv->sec_peri_feature_disable)
4692 		return 0;
4693 
4694 	/* get rootObj */
4695 	ret = get_client_env_object(&client_env);
4696 	if (ret) {
4697 		cnss_pr_dbg("Failed to get client_env_object, ret: %d\n", ret);
4698 		goto end;
4699 	}
4700 	ret = IClientEnv_open(client_env, HW_STATE_UID, &app_object);
4701 	if (ret) {
4702 		cnss_pr_dbg("Failed to get app_object, ret: %d\n",  ret);
4703 		if (ret == FEATURE_NOT_SUPPORTED) {
4704 			ret = 0; /* Do not Assert */
4705 			plat_priv->sec_peri_feature_disable = true;
4706 			cnss_pr_dbg("Secure HW feature not supported\n");
4707 		}
4708 		goto exit_release_clientenv;
4709 	}
4710 
4711 	obj_arg[0].b = (struct ObjectBuf) {&wifi_uid, sizeof(u32)};
4712 	obj_arg[1].b = (struct ObjectBuf) {&state, sizeof(u8)};
4713 	ret = Object_invoke(app_object, HW_OP_GET_STATE, obj_arg,
4714 			    ObjectCounts_pack(1, 1, 0, 0));
4715 
4716 	cnss_pr_dbg("SMC invoke ret: %d state: %d\n", ret, state);
4717 	if (ret) {
4718 		if (ret == PERIPHERAL_NOT_FOUND) {
4719 			ret = 0; /* Do not Assert */
4720 			plat_priv->sec_peri_feature_disable = true;
4721 			cnss_pr_dbg("Secure HW mode is not updated. Peripheral not found\n");
4722 		}
4723 		goto exit_release_app_obj;
4724 	}
4725 
4726 	if (state == 1)
4727 		set_bit(CNSS_WLAN_HW_DISABLED,
4728 			&plat_priv->driver_state);
4729 	else
4730 		clear_bit(CNSS_WLAN_HW_DISABLED,
4731 			  &plat_priv->driver_state);
4732 
4733 exit_release_app_obj:
4734 	Object_release(app_object);
4735 exit_release_clientenv:
4736 	Object_release(client_env);
4737 end:
4738 	if (ret) {
4739 		cnss_pr_err("Unable to get HW disable status\n");
4740 		CNSS_ASSERT(0);
4741 	}
4742 	return ret;
4743 }
4744 #endif
4745 #else
4746 int cnss_wlan_hw_disable_check(struct cnss_plat_data *plat_priv)
4747 {
4748 	return 0;
4749 }
4750 #endif
4751 
4752 #ifdef CONFIG_DISABLE_CNSS_SRAM_DUMP
4753 static void cnss_sram_dump_init(struct cnss_plat_data *plat_priv)
4754 {
4755 }
4756 #else
4757 static void cnss_sram_dump_init(struct cnss_plat_data *plat_priv)
4758 {
4759 	if (plat_priv->device_id == QCA6490_DEVICE_ID &&
4760 	    cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
4761 		plat_priv->sram_dump = kcalloc(SRAM_DUMP_SIZE, 1, GFP_KERNEL);
4762 }
4763 #endif
4764 
4765 #if IS_ENABLED(CONFIG_WCNSS_MEM_PRE_ALLOC)
4766 static void cnss_initialize_mem_pool(unsigned long device_id)
4767 {
4768 	cnss_initialize_prealloc_pool(device_id);
4769 }
4770 static void cnss_deinitialize_mem_pool(void)
4771 {
4772 	cnss_deinitialize_prealloc_pool();
4773 }
4774 #else
4775 static void cnss_initialize_mem_pool(unsigned long device_id)
4776 {
4777 }
4778 static void cnss_deinitialize_mem_pool(void)
4779 {
4780 }
4781 #endif
4782 
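/* Initialize the miscellaneous platform resources: SOL GPIO, FW boot timer,
 * wakeup sources, completions, reboot notifier and the daemon IPC callback.
 */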
4783 static int cnss_misc_init(struct cnss_plat_data *plat_priv)
4784 {
4785 	int ret;
4786 
4787 	ret = cnss_init_sol_gpio(plat_priv);
4788 	if (ret)
4789 		return ret;
4790 
4791 	timer_setup(&plat_priv->fw_boot_timer,
4792 		    cnss_bus_fw_boot_timeout_hdlr, 0);
4793 
4794 	ret = device_init_wakeup(&plat_priv->plat_dev->dev, true);
4795 	if (ret)
4796 		cnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
4797 			    ret);
4798 
4799 	INIT_WORK(&plat_priv->recovery_work, cnss_recovery_work_handler);
4800 	init_completion(&plat_priv->power_up_complete);
4801 	init_completion(&plat_priv->cal_complete);
4802 	init_completion(&plat_priv->rddm_complete);
4803 	init_completion(&plat_priv->recovery_complete);
4804 	init_completion(&plat_priv->daemon_connected);
4805 	mutex_init(&plat_priv->dev_lock);
4806 	mutex_init(&plat_priv->driver_ops_lock);
4807 
4808 	plat_priv->reboot_nb.notifier_call = cnss_reboot_notifier;
4809 	ret = register_reboot_notifier(&plat_priv->reboot_nb);
4810 	if (ret)
4811 		cnss_pr_err("Failed to register reboot notifier, err = %d\n",
4812 			    ret);
4813 
4814 	plat_priv->recovery_ws =
4815 		wakeup_source_register(&plat_priv->plat_dev->dev,
4816 				       "CNSS_FW_RECOVERY");
4817 	if (!plat_priv->recovery_ws)
4818 		cnss_pr_err("Failed to setup FW recovery wake source\n");
4819 
4820 	ret = cnss_plat_ipc_register(CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01,
4821 				     cnss_daemon_connection_update_cb,
4822 				     plat_priv);
4823 	if (ret)
4824 		cnss_pr_err("QMI IPC connection call back register failed, err = %d\n",
4825 			    ret);
4826 
4827 	cnss_sram_dump_init(plat_priv);
4828 
4829 	if (of_property_read_bool(plat_priv->plat_dev->dev.of_node,
4830 				  "qcom,rc-ep-short-channel"))
4831 		cnss_set_feature_list(plat_priv, CNSS_RC_EP_ULTRASHORT_CHANNEL_V01);
4832 	if (plat_priv->device_id == PEACH_DEVICE_ID)
4833 		cnss_set_feature_list(plat_priv, CNSS_AUX_UC_SUPPORT_V01);
4834 
4835 	return 0;
4836 }
4837 
4838 #ifdef CONFIG_DISABLE_CNSS_SRAM_DUMP
4839 static void cnss_sram_dump_deinit(struct cnss_plat_data *plat_priv)
4840 {
4841 }
4842 #else
4843 static void cnss_sram_dump_deinit(struct cnss_plat_data *plat_priv)
4844 {
4845 	if (plat_priv->device_id == QCA6490_DEVICE_ID &&
4846 	    cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
4847 		kfree(plat_priv->sram_dump);
4848 }
4849 #endif
4850 
4851 static void cnss_misc_deinit(struct cnss_plat_data *plat_priv)
4852 {
4853 	cnss_plat_ipc_unregister(CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01,
4854 				 plat_priv);
4855 	complete_all(&plat_priv->recovery_complete);
4856 	complete_all(&plat_priv->rddm_complete);
4857 	complete_all(&plat_priv->cal_complete);
4858 	complete_all(&plat_priv->power_up_complete);
4859 	complete_all(&plat_priv->daemon_connected);
4860 	device_init_wakeup(&plat_priv->plat_dev->dev, false);
4861 	unregister_reboot_notifier(&plat_priv->reboot_nb);
4862 	del_timer(&plat_priv->fw_boot_timer);
4863 	wakeup_source_unregister(plat_priv->recovery_ws);
4864 	cnss_deinit_sol_gpio(plat_priv);
4865 	cnss_sram_dump_deinit(plat_priv);
4866 	kfree(plat_priv->on_chip_pmic_board_ids);
4867 }
4868 
4869 static void cnss_init_time_sync_period_default(struct cnss_plat_data *plat_priv)
4870 {
4871 	plat_priv->ctrl_params.time_sync_period_vote[TIME_SYNC_VOTE_WLAN] =
4872 		CNSS_TIME_SYNC_PERIOD_INVALID;
4873 	plat_priv->ctrl_params.time_sync_period_vote[TIME_SYNC_VOTE_CNSS] =
4874 		CNSS_TIME_SYNC_PERIOD_DEFAULT;
4875 }
4876 
4877 static void cnss_init_control_params(struct cnss_plat_data *plat_priv)
4878 {
4879 	plat_priv->ctrl_params.quirks = CNSS_QUIRKS_DEFAULT;
4880 
4881 	plat_priv->cbc_enabled = !IS_ENABLED(CONFIG_CNSS_EMULATION) &&
4882 		of_property_read_bool(plat_priv->plat_dev->dev.of_node,
4883 				      "qcom,wlan-cbc-enabled");
4884 
4885 	plat_priv->ctrl_params.mhi_timeout = CNSS_MHI_TIMEOUT_DEFAULT;
4886 	plat_priv->ctrl_params.mhi_m2_timeout = CNSS_MHI_M2_TIMEOUT_DEFAULT;
4887 	plat_priv->ctrl_params.qmi_timeout = CNSS_QMI_TIMEOUT_DEFAULT;
4888 	plat_priv->ctrl_params.bdf_type = CNSS_BDF_TYPE_DEFAULT;
4889 	plat_priv->ctrl_params.time_sync_period = CNSS_TIME_SYNC_PERIOD_DEFAULT;
4890 	cnss_init_time_sync_period_default(plat_priv);
4891 	/* ADSP power collapse is always enabled by default, so initialize
4892 	 * adsp_pc_enabled to true.
4893 	 */
4894 	plat_priv->adsp_pc_enabled = true;
4895 }
4896 
4897 static void cnss_get_pm_domain_info(struct cnss_plat_data *plat_priv)
4898 {
4899 	struct device *dev = &plat_priv->plat_dev->dev;
4900 
4901 	plat_priv->use_pm_domain =
4902 		of_property_read_bool(dev->of_node, "use-pm-domain");
4903 
4904 	cnss_pr_dbg("use-pm-domain is %d\n", plat_priv->use_pm_domain);
4905 }
4906 
4907 static void cnss_get_wlaon_pwr_ctrl_info(struct cnss_plat_data *plat_priv)
4908 {
4909 	struct device *dev = &plat_priv->plat_dev->dev;
4910 
4911 	plat_priv->set_wlaon_pwr_ctrl =
4912 		of_property_read_bool(dev->of_node, "qcom,set-wlaon-pwr-ctrl");
4913 
4914 	cnss_pr_dbg("set_wlaon_pwr_ctrl is %d\n",
4915 		    plat_priv->set_wlaon_pwr_ctrl);
4916 }
4917 
4918 static bool cnss_use_fw_path_with_prefix(struct cnss_plat_data *plat_priv)
4919 {
4920 	return (of_property_read_bool(plat_priv->plat_dev->dev.of_node,
4921 				      "qcom,converged-dt") ||
4922 		of_property_read_bool(plat_priv->plat_dev->dev.of_node,
4923 				      "qcom,same-dt-multi-dev") ||
4924 		of_property_read_bool(plat_priv->plat_dev->dev.of_node,
4925 				      "qcom,multi-wlan-exchg"));
4926 }
4927 
4928 static const struct platform_device_id cnss_platform_id_table[] = {
4929 	{ .name = "qca6174", .driver_data = QCA6174_DEVICE_ID, },
4930 	{ .name = "qca6290", .driver_data = QCA6290_DEVICE_ID, },
4931 	{ .name = "qca6390", .driver_data = QCA6390_DEVICE_ID, },
4932 	{ .name = "qca6490", .driver_data = QCA6490_DEVICE_ID, },
4933 	{ .name = "kiwi", .driver_data = KIWI_DEVICE_ID, },
4934 	{ .name = "mango", .driver_data = MANGO_DEVICE_ID, },
4935 	{ .name = "peach", .driver_data = PEACH_DEVICE_ID, },
4936 	{ .name = "qcaconv", .driver_data = 0, },
4937 	{ },
4938 };
4939 
4940 static const struct of_device_id cnss_of_match_table[] = {
4941 	{
4942 		.compatible = "qcom,cnss",
4943 		.data = (void *)&cnss_platform_id_table[0]},
4944 	{
4945 		.compatible = "qcom,cnss-qca6290",
4946 		.data = (void *)&cnss_platform_id_table[1]},
4947 	{
4948 		.compatible = "qcom,cnss-qca6390",
4949 		.data = (void *)&cnss_platform_id_table[2]},
4950 	{
4951 		.compatible = "qcom,cnss-qca6490",
4952 		.data = (void *)&cnss_platform_id_table[3]},
4953 	{
4954 		.compatible = "qcom,cnss-kiwi",
4955 		.data = (void *)&cnss_platform_id_table[4]},
4956 	{
4957 		.compatible = "qcom,cnss-mango",
4958 		.data = (void *)&cnss_platform_id_table[5]},
4959 	{
4960 		.compatible = "qcom,cnss-peach",
4961 		.data = (void *)&cnss_platform_id_table[6]},
4962 	{
4963 		.compatible = "qcom,cnss-qca-converged",
4964 		.data = (void *)&cnss_platform_id_table[7]},
4965 	{ },
4966 };
4967 MODULE_DEVICE_TABLE(of, cnss_of_match_table);
4968 
4969 static inline bool
4970 cnss_use_nv_mac(struct cnss_plat_data *plat_priv)
4971 {
4972 	return of_property_read_bool(plat_priv->plat_dev->dev.of_node,
4973 				     "use-nv-mac");
4974 }
4975 
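/* For converged device trees, select the matching "chip_cfg" child node by
 * sampling the wlan_sw_ctrl device-identifier GPIO: a high level selects the
 * QCA6490 (HSP) configuration and a low level selects KIWI (HMT).
 */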
4976 static int cnss_get_dev_cfg_node(struct cnss_plat_data *plat_priv)
4977 {
4978 	struct device_node *child;
4979 	u32 id, i;
4980 	int id_n,  device_identifier_gpio, ret;
4981 	u8 gpio_value;
4982 
4983 
4984 	if (plat_priv->dt_type != CNSS_DTT_CONVERGED)
4985 		return 0;
4986 
4987 	/* Parses the wlan_sw_ctrl gpio which is used to identify device */
4988 	ret = cnss_get_wlan_sw_ctrl(plat_priv);
4989 	if (ret) {
4990 		cnss_pr_dbg("Failed to parse wlan_sw_ctrl gpio, error: %d\n", ret);
4991 		return ret;
4992 	}
4993 
4994 	device_identifier_gpio = plat_priv->pinctrl_info.wlan_sw_ctrl_gpio;
4995 
4996 	gpio_value = gpio_get_value(device_identifier_gpio);
4997 	cnss_pr_dbg("Value of Device Identifier GPIO: %d\n", gpio_value);
4998 
4999 	for_each_available_child_of_node(plat_priv->plat_dev->dev.of_node,
5000 					 child) {
5001 		if (strcmp(child->name, "chip_cfg"))
5002 			continue;
5003 
5004 		id_n = of_property_count_u32_elems(child, "supported-ids");
5005 		if (id_n <= 0) {
5006 			cnss_pr_err("Device id is NOT set\n");
5007 			return -EINVAL;
5008 		}
5009 
5010 		for (i = 0; i < id_n; i++) {
5011 			ret = of_property_read_u32_index(child,
5012 							 "supported-ids",
5013 							 i, &id);
5014 			if (ret) {
5015 				cnss_pr_err("Failed to read supported ids\n");
5016 				return -EINVAL;
5017 			}
5018 
5019 			if (gpio_value && id == QCA6490_DEVICE_ID) {
5020 				plat_priv->plat_dev->dev.of_node = child;
5021 				plat_priv->device_id = QCA6490_DEVICE_ID;
5022 				cnss_utils_update_device_type(CNSS_HSP_DEVICE_TYPE);
5023 				cnss_pr_dbg("got node[%s@%d] for device[0x%x]\n",
5024 					    child->name, i, id);
5025 				return 0;
5026 			} else if (!gpio_value && id == KIWI_DEVICE_ID) {
5027 				plat_priv->plat_dev->dev.of_node = child;
5028 				plat_priv->device_id = KIWI_DEVICE_ID;
5029 				cnss_utils_update_device_type(CNSS_HMT_DEVICE_TYPE);
5030 				cnss_pr_dbg("got node[%s@%d] for device[0x%x]\n",
5031 					    child->name, i, id);
5032 				return 0;
5033 			}
5034 		}
5035 	}
5036 
5037 	return -EINVAL;
5038 }
5039 
5040 static inline u32
5041 cnss_dt_type(struct cnss_plat_data *plat_priv)
5042 {
5043 	bool is_converged_dt = of_property_read_bool(
5044 		plat_priv->plat_dev->dev.of_node, "qcom,converged-dt");
5045 	bool is_multi_wlan_xchg;
5046 
5047 	if (is_converged_dt)
5048 		return CNSS_DTT_CONVERGED;
5049 
5050 	is_multi_wlan_xchg = of_property_read_bool(
5051 		plat_priv->plat_dev->dev.of_node, "qcom,multi-wlan-exchg");
5052 
5053 	if (is_multi_wlan_xchg)
5054 		return CNSS_DTT_MULTIEXCHG;
5055 	return CNSS_DTT_LEGACY;
5056 }
5057 
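/* Power on the WLAN device and initialize the bus, retrying up to
 * POWER_ON_RETRY_MAX_TIMES with an increasing delay unless bus init asks to
 * defer probing.
 */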
5058 static int cnss_wlan_device_init(struct cnss_plat_data *plat_priv)
5059 {
5060 	int ret = 0;
5061 	int retry = 0;
5062 
5063 	if (test_bit(SKIP_DEVICE_BOOT, &plat_priv->ctrl_params.quirks))
5064 		return 0;
5065 
5066 retry:
5067 	ret = cnss_power_on_device(plat_priv, true);
5068 	if (ret)
5069 		goto end;
5070 
5071 	ret = cnss_bus_init(plat_priv);
5072 	if (ret) {
5073 		if ((ret != -EPROBE_DEFER) &&
5074 		    retry++ < POWER_ON_RETRY_MAX_TIMES) {
5075 			cnss_power_off_device(plat_priv);
5076 			cnss_pr_dbg("Retry cnss_bus_init #%d\n", retry);
5077 			msleep(POWER_ON_RETRY_DELAY_MS * retry);
5078 			goto retry;
5079 		}
5080 		goto power_off;
5081 	}
5082 	return 0;
5083 
5084 power_off:
5085 	cnss_power_off_device(plat_priv);
5086 end:
5087 	return ret;
5088 }
5089 
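/* Re-enable WLAN hardware that was held off by the secure HW-disable state:
 * clear the disabled flag, bring the device up if PCI probe has not completed,
 * kick off cold boot calibration once the file system is ready and register
 * any pending driver ops.
 */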
5090 int cnss_wlan_hw_enable(void)
5091 {
5092 	struct cnss_plat_data *plat_priv;
5093 	int ret = 0;
5094 
5095 	if (cnss_is_dual_wlan_enabled())
5096 		plat_priv = cnss_get_first_plat_priv(NULL);
5097 	else
5098 		plat_priv = cnss_get_plat_priv(NULL);
5099 
5100 	if (!plat_priv)
5101 		return -ENODEV;
5102 
5103 	clear_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state);
5104 
5105 	if (test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state))
5106 		goto register_driver;
5107 	ret = cnss_wlan_device_init(plat_priv);
5108 	if (ret) {
5109 		if (!test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state))
5110 			CNSS_ASSERT(0);
5111 		return ret;
5112 	}
5113 
5114 	if (test_bit(CNSS_FS_READY, &plat_priv->driver_state))
5115 		cnss_driver_event_post(plat_priv,
5116 				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START,
5117 				       0, NULL);
5118 
5119 register_driver:
5120 	if (plat_priv->driver_ops)
5121 		ret = cnss_wlan_register_driver(plat_priv->driver_ops);
5122 
5123 	return ret;
5124 }
5125 EXPORT_SYMBOL(cnss_wlan_hw_enable);
5126 
5127 int cnss_set_wfc_mode(struct device *dev, struct cnss_wfc_cfg cfg)
5128 {
5129 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
5130 	int ret = 0;
5131 
5132 	if (!plat_priv)
5133 		return -ENODEV;
5134 
5135 	/* If IMS server is connected, return success without QMI send */
5136 	if (test_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state)) {
5137 		cnss_pr_dbg("Ignore host request as IMS server is connected");
5138 		return ret;
5139 	}
5140 
5141 	ret = cnss_wlfw_send_host_wfc_call_status(plat_priv, cfg);
5142 
5143 	return ret;
5144 }
5145 EXPORT_SYMBOL(cnss_set_wfc_mode);
5146 
5147 static int cnss_tcdev_get_max_state(struct thermal_cooling_device *tcdev,
5148 				    unsigned long *thermal_state)
5149 {
5150 	struct cnss_thermal_cdev *cnss_tcdev = NULL;
5151 
5152 	if (!tcdev || !tcdev->devdata) {
5153 		cnss_pr_err("tcdev or tcdev->devdata is null!\n");
5154 		return -EINVAL;
5155 	}
5156 
5157 	cnss_tcdev = tcdev->devdata;
5158 	*thermal_state = cnss_tcdev->max_thermal_state;
5159 
5160 	return 0;
5161 }
5162 
5163 static int cnss_tcdev_get_cur_state(struct thermal_cooling_device *tcdev,
5164 				    unsigned long *thermal_state)
5165 {
5166 	struct cnss_thermal_cdev *cnss_tcdev = NULL;
5167 
5168 	if (!tcdev || !tcdev->devdata) {
5169 		cnss_pr_err("tcdev or tcdev->devdata is null!\n");
5170 		return -EINVAL;
5171 	}
5172 
5173 	cnss_tcdev = tcdev->devdata;
5174 	*thermal_state = cnss_tcdev->curr_thermal_state;
5175 
5176 	return 0;
5177 }
5178 
5179 static int cnss_tcdev_set_cur_state(struct thermal_cooling_device *tcdev,
5180 				    unsigned long thermal_state)
5181 {
5182 	struct cnss_thermal_cdev *cnss_tcdev = NULL;
5183 	struct cnss_plat_data *plat_priv =  cnss_get_plat_priv(NULL);
5184 	int ret = 0;
5185 
5186 	if (!tcdev || !tcdev->devdata) {
5187 		cnss_pr_err("tcdev or tcdev->devdata is null!\n");
5188 		return -EINVAL;
5189 	}
5190 
5191 	cnss_tcdev = tcdev->devdata;
5192 
5193 	if (thermal_state > cnss_tcdev->max_thermal_state)
5194 		return -EINVAL;
5195 
5196 	cnss_pr_vdbg("Cooling device set current state: %ld, for cdev id %d\n",
5197 		     thermal_state, cnss_tcdev->tcdev_id);
5198 
5199 	mutex_lock(&plat_priv->tcdev_lock);
5200 	ret = cnss_bus_set_therm_cdev_state(plat_priv,
5201 					    thermal_state,
5202 					    cnss_tcdev->tcdev_id);
5203 	if (!ret)
5204 		cnss_tcdev->curr_thermal_state = thermal_state;
5205 	mutex_unlock(&plat_priv->tcdev_lock);
5206 	if (ret) {
5207 		cnss_pr_err("Setting Current Thermal State Failed: %d, for cdev id %d\n",
5208 			    ret, cnss_tcdev->tcdev_id);
5209 		return ret;
5210 	}
5211 
5212 	return 0;
5213 }
5214 
5215 static struct thermal_cooling_device_ops cnss_cooling_ops = {
5216 	.get_max_state = cnss_tcdev_get_max_state,
5217 	.get_cur_state = cnss_tcdev_get_cur_state,
5218 	.set_cur_state = cnss_tcdev_set_cur_state,
5219 };
5220 
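/* Register a WLAN cooling device "qcom,cnss_cdev<id>" with the thermal
 * framework, provided the matching device tree node advertises
 * #cooling-cells, and track it on the platform's cooling device list.
 */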
5221 int cnss_thermal_cdev_register(struct device *dev, unsigned long max_state,
5222 			       int tcdev_id)
5223 {
5224 	struct cnss_plat_data *priv = cnss_get_plat_priv(NULL);
5225 	struct cnss_thermal_cdev *cnss_tcdev = NULL;
5226 	char cdev_node_name[THERMAL_NAME_LENGTH] = "";
5227 	struct device_node *dev_node;
5228 	int ret = 0;
5229 
5230 	if (!priv) {
5231 		cnss_pr_err("Platform driver is not initialized!\n");
5232 		return -ENODEV;
5233 	}
5234 
5235 	cnss_tcdev = kzalloc(sizeof(*cnss_tcdev), GFP_KERNEL);
5236 	if (!cnss_tcdev) {
5237 		cnss_pr_err("Failed to allocate cnss_tcdev object!\n");
5238 		return -ENOMEM;
5239 	}
5240 
5241 	cnss_tcdev->tcdev_id = tcdev_id;
5242 	cnss_tcdev->max_thermal_state = max_state;
5243 
5244 	snprintf(cdev_node_name, THERMAL_NAME_LENGTH,
5245 		 "qcom,cnss_cdev%d", tcdev_id);
5246 
5247 	dev_node = of_find_node_by_name(NULL, cdev_node_name);
5248 	if (!dev_node) {
5249 		cnss_pr_err("Failed to get cooling device node\n");
5250 		kfree(cnss_tcdev);
5251 		return -EINVAL;
5252 	}
5253 
5254 	cnss_pr_dbg("tcdev node->name=%s\n", dev_node->name);
5255 
5256 	if (of_find_property(dev_node, "#cooling-cells", NULL)) {
5257 		cnss_tcdev->tcdev = thermal_of_cooling_device_register(dev_node,
5258 								       cdev_node_name,
5259 								       cnss_tcdev,
5260 								       &cnss_cooling_ops);
5261 		if (IS_ERR_OR_NULL(cnss_tcdev->tcdev)) {
5262 			ret = PTR_ERR(cnss_tcdev->tcdev);
5263 			cnss_pr_err("Cooling device register failed: %d, for cdev id %d\n",
5264 				    ret, cnss_tcdev->tcdev_id);
5265 			kfree(cnss_tcdev);
5266 		} else {
5267 			cnss_pr_dbg("Cooling device registered for cdev id %d",
5268 				    cnss_tcdev->tcdev_id);
5269 			mutex_lock(&priv->tcdev_lock);
5270 			list_add(&cnss_tcdev->tcdev_list,
5271 				 &priv->cnss_tcdev_list);
5272 			mutex_unlock(&priv->tcdev_lock);
5273 		}
5274 	} else {
5275 		cnss_pr_dbg("Cooling device registration not supported");
5276 		kfree(cnss_tcdev);
5277 		ret = -EOPNOTSUPP;
5278 	}
5279 
5280 	return ret;
5281 }
5282 EXPORT_SYMBOL(cnss_thermal_cdev_register);
5283 
5284 void cnss_thermal_cdev_unregister(struct device *dev, int tcdev_id)
5285 {
5286 	struct cnss_plat_data *priv = cnss_get_plat_priv(NULL);
5287 	struct cnss_thermal_cdev *cnss_tcdev = NULL;
5288 
5289 	if (!priv) {
5290 		cnss_pr_err("Platform driver is not initialized!\n");
5291 		return;
5292 	}
5293 
5294 	mutex_lock(&priv->tcdev_lock);
5295 	while (!list_empty(&priv->cnss_tcdev_list)) {
5296 		cnss_tcdev = list_first_entry(&priv->cnss_tcdev_list,
5297 					      struct cnss_thermal_cdev,
5298 					      tcdev_list);
5299 		thermal_cooling_device_unregister(cnss_tcdev->tcdev);
5300 		list_del(&cnss_tcdev->tcdev_list);
5301 		kfree(cnss_tcdev);
5302 	}
5303 	mutex_unlock(&priv->tcdev_lock);
5304 }
5305 EXPORT_SYMBOL(cnss_thermal_cdev_unregister);
5306 
5307 int cnss_get_curr_therm_cdev_state(struct device *dev,
5308 				   unsigned long *thermal_state,
5309 				   int tcdev_id)
5310 {
5311 	struct cnss_plat_data *priv = cnss_get_plat_priv(NULL);
5312 	struct cnss_thermal_cdev *cnss_tcdev = NULL;
5313 
5314 	if (!priv) {
5315 		cnss_pr_err("Platform driver is not initialized!\n");
5316 		return -ENODEV;
5317 	}
5318 
5319 	mutex_lock(&priv->tcdev_lock);
5320 	list_for_each_entry(cnss_tcdev, &priv->cnss_tcdev_list, tcdev_list) {
5321 		if (cnss_tcdev->tcdev_id != tcdev_id)
5322 			continue;
5323 
5324 		*thermal_state = cnss_tcdev->curr_thermal_state;
5325 		mutex_unlock(&priv->tcdev_lock);
5326 		cnss_pr_dbg("Cooling device current state: %ld, for cdev id %d",
5327 			    cnss_tcdev->curr_thermal_state, tcdev_id);
5328 		return 0;
5329 	}
5330 	mutex_unlock(&priv->tcdev_lock);
5331 	cnss_pr_dbg("Cooling device ID not found: %d", tcdev_id);
5332 	return -EINVAL;
5333 }
5334 EXPORT_SYMBOL(cnss_get_curr_therm_cdev_state);
5335 
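/* Platform probe: match the device tree node, resolve the per-chip config
 * node for converged DTs, set up resources, esoc, bus scaling, sysfs, the
 * event workqueue, DMS, debugfs and misc infrastructure, then power on and
 * initialize the bus unless WLAN hardware is disabled.
 */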
5336 static int cnss_probe(struct platform_device *plat_dev)
5337 {
5338 	int ret = 0;
5339 	struct cnss_plat_data *plat_priv;
5340 	const struct of_device_id *of_id;
5341 	const struct platform_device_id *device_id;
5342 
5343 	if (cnss_get_plat_priv(plat_dev)) {
5344 		cnss_pr_err("Driver is already initialized!\n");
5345 		ret = -EEXIST;
5346 		goto out;
5347 	}
5348 
5349 	ret = cnss_plat_env_available();
5350 	if (ret)
5351 		goto out;
5352 
5353 	of_id = of_match_device(cnss_of_match_table, &plat_dev->dev);
5354 	if (!of_id || !of_id->data) {
5355 		cnss_pr_err("Failed to find of match device!\n");
5356 		ret = -ENODEV;
5357 		goto out;
5358 	}
5359 
5360 	device_id = of_id->data;
5361 
5362 	plat_priv = devm_kzalloc(&plat_dev->dev, sizeof(*plat_priv),
5363 				 GFP_KERNEL);
5364 	if (!plat_priv) {
5365 		ret = -ENOMEM;
5366 		goto out;
5367 	}
5368 
5369 	plat_priv->plat_dev = plat_dev;
5370 	plat_priv->dev_node = NULL;
5371 	plat_priv->device_id = device_id->driver_data;
5372 	plat_priv->dt_type = cnss_dt_type(plat_priv);
5373 	cnss_pr_dbg("Probing platform driver from dt type: %d\n",
5374 		    plat_priv->dt_type);
5375 
5376 	plat_priv->use_fw_path_with_prefix =
5377 		cnss_use_fw_path_with_prefix(plat_priv);
5378 
5379 	ret = cnss_get_dev_cfg_node(plat_priv);
5380 	if (ret) {
5381 		cnss_pr_err("Failed to get device cfg node, err = %d\n", ret);
5382 		goto reset_plat_dev;
5383 	}
5384 
5385 	cnss_initialize_mem_pool(plat_priv->device_id);
5386 
5387 	ret = cnss_get_pld_bus_ops_name(plat_priv);
5388 	if (ret)
5389 		cnss_pr_vdbg("Failed to find bus ops name, err = %d\n",
5390 			     ret);
5391 
5392 	ret = cnss_get_rc_num(plat_priv);
5393 
5394 	if (ret)
5395 		cnss_pr_err("Failed to find PCIe RC number, err = %d\n", ret);
5396 
5397 	cnss_pr_dbg("rc_num=%d\n", plat_priv->rc_num);
5398 
5399 	plat_priv->bus_type = cnss_get_bus_type(plat_priv);
5400 	plat_priv->use_nv_mac = cnss_use_nv_mac(plat_priv);
5401 	plat_priv->driver_mode = CNSS_DRIVER_MODE_MAX;
5402 	cnss_set_plat_priv(plat_dev, plat_priv);
5403 	cnss_set_device_name(plat_priv);
5404 	platform_set_drvdata(plat_dev, plat_priv);
5405 	INIT_LIST_HEAD(&plat_priv->vreg_list);
5406 	INIT_LIST_HEAD(&plat_priv->clk_list);
5407 
5408 	cnss_get_pm_domain_info(plat_priv);
5409 	cnss_get_wlaon_pwr_ctrl_info(plat_priv);
5410 	cnss_power_misc_params_init(plat_priv);
5411 	cnss_get_tcs_info(plat_priv);
5412 	cnss_get_cpr_info(plat_priv);
5413 	cnss_aop_interface_init(plat_priv);
5414 	cnss_init_control_params(plat_priv);
5415 
5416 	ret = cnss_get_resources(plat_priv);
5417 	if (ret)
5418 		goto reset_ctx;
5419 
5420 	ret = cnss_register_esoc(plat_priv);
5421 	if (ret)
5422 		goto free_res;
5423 
5424 	ret = cnss_register_bus_scale(plat_priv);
5425 	if (ret)
5426 		goto unreg_esoc;
5427 
5428 	ret = cnss_create_sysfs(plat_priv);
5429 	if (ret)
5430 		goto unreg_bus_scale;
5431 
5432 	ret = cnss_event_work_init(plat_priv);
5433 	if (ret)
5434 		goto remove_sysfs;
5435 
5436 	ret = cnss_dms_init(plat_priv);
5437 	if (ret)
5438 		goto deinit_event_work;
5439 
5440 	ret = cnss_debugfs_create(plat_priv);
5441 	if (ret)
5442 		goto deinit_dms;
5443 
5444 	ret = cnss_misc_init(plat_priv);
5445 	if (ret)
5446 		goto destroy_debugfs;
5447 
5448 	ret = cnss_wlan_hw_disable_check(plat_priv);
5449 	if (ret)
5450 		goto deinit_misc;
5451 
5452 	/* Make sure all platform-related initialization is done
5453 	 * before device power on and bus init.
5454 	 */
5455 	if (!test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state)) {
5456 		ret = cnss_wlan_device_init(plat_priv);
5457 		if (ret)
5458 			goto deinit_misc;
5459 	} else {
5460 		cnss_pr_info("WLAN HW Disabled. Defer PCI enumeration\n");
5461 	}
5462 	cnss_register_coex_service(plat_priv);
5463 	cnss_register_ims_service(plat_priv);
5464 
5465 	mutex_init(&plat_priv->tcdev_lock);
5466 	INIT_LIST_HEAD(&plat_priv->cnss_tcdev_list);
5467 
5468 	cnss_pr_info("Platform driver probed successfully.\n");
5469 
5470 	return 0;
5471 
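	/* Error unwind: the labels below tear down in reverse order of the
	 * init sequence above.
	 */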
5472 deinit_misc:
5473 	cnss_misc_deinit(plat_priv);
5474 destroy_debugfs:
5475 	cnss_debugfs_destroy(plat_priv);
5476 deinit_dms:
5477 	cnss_dms_deinit(plat_priv);
5478 deinit_event_work:
5479 	cnss_event_work_deinit(plat_priv);
5480 remove_sysfs:
5481 	cnss_remove_sysfs(plat_priv);
5482 unreg_bus_scale:
5483 	cnss_unregister_bus_scale(plat_priv);
5484 unreg_esoc:
5485 	cnss_unregister_esoc(plat_priv);
5486 free_res:
5487 	cnss_put_resources(plat_priv);
5488 reset_ctx:
5489 	cnss_aop_interface_deinit(plat_priv);
5490 	platform_set_drvdata(plat_dev, NULL);
5491 	cnss_deinitialize_mem_pool();
5492 reset_plat_dev:
5493 	cnss_clear_plat_priv(plat_priv);
5494 out:
5495 	return ret;
5496 }
5497 
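/*
 * cnss_remove() - Platform driver remove
 *
 * Releases what cnss_probe() set up, in roughly the reverse order, and then
 * clears the platform driver data.
 */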
5498 static int cnss_remove(struct platform_device *plat_dev)
5499 {
5500 	struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);
5501 
5502 	plat_priv->audio_iommu_domain = NULL;
5503 	cnss_genl_exit();
5504 	cnss_unregister_ims_service(plat_priv);
5505 	cnss_unregister_coex_service(plat_priv);
5506 	cnss_bus_deinit(plat_priv);
5507 	cnss_misc_deinit(plat_priv);
5508 	cnss_debugfs_destroy(plat_priv);
5509 	cnss_dms_deinit(plat_priv);
5510 	cnss_qmi_deinit(plat_priv);
5511 	cnss_event_work_deinit(plat_priv);
5512 	cnss_cancel_dms_work();
5513 	cnss_remove_sysfs(plat_priv);
5514 	cnss_unregister_bus_scale(plat_priv);
5515 	cnss_unregister_esoc(plat_priv);
5516 	cnss_put_resources(plat_priv);
5517 	cnss_aop_interface_deinit(plat_priv);
5518 	cnss_deinitialize_mem_pool();
5519 	platform_set_drvdata(plat_dev, NULL);
5520 	cnss_clear_plat_priv(plat_priv);
5521 
5522 	return 0;
5523 }
5524 
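/* Asynchronous probing is opted into only when CONFIG_CNSS_ASYNC is set. */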
5525 static struct platform_driver cnss_platform_driver = {
5526 	.probe  = cnss_probe,
5527 	.remove = cnss_remove,
5528 	.driver = {
5529 		.name = "cnss2",
5530 		.of_match_table = cnss_of_match_table,
5531 #ifdef CONFIG_CNSS_ASYNC
5532 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
5533 #endif
5534 	},
5535 };
5536 
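/*
 * cnss_check_compatible_node() - Check for an enabled compatible DT node
 *
 * Scans the device tree for a node matching cnss_of_match_table whose status
 * is not disabled and, if one is found, sets cnss_allow_driver_loading.
 */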
5537 static bool cnss_check_compatible_node(void)
5538 {
5539 	struct device_node *dn = NULL;
5540 
5541 	for_each_matching_node(dn, cnss_of_match_table) {
5542 		if (of_device_is_available(dn)) {
5543 			cnss_allow_driver_loading = true;
5544 			return true;
5545 		}
5546 	}
5547 
5548 	return false;
5549 }
5550 
5551 /**
5552  * cnss_is_valid_dt_node_found - Check if a valid device tree node is present
5553  *
5554  * A valid device tree node is one whose "compatible" property matches an
5555  * entry in the device match table and whose "status" property is not "disabled".
5556  *
5557  * Return: true if a valid device tree node is found, false otherwise
5558  */
5559 static bool cnss_is_valid_dt_node_found(void)
5560 {
5561 	struct device_node *dn = NULL;
5562 
5563 	for_each_matching_node(dn, cnss_of_match_table) {
5564 		if (of_device_is_available(dn))
5565 			break;
5566 	}
5567 
5568 	if (dn)
5569 		return true;
5570 
5571 	return false;
5572 }
5573 
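/*
 * Module init: bail out when no usable CNSS device tree node is present;
 * otherwise bring up the debug infrastructure, register the platform driver,
 * and initialize the CNSS generic netlink interface.
 */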
5574 static int __init cnss_initialize(void)
5575 {
5576 	int ret = 0;
5577 
5578 	if (!cnss_is_valid_dt_node_found())
5579 		return -ENODEV;
5580 
5581 	if (!cnss_check_compatible_node())
5582 		return ret;
5583 
5584 	cnss_debug_init();
5585 	ret = platform_driver_register(&cnss_platform_driver);
5586 	if (ret) {
5587 		cnss_debug_deinit();
		return ret;
	}
5588 
5589 	ret = cnss_genl_init();
5590 	if (ret < 0)
5591 		cnss_pr_err("CNSS genl init failed %d\n", ret);
5592 
5593 	return ret;
5594 }
5595 
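/* Module exit: tear down in reverse order of cnss_initialize(). */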
5596 static void __exit cnss_exit(void)
5597 {
5598 	cnss_genl_exit();
5599 	platform_driver_unregister(&cnss_platform_driver);
5600 	cnss_debug_deinit();
5601 }
5602 
5603 module_init(cnss_initialize);
5604 module_exit(cnss_exit);
5605 
5606 MODULE_LICENSE("GPL v2");
5607 MODULE_DESCRIPTION("CNSS2 Platform Driver");
5608