xref: /wlan-dirver/platform/cnss2/pci.c (revision 87ee900083e70197b9b730006c66e66df9b052a8)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/completion.h>
8 #include <linux/io.h>
9 #include <linux/irq.h>
10 #include <linux/memblock.h>
11 #include <linux/module.h>
12 #include <linux/msi.h>
13 #include <linux/of.h>
14 #include <linux/of_gpio.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/suspend.h>
17 #include <linux/version.h>
18 #include <linux/sched.h>
19 
20 #include "main.h"
21 #include "bus.h"
22 #include "debug.h"
23 #include "pci.h"
24 #include "pci_platform.h"
25 #include "reg.h"
26 
/* PCIe link states tracked in pci_priv->pci_link_state */
#define PCI_LINK_UP			1
#define PCI_LINK_DOWN			0

/* Direction flag for the PCI config-space save/restore helper */
#define SAVE_PCI_CONFIG_SPACE		1
#define RESTORE_PCI_CONFIG_SPACE	0

/* All device register access goes through BAR 0 */
#define PCI_BAR_NUM			0
/* An all-ones readback indicates the endpoint is inaccessible
 * (link down / device gone)
 */
#define PCI_INVALID_READ(val)		((val) == U32_MAX)

/* DMA masks selected per target addressing capability */
#define PCI_DMA_MASK_32_BIT		DMA_BIT_MASK(32)
#define PCI_DMA_MASK_36_BIT		DMA_BIT_MASK(36)
#define PCI_DMA_MASK_64_BIT		DMA_BIT_MASK(64)

/* Device-tree node name and MSI set name used for MHI registration */
#define MHI_NODE_NAME			"qcom,mhi"
#define MHI_MSI_NAME			"MHI"

/* Per-chip firmware sub-directory prefixes and firmware/ucode
 * file names
 */
#define QCA6390_PATH_PREFIX		"qca6390/"
#define QCA6490_PATH_PREFIX		"qca6490/"
#define QCN7605_PATH_PREFIX             "qcn7605/"
#define KIWI_PATH_PREFIX		"kiwi/"
#define MANGO_PATH_PREFIX		"mango/"
#define PEACH_PATH_PREFIX		"peach/"
#define DEFAULT_PHY_M3_FILE_NAME	"m3.bin"
#define DEFAULT_AUX_FILE_NAME		"aux_ucode.elf"
#define DEFAULT_PHY_UCODE_FILE_NAME	"phy_ucode.elf"
#define TME_PATCH_FILE_NAME		"tmel_patch.elf"
#define PHY_UCODE_V2_FILE_NAME		"phy_ucode20.elf"
#define DEFAULT_FW_FILE_NAME		"amss.bin"
#define FW_V2_FILE_NAME			"amss20.bin"
#define FW_V2_FTM_FILE_NAME		"amss20_ftm.bin"
#define DEVICE_MAJOR_VERSION_MASK	0xF

/* MSI set name used for the device-to-host wake interrupt */
#define WAKE_MSI_NAME			"WAKE"

/* Timeouts in milliseconds */
#define DEV_RDDM_TIMEOUT		5000
#define WAKE_EVENT_TIMEOUT		5000

#ifdef CONFIG_CNSS_EMULATION
#define EMULATION_HW			1
#else
#define EMULATION_HW			0
#endif

#define RAMDUMP_SIZE_DEFAULT		0x420000
#define CNSS_256KB_SIZE			0x40000
/* Cookie value scanned for in RDDM to detect a device-initiated dump */
#define DEVICE_RDDM_COOKIE		0xCAFECACE

/* Set once cnss_pci_register_driver() has registered with the PCI core */
static bool cnss_driver_registered;

static DEFINE_SPINLOCK(pci_link_down_lock);
static DEFINE_SPINLOCK(pci_reg_window_lock);
static DEFINE_SPINLOCK(time_sync_lock);

/* NOTE: these expand a local 'plat_priv' variable — only usable inside
 * functions where one is in scope
 */
#define MHI_TIMEOUT_OVERWRITE_MS	(plat_priv->ctrl_params.mhi_timeout)
#define MHI_M2_TIMEOUT_MS		(plat_priv->ctrl_params.mhi_m2_timeout)

/* usleep_range() bounds for WLAON power control shutdown */
#define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US	1000
#define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US	2000

/* usleep_range() bounds and overall poll timeout for device force wake */
#define FORCE_WAKE_DELAY_MIN_US			4000
#define FORCE_WAKE_DELAY_MAX_US			6000
#define FORCE_WAKE_DELAY_TIMEOUT_US		60000

#define REG_RETRY_MAX_TIMES		3

#define MHI_SUSPEND_RETRY_MAX_TIMES		3
#define MHI_SUSPEND_RETRY_DELAY_US		5000

#define BOOT_DEBUG_TIMEOUT_MS			7000

/* Hang event data sits at the tail of the 3 MB (HST) or 2 MB (HSP)
 * firmware memory region
 */
#define HANG_DATA_LENGTH		384
#define HST_HANG_DATA_OFFSET		((3 * 1024 * 1024) - HANG_DATA_LENGTH)
#define HSP_HANG_DATA_OFFSET		((2 * 1024 * 1024) - HANG_DATA_LENGTH)

/* AFC response slot memory layout and auth status values */
#define AFC_SLOT_SIZE                   0x1000
#define AFC_MAX_SLOT                    2
#define AFC_MEM_SIZE                    (AFC_SLOT_SIZE * AFC_MAX_SLOT)
#define AFC_AUTH_STATUS_OFFSET          1
#define AFC_AUTH_SUCCESS                1
#define AFC_AUTH_ERROR                  0
107 
/* Default MHI channel set: 0/1 LOOPBACK, 4/5 DIAG, 20/21 IPCR, plus
 * optional ADSP satellite channels.  Even channel numbers are
 * host-to-device, odd are device-to-host.  All use event ring 1 and
 * .ee_mask = 0x4 (presumably the mission-mode execution environment —
 * confirm against the MHI EE bit definitions).
 */
static const struct mhi_channel_config cnss_mhi_channels[] = {
	{
		.num = 0,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 1,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 4,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 5,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 20,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		/* Inbound IPCR auto-queues receive buffers */
		.num = 21,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = true,
	},
/* All MHI satellite config to be at the end of data struct */
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	{
		.num = 50,
		.name = "ADSP_0",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 51,
		.name = "ADSP_1",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 70,
		.name = "ADSP_2",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 71,
		.name = "ADSP_3",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
#endif
};
253 
/* Genoa (QCN7605) channel set: identical to the default table except
 * IPCR uses channels 16/17 instead of 20/21 and there are no satellite
 * channels.
 */
static const struct mhi_channel_config cnss_mhi_channels_genoa[] = {
	{
		.num = 0,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 1,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 4,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 5,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 16,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		/* Inbound IPCR auto-queues receive buffers */
		.num = 17,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = true,
	},
};
340 
/* MHI event ring configuration.  Non-const on kernels >= 5.12 —
 * presumably because newer MHI core APIs take a mutable pointer;
 * confirm against the target kernel's mhi.h.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0))
static struct mhi_event_config cnss_mhi_events[] = {
#else
static const struct mhi_event_config cnss_mhi_events[] = {
#endif
	/* Ring 0: control events (MHI_ER_CTRL) on MSI vector 1 */
	{
		.num_elements = 32,
		.irq_moderation_ms = 0,
		.irq = 1,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_CTRL,
		.priority = 0,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
	/* Ring 1: transfer events on MSI vector 2 (.data_type left at
	 * its zero default)
	 */
	{
		.num_elements = 256,
		.irq_moderation_ms = 0,
		.irq = 2,
		.mode = MHI_DB_BRST_DISABLE,
		.priority = 1,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
	/* Ring 2: bandwidth-scale request events from the device */
	{
		.num_elements = 32,
		.irq_moderation_ms = 0,
		.irq = 1,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_BW_SCALE,
		.priority = 2,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
#endif
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	/* Ring 3: satellite (offload) events, managed by the client —
	 * must stay last so it can be trimmed (see
	 * CNSS_MHI_SATELLITE_EVT_COUNT)
	 */
	{
		.num_elements = 256,
		.irq_moderation_ms = 0,
		.irq = 2,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_DATA,
		.priority = 1,
		.hardware_event = false,
		.client_managed = true,
		.offload_channel = true,
	},
#endif
};
394 
/* Number of trailing satellite entries in cnss_mhi_channels /
 * cnss_mhi_events; subtracted from ARRAY_SIZE() by the configs that
 * exclude satellite support.
 */
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
#define CNSS_MHI_SATELLITE_CH_CFG_COUNT 4
#define CNSS_MHI_SATELLITE_EVT_COUNT 1
#else
#define CNSS_MHI_SATELLITE_CH_CFG_COUNT 0
#define CNSS_MHI_SATELLITE_EVT_COUNT 0
#endif
402 
/* Default controller config: full channel and event tables, including
 * satellite entries when CONFIG_MHI_SATELLITE is enabled (hence the
 * larger max_channels to cover channel number 71).
 */
static const struct mhi_controller_config cnss_mhi_config_default = {
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	.max_channels = 72,
#else
	.max_channels = 32,
#endif
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels),
	.ch_cfg = cnss_mhi_channels,
	.num_events = ARRAY_SIZE(cnss_mhi_events),
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
};
418 
/* Genoa controller config: its own channel table, and the shared event
 * table with the trailing satellite entry trimmed via num_events (safe
 * because satellite entries are last).  bhie_offset overrides the
 * BHIe register base for this target.
 */
static const struct mhi_controller_config cnss_mhi_config_genoa = {
	.max_channels = 32,
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels_genoa),
	.ch_cfg = cnss_mhi_channels_genoa,
	.num_events = ARRAY_SIZE(cnss_mhi_events) -
		CNSS_MHI_SATELLITE_EVT_COUNT,
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
	.bhie_offset = 0x0324,
};
432 
/* Controller config for targets without satellite support: reuses the
 * default tables but trims the trailing satellite channel/event entries
 * by count (relies on "satellite config at the end" ordering above).
 */
static const struct mhi_controller_config cnss_mhi_config_no_satellite = {
	.max_channels = 32,
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels) -
			CNSS_MHI_SATELLITE_CH_CFG_COUNT,
	.ch_cfg = cnss_mhi_channels,
	.num_events = ARRAY_SIZE(cnss_mhi_events) -
			CNSS_MHI_SATELLITE_EVT_COUNT,
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
};
446 
/* CE source ring registers dumped for debug; NULL-name terminated */
static struct cnss_pci_reg ce_src[] = {
	{ "SRC_RING_BASE_LSB", CE_SRC_RING_BASE_LSB_OFFSET },
	{ "SRC_RING_BASE_MSB", CE_SRC_RING_BASE_MSB_OFFSET },
	{ "SRC_RING_ID", CE_SRC_RING_ID_OFFSET },
	{ "SRC_RING_MISC", CE_SRC_RING_MISC_OFFSET },
	{ "SRC_CTRL", CE_SRC_CTRL_OFFSET },
	{ "SRC_R0_CE_CH_SRC_IS", CE_SRC_R0_CE_CH_SRC_IS_OFFSET },
	{ "SRC_RING_HP", CE_SRC_RING_HP_OFFSET },
	{ "SRC_RING_TP", CE_SRC_RING_TP_OFFSET },
	{ NULL },
};
458 
/* CE destination + status ring registers dumped for debug;
 * NULL-name terminated
 */
static struct cnss_pci_reg ce_dst[] = {
	{ "DEST_RING_BASE_LSB", CE_DEST_RING_BASE_LSB_OFFSET },
	{ "DEST_RING_BASE_MSB", CE_DEST_RING_BASE_MSB_OFFSET },
	{ "DEST_RING_ID", CE_DEST_RING_ID_OFFSET },
	{ "DEST_RING_MISC", CE_DEST_RING_MISC_OFFSET },
	{ "DEST_CTRL", CE_DEST_CTRL_OFFSET },
	{ "CE_CH_DST_IS", CE_CH_DST_IS_OFFSET },
	{ "CE_CH_DEST_CTRL2", CE_CH_DEST_CTRL2_OFFSET },
	{ "DEST_RING_HP", CE_DEST_RING_HP_OFFSET },
	{ "DEST_RING_TP", CE_DEST_RING_TP_OFFSET },
	{ "STATUS_RING_BASE_LSB", CE_STATUS_RING_BASE_LSB_OFFSET },
	{ "STATUS_RING_BASE_MSB", CE_STATUS_RING_BASE_MSB_OFFSET },
	{ "STATUS_RING_ID", CE_STATUS_RING_ID_OFFSET },
	{ "STATUS_RING_MISC", CE_STATUS_RING_MISC_OFFSET },
	{ "STATUS_RING_HP", CE_STATUS_RING_HP_OFFSET },
	{ "STATUS_RING_TP", CE_STATUS_RING_TP_OFFSET },
	{ NULL },
};
477 
/* CE common (per-device) registers dumped for debug; NULL-name
 * terminated
 */
static struct cnss_pci_reg ce_cmn[] = {
	{ "GXI_ERR_INTS", CE_COMMON_GXI_ERR_INTS },
	{ "GXI_ERR_STATS", CE_COMMON_GXI_ERR_STATS },
	{ "GXI_WDOG_STATUS", CE_COMMON_GXI_WDOG_STATUS },
	{ "TARGET_IE_0", CE_COMMON_TARGET_IE_0 },
	{ "TARGET_IE_1", CE_COMMON_TARGET_IE_1 },
	{ NULL },
};
486 
/* QDSS CSR registers dumped for debug; NULL-name terminated */
static struct cnss_pci_reg qdss_csr[] = {
	{ "QDSSCSR_ETRIRQCTRL", QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET },
	{ "QDSSCSR_PRESERVEETF", QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET },
	{ "QDSSCSR_PRESERVEETR0", QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET },
	{ "QDSSCSR_PRESERVEETR1", QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET },
	{ NULL },
};
494 
/* SoC PCIe scratch registers dumped for debug; NULL-name terminated */
static struct cnss_pci_reg pci_scratch[] = {
	{ "PCIE_SCRATCH_0", PCIE_SCRATCH_0_SOC_PCIE_REG },
	{ "PCIE_SCRATCH_1", PCIE_SCRATCH_1_SOC_PCIE_REG },
	{ "PCIE_SCRATCH_2", PCIE_SCRATCH_2_SOC_PCIE_REG },
	{ NULL },
};
501 
/* First field of the structure is the device bit mask. Use
 * enum cnss_pci_reg_mask as reference for the value.
 */
/* QCA6390 WCSS debug register access sequence.  The second field
 * appears to select write (1) vs read (0), with the last field holding
 * the value to write (or 0 for reads) — confirm against
 * struct cnss_misc_reg.  Write entries steer clock/test-bus muxes so
 * the subsequent reads capture meaningful state.
 */
static struct cnss_misc_reg wcss_reg_access_seq[] = {
	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x802},
	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_PLL_MODE, 0},
	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x805},
	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
	{1, 0, QCA6390_WCSS_WFSS_PMM_WFSS_PMM_R0_PMM_CTRL, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_CX_CSR, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_RAW_STAT, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_EN, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_STS, 0},
	{1, 1, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_CTL, 0xD},
	{1, 0, QCA6390_WCSS_PMM_TOP_TESTBUS_STS, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x8},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_STS, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_9, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS1, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS2, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS3, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS4, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS5, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS6, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE1, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE2, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE3, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE4, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE5, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE6, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING1, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING2, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING3, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING4, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING5, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING6, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30040},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30105},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_NOC_CBCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_AHB_CBCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_GDSCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN1_GDSCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN2_GDSCR, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_PMM_INT_CLR, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_STICKY_EN, 0},
};
571 
/* QCA6390 PCIe/PARF/TLMM debug register access sequence (same field
 * layout as wcss_reg_access_seq: mask, write-flag, reg, value).
 * QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4 is read
 * twice, presumably intentionally to observe it at two points in the
 * sequence.
 */
static struct cnss_misc_reg pcie_reg_access_seq[] = {
	{1, 0, QCA6390_PCIE_PCIE_WCSS_STATUS_FOR_DEBUG_LOW_PCIE_LOCAL_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
	{1, 1, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0x18},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_STATUS_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_COMMIT_REPLAY_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_TLMM_GPIO_IN_OUT57, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG57, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS57, 0},
	{1, 0, QCA6390_TLMM_GPIO_IN_OUT59, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG59, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS59, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_LTSSM, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS_1, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_INT_STATUS, 0},
	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_STATUS, 0},
	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_MASK, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_BDF_TO_SID_CFG, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_3, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_CLOCK_RESET_CTRL, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_BASE_ADDR_LOWER, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_CFG, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1SUB, 0},
	{1, 0, QCA6390_PCIE_PCIE_CORE_CONFIG, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L2, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_HIGH, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_LOW, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_HIGH, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_LOW, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_STATUS_REG2, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_STATUS_REG2, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN2_CFG_REG1, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN1_CFG_REG1, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_APS_STATUS_REG1, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_APS_STATUS_REG1, 0},
	{1, 0, QCA6390_PCIE_PCIE_BHI_EXECENV_REG, 0},
};
619 
/* WLAON debug register read sequence.  Device masks 2 and 3 gate which
 * targets each entry applies to (see enum cnss_pci_reg_mask); all
 * entries are reads (write-flag 0).
 */
static struct cnss_misc_reg wlaon_reg_access_seq[] = {
	{3, 0, WLAON_SOC_POWER_CTRL, 0},
	{3, 0, WLAON_SOC_PWR_WDG_BARK_THRSHD, 0},
	{3, 0, WLAON_SOC_PWR_WDG_BITE_THRSHD, 0},
	{3, 0, WLAON_SW_COLD_RESET, 0},
	{3, 0, WLAON_RFA_MEM_SLP_NRET_N_OVERRIDE, 0},
	{3, 0, WLAON_GDSC_DELAY_SETTING, 0},
	{3, 0, WLAON_GDSC_DELAY_SETTING2, 0},
	{3, 0, WLAON_WL_PWR_STATUS_REG, 0},
	{3, 0, WLAON_WL_AON_DBG_CFG_REG, 0},
	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP0_REG, 0},
	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP1_REG, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL0, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL2, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL3, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL4, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL7, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL10, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL11, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL12, 0},
	{2, 0, WLAON_WL_AON_APM_OVERRIDE_REG, 0},
	{2, 0, WLAON_WL_AON_CXPC_REG, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS0, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS1, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS2, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS3, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS4, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS5, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS6, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL1, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL6, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL7, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL3, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL4, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL5, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL8, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL2, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL9, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL1, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL2, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL3, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL4, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL5, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL6, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL7, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL8, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL9, 0},
	{3, 0, WLAON_WCSSAON_CONFIG_REG, 0},
	{3, 0, WLAON_WLAN_OEM_DEBUG_REG, 0},
	{3, 0, WLAON_WLAN_RAM_DUMP_REG, 0},
	{3, 0, WLAON_QDSS_WCSS_REG, 0},
	{3, 0, WLAON_QDSS_WCSS_ACK, 0},
	{3, 0, WLAON_WL_CLK_CNTL_KDF_REG, 0},
	{3, 0, WLAON_WL_CLK_CNTL_PMU_HFRC_REG, 0},
	{3, 0, WLAON_QFPROM_PWR_CTRL_REG, 0},
	{3, 0, WLAON_DLY_CONFIG, 0},
	{3, 0, WLAON_WLAON_Q6_IRQ_REG, 0},
	{3, 0, WLAON_PCIE_INTF_SW_CFG_REG, 0},
	{3, 0, WLAON_PCIE_INTF_STICKY_SW_CFG_REG, 0},
	{3, 0, WLAON_PCIE_INTF_PHY_SW_CFG_REG, 0},
	{3, 0, WLAON_PCIE_INTF_PHY_NOCSR_SW_CFG_REG, 0},
	{3, 0, WLAON_Q6_COOKIE_BIT, 0},
	{3, 0, WLAON_WARM_SW_ENTRY, 0},
	{3, 0, WLAON_RESET_DBG_SW_ENTRY, 0},
	{3, 0, WLAON_WL_PMUNOC_CFG_REG, 0},
	{3, 0, WLAON_RESET_CAUSE_CFG_REG, 0},
	{3, 0, WLAON_SOC_WCSSAON_WAKEUP_IRQ_7_EN_REG, 0},
	{3, 0, WLAON_DEBUG, 0},
	{3, 0, WLAON_SOC_PARAMETERS, 0},
	{3, 0, WLAON_WLPM_SIGNAL, 0},
	{3, 0, WLAON_SOC_RESET_CAUSE_REG, 0},
	{3, 0, WLAON_WAKEUP_PCIE_SOC_REG, 0},
	{3, 0, WLAON_PBL_STACK_CANARY, 0},
	{3, 0, WLAON_MEM_TOT_NUM_GRP_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP0_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP1_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP2_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP3_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP0_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP1_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP2_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP3_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP0_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP1_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP2_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP3_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP0_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP1_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP2_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP3_REG, 0},
	{3, 0, WLAON_MEM_CNT_SEL_REG, 0},
	{3, 0, WLAON_MEM_NO_EXTBHS_REG, 0},
	{3, 0, WLAON_MEM_DEBUG_REG, 0},
	{3, 0, WLAON_MEM_DEBUG_BUS_REG, 0},
	{3, 0, WLAON_MEM_REDUN_CFG_REG, 0},
	{3, 0, WLAON_WL_AON_SPARE2, 0},
	{3, 0, WLAON_VSEL_CFG_FOR_WL_RET_DISABLE_REG, 0},
	{3, 0, WLAON_BTFM_WLAN_IPC_STATUS_REG, 0},
	{3, 0, WLAON_MPM_COUNTER_CHICKEN_BITS, 0},
	{3, 0, WLAON_WLPM_CHICKEN_BITS, 0},
	{3, 0, WLAON_PCIE_PHY_PWR_REG, 0},
	{3, 0, WLAON_WL_CLK_CNTL_PMU_LPO2M_REG, 0},
	{3, 0, WLAON_WL_SS_ROOT_CLK_SWITCH_REG, 0},
	{3, 0, WLAON_POWERCTRL_PMU_REG, 0},
	{3, 0, WLAON_POWERCTRL_MEM_REG, 0},
	{3, 0, WLAON_PCIE_PWR_CTRL_REG, 0},
	{3, 0, WLAON_SOC_PWR_PROFILE_REG, 0},
	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG, 0},
	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG, 0},
	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG, 0},
	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG, 0},
	{3, 0, WLAON_MEM_SVS_CFG_REG, 0},
	{3, 0, WLAON_CMN_AON_MISC_REG, 0},
	{3, 0, WLAON_INTR_STATUS, 0},
	{2, 0, WLAON_INTR_ENABLE, 0},
	{2, 0, WLAON_NOC_DBG_BUS_SEL_REG, 0},
	{2, 0, WLAON_NOC_DBG_BUS_REG, 0},
	{2, 0, WLAON_WL_CTRL_MISC_REG, 0},
	{2, 0, WLAON_DBG_STATUS0, 0},
	{2, 0, WLAON_DBG_STATUS1, 0},
	{2, 0, WLAON_TIMERSYNC_OFFSET_L, 0},
	{2, 0, WLAON_TIMERSYNC_OFFSET_H, 0},
	{2, 0, WLAON_PMU_LDO_SETTLE_REG, 0},
};
753 
/* QCA6390 SYSPM debug register read sequence.  WCSSAON_SR_STATUS is
 * sampled ten times — presumably deliberate repeated sampling to catch
 * the status changing over time; confirm before collapsing.
 */
static struct cnss_misc_reg syspm_reg_access_seq[] = {
	{1, 0, QCA6390_SYSPM_SYSPM_PWR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_DBG_BTFM_AON_REG, 0},
	{1, 0, QCA6390_SYSPM_DBG_BUS_SEL_REG, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
};
769 
/* Shared state for print-rate optimization — presumably one-shot /
 * ratelimit bookkeeping; confirm against struct cnss_print_optimize in
 * debug.h.
 */
static struct cnss_print_optimize print_optimize;

/* Element counts of the register access sequences above */
#define WCSS_REG_SIZE ARRAY_SIZE(wcss_reg_access_seq)
#define PCIE_REG_SIZE ARRAY_SIZE(pcie_reg_access_seq)
#define WLAON_REG_SIZE ARRAY_SIZE(wlaon_reg_access_seq)
#define SYSPM_REG_SIZE ARRAY_SIZE(syspm_reg_access_seq)

/* Forward declarations for helpers defined later in this file */
static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv);
static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev);
static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev);
780 
781 
782 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
/* Dump MHI core debug registers (MHI misc API; compiled in only when
 * CONFIG_MHI_BUS_MISC is enabled).
 */
static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
{
	mhi_debug_reg_dump(pci_priv->mhi_ctrl);
}
787 
/* Dump the device's subsystem failure reason (SFR) via MHI */
static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
{
	mhi_dump_sfr(pci_priv->mhi_ctrl);
}
792 
/* Return true if @cookie (e.g. DEVICE_RDDM_COOKIE) is found in the
 * device's RDDM image, indicating a device-initiated dump.
 */
static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
				      u32 cookie)
{
	return mhi_scan_rddm_cookie(pci_priv->mhi_ctrl, cookie);
}
798 
/* Fast-suspend the MHI stack; @notify_clients selects whether MHI
 * clients receive suspend notifications.  Returns 0 or a negative
 * errno from the MHI core.
 */
static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
				    bool notify_clients)
{
	return mhi_pm_fast_suspend(pci_priv->mhi_ctrl, notify_clients);
}
804 
/* Counterpart of cnss_mhi_pm_fast_suspend().  Returns 0 or a negative
 * errno from the MHI core.
 */
static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
				   bool notify_clients)
{
	return mhi_pm_fast_resume(pci_priv->mhi_ctrl, notify_clients);
}
810 
811 static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
812 				       u32 timeout)
813 {
814 	return mhi_set_m2_timeout_ms(pci_priv->mhi_ctrl, timeout);
815 }
816 
/* Atomically take an MHI device vote (bring the device toward M0),
 * polling up to @timeout_us; @in_panic selects panic-safe behavior in
 * the MHI core.  Returns a negative errno on failure.
 */
static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
					   int timeout_us, bool in_panic)
{
	return mhi_device_get_sync_atomic(pci_priv->mhi_ctrl->mhi_dev,
					  timeout_us, in_panic);
}
823 
#ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
/* Ring the doorbell that tells WLAN firmware to stop trace collection
 * (used from the SMMU fault path below).  Returns a negative errno on
 * failure.
 */
static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv)
{
	return mhi_host_notify_db_disable_trace(pci_priv->mhi_ctrl);
}
#endif
830 
/* Register @cb with the MHI core to be invoked on device bandwidth
 * scale requests (MHI_ER_BW_SCALE events).
 */
static void
cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
				    int (*cb)(struct mhi_controller *mhi_ctrl,
					      struct mhi_link_info *link_info))
{
	mhi_controller_set_bw_scale_cb(pci_priv->mhi_ctrl, cb);
}
838 
/* Force an MHI reset of the device.  Returns a negative errno on
 * failure.
 */
static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
{
	return mhi_force_reset(pci_priv->mhi_ctrl);
}
843 
844 void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
845 				  phys_addr_t base)
846 {
847 	return mhi_controller_set_base(pci_priv->mhi_ctrl, base);
848 }
849 #else
/* Stub implementations used when CONFIG_MHI_BUS_MISC is not enabled:
 * debug dumps become no-ops and operations that would need the misc
 * API report -EOPNOTSUPP.
 */
static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
{
}

static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
{
}

static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
				      u32 cookie)
{
	return false;
}

static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
				    bool notify_clients)
{
	return -EOPNOTSUPP;
}

static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
				   bool notify_clients)
{
	return -EOPNOTSUPP;
}

static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
				       u32 timeout)
{
}

static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
					   int timeout_us, bool in_panic)
{
	return -EOPNOTSUPP;
}

#ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv)
{
	return -EOPNOTSUPP;
}
#endif

static void
cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
				    int (*cb)(struct mhi_controller *mhi_ctrl,
					      struct mhi_link_info *link_info))
{
}

static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
{
	return -EOPNOTSUPP;
}

void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
				  phys_addr_t base)
{
}
910 #endif /* CONFIG_MHI_BUS_MISC */
911 
912 #ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
913 #define CNSS_MHI_WAKE_TIMEOUT		500000
914 
915 static void cnss_record_smmu_fault_timestamp(struct cnss_pci_data *pci_priv,
916 					     enum cnss_smmu_fault_time id)
917 {
918 	if (id >= SMMU_CB_MAX)
919 		return;
920 
921 	pci_priv->smmu_fault_timestamp[id] = sched_clock();
922 }
923 
/* SMMU fault handler invoked in atomic (IRQ) context.
 *
 * Forces the device to wake (M0) via an atomic MHI get so registers stay
 * accessible, then asks the WLAN firmware to stop trace collection so the
 * faulting state is preserved. Each checkpoint is timestamped for
 * post-mortem analysis.
 */
static void cnss_pci_smmu_fault_handler_irq(struct iommu_domain *domain,
					    void *handler_token)
{
	struct cnss_pci_data *pci_priv = handler_token;
	int ret = 0;

	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_ENTRY);
	ret = cnss_mhi_device_get_sync_atomic(pci_priv,
					      CNSS_MHI_WAKE_TIMEOUT, true);
	if (ret < 0) {
		cnss_pr_err("Failed to bring mhi in M0 state, ret %d\n", ret);
		return;
	}

	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_DOORBELL_RING);
	ret = cnss_mhi_host_notify_db_disable_trace(pci_priv);
	if (ret < 0)
		cnss_pr_err("Fail to notify wlan fw to stop trace collection, ret %d\n", ret);

	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_EXIT);
}
945 
/* Route SMMU faults on this device's IOMMU domain to
 * cnss_pci_smmu_fault_handler_irq(), passing pci_priv as the token.
 */
void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
{
	qcom_iommu_set_fault_handler_irq(pci_priv->iommu_domain,
					 cnss_pci_smmu_fault_handler_irq, pci_priv);
}
951 #else
/* No-op stub when CONFIG_CNSS2_SMMU_DB_SUPPORT is disabled. */
void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
{
}
955 #endif
956 
957 int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv)
958 {
959 	u16 device_id;
960 
961 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
962 		cnss_pr_dbg("%ps: PCIe link is in suspend state\n",
963 			    (void *)_RET_IP_);
964 		return -EACCES;
965 	}
966 
967 	if (pci_priv->pci_link_down_ind) {
968 		cnss_pr_err("%ps: PCIe link is down\n", (void *)_RET_IP_);
969 		return -EIO;
970 	}
971 
972 	pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id);
973 	if (device_id != pci_priv->device_id)  {
974 		cnss_fatal_err("%ps: PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
975 			       (void *)_RET_IP_, device_id,
976 			       pci_priv->device_id);
977 		return -EIO;
978 	}
979 
980 	return 0;
981 }
982 
/* Program the PCIe remap window so a windowed register at @offset becomes
 * reachable through the fixed WINDOW_START aperture of the BAR.
 *
 * Callers must hold pci_reg_window_lock except in panic context (see
 * cnss_pci_reg_read()/cnss_pci_reg_write()).
 */
static void cnss_pci_select_window(struct cnss_pci_data *pci_priv, u32 offset)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	u32 window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
	u32 window_enable = WINDOW_ENABLE_BIT | window;
	u32 val;

	/* PEACH exposes the remap control register at a different offset. */
	if (plat_priv->device_id == PEACH_DEVICE_ID) {
		writel_relaxed(window_enable, pci_priv->bar +
			       PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
	} else {
		writel_relaxed(window_enable, pci_priv->bar +
			       QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
	}

	/* NOTE(review): for QCN7605 the expected value is recomputed with
	 * QCN7605_WINDOW_ENABLE_BIT only AFTER the write above, so the
	 * readback below compares against a value that was never written.
	 * Presumably the hardware reflects that bit on read -- TODO
	 * confirm against QCN7605 register documentation.
	 */
	if (plat_priv->device_id == QCN7605_DEVICE_ID)
		window_enable = QCN7605_WINDOW_ENABLE_BIT | window;

	/* Only log when the window actually changes to keep logs quiet. */
	if (window != pci_priv->remap_window) {
		pci_priv->remap_window = window;
		cnss_pr_dbg("Config PCIe remap window register to 0x%x\n",
			    window_enable);
	}

	/* Read it back to make sure the write has taken effect */
	if (plat_priv->device_id == PEACH_DEVICE_ID) {
		val = readl_relaxed(pci_priv->bar +
			PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
	} else {
		val = readl_relaxed(pci_priv->bar +
			QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
	}
	if (val != window_enable) {
		cnss_pr_err("Failed to config window register to 0x%x, current value: 0x%x\n",
			    window_enable, val);
		/* Assert only when the link itself looks healthy and we
		 * are not already panicking.
		 */
		if (!cnss_pci_check_link_status(pci_priv) &&
		    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
			CNSS_ASSERT(0);
	}
}
1024 
/* Read a 32-bit device register at @offset into @val.
 *
 * Offsets below MAX_UNWINDOWED_ADDRESS (and all QCA6174 offsets) map
 * directly into the BAR; larger offsets go through the remap window,
 * which must be selected first under pci_reg_window_lock.
 *
 * Return: 0 on success, or the link-status error code.
 */
static int cnss_pci_reg_read(struct cnss_pci_data *pci_priv,
			     u32 offset, u32 *val)
{
	int ret;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	/* Link-status check is skipped in atomic context. */
	if (!in_interrupt() && !irqs_disabled()) {
		ret = cnss_pci_check_link_status(pci_priv);
		if (ret)
			return ret;
	}

	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		*val = readl_relaxed(pci_priv->bar + offset);
		return 0;
	}

	/* If in panic, assumption is kernel panic handler will hold all threads
	 * and interrupts. Further pci_reg_window_lock could be held before
	 * panic. So only lock during normal operation.
	 */
	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
		cnss_pci_select_window(pci_priv, offset);
		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
				     (offset & WINDOW_RANGE_MASK));
	} else {
		spin_lock_bh(&pci_reg_window_lock);
		cnss_pci_select_window(pci_priv, offset);
		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
				     (offset & WINDOW_RANGE_MASK));
		spin_unlock_bh(&pci_reg_window_lock);
	}

	return 0;
}
1061 
/* Write @val to the 32-bit device register at @offset.
 *
 * Mirrors cnss_pci_reg_read(): direct BAR access for unwindowed offsets
 * (and all QCA6174 offsets), remap-window access under
 * pci_reg_window_lock otherwise.
 *
 * Return: 0 on success, or the link-status error code.
 */
static int cnss_pci_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
			      u32 val)
{
	int ret;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	/* Link-status check is skipped in atomic context. */
	if (!in_interrupt() && !irqs_disabled()) {
		ret = cnss_pci_check_link_status(pci_priv);
		if (ret)
			return ret;
	}

	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		writel_relaxed(val, pci_priv->bar + offset);
		return 0;
	}

	/* Same constraint as PCI register read in panic */
	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
		cnss_pci_select_window(pci_priv, offset);
		writel_relaxed(val, pci_priv->bar + WINDOW_START +
			  (offset & WINDOW_RANGE_MASK));
	} else {
		spin_lock_bh(&pci_reg_window_lock);
		cnss_pci_select_window(pci_priv, offset);
		writel_relaxed(val, pci_priv->bar + WINDOW_START +
			  (offset & WINDOW_RANGE_MASK));
		spin_unlock_bh(&pci_reg_window_lock);
	}

	return 0;
}
1095 
/* Synchronously request MHI force wake so device registers can be
 * accessed. Pairs with cnss_pci_force_wake_put().
 *
 * Return: 0 on success (wake granted), negative errno otherwise;
 * -EAGAIN is passed through without an error print.
 */
static int cnss_pci_force_wake_get(struct cnss_pci_data *pci_priv)
{
	struct device *dev = &pci_priv->pci_dev->dev;
	int ret;

	ret = cnss_pci_force_wake_request_sync(dev,
					       FORCE_WAKE_DELAY_TIMEOUT_US);
	if (ret) {
		if (ret != -EAGAIN)
			cnss_pr_err("Failed to request force wake\n");
		return ret;
	}

	/* If device's M1 state-change event races here, it can be ignored,
	 * as the device is expected to immediately move from M2 to M0
	 * without entering low power state.
	 */
	/* NOTE(review): the explicit '!= true' (rather than '!') looks
	 * deliberate in case the helper can return a negative error code
	 * as well -- confirm cnss_pci_is_device_awake()'s return contract
	 * before simplifying.
	 */
	if (cnss_pci_is_device_awake(dev) != true)
		cnss_pr_warn("MHI not in M0, while reg still accessible\n");

	return 0;
}
1118 
1119 static int cnss_pci_force_wake_put(struct cnss_pci_data *pci_priv)
1120 {
1121 	struct device *dev = &pci_priv->pci_dev->dev;
1122 	int ret;
1123 
1124 	ret = cnss_pci_force_wake_release(dev);
1125 	if (ret && ret != -EAGAIN)
1126 		cnss_pr_err("Failed to release force wake\n");
1127 
1128 	return ret;
1129 }
1130 
1131 #if IS_ENABLED(CONFIG_INTERCONNECT)
1132 /**
1133  * cnss_setup_bus_bandwidth() - Setup interconnect vote for given bandwidth
1134  * @plat_priv: Platform private data struct
1135  * @bw: bandwidth
1136  * @save: toggle flag to save bandwidth to current_bw_vote
1137  *
1138  * Setup bandwidth votes for configured interconnect paths
1139  *
1140  * Return: 0 for success
1141  */
static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
				    u32 bw, bool save)
{
	int ret = 0;
	struct cnss_bus_bw_info *bus_bw_info;

	/* No interconnect paths registered for this platform. */
	if (!plat_priv->icc.path_count)
		return -EOPNOTSUPP;

	/* @bw is an index into each path's cfg_table. */
	if (bw >= plat_priv->icc.bus_bw_cfg_count) {
		cnss_pr_err("Invalid bus bandwidth Type: %d", bw);
		return -EINVAL;
	}

	cnss_pr_buf("Bandwidth vote to %d, save %d\n", bw, save);

	/* Apply the vote to every configured ICC path; stop at the first
	 * failure and report which path rejected it.
	 */
	list_for_each_entry(bus_bw_info, &plat_priv->icc.list_head, list) {
		ret = icc_set_bw(bus_bw_info->icc_path,
				 bus_bw_info->cfg_table[bw].avg_bw,
				 bus_bw_info->cfg_table[bw].peak_bw);
		if (ret) {
			cnss_pr_err("Could not set BW Cfg: %d, err = %d ICC Path: %s Val: %d %d\n",
				    bw, ret, bus_bw_info->icc_name,
				    bus_bw_info->cfg_table[bw].avg_bw,
				    bus_bw_info->cfg_table[bw].peak_bw);
			break;
		}
	}
	/* Remember the vote only when every path accepted it. */
	if (ret == 0 && save)
		plat_priv->icc.current_bw_vote = bw;
	return ret;
}
1174 
1175 int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
1176 {
1177 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1178 
1179 	if (!plat_priv)
1180 		return -ENODEV;
1181 
1182 	if (bandwidth < 0)
1183 		return -EINVAL;
1184 
1185 	return cnss_setup_bus_bandwidth(plat_priv, (u32)bandwidth, true);
1186 }
1187 #else
/* Stubs when CONFIG_INTERCONNECT is disabled: bandwidth voting silently
 * succeeds.
 */
static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
				    u32 bw, bool save)
{
	return 0;
}

int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
{
	return 0;
}
1198 #endif
1199 EXPORT_SYMBOL(cnss_request_bus_bandwidth);
1200 
1201 int cnss_pci_debug_reg_read(struct cnss_pci_data *pci_priv, u32 offset,
1202 			    u32 *val, bool raw_access)
1203 {
1204 	int ret = 0;
1205 	bool do_force_wake_put = true;
1206 
1207 	if (raw_access) {
1208 		ret = cnss_pci_reg_read(pci_priv, offset, val);
1209 		goto out;
1210 	}
1211 
1212 	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
1213 	if (ret)
1214 		goto out;
1215 
1216 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
1217 	if (ret < 0)
1218 		goto runtime_pm_put;
1219 
1220 	ret = cnss_pci_force_wake_get(pci_priv);
1221 	if (ret)
1222 		do_force_wake_put = false;
1223 
1224 	ret = cnss_pci_reg_read(pci_priv, offset, val);
1225 	if (ret) {
1226 		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
1227 			    offset, ret);
1228 		goto force_wake_put;
1229 	}
1230 
1231 force_wake_put:
1232 	if (do_force_wake_put)
1233 		cnss_pci_force_wake_put(pci_priv);
1234 runtime_pm_put:
1235 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
1236 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
1237 out:
1238 	return ret;
1239 }
1240 
1241 int cnss_pci_debug_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
1242 			     u32 val, bool raw_access)
1243 {
1244 	int ret = 0;
1245 	bool do_force_wake_put = true;
1246 
1247 	if (raw_access) {
1248 		ret = cnss_pci_reg_write(pci_priv, offset, val);
1249 		goto out;
1250 	}
1251 
1252 	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
1253 	if (ret)
1254 		goto out;
1255 
1256 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
1257 	if (ret < 0)
1258 		goto runtime_pm_put;
1259 
1260 	ret = cnss_pci_force_wake_get(pci_priv);
1261 	if (ret)
1262 		do_force_wake_put = false;
1263 
1264 	ret = cnss_pci_reg_write(pci_priv, offset, val);
1265 	if (ret) {
1266 		cnss_pr_err("Failed to write 0x%x to register offset 0x%x, err = %d\n",
1267 			    val, offset, ret);
1268 		goto force_wake_put;
1269 	}
1270 
1271 force_wake_put:
1272 	if (do_force_wake_put)
1273 		cnss_pci_force_wake_put(pci_priv);
1274 runtime_pm_put:
1275 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
1276 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
1277 out:
1278 	return ret;
1279 }
1280 
1281 static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
1282 {
1283 	struct pci_dev *pci_dev = pci_priv->pci_dev;
1284 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1285 	bool link_down_or_recovery;
1286 
1287 	if (!plat_priv)
1288 		return -ENODEV;
1289 
1290 	link_down_or_recovery = pci_priv->pci_link_down_ind ||
1291 		(test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));
1292 
1293 	if (save) {
1294 		if (link_down_or_recovery) {
1295 			pci_priv->saved_state = NULL;
1296 		} else {
1297 			pci_save_state(pci_dev);
1298 			pci_priv->saved_state = pci_store_saved_state(pci_dev);
1299 		}
1300 	} else {
1301 		if (link_down_or_recovery) {
1302 			pci_load_saved_state(pci_dev, pci_priv->default_state);
1303 			pci_restore_state(pci_dev);
1304 		} else if (pci_priv->saved_state) {
1305 			pci_load_and_free_saved_state(pci_dev,
1306 						      &pci_priv->saved_state);
1307 			pci_restore_state(pci_dev);
1308 		}
1309 	}
1310 
1311 	return 0;
1312 }
1313 
/* KIWI only: read "qcom,target-link-speed" from the PCIe root port's
 * parent DT node into plat_priv->supported_link_speed. On a missing
 * property the cached speed is cleared to 0 and the DT error is returned.
 */
static int cnss_update_supported_link_info(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct pci_dev *root_port;
	struct device_node *root_of_node;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -EINVAL;

	/* Only KIWI consumes this DT hint. */
	if (pci_priv->device_id != KIWI_DEVICE_ID)
		return ret;

	plat_priv = pci_priv->plat_priv;
	root_port = pcie_find_root_port(pci_priv->pci_dev);

	if (!root_port) {
		cnss_pr_err("PCIe root port is null\n");
		return -EINVAL;
	}

	root_of_node = root_port->dev.of_node;
	if (root_of_node && root_of_node->parent) {
		ret = of_property_read_u32(root_of_node->parent,
					   "qcom,target-link-speed",
					   &plat_priv->supported_link_speed);
		if (!ret)
			cnss_pr_dbg("Supported PCIe Link Speed: %d\n",
				    plat_priv->supported_link_speed);
		else
			plat_priv->supported_link_speed = 0;
	}

	return ret;
}
1349 
1350 static int cnss_pci_get_link_status(struct cnss_pci_data *pci_priv)
1351 {
1352 	u16 link_status;
1353 	int ret;
1354 
1355 	ret = pcie_capability_read_word(pci_priv->pci_dev, PCI_EXP_LNKSTA,
1356 					&link_status);
1357 	if (ret)
1358 		return ret;
1359 
1360 	cnss_pr_dbg("Get PCI link status register: %u\n", link_status);
1361 
1362 	pci_priv->def_link_speed = link_status & PCI_EXP_LNKSTA_CLS;
1363 	pci_priv->def_link_width =
1364 		(link_status & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
1365 	pci_priv->cur_link_speed = pci_priv->def_link_speed;
1366 
1367 	cnss_pr_dbg("Default PCI link speed is 0x%x, link width is 0x%x\n",
1368 		    pci_priv->def_link_speed, pci_priv->def_link_width);
1369 
1370 	return 0;
1371 }
1372 
/* Dump every SOC scratch register named in pci_scratch[] for supported
 * targets. Skipped in atomic context and when the link is unusable.
 */
static void cnss_pci_soc_scratch_reg_dump(struct cnss_pci_data *pci_priv)
{
	u32 reg_offset, val;
	int i;

	/* Only these targets expose the scratch register set. */
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return;
	}

	/* Windowed register reads below may take locks; avoid atomic
	 * context.
	 */
	if (in_interrupt() || irqs_disabled())
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pr_dbg("Start to dump SOC Scratch registers\n");

	/* pci_scratch[] is terminated by a NULL name entry. */
	for (i = 0; pci_scratch[i].name; i++) {
		reg_offset = pci_scratch[i].offset;
		if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
			return;
		cnss_pr_dbg("PCIE_SOC_REG_%s = 0x%x\n",
			    pci_scratch[i].name, val);
	}
}
1405 
/* Suspend the PCIe link: stop bus mastering, save config space, disable
 * the device, enter D3hot (except QCA6174) and take the link down.
 *
 * Return: 0 on success or a negative errno; already-suspended is treated
 * as success.
 */
int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
		cnss_pr_info("PCI link is already suspended\n");
		goto out;
	}

	pci_clear_master(pci_priv->pci_dev);

	ret = cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
	if (ret)
		goto out;

	pci_disable_device(pci_priv->pci_dev);

	/* QCA6174 does not use the D3hot transition here. */
	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D3hot);
		if (ret)
			cnss_pr_err("Failed to set D3Hot, err =  %d\n", ret);
	}

	/* Always do PCIe L2 suspend during power off/PCIe link recovery */
	pci_priv->drv_connected_last = 0;

	ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
	if (ret)
		goto out;

	pci_priv->pci_link_state = PCI_LINK_DOWN;

	return 0;
out:
	return ret;
}
1445 
/* Resume the PCIe link: bring the link up, enter D0 (except QCA6174),
 * restore config space, re-enable the device and bus mastering, and clear
 * any pending link-down indication.
 *
 * Return: 0 on success or a negative errno; already-resumed is treated as
 * success.
 */
int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	if (pci_priv->pci_link_state == PCI_LINK_UP) {
		cnss_pr_info("PCI link is already resumed\n");
		goto out;
	}

	ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
	if (ret) {
		/* Callers treat -EAGAIN as "retry link training later". */
		ret = -EAGAIN;
		goto out;
	}

	pci_priv->pci_link_state = PCI_LINK_UP;

	/* QCA6174 does not use the D0 transition here. */
	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D0);
		if (ret) {
			cnss_pr_err("Failed to set D0, err = %d\n", ret);
			goto out;
		}
	}

	ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
	if (ret)
		goto out;

	ret = pci_enable_device(pci_priv->pci_dev);
	if (ret) {
		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
		goto out;
	}

	pci_set_master(pci_priv->pci_dev);

	if (pci_priv->pci_link_down_ind)
		pci_priv->pci_link_down_ind = false;

	return 0;
out:
	return ret;
}
1493 
/* Recover from a PCIe link-down on supported targets by bouncing the link
 * (suspend then resume), then arm the RDDM timer and dump debug/scratch
 * registers so a subsequent RDDM can be collected.
 *
 * Return: 0 on success, -EOPNOTSUPP for unsupported chips, or the link
 * resume error.
 */
int cnss_pci_recover_link_down(struct cnss_pci_data *pci_priv)
{
	int ret;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Always wait here to avoid missing WAKE assert for RDDM
	 * before link recovery
	 */
	msleep(WAKE_EVENT_TIMEOUT);

	/* Suspend failure is logged but recovery still attempts resume. */
	ret = cnss_suspend_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);

	ret = cnss_resume_pci_link(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
		/* No link means the RDDM timer can never fire usefully. */
		del_timer(&pci_priv->dev_rddm_timer);
		return ret;
	}

	mod_timer(&pci_priv->dev_rddm_timer,
		  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));

	cnss_mhi_debug_reg_dump(pci_priv);
	cnss_pci_soc_scratch_reg_dump(pci_priv);

	return 0;
}
1533 
1534 static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
1535 				       enum cnss_bus_event_type type,
1536 				       void *data)
1537 {
1538 	struct cnss_bus_event bus_event;
1539 
1540 	bus_event.etype = type;
1541 	bus_event.event_data = data;
1542 	cnss_pci_call_driver_uevent(pci_priv, CNSS_BUS_EVENT, &bus_event);
1543 }
1544 
/* Central PCIe link-down handling: optionally panic (quirk), latch the
 * link-down indication exactly once under pci_link_down_lock, notify MHI
 * and driver clients, then schedule recovery.
 */
void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	unsigned long flags;

	/* Debug quirk: crash the system immediately on link down. */
	if (test_bit(ENABLE_PCI_LINK_DOWN_PANIC,
		     &plat_priv->ctrl_params.quirks))
		panic("cnss: PCI link is down\n");

	/* Latch pci_link_down_ind atomically so only the first caller
	 * runs the recovery path.
	 */
	spin_lock_irqsave(&pci_link_down_lock, flags);
	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is in progress, ignore\n");
		spin_unlock_irqrestore(&pci_link_down_lock, flags);
		return;
	}
	pci_priv->pci_link_down_ind = true;
	spin_unlock_irqrestore(&pci_link_down_lock, flags);

	if (pci_priv->mhi_ctrl) {
		/* Notify MHI about link down*/
		mhi_report_error(pci_priv->mhi_ctrl);
	}

	if (pci_dev->device == QCA6174_DEVICE_ID)
		disable_irq(pci_dev->irq);

	/* Notify bus related event. Now for all supported chips.
	 * Here PCIe LINK_DOWN notification taken care.
	 * uevent buffer can be extended later, to cover more bus info.
	 */
	cnss_pci_update_link_event(pci_priv, BUS_EVENT_PCI_LINK_DOWN, NULL);

	cnss_fatal_err("PCI link down, schedule recovery\n");
	cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
}
1581 
/* Exported entry for WLAN host drivers to report a detected PCIe link
 * down. Asserts PERST; if that fails, falls back to the generic link-down
 * handling path.
 *
 * Return: 0 when PERST was asserted, -EBUSY when recovery is already in
 * progress, or a negative errno on bad state.
 */
int cnss_pci_link_down(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv = NULL;
	int ret;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -EINVAL;
	}

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return -ENODEV;
	}

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is already in progress\n");
		return -EBUSY;
	}

	/* Opt into self recovery when DRV was connected and the platform
	 * DT enables it.
	 */
	if (pci_priv->drv_connected_last &&
	    of_property_read_bool(plat_priv->plat_dev->dev.of_node,
				  "cnss-enable-self-recovery"))
		plat_priv->ctrl_params.quirks |= BIT(LINK_DOWN_SELF_RECOVERY);

	cnss_pr_err("PCI link down is detected by drivers\n");

	ret = cnss_pci_assert_perst(pci_priv);
	if (ret)
		cnss_pci_handle_linkdown(pci_priv);

	return ret;
}
1618 EXPORT_SYMBOL(cnss_pci_link_down);
1619 
1620 int cnss_pci_get_reg_dump(struct device *dev, uint8_t *buffer, uint32_t len)
1621 {
1622 	struct pci_dev *pci_dev = to_pci_dev(dev);
1623 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1624 
1625 	if (!pci_priv) {
1626 		cnss_pr_err("pci_priv is NULL\n");
1627 		return -ENODEV;
1628 	}
1629 
1630 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
1631 		cnss_pr_dbg("No PCIe reg dump since PCIe is suspended(D3)\n");
1632 		return -EACCES;
1633 	}
1634 
1635 	cnss_pr_dbg("Start to get PCIe reg dump\n");
1636 
1637 	return _cnss_pci_get_reg_dump(pci_priv, buffer, len);
1638 }
1639 EXPORT_SYMBOL(cnss_pci_get_reg_dump);
1640 
1641 int cnss_pcie_is_device_down(struct cnss_pci_data *pci_priv)
1642 {
1643 	struct cnss_plat_data *plat_priv;
1644 
1645 	if (!pci_priv) {
1646 		cnss_pr_err("pci_priv is NULL\n");
1647 		return -ENODEV;
1648 	}
1649 
1650 	plat_priv = pci_priv->plat_priv;
1651 	if (!plat_priv) {
1652 		cnss_pr_err("plat_priv is NULL\n");
1653 		return -ENODEV;
1654 	}
1655 
1656 	return test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) |
1657 		pci_priv->pci_link_down_ind;
1658 }
1659 
/* Device-pointer wrapper around cnss_pcie_is_device_down(). */
int cnss_pci_is_device_down(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return cnss_pcie_is_device_down(cnss_get_pci_priv(pci_dev));
}
1666 EXPORT_SYMBOL(cnss_pci_is_device_down);
1667 
/* Take the global remap-window lock on behalf of an external caller that
 * needs several windowed accesses to be atomic. @dev and @flags are
 * unused; the lock is a BH spinlock.
 */
void cnss_pci_lock_reg_window(struct device *dev, unsigned long *flags)
{
	spin_lock_bh(&pci_reg_window_lock);
}
1672 EXPORT_SYMBOL(cnss_pci_lock_reg_window);
1673 
/* Release the lock taken by cnss_pci_lock_reg_window(). @dev and @flags
 * are unused.
 */
void cnss_pci_unlock_reg_window(struct device *dev, unsigned long *flags)
{
	spin_unlock_bh(&pci_reg_window_lock);
}
1678 EXPORT_SYMBOL(cnss_pci_unlock_reg_window);
1679 
1680 int cnss_get_pci_slot(struct device *dev)
1681 {
1682 	struct pci_dev *pci_dev = to_pci_dev(dev);
1683 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1684 	struct cnss_plat_data *plat_priv = NULL;
1685 
1686 	if (!pci_priv) {
1687 		cnss_pr_err("pci_priv is NULL\n");
1688 		return -EINVAL;
1689 	}
1690 
1691 	plat_priv = pci_priv->plat_priv;
1692 	if (!plat_priv) {
1693 		cnss_pr_err("plat_priv is NULL\n");
1694 		return -ENODEV;
1695 	}
1696 
1697 	return plat_priv->rc_num;
1698 }
1699 EXPORT_SYMBOL(cnss_get_pci_slot);
1700 
1701 /**
1702  * cnss_pci_dump_bl_sram_mem - Dump WLAN device bootloader debug log
1703  * @pci_priv: driver PCI bus context pointer
1704  *
1705  * Dump primary and secondary bootloader debug log data. For SBL check the
1706  * log struct address and size for validity.
1707  *
1708  * Return: None
1709  */
static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
{
	enum mhi_ee_type ee;
	u32 mem_addr, val, pbl_log_max_size, sbl_log_max_size;
	u32 pbl_log_sram_start;
	u32 pbl_stage, sbl_log_start, sbl_log_size;
	u32 pbl_wlan_boot_cfg, pbl_bootstrap_status;
	u32 pbl_bootstrap_status_reg = PBL_BOOTSTRAP_STATUS;
	u32 sbl_log_def_start = SRAM_START;
	u32 sbl_log_def_end = SRAM_END;
	int i;

	/* Select the per-target PBL log SRAM window and, where it differs,
	 * the bootstrap status register.
	 */
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		pbl_log_sram_start = QCA6390_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = QCA6390_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case QCA6490_DEVICE_ID:
		pbl_log_sram_start = QCA6490_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = QCA6490_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case KIWI_DEVICE_ID:
		pbl_bootstrap_status_reg = KIWI_PBL_BOOTSTRAP_STATUS;
		pbl_log_sram_start = KIWI_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = KIWI_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = KIWI_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case MANGO_DEVICE_ID:
		pbl_bootstrap_status_reg = MANGO_PBL_BOOTSTRAP_STATUS;
		pbl_log_sram_start = MANGO_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = MANGO_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = MANGO_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case PEACH_DEVICE_ID:
		pbl_bootstrap_status_reg = PEACH_PBL_BOOTSTRAP_STATUS;
		pbl_log_sram_start = PEACH_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = PEACH_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = PEACH_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	default:
		return;
	}

	if (cnss_pci_check_link_status(pci_priv))
		return;

	/* Snapshot the boot-stage and SBL log descriptor registers. */
	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
	cnss_pci_reg_read(pci_priv, pbl_bootstrap_status_reg,
			  &pbl_bootstrap_status);
	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size:0x%08x\n",
		    pbl_stage, sbl_log_start, sbl_log_size);
	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x PBL_BOOTSTRAP_STATUS: 0x%08x\n",
		    pbl_wlan_boot_cfg, pbl_bootstrap_status);

	/* In mission mode the bootloader log region is no longer valid. */
	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
		cnss_pr_dbg("Avoid Dumping PBL log data in Mission mode\n");
		return;
	}

	cnss_pr_dbg("Dumping PBL log data\n");
	for (i = 0; i < pbl_log_max_size; i += sizeof(val)) {
		mem_addr = pbl_log_sram_start + i;
		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
			break;
		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
	}

	/* Clamp the device-reported SBL log size and validate that the
	 * reported region lies entirely inside SRAM before reading it.
	 */
	sbl_log_size = (sbl_log_size > sbl_log_max_size ?
			sbl_log_max_size : sbl_log_size);
	if (sbl_log_start < sbl_log_def_start ||
	    sbl_log_start > sbl_log_def_end ||
	    (sbl_log_start + sbl_log_size) > sbl_log_def_end) {
		cnss_pr_err("Invalid SBL log data\n");
		return;
	}

	/* Re-check the execution environment; it may have advanced while
	 * the PBL log was being dumped.
	 */
	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
		cnss_pr_dbg("Avoid Dumping SBL log data in Mission mode\n");
		return;
	}

	cnss_pr_dbg("Dumping SBL log data\n");
	for (i = 0; i < sbl_log_size; i += sizeof(val)) {
		mem_addr = sbl_log_start + i;
		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
			break;
		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
	}
}
1806 
1807 #ifdef CONFIG_DISABLE_CNSS_SRAM_DUMP
/* SRAM dump disabled at build time (CONFIG_DISABLE_CNSS_SRAM_DUMP). */
static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
{
}
1811 #else
/* Copy the full device SRAM (SRAM_DUMP_SIZE bytes from SRAM_START) into
 * the pre-allocated plat_priv->sram_dump buffer. Only runs for QCA6490 on
 * primary host builds.
 */
static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;
	u32 i, mem_addr;
	u32 *dump_ptr;

	plat_priv = pci_priv->plat_priv;

	if (plat_priv->device_id != QCA6490_DEVICE_ID ||
	    cnss_get_host_build_type() != QMI_HOST_BUILD_TYPE_PRIMARY_V01)
		return;

	if (!plat_priv->sram_dump) {
		cnss_pr_err("SRAM dump memory is not allocated\n");
		return;
	}

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pr_dbg("Dumping SRAM at 0x%lx\n", plat_priv->sram_dump);

	for (i = 0; i < SRAM_DUMP_SIZE; i += sizeof(u32)) {
		mem_addr = SRAM_START + i;
		dump_ptr = (u32 *)(plat_priv->sram_dump + i);
		/* A failed register read means the link died mid-dump. */
		if (cnss_pci_reg_read(pci_priv, mem_addr, dump_ptr)) {
			cnss_pr_err("SRAM Dump failed at 0x%x\n", mem_addr);
			break;
		}
		/* Relinquish CPU after dumping 256KB chunks*/
		if (!(i % CNSS_256KB_SIZE))
			cond_resched();
	}
}
1846 #endif
1847 
/* Handle an MHI power-up timeout: if the device signals an RDDM is coming
 * (RDDM cookie set or device SOL high), arm the RDDM timer and wait;
 * otherwise dump bootloader logs and SRAM and report -ETIMEDOUT.
 */
static int cnss_pci_handle_mhi_poweron_timeout(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	cnss_fatal_err("MHI power up returns timeout\n");

	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE) ||
	    cnss_get_dev_sol_value(plat_priv) > 0) {
		/* Wait for RDDM if RDDM cookie is set or device SOL GPIO is
		 * high. If RDDM times out, PBL/SBL error region may have been
		 * erased so no need to dump them either.
		 */
		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
		    !pci_priv->pci_link_down_ind) {
			mod_timer(&pci_priv->dev_rddm_timer,
				  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
		}
	} else {
		cnss_pr_dbg("RDDM cookie is not set and device SOL is low\n");
		cnss_mhi_debug_reg_dump(pci_priv);
		cnss_pci_soc_scratch_reg_dump(pci_priv);
		/* Dump PBL/SBL error log if RDDM cookie is not set */
		cnss_pci_dump_bl_sram_mem(pci_priv);
		cnss_pci_dump_sram(pci_priv);
		return -ETIMEDOUT;
	}

	return 0;
}
1877 
1878 static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
1879 {
1880 	switch (mhi_state) {
1881 	case CNSS_MHI_INIT:
1882 		return "INIT";
1883 	case CNSS_MHI_DEINIT:
1884 		return "DEINIT";
1885 	case CNSS_MHI_POWER_ON:
1886 		return "POWER_ON";
1887 	case CNSS_MHI_POWERING_OFF:
1888 		return "POWERING_OFF";
1889 	case CNSS_MHI_POWER_OFF:
1890 		return "POWER_OFF";
1891 	case CNSS_MHI_FORCE_POWER_OFF:
1892 		return "FORCE_POWER_OFF";
1893 	case CNSS_MHI_SUSPEND:
1894 		return "SUSPEND";
1895 	case CNSS_MHI_RESUME:
1896 		return "RESUME";
1897 	case CNSS_MHI_TRIGGER_RDDM:
1898 		return "TRIGGER_RDDM";
1899 	case CNSS_MHI_RDDM_DONE:
1900 		return "RDDM_DONE";
1901 	default:
1902 		return "UNKNOWN";
1903 	}
1904 };
1905 
/* Validate that a transition to @mhi_state is legal from the current
 * mhi_state bitmask (e.g. POWER_ON requires INIT set and POWER_ON clear).
 *
 * Return: 0 when the transition is allowed, -EINVAL otherwise. Illegal
 * transitions assert unless the target is TRIGGER_RDDM.
 */
static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
					enum cnss_mhi_state mhi_state)
{
	switch (mhi_state) {
	case CNSS_MHI_INIT:
		if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_DEINIT:
	case CNSS_MHI_POWER_ON:
		if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_FORCE_POWER_OFF:
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_POWER_OFF:
	case CNSS_MHI_SUSPEND:
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_RESUME:
		if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_TRIGGER_RDDM:
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_RDDM_DONE:
		/* RDDM_DONE is always acceptable. */
		return 0;
	default:
		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
			    cnss_mhi_state_to_str(mhi_state), mhi_state);
	}

	cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
		    cnss_mhi_state_to_str(mhi_state), mhi_state,
		    pci_priv->mhi_state);
	/* TRIGGER_RDDM failures are expected in some races; don't assert. */
	if (mhi_state != CNSS_MHI_TRIGGER_RDDM)
		CNSS_ASSERT(0);

	return -EINVAL;
}
1954 
1955 static int cnss_rddm_trigger_debug(struct cnss_pci_data *pci_priv)
1956 {
1957 	int read_val, ret;
1958 
1959 	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
1960 		return -EOPNOTSUPP;
1961 
1962 	if (cnss_pci_check_link_status(pci_priv))
1963 		return -EINVAL;
1964 
1965 	cnss_pr_err("Write GCC Spare with ACE55 Pattern");
1966 	cnss_pci_reg_write(pci_priv, GCC_GCC_SPARE_REG_1, 0xACE55);
1967 	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
1968 	cnss_pr_err("Read back GCC Spare: 0x%x, ret: %d", read_val, ret);
1969 	ret = cnss_pci_reg_read(pci_priv, GCC_PRE_ARES_DEBUG_TIMER_VAL,
1970 				&read_val);
1971 	cnss_pr_err("Warm reset allowed check: 0x%x, ret: %d", read_val, ret);
1972 	return ret;
1973 }
1974 
1975 static int cnss_rddm_trigger_check(struct cnss_pci_data *pci_priv)
1976 {
1977 	int read_val, ret;
1978 	u32 pbl_stage, sbl_log_start, sbl_log_size, pbl_wlan_boot_cfg;
1979 
1980 	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
1981 		return -EOPNOTSUPP;
1982 
1983 	if (cnss_pci_check_link_status(pci_priv))
1984 		return -EINVAL;
1985 
1986 	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
1987 	cnss_pr_err("Read GCC spare to check reset status: 0x%x, ret: %d",
1988 		    read_val, ret);
1989 
1990 	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
1991 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
1992 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
1993 	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
1994 	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size:0x%08x \n",
1995 		    pbl_stage, sbl_log_start, sbl_log_size);
1996 	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x\n", pbl_wlan_boot_cfg);
1997 
1998 	return ret;
1999 }
2000 
2001 static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
2002 				       enum cnss_mhi_state mhi_state)
2003 {
2004 	switch (mhi_state) {
2005 	case CNSS_MHI_INIT:
2006 		set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
2007 		break;
2008 	case CNSS_MHI_DEINIT:
2009 		clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
2010 		break;
2011 	case CNSS_MHI_POWER_ON:
2012 		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
2013 		break;
2014 	case CNSS_MHI_POWERING_OFF:
2015 		set_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
2016 		break;
2017 	case CNSS_MHI_POWER_OFF:
2018 	case CNSS_MHI_FORCE_POWER_OFF:
2019 		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
2020 		clear_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
2021 		clear_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
2022 		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
2023 		break;
2024 	case CNSS_MHI_SUSPEND:
2025 		set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
2026 		break;
2027 	case CNSS_MHI_RESUME:
2028 		clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
2029 		break;
2030 	case CNSS_MHI_TRIGGER_RDDM:
2031 		set_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
2032 		break;
2033 	case CNSS_MHI_RDDM_DONE:
2034 		set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
2035 		break;
2036 	default:
2037 		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
2038 	}
2039 }
2040 
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
/* Kernel >= 5.15 provides mhi_pm_resume_force(), which resumes the device
 * without checking its current MHI state; use it for the forced path.
 */
static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
{
	return mhi_pm_resume_force(pci_priv->mhi_ctrl);
}
#else
/* Older kernels have no forced variant; fall back to a normal MHI resume. */
static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
{
	return mhi_pm_resume(pci_priv->mhi_ctrl);
}
#endif
2052 
/* Drive an MHI state transition after validating it against the current
 * driver-internal state bitmask; on success the corresponding state bit is
 * updated via cnss_pci_set_mhi_state_bit().
 *
 * Returns 0 on success or a negative errno on validation/transition
 * failure.  QCA6174 has no MHI, so the call is a no-op for it.
 */
static int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
				  enum cnss_mhi_state mhi_state)
{
	int ret = 0, retry = 0;

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return 0;

	if (mhi_state < 0) {
		cnss_pr_err("Invalid MHI state (%d)\n", mhi_state);
		return -EINVAL;
	}

	ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state);
	if (ret)
		goto out;

	cnss_pr_vdbg("Setting MHI state: %s(%d)\n",
		     cnss_mhi_state_to_str(mhi_state), mhi_state);

	switch (mhi_state) {
	case CNSS_MHI_INIT:
		ret = mhi_prepare_for_power_up(pci_priv->mhi_ctrl);
		break;
	case CNSS_MHI_DEINIT:
		mhi_unprepare_after_power_down(pci_priv->mhi_ctrl);
		ret = 0;
		break;
	case CNSS_MHI_POWER_ON:
		ret = mhi_sync_power_up(pci_priv->mhi_ctrl);
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
		/* Only set img_pre_alloc when power up succeeds */
		if (!ret && !pci_priv->mhi_ctrl->img_pre_alloc) {
			cnss_pr_dbg("Notify MHI to use already allocated images\n");
			pci_priv->mhi_ctrl->img_pre_alloc = true;
		}
#endif
		break;
	case CNSS_MHI_POWER_OFF:
		/* Graceful power down (second argument true) */
		mhi_power_down(pci_priv->mhi_ctrl, true);
		ret = 0;
		break;
	case CNSS_MHI_FORCE_POWER_OFF:
		/* Non-graceful power down for the forced path */
		mhi_power_down(pci_priv->mhi_ctrl, false);
		ret = 0;
		break;
	case CNSS_MHI_SUSPEND:
retry_mhi_suspend:
		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
		if (pci_priv->drv_connected_last)
			ret = cnss_mhi_pm_fast_suspend(pci_priv, true);
		else
			ret = mhi_pm_suspend(pci_priv->mhi_ctrl);
		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
		/* Device busy: back off and retry a bounded number of times */
		if (ret == -EBUSY && retry++ < MHI_SUSPEND_RETRY_MAX_TIMES) {
			cnss_pr_dbg("Retry MHI suspend #%d\n", retry);
			usleep_range(MHI_SUSPEND_RETRY_DELAY_US,
				     MHI_SUSPEND_RETRY_DELAY_US + 1000);
			goto retry_mhi_suspend;
		}
		break;
	case CNSS_MHI_RESUME:
		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
		if (pci_priv->drv_connected_last) {
			/* Keep the link out of L1 during the fast resume */
			ret = cnss_pci_prevent_l1(&pci_priv->pci_dev->dev);
			if (ret) {
				mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
				break;
			}
			ret = cnss_mhi_pm_fast_resume(pci_priv, true);
			cnss_pci_allow_l1(&pci_priv->pci_dev->dev);
		} else {
			if (pci_priv->device_id == QCA6390_DEVICE_ID)
				ret = cnss_mhi_pm_force_resume(pci_priv);
			else
				ret = mhi_pm_resume(pci_priv->mhi_ctrl);
		}
		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
		break;
	case CNSS_MHI_TRIGGER_RDDM:
		cnss_rddm_trigger_debug(pci_priv);
		ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
		if (ret) {
			cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);

			/* Fall back to a host-initiated reset request */
			cnss_pr_dbg("Sending host reset req\n");
			ret = cnss_mhi_force_reset(pci_priv);
			cnss_rddm_trigger_check(pci_priv);
		}
		break;
	case CNSS_MHI_RDDM_DONE:
		/* Pure bookkeeping; only the state bit is updated below */
		break;
	default:
		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
		ret = -EINVAL;
	}

	if (ret)
		goto out;

	cnss_pci_set_mhi_state_bit(pci_priv, mhi_state);

	return 0;

out:
	cnss_pr_err("Failed to set MHI state: %s(%d), err = %d\n",
		    cnss_mhi_state_to_str(mhi_state), mhi_state, ret);
	return ret;
}
2162 
2163 static int cnss_pci_config_msi_addr(struct cnss_pci_data *pci_priv)
2164 {
2165 	int ret = 0;
2166 	struct pci_dev *pci_dev = pci_priv->pci_dev;
2167 	struct cnss_plat_data *plat_priv;
2168 
2169 	if (!pci_dev)
2170 		return -ENODEV;
2171 
2172 	if (!pci_dev->msix_enabled)
2173 		return ret;
2174 
2175 	plat_priv = pci_priv->plat_priv;
2176 	if (!plat_priv) {
2177 		cnss_pr_err("plat_priv is NULL\n");
2178 		return -ENODEV;
2179 	}
2180 
2181 	ret = of_property_read_u32(plat_priv->plat_dev->dev.of_node,
2182 				   "msix-match-addr",
2183 				   &pci_priv->msix_addr);
2184 	cnss_pr_dbg("MSI-X Match address is 0x%X\n",
2185 		    pci_priv->msix_addr);
2186 
2187 	return ret;
2188 }
2189 
2190 static int cnss_pci_config_msi_data(struct cnss_pci_data *pci_priv)
2191 {
2192 	struct msi_desc *msi_desc;
2193 	struct cnss_msi_config *msi_config;
2194 	struct pci_dev *pci_dev = pci_priv->pci_dev;
2195 
2196 	msi_config = pci_priv->msi_config;
2197 
2198 	if (pci_dev->msix_enabled) {
2199 		pci_priv->msi_ep_base_data = msi_config->users[0].base_vector;
2200 		cnss_pr_dbg("MSI-X base data is %d\n",
2201 			    pci_priv->msi_ep_base_data);
2202 		return 0;
2203 	}
2204 
2205 	msi_desc = irq_get_msi_desc(pci_dev->irq);
2206 	if (!msi_desc) {
2207 		cnss_pr_err("msi_desc is NULL!\n");
2208 		return -EINVAL;
2209 	}
2210 
2211 	pci_priv->msi_ep_base_data = msi_desc->msg.data;
2212 	cnss_pr_dbg("MSI base data is %d\n", pci_priv->msi_ep_base_data);
2213 
2214 	return 0;
2215 }
2216 
2217 #ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
2218 #define PLC_PCIE_NAME_LEN		14
2219 
2220 static struct cnss_plat_data *
2221 cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
2222 {
2223 	int plat_env_count = cnss_get_plat_env_count();
2224 	struct cnss_plat_data *plat_env;
2225 	struct cnss_pci_data *pci_priv;
2226 	int i = 0;
2227 
2228 	if (!driver_ops) {
2229 		cnss_pr_err("No cnss driver\n");
2230 		return NULL;
2231 	}
2232 
2233 	for (i = 0; i < plat_env_count; i++) {
2234 		plat_env = cnss_get_plat_env(i);
2235 		if (!plat_env)
2236 			continue;
2237 		if (driver_ops->name && plat_env->pld_bus_ops_name) {
2238 			/* driver_ops->name = PLD_PCIE_OPS_NAME
2239 			 * #ifdef MULTI_IF_NAME
2240 			 * #define PLD_PCIE_OPS_NAME "pld_pcie_" MULTI_IF_NAME
2241 			 * #else
2242 			 * #define PLD_PCIE_OPS_NAME "pld_pcie"
2243 			 * #endif
2244 			 */
2245 			if (memcmp(driver_ops->name,
2246 				   plat_env->pld_bus_ops_name,
2247 				   PLC_PCIE_NAME_LEN) == 0)
2248 				return plat_env;
2249 		}
2250 	}
2251 
2252 	cnss_pr_vdbg("Invalid cnss driver name from ko %s\n", driver_ops->name);
2253 	/* in the dual wlan card case, the pld_bus_ops_name from dts
2254 	 * and driver_ops-> name from ko should match, otherwise
2255 	 * wlanhost driver don't know which plat_env it can use;
2256 	 * if doesn't find the match one, then get first available
2257 	 * instance insteadly.
2258 	 */
2259 
2260 	for (i = 0; i < plat_env_count; i++) {
2261 		plat_env = cnss_get_plat_env(i);
2262 
2263 		if (!plat_env)
2264 			continue;
2265 
2266 		pci_priv = plat_env->bus_priv;
2267 		if (!pci_priv) {
2268 			cnss_pr_err("pci_priv is NULL\n");
2269 			continue;
2270 		}
2271 
2272 		if (driver_ops == pci_priv->driver_ops)
2273 			return plat_env;
2274 	}
2275 	/* Doesn't find the existing instance,
2276 	 * so return the fist empty instance
2277 	 */
2278 	for (i = 0; i < plat_env_count; i++) {
2279 		plat_env = cnss_get_plat_env(i);
2280 
2281 		if (!plat_env)
2282 			continue;
2283 		pci_priv = plat_env->bus_priv;
2284 		if (!pci_priv) {
2285 			cnss_pr_err("pci_priv is NULL\n");
2286 			continue;
2287 		}
2288 
2289 		if (!pci_priv->driver_ops)
2290 			return plat_env;
2291 	}
2292 
2293 	return NULL;
2294 }
2295 
/* For dual-device (dual Hastings) support, publish the DT-provided qrtr
 * node id to firmware through a PCIe scratch register, then verify the
 * write by reading it back.  Returns 0 on success or a negative errno.
 */
static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	u32 scratch = QCA6390_PCIE_SOC_PCIE_REG_PCIE_SCRATCH_2_SOC_PCIE_REG;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	plat_priv = pci_priv->plat_priv;
	/* In the single wlan chipset case, plat_priv->qrtr_node_id is always
	 * 0 and wlan fw will use the hardcoded 7 as the qrtr node id.
	 * In the dual Hastings case, the qrtr node id is read from the
	 * device tree into plat_priv->qrtr_node_id, which is then always
	 * non-zero.  That value is stored into a pcie register; wlan fw
	 * reads the qrtr node id from this register and overrides the
	 * hardcoded one while initializing the ipc router.
	 * Without this, two Hastings would use the same qrtr node instance
	 * id, which would mess up qmi message exchange.  According to the
	 * qrtr spec, every node should have a unique qrtr node id.
	 */
	if (plat_priv->device_id == QCA6390_DEVICE_ID &&
	    plat_priv->qrtr_node_id) {
		u32 val;

		cnss_pr_dbg("write 0x%x to SCRATCH REG\n",
			    plat_priv->qrtr_node_id);
		ret = cnss_pci_reg_write(pci_priv, scratch,
					 plat_priv->qrtr_node_id);
		if (ret) {
			cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
				    scratch, ret);
			goto out;
		}

		/* Read back to confirm the scratch write actually landed */
		ret = cnss_pci_reg_read(pci_priv, scratch, &val);
		if (ret) {
			cnss_pr_err("Failed to read SCRATCH REG");
			goto out;
		}

		if (val != plat_priv->qrtr_node_id) {
			cnss_pr_err("qrtr node id write to register doesn't match with readout value");
			return -ERANGE;
		}
	}
out:
	return ret;
}
2350 #else
/* Single-device build: there is only one plat_priv instance, so ignore
 * @driver_ops and return the global instance.
 */
static struct cnss_plat_data *
cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
{
	return cnss_bus_dev_to_plat_priv(NULL);
}
2356 
/* Single-device build: no qrtr node id override is needed; see the
 * CONFIG_CNSS_SUPPORT_DUAL_DEV variant for the dual-device case.
 */
static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
{
	return 0;
}
2361 #endif
2362 
/* Bring up MHI for the device: INIT then POWER_ON, with a temporarily
 * scaled-up MHI timeout during boot and a periodic boot-debug dump timer
 * running across the power-on.  FBC_BYPASS skips MHI entirely.
 *
 * Returns 0 on success or a negative errno.
 */
int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv;
	unsigned int timeout = 0;
	int retry = 0;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	plat_priv = pci_priv->plat_priv;
	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
		return 0;

	if (MHI_TIMEOUT_OVERWRITE_MS)
		pci_priv->mhi_ctrl->timeout_ms = MHI_TIMEOUT_OVERWRITE_MS;
	cnss_mhi_set_m2_timeout_ms(pci_priv, MHI_M2_TIMEOUT_MS);

	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
	if (ret)
		return ret;

	/* Save the configured timeout; it is scaled up only for power on
	 * and restored afterwards.
	 */
	timeout = pci_priv->mhi_ctrl->timeout_ms;
	/* For non-perf builds the timeout is 10 (default) * 6 seconds */
	if (cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
		pci_priv->mhi_ctrl->timeout_ms *= 6;
	else /* For perf builds the timeout is 10 (default) * 3 seconds */
		pci_priv->mhi_ctrl->timeout_ms *= 3;

retry:
	ret = cnss_pci_store_qrtr_node_id(pci_priv);
	if (ret) {
		/* Bounded retry of the scratch register write/readback */
		if (retry++ < REG_RETRY_MAX_TIMES)
			goto retry;
		else
			return ret;
	}

	/* Start the timer to dump MHI/PBL/SBL debug data periodically */
	mod_timer(&pci_priv->boot_debug_timer,
		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));

	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON);
	del_timer_sync(&pci_priv->boot_debug_timer);
	if (ret == 0)
		cnss_wlan_adsp_pc_enable(pci_priv, false);

	/* Restore the original MHI timeout after power on completes */
	pci_priv->mhi_ctrl->timeout_ms = timeout;

	if (ret == -ETIMEDOUT) {
		/* This is a special case needs to be handled that if MHI
		 * power on returns -ETIMEDOUT, controller needs to take care
		 * the cleanup by calling MHI power down. Force to set the bit
		 * for driver internal MHI state to make sure it can be handled
		 * properly later.
		 */
		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
		ret = cnss_pci_handle_mhi_poweron_timeout(pci_priv);
	} else if (!ret) {
		/* kernel may allocate a dummy vector before request_irq and
		 * then allocate a real vector when request_irq is called.
		 * So get msi_data here again to avoid spurious interrupt
		 * as msi_data will configured to srngs.
		 */
		if (cnss_pci_is_one_msi(pci_priv))
			ret = cnss_pci_config_msi_data(pci_priv);
	}

	return ret;
}
2435 
/* Power MHI off if it is currently on: re-enable ADSP power collapse,
 * force the RESUME/POWERING_OFF state bits, then perform a normal power
 * off — or a forced one when a PCI link down has been indicated.
 */
static void cnss_pci_power_off_mhi(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
		return;

	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state)) {
		cnss_pr_dbg("MHI is already powered off\n");
		return;
	}
	cnss_wlan_adsp_pc_enable(pci_priv, true);
	/* Clear SUSPEND (via RESUME) so the POWER_OFF transition passes the
	 * state check in cnss_pci_check_mhi_state_bit().
	 */
	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_POWERING_OFF);

	if (!pci_priv->pci_link_down_ind)
		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
	else
		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_FORCE_POWER_OFF);
}
2456 
2457 static void cnss_pci_deinit_mhi(struct cnss_pci_data *pci_priv)
2458 {
2459 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2460 
2461 	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
2462 		return;
2463 
2464 	if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state)) {
2465 		cnss_pr_dbg("MHI is already deinited\n");
2466 		return;
2467 	}
2468 
2469 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
2470 }
2471 
/* Update the VDD4BLOW and SHUTDOWN bits of WLAON_QFPROM_PWR_CTRL_REG via a
 * read-modify-write over PCIe, optionally holding force wake around the
 * access.  No-op unless plat_priv->set_wlaon_pwr_ctrl is set and the PCI
 * link is up.
 */
static void cnss_pci_set_wlaon_pwr_ctrl(struct cnss_pci_data *pci_priv,
					bool set_vddd4blow, bool set_shutdown,
					bool do_force_wake)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	int ret;
	u32 val;

	if (!plat_priv->set_wlaon_pwr_ctrl)
		return;

	if (pci_priv->pci_link_state == PCI_LINK_DOWN ||
	    pci_priv->pci_link_down_ind)
		return;

	if (do_force_wake)
		if (cnss_pci_force_wake_get(pci_priv))
			return;

	ret = cnss_pci_reg_read(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, &val);
	if (ret) {
		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
			    WLAON_QFPROM_PWR_CTRL_REG, ret);
		goto force_wake_put;
	}

	cnss_pr_dbg("Read register offset 0x%x, val = 0x%x\n",
		    WLAON_QFPROM_PWR_CTRL_REG, val);

	if (set_vddd4blow)
		val |= QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;
	else
		val &= ~QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;

	if (set_shutdown)
		val |= QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;
	else
		val &= ~QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;

	ret = cnss_pci_reg_write(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, val);
	if (ret) {
		cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
			    WLAON_QFPROM_PWR_CTRL_REG, ret);
		goto force_wake_put;
	}

	cnss_pr_dbg("Write val 0x%x to register offset 0x%x\n", val,
		    WLAON_QFPROM_PWR_CTRL_REG);

	/* Give the shutdown request time to take effect */
	if (set_shutdown)
		usleep_range(WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US,
			     WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US);

force_wake_put:
	if (do_force_wake)
		cnss_pci_force_wake_put(pci_priv);
}
2529 
/* Read the device's 64-bit time counter over PCIe and convert it to
 * microseconds in *time_us.
 *
 * ticks / (freq / 100000) * 10 == ticks * 1000000 / freq; the two-step
 * math keeps the divisor within 32 bits for do_div().
 *
 * Returns 0 on success, -EINVAL when the clock frequency is unknown.
 */
static int cnss_pci_get_device_timestamp(struct cnss_pci_data *pci_priv,
					 u64 *time_us)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	u32 low, high;
	u64 device_ticks;

	if (!plat_priv->device_freq_hz) {
		cnss_pr_err("Device time clock frequency is not valid\n");
		return -EINVAL;
	}

	switch (pci_priv->device_id) {
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		/* These targets expose the counter via PCIE_MHI_TIME regs */
		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_LOW, &low);
		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_HIGH, &high);
		break;
	default:
		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL3, &low);
		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL4, &high);
		break;
	}

	device_ticks = (u64)high << 32 | low;
	do_div(device_ticks, plat_priv->device_freq_hz / 100000);
	*time_us = device_ticks * 10;

	return 0;
}
2561 
2562 static void cnss_pci_enable_time_sync_counter(struct cnss_pci_data *pci_priv)
2563 {
2564 	switch (pci_priv->device_id) {
2565 	case KIWI_DEVICE_ID:
2566 	case MANGO_DEVICE_ID:
2567 	case PEACH_DEVICE_ID:
2568 		return;
2569 	default:
2570 		break;
2571 	}
2572 
2573 	cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
2574 			   TIME_SYNC_ENABLE);
2575 }
2576 
2577 static void cnss_pci_clear_time_sync_counter(struct cnss_pci_data *pci_priv)
2578 {
2579 	switch (pci_priv->device_id) {
2580 	case KIWI_DEVICE_ID:
2581 	case MANGO_DEVICE_ID:
2582 	case PEACH_DEVICE_ID:
2583 		return;
2584 	default:
2585 		break;
2586 	}
2587 
2588 	cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
2589 			   TIME_SYNC_CLEAR);
2590 }
2591 
2592 
/* Write the host/device time offset (split into @low/@high 32-bit halves)
 * into the shadow registers used for time sync.  Note that @low/@high are
 * reused as read-back buffers for the debug print at the end.
 */
static void cnss_pci_time_sync_reg_update(struct cnss_pci_data *pci_priv,
					  u32 low, u32 high)
{
	u32 time_reg_low;
	u32 time_reg_high;

	switch (pci_priv->device_id) {
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		/* Use the next two shadow registers after host's usage */
		time_reg_low = PCIE_SHADOW_REG_VALUE_0 +
				(pci_priv->plat_priv->num_shadow_regs_v3 *
				 SHADOW_REG_LEN_BYTES);
		time_reg_high = time_reg_low + SHADOW_REG_LEN_BYTES;
		break;
	default:
		time_reg_low = PCIE_SHADOW_REG_VALUE_34;
		time_reg_high = PCIE_SHADOW_REG_VALUE_35;
		break;
	}

	cnss_pci_reg_write(pci_priv, time_reg_low, low);
	cnss_pci_reg_write(pci_priv, time_reg_high, high);

	/* Read back for the log to confirm the values landed */
	cnss_pci_reg_read(pci_priv, time_reg_low, &low);
	cnss_pci_reg_read(pci_priv, time_reg_high, &high);

	cnss_pr_dbg("Updated time sync regs [0x%x] = 0x%x, [0x%x] = 0x%x\n",
		    time_reg_low, low, time_reg_high, high);
}
2624 
/* Sample host and device timestamps under a spinlock, compute the offset
 * (host - device, in microseconds) and program it into the time sync
 * registers.  L1 is prevented and force wake held around the register
 * traffic; the goto labels unwind those acquisitions in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int cnss_pci_update_timestamp(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct device *dev = &pci_priv->pci_dev->dev;
	unsigned long flags = 0;
	u64 host_time_us, device_time_us, offset;
	u32 low, high;
	int ret;

	ret = cnss_pci_prevent_l1(dev);
	if (ret)
		goto out;

	ret = cnss_pci_force_wake_get(pci_priv);
	if (ret)
		goto allow_l1;

	/* Sample both clocks with interrupts off so the offset is coherent */
	spin_lock_irqsave(&time_sync_lock, flags);
	cnss_pci_clear_time_sync_counter(pci_priv);
	cnss_pci_enable_time_sync_counter(pci_priv);
	host_time_us = cnss_get_host_timestamp(plat_priv);
	ret = cnss_pci_get_device_timestamp(pci_priv, &device_time_us);
	cnss_pci_clear_time_sync_counter(pci_priv);
	spin_unlock_irqrestore(&time_sync_lock, flags);
	if (ret)
		goto force_wake_put;

	/* A negative offset cannot be represented; bail out */
	if (host_time_us < device_time_us) {
		cnss_pr_err("Host time (%llu us) is smaller than device time (%llu us), stop\n",
			    host_time_us, device_time_us);
		ret = -EINVAL;
		goto force_wake_put;
	}

	offset = host_time_us - device_time_us;
	cnss_pr_dbg("Host time = %llu us, device time = %llu us, offset = %llu us\n",
		    host_time_us, device_time_us, offset);

	low = offset & 0xFFFFFFFF;
	high = offset >> 32;

	cnss_pci_time_sync_reg_update(pci_priv, low, high);

force_wake_put:
	cnss_pci_force_wake_put(pci_priv);
allow_l1:
	cnss_pci_allow_l1(dev);
out:
	return ret;
}
2675 
/* Delayed work handler: refresh the host/device time offset and re-arm
 * itself every time_sync_period ms while the device is up.
 */
static void cnss_pci_time_sync_work_hdlr(struct work_struct *work)
{
	struct cnss_pci_data *pci_priv =
		container_of(work, struct cnss_pci_data, time_sync_work.work);
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	unsigned int time_sync_period_ms =
		plat_priv->ctrl_params.time_sync_period;

	if (test_bit(DISABLE_TIME_SYNC, &plat_priv->ctrl_params.quirks)) {
		cnss_pr_dbg("Time sync is disabled\n");
		return;
	}

	if (!time_sync_period_ms) {
		cnss_pr_dbg("Skip time sync as time period is 0\n");
		return;
	}

	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
		return;

	/* NOTE(review): the error path still runs the put below —
	 * presumably mirroring pm_runtime_get_sync() semantics where the
	 * usage count is incremented even on failure; confirm the wrapper
	 * behaves the same.
	 */
	if (cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS) < 0)
		goto runtime_pm_put;

	mutex_lock(&pci_priv->bus_lock);
	cnss_pci_update_timestamp(pci_priv);
	mutex_unlock(&pci_priv->bus_lock);
	/* Re-arm for the next periodic update */
	schedule_delayed_work(&pci_priv->time_sync_work,
			      msecs_to_jiffies(time_sync_period_ms));

runtime_pm_put:
	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
}
2710 
2711 static int cnss_pci_start_time_sync_update(struct cnss_pci_data *pci_priv)
2712 {
2713 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2714 
2715 	switch (pci_priv->device_id) {
2716 	case QCA6390_DEVICE_ID:
2717 	case QCA6490_DEVICE_ID:
2718 	case KIWI_DEVICE_ID:
2719 	case MANGO_DEVICE_ID:
2720 	case PEACH_DEVICE_ID:
2721 		break;
2722 	default:
2723 		return -EOPNOTSUPP;
2724 	}
2725 
2726 	if (!plat_priv->device_freq_hz) {
2727 		cnss_pr_dbg("Device time clock frequency is not valid, skip time sync\n");
2728 		return -EINVAL;
2729 	}
2730 
2731 	cnss_pci_time_sync_work_hdlr(&pci_priv->time_sync_work.work);
2732 
2733 	return 0;
2734 }
2735 
2736 static void cnss_pci_stop_time_sync_update(struct cnss_pci_data *pci_priv)
2737 {
2738 	switch (pci_priv->device_id) {
2739 	case QCA6390_DEVICE_ID:
2740 	case QCA6490_DEVICE_ID:
2741 	case KIWI_DEVICE_ID:
2742 	case MANGO_DEVICE_ID:
2743 	case PEACH_DEVICE_ID:
2744 		break;
2745 	default:
2746 		return;
2747 	}
2748 
2749 	cancel_delayed_work_sync(&pci_priv->time_sync_work);
2750 }
2751 
2752 int cnss_pci_set_therm_cdev_state(struct cnss_pci_data *pci_priv,
2753 				  unsigned long thermal_state,
2754 				  int tcdev_id)
2755 {
2756 	if (!pci_priv) {
2757 		cnss_pr_err("pci_priv is NULL!\n");
2758 		return -ENODEV;
2759 	}
2760 
2761 	if (!pci_priv->driver_ops || !pci_priv->driver_ops->set_therm_cdev_state) {
2762 		cnss_pr_err("driver_ops or set_therm_cdev_state is NULL\n");
2763 		return -EINVAL;
2764 	}
2765 
2766 	return pci_priv->driver_ops->set_therm_cdev_state(pci_priv->pci_dev,
2767 							 thermal_state,
2768 							 tcdev_id);
2769 }
2770 
/* Change the time sync period.  Sequencing matters: the running work is
 * stopped first, the period updated, then the work restarted so that the
 * re-armed handler picks up the new value.
 *
 * Returns 0, or -ENODEV when pci_priv is NULL.
 */
int cnss_pci_update_time_sync_period(struct cnss_pci_data *pci_priv,
				     unsigned int time_sync_period)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;

	cnss_pci_stop_time_sync_update(pci_priv);
	plat_priv->ctrl_params.time_sync_period = time_sync_period;
	cnss_pci_start_time_sync_update(pci_priv);
	cnss_pr_dbg("WLAN time sync period %u ms\n",
		    plat_priv->ctrl_params.time_sync_period);

	return 0;
}
2789 
/* Invoke the registered WLAN host driver entry point that matches the
 * current driver state: reinit() for recovery of an already-probed driver,
 * probe() on first load, idle_restart() during idle restart; otherwise
 * just complete the power-up waiters.  On success the periodic time sync
 * update is started.
 *
 * Returns 0 on success or a negative errno from the host driver callback.
 */
int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
		cnss_pr_err("Reboot is in progress, skip driver probe\n");
		return -EINVAL;
	}

	if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
		cnss_pr_dbg("Skip driver probe\n");
		goto out;
	}

	if (!pci_priv->driver_ops) {
		cnss_pr_err("driver_ops is NULL\n");
		ret = -EINVAL;
		goto out;
	}

	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
		/* Recovery of an already probed driver: reinit */
		ret = pci_priv->driver_ops->reinit(pci_priv->pci_dev,
						   pci_priv->pci_device_id);
		if (ret) {
			cnss_pr_err("Failed to reinit host driver, err = %d\n",
				    ret);
			goto out;
		}
		complete(&plat_priv->recovery_complete);
	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
		/* First load: probe, then release any probe-only blob memory */
		ret = pci_priv->driver_ops->probe(pci_priv->pci_dev,
						  pci_priv->pci_device_id);
		if (ret) {
			cnss_pr_err("Failed to probe host driver, err = %d\n",
				    ret);
			complete_all(&plat_priv->power_up_complete);
			goto out;
		}
		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
		set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
		cnss_pci_free_blob_mem(pci_priv);
		complete_all(&plat_priv->power_up_complete);
	} else if (test_bit(CNSS_DRIVER_IDLE_RESTART,
			    &plat_priv->driver_state)) {
		ret = pci_priv->driver_ops->idle_restart(pci_priv->pci_dev,
			pci_priv->pci_device_id);
		if (ret) {
			cnss_pr_err("Failed to idle restart host driver, err = %d\n",
				    ret);
			/* Record the failure for waiters before waking them */
			plat_priv->power_up_error = ret;
			complete_all(&plat_priv->power_up_complete);
			goto out;
		}
		clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
		complete_all(&plat_priv->power_up_complete);
	} else {
		complete(&plat_priv->power_up_complete);
	}

	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
		__pm_relax(plat_priv->recovery_ws);
	}

	cnss_pci_start_time_sync_update(pci_priv);

	return 0;

out:
	return ret;
}
2868 
/* Invoke the matching teardown entry point of the registered WLAN host
 * driver: shutdown() during recovery of a probed driver, remove() on
 * unload, idle_shutdown() during idle shutdown.
 *
 * Returns 0, -EAGAIN when idle shutdown asks to be retried, or a negative
 * errno for invalid state.  Skipped entirely during cold boot cal,
 * FW boot recovery or driver debug mode.
 */
int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;
	int ret;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;

	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) ||
	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
		cnss_pr_dbg("Skip driver remove\n");
		return 0;
	}

	if (!pci_priv->driver_ops) {
		cnss_pr_err("driver_ops is NULL\n");
		return -EINVAL;
	}

	/* Stop the periodic time sync before tearing the driver down */
	cnss_pci_stop_time_sync_update(pci_priv);

	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
		pci_priv->driver_ops->shutdown(pci_priv->pci_dev);
	} else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
		pci_priv->driver_ops->remove(pci_priv->pci_dev);
		clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
	} else if (test_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
			    &plat_priv->driver_state)) {
		ret = pci_priv->driver_ops->idle_shutdown(pci_priv->pci_dev);
		if (ret == -EAGAIN) {
			/* Host driver asked for a retry; undo the state bit */
			clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
				  &plat_priv->driver_state);
			return ret;
		}
	}

	plat_priv->get_info_cb_ctx = NULL;
	plat_priv->get_info_cb = NULL;

	return 0;
}
2914 
2915 int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv,
2916 				      int modem_current_status)
2917 {
2918 	struct cnss_wlan_driver *driver_ops;
2919 
2920 	if (!pci_priv)
2921 		return -ENODEV;
2922 
2923 	driver_ops = pci_priv->driver_ops;
2924 	if (!driver_ops || !driver_ops->modem_status)
2925 		return -EINVAL;
2926 
2927 	driver_ops->modem_status(pci_priv->pci_dev, modem_current_status);
2928 
2929 	return 0;
2930 }
2931 
2932 int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
2933 			   enum cnss_driver_status status)
2934 {
2935 	struct cnss_wlan_driver *driver_ops;
2936 
2937 	if (!pci_priv)
2938 		return -ENODEV;
2939 
2940 	driver_ops = pci_priv->driver_ops;
2941 	if (!driver_ops || !driver_ops->update_status)
2942 		return -EINVAL;
2943 
2944 	cnss_pr_dbg("Update driver status: %d\n", status);
2945 
2946 	driver_ops->update_status(pci_priv->pci_dev, status);
2947 
2948 	return 0;
2949 }
2950 
/* Walk a table of miscellaneous debug registers and either write the
 * programmed value or read back the current value into the table entry.
 * Skipped entirely in atomic context or when the PCI link is down, since
 * register access may sleep or fault. Entries not matching the device's
 * misc_reg_dev_mask bit are skipped.
 */
static void cnss_pci_misc_reg_dump(struct cnss_pci_data *pci_priv,
				   struct cnss_misc_reg *misc_reg,
				   u32 misc_reg_size,
				   char *reg_name)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	bool do_force_wake_put = true;
	int i;

	if (!misc_reg)
		return;

	/* Register access below is not atomic-context safe */
	if (in_interrupt() || irqs_disabled())
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	if (cnss_pci_force_wake_get(pci_priv)) {
		/* Continue to dump when device has entered RDDM already */
		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
			return;
		/* Wake grab failed, so there is nothing to release later */
		do_force_wake_put = false;
	}

	cnss_pr_dbg("Start to dump %s registers\n", reg_name);

	for (i = 0; i < misc_reg_size; i++) {
		if (!test_bit(pci_priv->misc_reg_dev_mask,
			      &misc_reg[i].dev_mask))
			continue;

		if (misc_reg[i].wr) {
			/* SAW2_CFG write depends on the value read back by
			 * the immediately preceding table entry, so the table
			 * order is significant here.
			 */
			if (misc_reg[i].offset ==
			    QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG &&
			    i >= 1)
				misc_reg[i].val =
				QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG_MSK |
				misc_reg[i - 1].val;
			if (cnss_pci_reg_write(pci_priv,
					       misc_reg[i].offset,
					       misc_reg[i].val))
				goto force_wake_put;
			cnss_pr_vdbg("Write 0x%X to 0x%X\n",
				     misc_reg[i].val,
				     misc_reg[i].offset);

		} else {
			/* Read entries capture the live value into the table */
			if (cnss_pci_reg_read(pci_priv,
					      misc_reg[i].offset,
					      &misc_reg[i].val))
				goto force_wake_put;
		}
	}

force_wake_put:
	if (do_force_wake_put)
		cnss_pci_force_wake_put(pci_priv);
}
3010 
3011 static void cnss_pci_dump_misc_reg(struct cnss_pci_data *pci_priv)
3012 {
3013 	if (in_interrupt() || irqs_disabled())
3014 		return;
3015 
3016 	if (cnss_pci_check_link_status(pci_priv))
3017 		return;
3018 
3019 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wcss_reg,
3020 			       WCSS_REG_SIZE, "wcss");
3021 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->pcie_reg,
3022 			       PCIE_REG_SIZE, "pcie");
3023 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wlaon_reg,
3024 			       WLAON_REG_SIZE, "wlaon");
3025 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->syspm_reg,
3026 			       SYSPM_REG_SIZE, "syspm");
3027 }
3028 
3029 static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv)
3030 {
3031 	int i, j = 0, array_size = SHADOW_REG_COUNT + SHADOW_REG_INTER_COUNT;
3032 	u32 reg_offset;
3033 	bool do_force_wake_put = true;
3034 
3035 	if (in_interrupt() || irqs_disabled())
3036 		return;
3037 
3038 	if (cnss_pci_check_link_status(pci_priv))
3039 		return;
3040 
3041 	if (!pci_priv->debug_reg) {
3042 		pci_priv->debug_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
3043 						   sizeof(*pci_priv->debug_reg)
3044 						   * array_size, GFP_KERNEL);
3045 		if (!pci_priv->debug_reg)
3046 			return;
3047 	}
3048 
3049 	if (cnss_pci_force_wake_get(pci_priv))
3050 		do_force_wake_put = false;
3051 
3052 	cnss_pr_dbg("Start to dump shadow registers\n");
3053 
3054 	for (i = 0; i < SHADOW_REG_COUNT; i++, j++) {
3055 		reg_offset = PCIE_SHADOW_REG_VALUE_0 + i * 4;
3056 		pci_priv->debug_reg[j].offset = reg_offset;
3057 		if (cnss_pci_reg_read(pci_priv, reg_offset,
3058 				      &pci_priv->debug_reg[j].val))
3059 			goto force_wake_put;
3060 	}
3061 
3062 	for (i = 0; i < SHADOW_REG_INTER_COUNT; i++, j++) {
3063 		reg_offset = PCIE_SHADOW_REG_INTER_0 + i * 4;
3064 		pci_priv->debug_reg[j].offset = reg_offset;
3065 		if (cnss_pci_reg_read(pci_priv, reg_offset,
3066 				      &pci_priv->debug_reg[j].val))
3067 			goto force_wake_put;
3068 	}
3069 
3070 force_wake_put:
3071 	if (do_force_wake_put)
3072 		cnss_pci_force_wake_put(pci_priv);
3073 }
3074 
/* QCA6174 power-up sequence: power on the device, bring up the PCI link,
 * then probe the host driver. Each step's failure unwinds the prior steps
 * in reverse order via the goto chain.
 *
 * Returns 0 on success or a negative errno from the failing step.
 */
static int cnss_qca6174_powerup(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	ret = cnss_power_on_device(plat_priv, false);
	if (ret) {
		cnss_pr_err("Failed to power on device, err = %d\n", ret);
		goto out;
	}

	ret = cnss_resume_pci_link(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
		goto power_off;
	}

	ret = cnss_pci_call_driver_probe(pci_priv);
	if (ret)
		goto suspend_link;

	return 0;
/* Unwind in reverse order of setup */
suspend_link:
	cnss_suspend_pci_link(pci_priv);
power_off:
	cnss_power_off_device(plat_priv);
out:
	return ret;
}
3104 
/* QCA6174 shutdown sequence: resume from runtime PM, remove the host
 * driver, drop bus bandwidth, suspend the PCI link and power off.
 * If the host driver's remove returns -EAGAIN, shutdown is aborted and
 * the error is propagated so the caller can retry.
 */
static int cnss_qca6174_shutdown(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	/* Make sure the device is runtime-active before tearing down */
	cnss_pci_pm_runtime_resume(pci_priv);

	ret = cnss_pci_call_driver_remove(pci_priv);
	if (ret == -EAGAIN)
		goto out;

	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
				   CNSS_BUS_WIDTH_NONE);
	cnss_pci_set_monitor_wake_intr(pci_priv, false);
	cnss_pci_set_auto_suspended(pci_priv, 0);

	ret = cnss_suspend_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);

	cnss_power_off_device(plat_priv);

	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);

out:
	return ret;
}
3133 
3134 static void cnss_qca6174_crash_shutdown(struct cnss_pci_data *pci_priv)
3135 {
3136 	if (pci_priv->driver_ops && pci_priv->driver_ops->crash_shutdown)
3137 		pci_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
3138 }
3139 
3140 static int cnss_qca6174_ramdump(struct cnss_pci_data *pci_priv)
3141 {
3142 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3143 	struct cnss_ramdump_info *ramdump_info;
3144 
3145 	ramdump_info = &plat_priv->ramdump_info;
3146 	if (!ramdump_info->ramdump_size)
3147 		return -EINVAL;
3148 
3149 	return cnss_do_ramdump(plat_priv);
3150 }
3151 
3152 static void cnss_get_driver_mode_update_fw_name(struct cnss_plat_data *plat_priv)
3153 {
3154 	struct cnss_pci_data *pci_priv;
3155 	struct cnss_wlan_driver *driver_ops;
3156 
3157 	pci_priv = plat_priv->bus_priv;
3158 	driver_ops = pci_priv->driver_ops;
3159 
3160 	if (driver_ops && driver_ops->get_driver_mode) {
3161 		plat_priv->driver_mode = driver_ops->get_driver_mode();
3162 		cnss_pci_update_fw_name(pci_priv);
3163 		cnss_pr_dbg("New driver mode is %d", plat_priv->driver_mode);
3164 	}
3165 }
3166 
/* QCA6290-family power-up sequence: clean up any stale ramdump state,
 * power on the device, bring up the PCI link (with bounded retries,
 * including a BT_EN GPIO toggle on the final retry), start MHI, and
 * either probe directly (QMI bypass) or arm the FW boot timer.
 *
 * Returns 0 on success or when recovery has been scheduled; negative
 * errno on unrecoverable power/link failure.
 */
static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	unsigned int timeout;
	int retry = 0, bt_en_gpio = plat_priv->pinctrl_info.bt_en_gpio;
	int sw_ctrl_gpio = plat_priv->pinctrl_info.sw_ctrl_gpio;

	/* A previous crash left dump data behind; tear everything down
	 * before powering up again.
	 */
	if (plat_priv->ramdump_info_v2.dump_data_valid) {
		cnss_pci_clear_dump_info(pci_priv);
		cnss_pci_power_off_mhi(pci_priv);
		cnss_suspend_pci_link(pci_priv);
		cnss_pci_deinit_mhi(pci_priv);
		cnss_power_off_device(plat_priv);
	}

	/* Clear QMI send usage count during every power up */
	pci_priv->qmi_send_usage_count = 0;

	plat_priv->power_up_error = 0;

	cnss_get_driver_mode_update_fw_name(plat_priv);
retry:
	ret = cnss_power_on_device(plat_priv, false);
	if (ret) {
		cnss_pr_err("Failed to power on device, err = %d\n", ret);
		goto out;
	}

	ret = cnss_resume_pci_link(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
		cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
			    cnss_get_input_gpio_value(plat_priv, sw_ctrl_gpio));
		if (test_bit(IGNORE_PCI_LINK_FAILURE,
			     &plat_priv->ctrl_params.quirks)) {
			cnss_pr_dbg("Ignore PCI link resume failure\n");
			ret = 0;
			goto out;
		}
		/* Only -EAGAIN from link resume is considered retryable */
		if (ret == -EAGAIN && retry++ < POWER_ON_RETRY_MAX_TIMES) {
			cnss_power_off_device(plat_priv);
			/* Force toggle BT_EN GPIO low */
			if (retry == POWER_ON_RETRY_MAX_TIMES) {
				cnss_pr_dbg("Retry #%d. Set BT_EN GPIO(%u) low\n",
					    retry, bt_en_gpio);
				if (bt_en_gpio >= 0)
					gpio_direction_output(bt_en_gpio, 0);
				cnss_pr_dbg("BT_EN GPIO val: %d\n",
					    gpio_get_value(bt_en_gpio));
			}
			cnss_pr_dbg("Retry to resume PCI link #%d\n", retry);
			cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
				    cnss_get_input_gpio_value(plat_priv,
							      sw_ctrl_gpio));
			/* Back off longer with each successive retry */
			msleep(POWER_ON_RETRY_DELAY_MS * retry);
			goto retry;
		}
		/* Assert when it reaches maximum retries */
		CNSS_ASSERT(0);
		goto power_off;
	}

	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false, false);
	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI);

	ret = cnss_pci_start_mhi(pci_priv);
	if (ret) {
		cnss_fatal_err("Failed to start MHI, err = %d\n", ret);
		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
		    !pci_priv->pci_link_down_ind && timeout) {
			/* Start recovery directly for MHI start failures */
			cnss_schedule_recovery(&pci_priv->pci_dev->dev,
					       CNSS_REASON_DEFAULT);
		}
		/* Recovery owns the failure from here; report success */
		return 0;
	}

	if (test_bit(USE_CORE_ONLY_FW, &plat_priv->ctrl_params.quirks)) {
		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
		return 0;
	}

	cnss_set_pin_connect_status(plat_priv);

	if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks)) {
		ret = cnss_pci_call_driver_probe(pci_priv);
		if (ret)
			goto stop_mhi;
	} else if (timeout) {
		/* Arm the FW boot watchdog; cold boot calibration gets a
		 * longer allowance than mission mode.
		 */
		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state))
			timeout += WLAN_COLD_BOOT_CAL_TIMEOUT;
		else
			timeout += WLAN_MISSION_MODE_TIMEOUT;
		mod_timer(&plat_priv->fw_boot_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}

	return 0;

/* Unwind in reverse order of setup */
stop_mhi:
	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, true);
	cnss_pci_power_off_mhi(pci_priv);
	cnss_suspend_pci_link(pci_priv);
	cnss_pci_deinit_mhi(pci_priv);
power_off:
	cnss_power_off_device(plat_priv);
out:
	return ret;
}
3278 
3279 static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv)
3280 {
3281 	int ret = 0;
3282 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3283 	int do_force_wake = true;
3284 
3285 	cnss_pci_pm_runtime_resume(pci_priv);
3286 
3287 	ret = cnss_pci_call_driver_remove(pci_priv);
3288 	if (ret == -EAGAIN)
3289 		goto out;
3290 
3291 	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
3292 				   CNSS_BUS_WIDTH_NONE);
3293 	cnss_pci_set_monitor_wake_intr(pci_priv, false);
3294 	cnss_pci_set_auto_suspended(pci_priv, 0);
3295 
3296 	if ((test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
3297 	     test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
3298 	     test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) ||
3299 	     test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state) ||
3300 	     test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) &&
3301 	    test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
3302 		del_timer(&pci_priv->dev_rddm_timer);
3303 		cnss_pci_collect_dump_info(pci_priv, false);
3304 
3305 		if (!plat_priv->recovery_enabled)
3306 			CNSS_ASSERT(0);
3307 	}
3308 
3309 	if (!cnss_is_device_powered_on(plat_priv)) {
3310 		cnss_pr_dbg("Device is already powered off, ignore\n");
3311 		goto skip_power_off;
3312 	}
3313 
3314 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
3315 		do_force_wake = false;
3316 	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, do_force_wake);
3317 
3318 	/* FBC image will be freed after powering off MHI, so skip
3319 	 * if RAM dump data is still valid.
3320 	 */
3321 	if (plat_priv->ramdump_info_v2.dump_data_valid)
3322 		goto skip_power_off;
3323 
3324 	cnss_pci_power_off_mhi(pci_priv);
3325 	ret = cnss_suspend_pci_link(pci_priv);
3326 	if (ret)
3327 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
3328 	cnss_pci_deinit_mhi(pci_priv);
3329 	cnss_power_off_device(plat_priv);
3330 
3331 skip_power_off:
3332 	pci_priv->remap_window = 0;
3333 
3334 	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
3335 	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
3336 	if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
3337 	    test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
3338 		clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
3339 		pci_priv->pci_link_down_ind = false;
3340 	}
3341 	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
3342 	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);
3343 	memset(&print_optimize, 0, sizeof(print_optimize));
3344 
3345 out:
3346 	return ret;
3347 }
3348 
/* Collect dump info during a crash shutdown. CNSS_IN_PANIC is set for
 * the duration of the dump so downstream code can tell it is running in
 * panic context (presumably to avoid sleeping paths — confirm against
 * cnss_pci_collect_dump_info()); the bit is cleared immediately after.
 */
static void cnss_qca6290_crash_shutdown(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	set_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
	cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
		    plat_priv->driver_state);

	cnss_pci_collect_dump_info(pci_priv, true);
	clear_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
}
3360 
/* Emit the QCA6290-family ELF ramdump, then release the dump resources
 * and fully power down the device (MHI off, link suspended, MHI deinit,
 * device off — in that order).
 *
 * Returns 0 when there is nothing valid to dump; otherwise the result of
 * cnss_do_elf_ramdump(). Teardown runs regardless of the dump result.
 */
static int cnss_qca6290_ramdump(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
	struct cnss_dump_data *dump_data = &info_v2->dump_data;
	struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
	int ret = 0;

	if (!info_v2->dump_data_valid || !dump_seg ||
	    dump_data->nentries == 0)
		return 0;

	ret = cnss_do_elf_ramdump(plat_priv);

	cnss_pci_clear_dump_info(pci_priv);
	cnss_pci_power_off_mhi(pci_priv);
	cnss_suspend_pci_link(pci_priv);
	cnss_pci_deinit_mhi(pci_priv);
	cnss_power_off_device(plat_priv);

	return ret;
}
3383 
3384 int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv)
3385 {
3386 	int ret = 0;
3387 
3388 	if (!pci_priv) {
3389 		cnss_pr_err("pci_priv is NULL\n");
3390 		return -ENODEV;
3391 	}
3392 
3393 	switch (pci_priv->device_id) {
3394 	case QCA6174_DEVICE_ID:
3395 		ret = cnss_qca6174_powerup(pci_priv);
3396 		break;
3397 	case QCA6290_DEVICE_ID:
3398 	case QCA6390_DEVICE_ID:
3399 	case QCN7605_DEVICE_ID:
3400 	case QCA6490_DEVICE_ID:
3401 	case KIWI_DEVICE_ID:
3402 	case MANGO_DEVICE_ID:
3403 	case PEACH_DEVICE_ID:
3404 		ret = cnss_qca6290_powerup(pci_priv);
3405 		break;
3406 	default:
3407 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3408 			    pci_priv->device_id);
3409 		ret = -ENODEV;
3410 	}
3411 
3412 	return ret;
3413 }
3414 
3415 int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv)
3416 {
3417 	int ret = 0;
3418 
3419 	if (!pci_priv) {
3420 		cnss_pr_err("pci_priv is NULL\n");
3421 		return -ENODEV;
3422 	}
3423 
3424 	switch (pci_priv->device_id) {
3425 	case QCA6174_DEVICE_ID:
3426 		ret = cnss_qca6174_shutdown(pci_priv);
3427 		break;
3428 	case QCA6290_DEVICE_ID:
3429 	case QCA6390_DEVICE_ID:
3430 	case QCN7605_DEVICE_ID:
3431 	case QCA6490_DEVICE_ID:
3432 	case KIWI_DEVICE_ID:
3433 	case MANGO_DEVICE_ID:
3434 	case PEACH_DEVICE_ID:
3435 		ret = cnss_qca6290_shutdown(pci_priv);
3436 		break;
3437 	default:
3438 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3439 			    pci_priv->device_id);
3440 		ret = -ENODEV;
3441 	}
3442 
3443 	return ret;
3444 }
3445 
3446 int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv)
3447 {
3448 	int ret = 0;
3449 
3450 	if (!pci_priv) {
3451 		cnss_pr_err("pci_priv is NULL\n");
3452 		return -ENODEV;
3453 	}
3454 
3455 	switch (pci_priv->device_id) {
3456 	case QCA6174_DEVICE_ID:
3457 		cnss_qca6174_crash_shutdown(pci_priv);
3458 		break;
3459 	case QCA6290_DEVICE_ID:
3460 	case QCA6390_DEVICE_ID:
3461 	case QCN7605_DEVICE_ID:
3462 	case QCA6490_DEVICE_ID:
3463 	case KIWI_DEVICE_ID:
3464 	case MANGO_DEVICE_ID:
3465 	case PEACH_DEVICE_ID:
3466 		cnss_qca6290_crash_shutdown(pci_priv);
3467 		break;
3468 	default:
3469 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3470 			    pci_priv->device_id);
3471 		ret = -ENODEV;
3472 	}
3473 
3474 	return ret;
3475 }
3476 
3477 int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv)
3478 {
3479 	int ret = 0;
3480 
3481 	if (!pci_priv) {
3482 		cnss_pr_err("pci_priv is NULL\n");
3483 		return -ENODEV;
3484 	}
3485 
3486 	switch (pci_priv->device_id) {
3487 	case QCA6174_DEVICE_ID:
3488 		ret = cnss_qca6174_ramdump(pci_priv);
3489 		break;
3490 	case QCA6290_DEVICE_ID:
3491 	case QCA6390_DEVICE_ID:
3492 	case QCN7605_DEVICE_ID:
3493 	case QCA6490_DEVICE_ID:
3494 	case KIWI_DEVICE_ID:
3495 	case MANGO_DEVICE_ID:
3496 	case PEACH_DEVICE_ID:
3497 		ret = cnss_qca6290_ramdump(pci_priv);
3498 		break;
3499 	default:
3500 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3501 			    pci_priv->device_id);
3502 		ret = -ENODEV;
3503 	}
3504 
3505 	return ret;
3506 }
3507 
3508 int cnss_pci_is_drv_connected(struct device *dev)
3509 {
3510 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
3511 
3512 	if (!pci_priv)
3513 		return -ENODEV;
3514 
3515 	return pci_priv->drv_connected_last;
3516 }
3517 EXPORT_SYMBOL(cnss_pci_is_drv_connected);
3518 
/* Delayed work that completes a deferred WLAN driver registration once
 * cold boot calibration (CBC) has finished. If calibration has not even
 * started, the work reschedules itself; if it started but timed out, a
 * CAL_TIMEOUT completion event is posted before registering anyway.
 */
static void cnss_wlan_reg_driver_work(struct work_struct *work)
{
	struct cnss_plat_data *plat_priv =
	container_of(work, struct cnss_plat_data, wlan_reg_driver_work.work);
	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
	struct cnss_cal_info *cal_info;
	unsigned int timeout;

	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state))
		return;

	if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) {
		/* Calibration finished: go straight to registration */
		goto reg_driver;
	} else {
		if (plat_priv->charger_mode) {
			cnss_pr_err("Ignore calibration timeout in charger mode\n");
			return;
		}
		/* Calibration has not started yet (waiting on file system
		 * ready); re-arm this work and try again later.
		 */
		if (!test_bit(CNSS_IN_COLD_BOOT_CAL,
			      &plat_priv->driver_state)) {
			timeout = cnss_get_timeout(plat_priv,
						   CNSS_TIMEOUT_CALIBRATION);
			cnss_pr_dbg("File system not ready to start calibration. Wait for %ds..\n",
				    timeout / 1000);
			schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
					      msecs_to_jiffies(timeout));
			return;
		}

		/* Calibration started but did not finish in time */
		del_timer(&plat_priv->fw_boot_timer);
		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) &&
		    !test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
			cnss_pr_err("Timeout waiting for calibration to complete\n");
			CNSS_ASSERT(0);
		}
		/* Post a CAL_TIMEOUT completion; the event handler owns
		 * (and frees) cal_info.
		 */
		cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
		if (!cal_info)
			return;
		cal_info->cal_status = CNSS_CAL_TIMEOUT;
		cnss_driver_event_post(plat_priv,
				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
				       0, cal_info);
	}
reg_driver:
	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
		return;
	}
	reinit_completion(&plat_priv->power_up_complete);
	cnss_driver_event_post(plat_priv,
			       CNSS_DRIVER_EVENT_REGISTER_DRIVER,
			       CNSS_EVENT_SYNC_UNKILLABLE,
			       pci_priv->driver_ops);
}
3573 
/* Public entry point for a WLAN host driver (e.g. qcacld) to register
 * its callbacks with cnss2. Validates platform readiness, matches the
 * driver's PCI ID table and chip version against the enumerated device,
 * then either posts a synchronous REGISTER_DRIVER event or — when cold
 * boot calibration is enabled and not yet done — defers registration to
 * cnss_wlan_reg_driver_work().
 *
 * Returns 0 on success or deferral, -EAGAIN if the platform is not ready
 * yet (caller may retry), -EEXIST if already registered, -EINVAL during
 * reboot, -ENODEV for mismatched/unsupported hardware.
 */
int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv;
	struct cnss_pci_data *pci_priv;
	const struct pci_device_id *id_table = driver_ops->id_table;
	unsigned int timeout;

	if (!cnss_check_driver_loading_allowed()) {
		cnss_pr_info("No cnss2 dtsi entry present");
		return -ENODEV;
	}

	plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops);

	if (!plat_priv) {
		cnss_pr_buf("plat_priv is not ready for register driver\n");
		return -EAGAIN;
	}

	pci_priv = plat_priv->bus_priv;
	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state)) {
		/* HW is administratively disabled: stash driver_ops so
		 * registration can resume when HW is re-enabled, provided
		 * the driver supports this device.
		 */
		while (id_table && id_table->device) {
			if (plat_priv->device_id == id_table->device) {
				if (plat_priv->device_id == KIWI_DEVICE_ID &&
				    driver_ops->chip_version != 2) {
					cnss_pr_err("WLAN HW disabled. kiwi_v2 only supported\n");
					return -ENODEV;
				}
				cnss_pr_info("WLAN register driver deferred for device ID: 0x%x due to HW disable\n",
					     id_table->device);
				plat_priv->driver_ops = driver_ops;
				return 0;
			}
			id_table++;
		}
		return -ENODEV;
	}

	if (!test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state)) {
		cnss_pr_info("pci probe not yet done for register driver\n");
		return -EAGAIN;
	}

	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state)) {
		cnss_pr_err("Driver has already registered\n");
		return -EEXIST;
	}

	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
		return -EINVAL;
	}

	/* Reject a host driver built for hardware that is not present */
	if (!id_table || !pci_dev_present(id_table)) {
		/* id_table pointer will move from pci_dev_present(),
		 * so check again using local pointer.
		 */
		id_table = driver_ops->id_table;
		while (id_table && id_table->vendor) {
			cnss_pr_info("Host driver is built for PCIe device ID 0x%x\n",
				     id_table->device);
			id_table++;
		}
		cnss_pr_err("Enumerated PCIe device id is 0x%x, reject unsupported driver\n",
			    pci_priv->device_id);
		return -ENODEV;
	}

	if (driver_ops->chip_version != CNSS_CHIP_VER_ANY &&
	    driver_ops->chip_version != plat_priv->device_version.major_version) {
		cnss_pr_err("Driver built for chip ver 0x%x, enumerated ver 0x%x, reject unsupported driver\n",
			    driver_ops->chip_version,
			    plat_priv->device_version.major_version);
		return -ENODEV;
	}

	cnss_get_driver_mode_update_fw_name(plat_priv);
	set_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state);

	if (!plat_priv->cbc_enabled ||
	    test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state))
		goto register_driver;

	pci_priv->driver_ops = driver_ops;
	/* If Cold Boot Calibration is enabled, it is the 1st step in init
	 * sequence. CBC is done on file system_ready trigger. Qcacld will be
	 * loaded from vendor_modprobe.sh at early boot and must be deferred
	 * until CBC is complete.
	 */
	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_CALIBRATION);
	INIT_DELAYED_WORK(&plat_priv->wlan_reg_driver_work,
			  cnss_wlan_reg_driver_work);
	schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
			      msecs_to_jiffies(timeout));
	cnss_pr_info("WLAN register driver deferred for Calibration\n");
	return 0;
register_driver:
	reinit_completion(&plat_priv->power_up_complete);
	ret = cnss_driver_event_post(plat_priv,
				     CNSS_DRIVER_EVENT_REGISTER_DRIVER,
				     CNSS_EVENT_SYNC_UNKILLABLE,
				     driver_ops);

	return ret;
}
EXPORT_SYMBOL(cnss_wlan_register_driver);
3681 
/* Public entry point for the WLAN host driver to unregister. Under the
 * driver_ops_lock, waits for any in-flight power-up (except QCA6174,
 * which skips this wait), then for any in-flight recovery, before posting
 * a synchronous UNREGISTER_DRIVER event. Timeouts assert rather than
 * abort — the unregister event is posted regardless.
 */
void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
{
	struct cnss_plat_data *plat_priv;
	int ret = 0;
	unsigned int timeout;

	plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops);
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return;
	}

	mutex_lock(&plat_priv->driver_ops_lock);

	if (plat_priv->device_id == QCA6174_DEVICE_ID)
		goto skip_wait_power_up;

	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_WLAN_WATCHDOG);
	ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
					  msecs_to_jiffies(timeout));
	if (!ret) {
		cnss_pr_err("Timeout (%ums) waiting for driver power up to complete\n",
			    timeout);
		CNSS_ASSERT(0);
	}

skip_wait_power_up:
	if (!test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
	    !test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		goto skip_wait_recovery;

	/* Recovery is in flight; let it finish before tearing down */
	reinit_completion(&plat_priv->recovery_complete);
	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY);
	ret = wait_for_completion_timeout(&plat_priv->recovery_complete,
					  msecs_to_jiffies(timeout));
	if (!ret) {
		cnss_pr_err("Timeout (%ums) waiting for recovery to complete\n",
			    timeout);
		CNSS_ASSERT(0);
	}

skip_wait_recovery:
	cnss_driver_event_post(plat_priv,
			       CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
			       CNSS_EVENT_SYNC_UNKILLABLE, NULL);

	mutex_unlock(&plat_priv->driver_ops_lock);
}
EXPORT_SYMBOL(cnss_wlan_unregister_driver);
3731 
3732 int cnss_pci_register_driver_hdlr(struct cnss_pci_data *pci_priv,
3733 				  void *data)
3734 {
3735 	int ret = 0;
3736 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3737 
3738 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3739 		cnss_pr_dbg("Reboot or shutdown is in progress, ignore register driver\n");
3740 		return -EINVAL;
3741 	}
3742 
3743 	set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
3744 	pci_priv->driver_ops = data;
3745 
3746 	ret = cnss_pci_dev_powerup(pci_priv);
3747 	if (ret) {
3748 		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
3749 		pci_priv->driver_ops = NULL;
3750 	} else {
3751 		set_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);
3752 	}
3753 
3754 	return ret;
3755 }
3756 
/* Event handler for UNREGISTER_DRIVER: flag the unload, shut the device
 * down, then drop the host driver ops and the REGISTERED state bit.
 * Order matters — UNLOADING must be visible before shutdown starts, and
 * driver_ops stays valid through the shutdown callbacks.
 */
int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -EINVAL;

	plat_priv = pci_priv->plat_priv;
	set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
	cnss_pci_dev_shutdown(pci_priv);
	pci_priv->driver_ops = NULL;
	clear_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);

	return 0;
}
3772 
3773 static int cnss_pci_suspend_driver(struct cnss_pci_data *pci_priv)
3774 {
3775 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3776 	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
3777 	int ret = 0;
3778 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3779 
3780 	pm_message_t state = { .event = PM_EVENT_SUSPEND };
3781 
3782 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
3783 	    driver_ops && driver_ops->suspend) {
3784 		ret = driver_ops->suspend(pci_dev, state);
3785 		if (ret) {
3786 			cnss_pr_err("Failed to suspend host driver, err = %d\n",
3787 				    ret);
3788 			ret = -EAGAIN;
3789 		}
3790 	}
3791 
3792 	return ret;
3793 }
3794 
3795 static int cnss_pci_resume_driver(struct cnss_pci_data *pci_priv)
3796 {
3797 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3798 	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
3799 	int ret = 0;
3800 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3801 
3802 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
3803 	    driver_ops && driver_ops->resume) {
3804 		ret = driver_ops->resume(pci_dev);
3805 		if (ret)
3806 			cnss_pr_err("Failed to resume host driver, err = %d\n",
3807 				    ret);
3808 	}
3809 
3810 	return ret;
3811 }
3812 
/* Suspend the PCI bus: move MHI to suspend, save config space and put
 * the device in D3hot (skipped on the DRV path, where config handling
 * is presumably owned elsewhere — confirm against the DRV design), then
 * take the link down. If the link-down step fails, the whole sequence is
 * rolled back so the device stays usable.
 *
 * Returns 0 on success, -EAGAIN when MHI suspend or link-down fails.
 */
int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	int ret = 0;

	/* Nothing to do if the link is already down */
	if (pci_priv->pci_link_state == PCI_LINK_DOWN)
		goto out;

	if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) {
		ret = -EAGAIN;
		goto out;
	}

	if (pci_priv->drv_connected_last)
		goto skip_disable_pci;

	pci_clear_master(pci_dev);
	cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
	pci_disable_device(pci_dev);

	ret = pci_set_power_state(pci_dev, PCI_D3hot);
	if (ret)
		cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);

skip_disable_pci:
	if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
		ret = -EAGAIN;
		goto resume_mhi;
	}
	pci_priv->pci_link_state = PCI_LINK_DOWN;

	return 0;

/* Link-down failed: undo the device disable and MHI suspend */
resume_mhi:
	if (!pci_is_enabled(pci_dev))
		if (pci_enable_device(pci_dev))
			cnss_pr_err("Failed to enable PCI device\n");
	if (pci_priv->saved_state)
		cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
	pci_set_master(pci_dev);
	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
out:
	return ret;
}
3857 
/* Resume the PCI bus: bring the link back up, re-enable the device and
 * restore config space (skipped on the DRV path), then move MHI to
 * resume. A link-up failure is treated as a link-down event and reported
 * via cnss_pci_link_down().
 *
 * Returns 0 on success, -EAGAIN when the link cannot be resumed, or the
 * pci_enable_device() error.
 */
int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	int ret = 0;

	/* Nothing to do if the link is already up */
	if (pci_priv->pci_link_state == PCI_LINK_UP)
		goto out;

	if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
		cnss_fatal_err("Failed to resume PCI link from suspend\n");
		cnss_pci_link_down(&pci_dev->dev);
		ret = -EAGAIN;
		goto out;
	}

	pci_priv->pci_link_state = PCI_LINK_UP;

	if (pci_priv->drv_connected_last)
		goto skip_enable_pci;

	ret = pci_enable_device(pci_dev);
	if (ret) {
		cnss_pr_err("Failed to enable PCI device, err = %d\n",
			    ret);
		goto out;
	}

	if (pci_priv->saved_state)
		cnss_set_pci_config_space(pci_priv,
					  RESTORE_PCI_CONFIG_SPACE);
	pci_set_master(pci_dev);

skip_enable_pci:
	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
out:
	return ret;
}
3895 
/* System PM suspend callback for the cnss PCI device. Handles the early
 * post-enumeration case (MHI only initialized) by optionally powering
 * the device off; otherwise suspends the host driver first and then the
 * bus, rolling back the driver suspend if the bus suspend fails.
 *
 * Returns 0 on success; -EAGAIN rejects the suspend (e.g. firmware does
 * not support non-DRV suspend, or the host driver refused).
 */
static int cnss_pci_suspend(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		goto out;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		goto out;

	if (!cnss_is_device_powered_on(plat_priv))
		goto out;

	/* No mhi state bit set if only finish pcie enumeration,
	 * so test_bit is not applicable to check if it is INIT state.
	 */
	if (pci_priv->mhi_state == CNSS_MHI_INIT) {
		bool suspend = cnss_should_suspend_pwroff(pci_dev);

		/* Do PCI link suspend and power off in the LPM case
		 * if chipset didn't do that after pcie enumeration.
		 */
		if (!suspend) {
			ret = cnss_suspend_pci_link(pci_priv);
			if (ret)
				cnss_pr_err("Failed to suspend PCI link, err = %d\n",
					    ret);
			cnss_power_off_device(plat_priv);
			goto out;
		}
	}

	/* Record whether the DRV path is available for this suspend;
	 * without DRV support from firmware the suspend is rejected.
	 */
	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
	    pci_priv->drv_supported) {
		pci_priv->drv_connected_last =
			cnss_pci_get_drv_connected(pci_priv);
		if (!pci_priv->drv_connected_last) {
			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
			ret = -EAGAIN;
			goto out;
		}
	}

	set_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);

	ret = cnss_pci_suspend_driver(pci_priv);
	if (ret)
		goto clear_flag;

	if (!pci_priv->disable_pc) {
		mutex_lock(&pci_priv->bus_lock);
		ret = cnss_pci_suspend_bus(pci_priv);
		mutex_unlock(&pci_priv->bus_lock);
		if (ret)
			goto resume_driver;
	}

	cnss_pci_set_monitor_wake_intr(pci_priv, false);

	return 0;

/* Roll back in reverse order on failure */
resume_driver:
	cnss_pci_resume_driver(pci_priv);
clear_flag:
	pci_priv->drv_connected_last = 0;
	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
out:
	return ret;
}
3969 
/* System resume callback: reverse of cnss_pci_suspend().
 *
 * Resumes the bus first (unless power collapse is disabled), then the
 * client driver, and clears the suspend/resume bookkeeping. Skipped
 * entirely when a link-down recovery is pending or the device is off.
 *
 * Returns 0 on success or a negative errno.
 */
static int cnss_pci_resume(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		goto out;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		goto out;

	/* Link-down recovery owns the device; do not touch it here. */
	if (pci_priv->pci_link_down_ind)
		goto out;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		goto out;

	if (!pci_priv->disable_pc) {
		ret = cnss_pci_resume_bus(pci_priv);
		if (ret)
			goto out;
	}

	ret = cnss_pci_resume_driver(pci_priv);

	/* Reset DRV connection snapshot taken during suspend. */
	pci_priv->drv_connected_last = 0;
	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);

out:
	return ret;
}
4004 
4005 static int cnss_pci_suspend_noirq(struct device *dev)
4006 {
4007 	int ret = 0;
4008 	struct pci_dev *pci_dev = to_pci_dev(dev);
4009 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4010 	struct cnss_wlan_driver *driver_ops;
4011 	struct cnss_plat_data *plat_priv;
4012 
4013 	if (!pci_priv)
4014 		goto out;
4015 
4016 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4017 		goto out;
4018 
4019 	driver_ops = pci_priv->driver_ops;
4020 	plat_priv = pci_priv->plat_priv;
4021 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
4022 	    driver_ops && driver_ops->suspend_noirq)
4023 		ret = driver_ops->suspend_noirq(pci_dev);
4024 
4025 	if (pci_priv->disable_pc && !pci_dev->state_saved &&
4026 	    !pci_priv->plat_priv->use_pm_domain)
4027 		pci_save_state(pci_dev);
4028 
4029 out:
4030 	return ret;
4031 }
4032 
4033 static int cnss_pci_resume_noirq(struct device *dev)
4034 {
4035 	int ret = 0;
4036 	struct pci_dev *pci_dev = to_pci_dev(dev);
4037 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4038 	struct cnss_wlan_driver *driver_ops;
4039 	struct cnss_plat_data *plat_priv;
4040 
4041 	if (!pci_priv)
4042 		goto out;
4043 
4044 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4045 		goto out;
4046 
4047 	plat_priv = pci_priv->plat_priv;
4048 	driver_ops = pci_priv->driver_ops;
4049 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
4050 	    driver_ops && driver_ops->resume_noirq &&
4051 	    !pci_priv->pci_link_down_ind)
4052 		ret = driver_ops->resume_noirq(pci_dev);
4053 
4054 out:
4055 	return ret;
4056 }
4057 
/* Runtime PM suspend callback.
 *
 * Delegates to the client driver's runtime_suspend op when provided,
 * otherwise falls back to cnss_auto_suspend(). Returns -EAGAIN for any
 * transient condition (device off, link-down recovery, DRV not yet
 * connected) so the PM core retries later.
 */
static int cnss_pci_runtime_suspend(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;
	struct cnss_wlan_driver *driver_ops;

	if (!pci_priv)
		return -EAGAIN;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -EAGAIN;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		return -EAGAIN;

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is in progress!\n");
		return -EAGAIN;
	}

	/* With DRV supported (and not disabled by quirk), only allow the
	 * suspend once firmware reports the DRV connection.
	 */
	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
	    pci_priv->drv_supported) {
		pci_priv->drv_connected_last =
			cnss_pci_get_drv_connected(pci_priv);
		if (!pci_priv->drv_connected_last) {
			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
			return -EAGAIN;
		}
	}

	cnss_pr_vdbg("Runtime suspend start\n");

	driver_ops = pci_priv->driver_ops;
	if (driver_ops && driver_ops->runtime_ops &&
	    driver_ops->runtime_ops->runtime_suspend)
		ret = driver_ops->runtime_ops->runtime_suspend(pci_dev);
	else
		ret = cnss_auto_suspend(dev);

	/* On failure, drop the DRV snapshot taken above; on success it is
	 * kept until runtime resume completes.
	 */
	if (ret)
		pci_priv->drv_connected_last = 0;

	cnss_pr_vdbg("Runtime suspend status: %d\n", ret);

	return ret;
}
4107 
/* Runtime PM resume callback: counterpart of cnss_pci_runtime_suspend().
 *
 * Delegates to the client driver's runtime_resume op when provided,
 * otherwise falls back to cnss_auto_resume(). Returns -EAGAIN for
 * transient conditions so the PM core retries.
 */
static int cnss_pci_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_wlan_driver *driver_ops;

	if (!pci_priv)
		return -EAGAIN;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		return -EAGAIN;

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is in progress!\n");
		return -EAGAIN;
	}

	cnss_pr_vdbg("Runtime resume start\n");

	driver_ops = pci_priv->driver_ops;
	if (driver_ops && driver_ops->runtime_ops &&
	    driver_ops->runtime_ops->runtime_resume)
		ret = driver_ops->runtime_ops->runtime_resume(pci_dev);
	else
		ret = cnss_auto_resume(dev);

	/* Note the inverse of the suspend path: the DRV connection
	 * snapshot is cleared only on a successful resume.
	 */
	if (!ret)
		pci_priv->drv_connected_last = 0;

	cnss_pr_vdbg("Runtime resume status: %d\n", ret);

	return ret;
}
4142 
/* Runtime PM idle callback: queue an autosuspend request and return
 * -EBUSY so the PM core does not suspend immediately; the actual
 * suspend happens via the autosuspend delay.
 */
static int cnss_pci_runtime_idle(struct device *dev)
{
	cnss_pr_vdbg("Runtime idle\n");

	pm_request_autosuspend(dev);

	return -EBUSY;
}
4151 
4152 int cnss_wlan_pm_control(struct device *dev, bool vote)
4153 {
4154 	struct pci_dev *pci_dev = to_pci_dev(dev);
4155 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4156 	int ret = 0;
4157 
4158 	if (!pci_priv)
4159 		return -ENODEV;
4160 
4161 	ret = cnss_pci_disable_pc(pci_priv, vote);
4162 	if (ret)
4163 		return ret;
4164 
4165 	pci_priv->disable_pc = vote;
4166 	cnss_pr_dbg("%s PCIe power collapse\n", vote ? "disable" : "enable");
4167 
4168 	return 0;
4169 }
4170 EXPORT_SYMBOL(cnss_wlan_pm_control);
4171 
4172 static void cnss_pci_pm_runtime_get_record(struct cnss_pci_data *pci_priv,
4173 					   enum cnss_rtpm_id id)
4174 {
4175 	if (id >= RTPM_ID_MAX)
4176 		return;
4177 
4178 	atomic_inc(&pci_priv->pm_stats.runtime_get);
4179 	atomic_inc(&pci_priv->pm_stats.runtime_get_id[id]);
4180 	pci_priv->pm_stats.runtime_get_timestamp_id[id] =
4181 		cnss_get_host_timestamp(pci_priv->plat_priv);
4182 }
4183 
4184 static void cnss_pci_pm_runtime_put_record(struct cnss_pci_data *pci_priv,
4185 					   enum cnss_rtpm_id id)
4186 {
4187 	if (id >= RTPM_ID_MAX)
4188 		return;
4189 
4190 	atomic_inc(&pci_priv->pm_stats.runtime_put);
4191 	atomic_inc(&pci_priv->pm_stats.runtime_put_id[id]);
4192 	pci_priv->pm_stats.runtime_put_timestamp_id[id] =
4193 		cnss_get_host_timestamp(pci_priv->plat_priv);
4194 }
4195 
4196 void cnss_pci_pm_runtime_show_usage_count(struct cnss_pci_data *pci_priv)
4197 {
4198 	struct device *dev;
4199 
4200 	if (!pci_priv)
4201 		return;
4202 
4203 	dev = &pci_priv->pci_dev->dev;
4204 
4205 	cnss_pr_dbg("Runtime PM usage count: %d\n",
4206 		    atomic_read(&dev->power.usage_count));
4207 }
4208 
4209 int cnss_pci_pm_request_resume(struct cnss_pci_data *pci_priv)
4210 {
4211 	struct device *dev;
4212 	enum rpm_status status;
4213 
4214 	if (!pci_priv)
4215 		return -ENODEV;
4216 
4217 	dev = &pci_priv->pci_dev->dev;
4218 
4219 	status = dev->power.runtime_status;
4220 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4221 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4222 			     (void *)_RET_IP_);
4223 
4224 	return pm_request_resume(dev);
4225 }
4226 
4227 int cnss_pci_pm_runtime_resume(struct cnss_pci_data *pci_priv)
4228 {
4229 	struct device *dev;
4230 	enum rpm_status status;
4231 
4232 	if (!pci_priv)
4233 		return -ENODEV;
4234 
4235 	dev = &pci_priv->pci_dev->dev;
4236 
4237 	status = dev->power.runtime_status;
4238 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4239 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4240 			     (void *)_RET_IP_);
4241 
4242 	return pm_runtime_resume(dev);
4243 }
4244 
4245 int cnss_pci_pm_runtime_get(struct cnss_pci_data *pci_priv,
4246 			    enum cnss_rtpm_id id)
4247 {
4248 	struct device *dev;
4249 	enum rpm_status status;
4250 
4251 	if (!pci_priv)
4252 		return -ENODEV;
4253 
4254 	dev = &pci_priv->pci_dev->dev;
4255 
4256 	status = dev->power.runtime_status;
4257 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4258 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4259 			     (void *)_RET_IP_);
4260 
4261 	cnss_pci_pm_runtime_get_record(pci_priv, id);
4262 
4263 	return pm_runtime_get(dev);
4264 }
4265 
4266 int cnss_pci_pm_runtime_get_sync(struct cnss_pci_data *pci_priv,
4267 				 enum cnss_rtpm_id id)
4268 {
4269 	struct device *dev;
4270 	enum rpm_status status;
4271 
4272 	if (!pci_priv)
4273 		return -ENODEV;
4274 
4275 	dev = &pci_priv->pci_dev->dev;
4276 
4277 	status = dev->power.runtime_status;
4278 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4279 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4280 			     (void *)_RET_IP_);
4281 
4282 	cnss_pci_pm_runtime_get_record(pci_priv, id);
4283 
4284 	return pm_runtime_get_sync(dev);
4285 }
4286 
4287 void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv,
4288 				      enum cnss_rtpm_id id)
4289 {
4290 	if (!pci_priv)
4291 		return;
4292 
4293 	cnss_pci_pm_runtime_get_record(pci_priv, id);
4294 	pm_runtime_get_noresume(&pci_priv->pci_dev->dev);
4295 }
4296 
4297 int cnss_pci_pm_runtime_put_autosuspend(struct cnss_pci_data *pci_priv,
4298 					enum cnss_rtpm_id id)
4299 {
4300 	struct device *dev;
4301 
4302 	if (!pci_priv)
4303 		return -ENODEV;
4304 
4305 	dev = &pci_priv->pci_dev->dev;
4306 
4307 	if (atomic_read(&dev->power.usage_count) == 0) {
4308 		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
4309 		return -EINVAL;
4310 	}
4311 
4312 	cnss_pci_pm_runtime_put_record(pci_priv, id);
4313 
4314 	return pm_runtime_put_autosuspend(&pci_priv->pci_dev->dev);
4315 }
4316 
4317 void cnss_pci_pm_runtime_put_noidle(struct cnss_pci_data *pci_priv,
4318 				    enum cnss_rtpm_id id)
4319 {
4320 	struct device *dev;
4321 
4322 	if (!pci_priv)
4323 		return;
4324 
4325 	dev = &pci_priv->pci_dev->dev;
4326 
4327 	if (atomic_read(&dev->power.usage_count) == 0) {
4328 		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
4329 		return;
4330 	}
4331 
4332 	cnss_pci_pm_runtime_put_record(pci_priv, id);
4333 	pm_runtime_put_noidle(&pci_priv->pci_dev->dev);
4334 }
4335 
4336 void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv)
4337 {
4338 	if (!pci_priv)
4339 		return;
4340 
4341 	pm_runtime_mark_last_busy(&pci_priv->pci_dev->dev);
4342 }
4343 
/* Default runtime-suspend path used when the client driver provides no
 * runtime ops. Suspends the bus (unless a QMI send is in flight),
 * marks the device auto-suspended, arms the wake interrupt monitor and
 * drops the bus bandwidth vote.
 *
 * Returns 0 on success or a negative errno.
 */
int cnss_auto_suspend(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	mutex_lock(&pci_priv->bus_lock);
	/* Keep the bus up while QMI messages are outstanding; the last
	 * cnss_pci_qmi_send_put() will suspend it instead.
	 */
	if (!pci_priv->qmi_send_usage_count) {
		ret = cnss_pci_suspend_bus(pci_priv);
		if (ret) {
			mutex_unlock(&pci_priv->bus_lock);
			return ret;
		}
	}

	cnss_pci_set_auto_suspended(pci_priv, 1);
	mutex_unlock(&pci_priv->bus_lock);

	cnss_pci_set_monitor_wake_intr(pci_priv, true);

	/* For suspend temporarily set bandwidth vote to NONE and dont save in
	 * current_bw_vote as in resume path we should vote for last used
	 * bandwidth vote. Also ignore error if bw voting is not setup.
	 */
	cnss_setup_bus_bandwidth(plat_priv, CNSS_BUS_WIDTH_NONE, false);
	return 0;
}
EXPORT_SYMBOL(cnss_auto_suspend);
4380 
/* Default runtime-resume path: counterpart of cnss_auto_suspend().
 * Resumes the bus, clears the auto-suspended flag and restores the
 * last used bandwidth vote.
 *
 * Returns 0 on success or a negative errno.
 */
int cnss_auto_resume(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	mutex_lock(&pci_priv->bus_lock);
	ret = cnss_pci_resume_bus(pci_priv);
	if (ret) {
		mutex_unlock(&pci_priv->bus_lock);
		return ret;
	}

	cnss_pci_set_auto_suspended(pci_priv, 0);
	mutex_unlock(&pci_priv->bus_lock);

	/* Re-apply the bandwidth vote dropped by cnss_auto_suspend(). */
	cnss_request_bus_bandwidth(dev, plat_priv->icc.current_bw_vote);

	return 0;
}
EXPORT_SYMBOL(cnss_auto_resume);
4410 
/* Synchronously force the device awake through an MHI device-wake vote.
 *
 * @timeout_us: when non-zero, busy-wait up to this many microseconds;
 *              when zero, sleep-wait using the MHI controller timeout.
 *
 * No-op (returns 0) for device IDs that do not need the wake vote.
 * Returns 0 on success or a negative errno (-EAGAIN while a device
 * error is being notified).
 */
int cnss_pci_force_wake_request_sync(struct device *dev, int timeout_us)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;
	struct mhi_controller *mhi_ctrl;

	if (!pci_priv)
		return -ENODEV;

	/* Only these targets require an explicit wake vote. */
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return 0;
	}

	mhi_ctrl = pci_priv->mhi_ctrl;
	if (!mhi_ctrl)
		return -EINVAL;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		return -EAGAIN;

	if (timeout_us) {
		/* Busy wait for timeout_us */
		return cnss_mhi_device_get_sync_atomic(pci_priv,
						       timeout_us, false);
	} else {
		/* Sleep wait for mhi_ctrl->timeout_ms */
		return mhi_device_get_sync(mhi_ctrl->mhi_dev);
	}
}
EXPORT_SYMBOL(cnss_pci_force_wake_request_sync);
4453 
/* Asynchronously vote to keep the device awake via MHI device-wake.
 * Must be balanced by cnss_pci_force_wake_release().
 *
 * No-op (returns 0) for device IDs that do not need the wake vote.
 * Returns 0 on success or a negative errno (-EAGAIN while a device
 * error is being notified).
 */
int cnss_pci_force_wake_request(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;
	struct mhi_controller *mhi_ctrl;

	if (!pci_priv)
		return -ENODEV;

	/* Only these targets require an explicit wake vote. */
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return 0;
	}

	mhi_ctrl = pci_priv->mhi_ctrl;
	if (!mhi_ctrl)
		return -EINVAL;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		return -EAGAIN;

	mhi_device_get(mhi_ctrl->mhi_dev);

	return 0;
}
EXPORT_SYMBOL(cnss_pci_force_wake_request);
4491 
4492 int cnss_pci_is_device_awake(struct device *dev)
4493 {
4494 	struct pci_dev *pci_dev = to_pci_dev(dev);
4495 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4496 	struct mhi_controller *mhi_ctrl;
4497 
4498 	if (!pci_priv)
4499 		return -ENODEV;
4500 
4501 	switch (pci_priv->device_id) {
4502 	case QCA6390_DEVICE_ID:
4503 	case QCA6490_DEVICE_ID:
4504 	case KIWI_DEVICE_ID:
4505 	case MANGO_DEVICE_ID:
4506 	case PEACH_DEVICE_ID:
4507 		break;
4508 	default:
4509 		return 0;
4510 	}
4511 
4512 	mhi_ctrl = pci_priv->mhi_ctrl;
4513 	if (!mhi_ctrl)
4514 		return -EINVAL;
4515 
4516 	return (mhi_ctrl->dev_state == MHI_STATE_M0);
4517 }
4518 EXPORT_SYMBOL(cnss_pci_is_device_awake);
4519 
/* Release a wake vote taken by cnss_pci_force_wake_request().
 *
 * No-op (returns 0) for device IDs that do not need the wake vote.
 * Returns 0 on success or a negative errno (-EAGAIN while a device
 * error is being notified).
 */
int cnss_pci_force_wake_release(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;
	struct mhi_controller *mhi_ctrl;

	if (!pci_priv)
		return -ENODEV;

	/* Only these targets use the explicit wake vote. */
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return 0;
	}

	mhi_ctrl = pci_priv->mhi_ctrl;
	if (!mhi_ctrl)
		return -EINVAL;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		return -EAGAIN;

	mhi_device_put(mhi_ctrl->mhi_dev);

	return 0;
}
EXPORT_SYMBOL(cnss_pci_force_wake_release);
4557 
/* Make sure the bus is awake before sending a QMI message.
 *
 * The first sender while the device is auto-suspended resumes the bus;
 * a usage count tracks outstanding senders so cnss_pci_qmi_send_put()
 * knows when the bus may be suspended again.
 *
 * Returns 0 on success or a negative errno.
 */
int cnss_pci_qmi_send_get(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	mutex_lock(&pci_priv->bus_lock);
	/* Only the first sender after auto-suspend resumes the bus. */
	if (cnss_pci_get_auto_suspended(pci_priv) &&
	    !pci_priv->qmi_send_usage_count)
		ret = cnss_pci_resume_bus(pci_priv);
	pci_priv->qmi_send_usage_count++;
	cnss_pr_buf("Increased QMI send usage count to %d\n",
		    pci_priv->qmi_send_usage_count);
	mutex_unlock(&pci_priv->bus_lock);

	return ret;
}
4576 
/* Counterpart of cnss_pci_qmi_send_get(): drop the QMI send usage
 * count and, when the last sender finishes while the device is marked
 * auto-suspended (and not in a device-down state), re-suspend the bus.
 *
 * Returns 0 on success or a negative errno.
 */
int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	mutex_lock(&pci_priv->bus_lock);
	if (pci_priv->qmi_send_usage_count)
		pci_priv->qmi_send_usage_count--;
	cnss_pr_buf("Decreased QMI send usage count to %d\n",
		    pci_priv->qmi_send_usage_count);
	/* Last sender: restore the suspended state deferred by _get(). */
	if (cnss_pci_get_auto_suspended(pci_priv) &&
	    !pci_priv->qmi_send_usage_count &&
	    !cnss_pcie_is_device_down(pci_priv))
		ret = cnss_pci_suspend_bus(pci_priv);
	mutex_unlock(&pci_priv->bus_lock);

	return ret;
}
4597 
4598 int cnss_send_buffer_to_afcmem(struct device *dev, const uint8_t *afcdb,
4599 			       uint32_t len, uint8_t slotid)
4600 {
4601 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4602 	struct cnss_fw_mem *fw_mem;
4603 	void *mem = NULL;
4604 	int i, ret;
4605 	u32 *status;
4606 
4607 	if (!plat_priv)
4608 		return -EINVAL;
4609 
4610 	fw_mem = plat_priv->fw_mem;
4611 	if (slotid >= AFC_MAX_SLOT) {
4612 		cnss_pr_err("Invalid slot id %d\n", slotid);
4613 		ret = -EINVAL;
4614 		goto err;
4615 	}
4616 	if (len > AFC_SLOT_SIZE) {
4617 		cnss_pr_err("len %d greater than slot size", len);
4618 		ret = -EINVAL;
4619 		goto err;
4620 	}
4621 
4622 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4623 		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
4624 			mem = fw_mem[i].va;
4625 			status = mem + (slotid * AFC_SLOT_SIZE);
4626 			break;
4627 		}
4628 	}
4629 
4630 	if (!mem) {
4631 		cnss_pr_err("AFC mem is not available\n");
4632 		ret = -ENOMEM;
4633 		goto err;
4634 	}
4635 
4636 	memcpy(mem + (slotid * AFC_SLOT_SIZE), afcdb, len);
4637 	if (len < AFC_SLOT_SIZE)
4638 		memset(mem + (slotid * AFC_SLOT_SIZE) + len,
4639 		       0, AFC_SLOT_SIZE - len);
4640 	status[AFC_AUTH_STATUS_OFFSET] = cpu_to_le32(AFC_AUTH_SUCCESS);
4641 
4642 	return 0;
4643 err:
4644 	return ret;
4645 }
4646 EXPORT_SYMBOL(cnss_send_buffer_to_afcmem);
4647 
4648 int cnss_reset_afcmem(struct device *dev, uint8_t slotid)
4649 {
4650 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4651 	struct cnss_fw_mem *fw_mem;
4652 	void *mem = NULL;
4653 	int i, ret;
4654 
4655 	if (!plat_priv)
4656 		return -EINVAL;
4657 
4658 	fw_mem = plat_priv->fw_mem;
4659 	if (slotid >= AFC_MAX_SLOT) {
4660 		cnss_pr_err("Invalid slot id %d\n", slotid);
4661 		ret = -EINVAL;
4662 		goto err;
4663 	}
4664 
4665 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4666 		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
4667 			mem = fw_mem[i].va;
4668 			break;
4669 		}
4670 	}
4671 
4672 	if (!mem) {
4673 		cnss_pr_err("AFC mem is not available\n");
4674 		ret = -ENOMEM;
4675 		goto err;
4676 	}
4677 
4678 	memset(mem + (slotid * AFC_SLOT_SIZE), 0, AFC_SLOT_SIZE);
4679 	return 0;
4680 
4681 err:
4682 	return ret;
4683 }
4684 EXPORT_SYMBOL(cnss_reset_afcmem);
4685 
/* Allocate DMA memory for every firmware-requested segment that is not
 * yet backed. If an allocation with DMA_ATTR_FORCE_CONTIGUOUS fails,
 * retry once without that attribute; a second failure asserts and
 * returns -ENOMEM.
 */
int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
	struct device *dev = &pci_priv->pci_dev->dev;
	int i;

	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (!fw_mem[i].va && fw_mem[i].size) {
retry:
			fw_mem[i].va =
				dma_alloc_attrs(dev, fw_mem[i].size,
						&fw_mem[i].pa, GFP_KERNEL,
						fw_mem[i].attrs);

			if (!fw_mem[i].va) {
				/* Drop the contiguous requirement and retry
				 * once before giving up.
				 */
				if ((fw_mem[i].attrs &
				    DMA_ATTR_FORCE_CONTIGUOUS)) {
					fw_mem[i].attrs &=
						~DMA_ATTR_FORCE_CONTIGUOUS;

					cnss_pr_dbg("Fallback to non-contiguous memory for FW, Mem type: %u\n",
						    fw_mem[i].type);
					goto retry;
				}
				cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n",
					    fw_mem[i].size, fw_mem[i].type);
				CNSS_ASSERT(0);
				return -ENOMEM;
			}
		}
	}

	return 0;
}
4721 
/* Free all firmware memory segments allocated by cnss_pci_alloc_fw_mem()
 * and reset the segment table.
 */
static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
	struct device *dev = &pci_priv->pci_dev->dev;
	int i;

	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (fw_mem[i].va && fw_mem[i].size) {
			cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
				    fw_mem[i].va, &fw_mem[i].pa,
				    fw_mem[i].size, fw_mem[i].type);
			dma_free_attrs(dev, fw_mem[i].size,
				       fw_mem[i].va, fw_mem[i].pa,
				       fw_mem[i].attrs);
			/* Clear the entry so a later alloc starts fresh. */
			fw_mem[i].va = NULL;
			fw_mem[i].pa = 0;
			fw_mem[i].size = 0;
			fw_mem[i].type = 0;
		}
	}

	plat_priv->fw_mem_seg_len = 0;
}
4746 
/* Allocate DMA memory for QDSS trace segments on a best-effort basis:
 * on the first failed segment, truncate the segment list to what was
 * successfully allocated instead of failing the call.
 *
 * Always returns 0.
 */
int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
	int i, j;

	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
		if (!qdss_mem[i].va && qdss_mem[i].size) {
			qdss_mem[i].va =
				dma_alloc_coherent(&pci_priv->pci_dev->dev,
						   qdss_mem[i].size,
						   &qdss_mem[i].pa,
						   GFP_KERNEL);
			if (!qdss_mem[i].va) {
				cnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chuck-ID: %d\n",
					    qdss_mem[i].size,
					    qdss_mem[i].type, i);
				break;
			}
		}
	}

	/* Best-effort allocation for QDSS trace */
	if (i < plat_priv->qdss_mem_seg_len) {
		/* Invalidate the segments that were not allocated. */
		for (j = i; j < plat_priv->qdss_mem_seg_len; j++) {
			qdss_mem[j].type = 0;
			qdss_mem[j].size = 0;
		}
		plat_priv->qdss_mem_seg_len = i;
	}

	return 0;
}
4780 
/* Free all QDSS trace segments allocated by cnss_pci_alloc_qdss_mem()
 * and reset the segment table.
 */
void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
	int i;

	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
		if (qdss_mem[i].va && qdss_mem[i].size) {
			cnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n",
				    &qdss_mem[i].pa, qdss_mem[i].size,
				    qdss_mem[i].type);
			dma_free_coherent(&pci_priv->pci_dev->dev,
					  qdss_mem[i].size, qdss_mem[i].va,
					  qdss_mem[i].pa);
			/* Clear the entry so a later alloc starts fresh. */
			qdss_mem[i].va = NULL;
			qdss_mem[i].pa = 0;
			qdss_mem[i].size = 0;
			qdss_mem[i].type = 0;
		}
	}
	plat_priv->qdss_mem_seg_len = 0;
}
4803 
4804 int cnss_pci_load_tme_patch(struct cnss_pci_data *pci_priv)
4805 {
4806 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4807 	struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem;
4808 	char filename[MAX_FIRMWARE_NAME_LEN];
4809 	char *tme_patch_filename = NULL;
4810 	const struct firmware *fw_entry;
4811 	int ret = 0;
4812 
4813 	switch (pci_priv->device_id) {
4814 	case PEACH_DEVICE_ID:
4815 		tme_patch_filename = TME_PATCH_FILE_NAME;
4816 		break;
4817 	case QCA6174_DEVICE_ID:
4818 	case QCA6290_DEVICE_ID:
4819 	case QCA6390_DEVICE_ID:
4820 	case QCA6490_DEVICE_ID:
4821 	case KIWI_DEVICE_ID:
4822 	case MANGO_DEVICE_ID:
4823 	default:
4824 		cnss_pr_dbg("TME-L not supported for device ID: (0x%x)\n",
4825 			    pci_priv->device_id);
4826 		return 0;
4827 	}
4828 
4829 	if (!tme_lite_mem->va && !tme_lite_mem->size) {
4830 		cnss_pci_add_fw_prefix_name(pci_priv, filename,
4831 					    tme_patch_filename);
4832 
4833 		ret = firmware_request_nowarn(&fw_entry, filename,
4834 					      &pci_priv->pci_dev->dev);
4835 		if (ret) {
4836 			cnss_pr_err("Failed to load TME-L patch: %s, ret: %d\n",
4837 				    filename, ret);
4838 			return ret;
4839 		}
4840 
4841 		tme_lite_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
4842 						fw_entry->size, &tme_lite_mem->pa,
4843 						GFP_KERNEL);
4844 		if (!tme_lite_mem->va) {
4845 			cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
4846 				    fw_entry->size);
4847 			release_firmware(fw_entry);
4848 			return -ENOMEM;
4849 		}
4850 
4851 		memcpy(tme_lite_mem->va, fw_entry->data, fw_entry->size);
4852 		tme_lite_mem->size = fw_entry->size;
4853 		release_firmware(fw_entry);
4854 	}
4855 
4856 	return 0;
4857 }
4858 
/* Free the TME-L patch buffer allocated by cnss_pci_load_tme_patch()
 * and reset its bookkeeping (safe to call when nothing was loaded).
 */
static void cnss_pci_free_tme_lite_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem;

	if (tme_lite_mem->va && tme_lite_mem->size) {
		cnss_pr_dbg("Freeing memory for TME patch, va: 0x%pK, pa: %pa, size: 0x%zx\n",
			    tme_lite_mem->va, &tme_lite_mem->pa, tme_lite_mem->size);
		dma_free_coherent(&pci_priv->pci_dev->dev, tme_lite_mem->size,
				  tme_lite_mem->va, tme_lite_mem->pa);
	}

	tme_lite_mem->va = NULL;
	tme_lite_mem->pa = 0;
	tme_lite_mem->size = 0;
}
4875 
/* Load the M3 / PHY ucode firmware image into a DMA-coherent buffer.
 *
 * The file name depends on the target: older targets use m3.bin,
 * KIWI/MANGO/PEACH v2 use phy_ucode20.elf, everything else defaults to
 * phy_ucode.elf. QCA6174 does not take a phy image at all (-EINVAL).
 *
 * The buffer persists until cnss_pci_free_m3_mem(); repeated calls
 * reuse the buffer already loaded. Returns 0 on success or a negative
 * errno on failure.
 */
int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
	char filename[MAX_FIRMWARE_NAME_LEN];
	char *phy_filename = DEFAULT_PHY_UCODE_FILE_NAME;
	const struct firmware *fw_entry;
	int ret = 0;

	/* Use forward compatibility here since for any recent device
	 * it should use DEFAULT_PHY_UCODE_FILE_NAME.
	 */
	switch (pci_priv->device_id) {
	case QCA6174_DEVICE_ID:
		cnss_pr_err("Invalid device ID (0x%x) to load phy image\n",
			    pci_priv->device_id);
		return -EINVAL;
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
		phy_filename = DEFAULT_PHY_M3_FILE_NAME;
		break;
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		/* v2 silicon ships a dedicated ucode image. */
		switch (plat_priv->device_version.major_version) {
		case FW_V2_NUMBER:
			phy_filename = PHY_UCODE_V2_FILE_NAME;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (!m3_mem->va && !m3_mem->size) {
		cnss_pci_add_fw_prefix_name(pci_priv, filename,
					    phy_filename);

		ret = firmware_request_nowarn(&fw_entry, filename,
					      &pci_priv->pci_dev->dev);
		if (ret) {
			cnss_pr_err("Failed to load M3 image: %s\n", filename);
			return ret;
		}

		m3_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
						fw_entry->size, &m3_mem->pa,
						GFP_KERNEL);
		if (!m3_mem->va) {
			cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
				    fw_entry->size);
			release_firmware(fw_entry);
			return -ENOMEM;
		}

		memcpy(m3_mem->va, fw_entry->data, fw_entry->size);
		m3_mem->size = fw_entry->size;
		release_firmware(fw_entry);
	}

	return 0;
}
4941 
/* Free the M3 image buffer allocated by cnss_pci_load_m3() and reset
 * its bookkeeping (safe to call when nothing was loaded).
 */
static void cnss_pci_free_m3_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;

	if (m3_mem->va && m3_mem->size) {
		cnss_pr_dbg("Freeing memory for M3, va: 0x%pK, pa: %pa, size: 0x%zx\n",
			    m3_mem->va, &m3_mem->pa, m3_mem->size);
		dma_free_coherent(&pci_priv->pci_dev->dev, m3_mem->size,
				  m3_mem->va, m3_mem->pa);
	}

	m3_mem->va = NULL;
	m3_mem->pa = 0;
	m3_mem->size = 0;
}
4958 
#ifdef CONFIG_FREE_M3_BLOB_MEM
/* With CONFIG_FREE_M3_BLOB_MEM, releasing the blob memory frees the
 * M3 image buffer.
 */
void cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv)
{
	cnss_pci_free_m3_mem(pci_priv);
}
#else
/* Default build keeps the M3 blob resident; this is a no-op. */
void cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv)
{
}
#endif
4969 
/* Load the AUX ucode image (aux_ucode.elf) into a DMA-coherent buffer.
 *
 * The buffer persists until cnss_pci_free_aux_mem(); repeated calls
 * reuse the buffer already loaded. Returns 0 on success or a negative
 * errno on failure.
 */
int cnss_pci_load_aux(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem;
	char filename[MAX_FIRMWARE_NAME_LEN];
	char *aux_filename = DEFAULT_AUX_FILE_NAME;
	const struct firmware *fw_entry;
	int ret = 0;

	if (!aux_mem->va && !aux_mem->size) {
		cnss_pci_add_fw_prefix_name(pci_priv, filename,
					    aux_filename);

		ret = firmware_request_nowarn(&fw_entry, filename,
					      &pci_priv->pci_dev->dev);
		if (ret) {
			cnss_pr_err("Failed to load AUX image: %s\n", filename);
			return ret;
		}

		aux_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
						fw_entry->size, &aux_mem->pa,
						GFP_KERNEL);
		if (!aux_mem->va) {
			cnss_pr_err("Failed to allocate memory for AUX, size: 0x%zx\n",
				    fw_entry->size);
			release_firmware(fw_entry);
			return -ENOMEM;
		}

		memcpy(aux_mem->va, fw_entry->data, fw_entry->size);
		aux_mem->size = fw_entry->size;
		release_firmware(fw_entry);
	}

	return 0;
}
5007 
/* Free the AUX image buffer allocated by cnss_pci_load_aux() and reset
 * its bookkeeping (safe to call when nothing was loaded).
 */
static void cnss_pci_free_aux_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem;

	if (aux_mem->va && aux_mem->size) {
		cnss_pr_dbg("Freeing memory for AUX, va: 0x%pK, pa: %pa, size: 0x%zx\n",
			    aux_mem->va, &aux_mem->pa, aux_mem->size);
		dma_free_coherent(&pci_priv->pci_dev->dev, aux_mem->size,
				  aux_mem->va, aux_mem->pa);
	}

	aux_mem->va = NULL;
	aux_mem->pa = 0;
	aux_mem->size = 0;
}
5024 
/* Handle a timeout waiting for the firmware-ready indication: schedule
 * recovery, except during cold-boot calibration where the timeout is
 * tolerated.
 */
void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return;

	cnss_fatal_err("Timeout waiting for FW ready indication\n");

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return;

	/* Calibration mode is allowed to miss the FW-ready signal. */
	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
		cnss_pr_dbg("Ignore FW ready timeout for calibration mode\n");
		return;
	}

	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
			       CNSS_REASON_TIMEOUT);
}
5046 
/* Detach from the SMMU by clearing the cached IOMMU domain pointer.
 * Only the reference is dropped; the domain itself is owned elsewhere.
 */
static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
{
	pci_priv->iommu_domain = NULL;
}
5051 
5052 int cnss_pci_get_iova(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
5053 {
5054 	if (!pci_priv)
5055 		return -ENODEV;
5056 
5057 	if (!pci_priv->smmu_iova_len)
5058 		return -EINVAL;
5059 
5060 	*addr = pci_priv->smmu_iova_start;
5061 	*size = pci_priv->smmu_iova_len;
5062 
5063 	return 0;
5064 }
5065 
5066 int cnss_pci_get_iova_ipa(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
5067 {
5068 	if (!pci_priv)
5069 		return -ENODEV;
5070 
5071 	if (!pci_priv->smmu_iova_ipa_len)
5072 		return -EINVAL;
5073 
5074 	*addr = pci_priv->smmu_iova_ipa_start;
5075 	*size = pci_priv->smmu_iova_ipa_len;
5076 
5077 	return 0;
5078 }
5079 
5080 bool cnss_pci_is_smmu_s1_enabled(struct cnss_pci_data *pci_priv)
5081 {
5082 	if (pci_priv)
5083 		return pci_priv->smmu_s1_enable;
5084 
5085 	return false;
5086 }
5087 struct iommu_domain *cnss_smmu_get_domain(struct device *dev)
5088 {
5089 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5090 
5091 	if (!pci_priv)
5092 		return NULL;
5093 
5094 	return pci_priv->iommu_domain;
5095 }
5096 EXPORT_SYMBOL(cnss_smmu_get_domain);
5097 
5098 int cnss_smmu_map(struct device *dev,
5099 		  phys_addr_t paddr, uint32_t *iova_addr, size_t size)
5100 {
5101 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5102 	struct cnss_plat_data *plat_priv;
5103 	unsigned long iova;
5104 	size_t len;
5105 	int ret = 0;
5106 	int flag = IOMMU_READ | IOMMU_WRITE;
5107 	struct pci_dev *root_port;
5108 	struct device_node *root_of_node;
5109 	bool dma_coherent = false;
5110 
5111 	if (!pci_priv)
5112 		return -ENODEV;
5113 
5114 	if (!iova_addr) {
5115 		cnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n",
5116 			    &paddr, size);
5117 		return -EINVAL;
5118 	}
5119 
5120 	plat_priv = pci_priv->plat_priv;
5121 
5122 	len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
5123 	iova = roundup(pci_priv->smmu_iova_ipa_current, PAGE_SIZE);
5124 
5125 	if (pci_priv->iommu_geometry &&
5126 	    iova >= pci_priv->smmu_iova_ipa_start +
5127 		    pci_priv->smmu_iova_ipa_len) {
5128 		cnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
5129 			    iova,
5130 			    &pci_priv->smmu_iova_ipa_start,
5131 			    pci_priv->smmu_iova_ipa_len);
5132 		return -ENOMEM;
5133 	}
5134 
5135 	if (!test_bit(DISABLE_IO_COHERENCY,
5136 		      &plat_priv->ctrl_params.quirks)) {
5137 		root_port = pcie_find_root_port(pci_priv->pci_dev);
5138 		if (!root_port) {
5139 			cnss_pr_err("Root port is null, so dma_coherent is disabled\n");
5140 		} else {
5141 			root_of_node = root_port->dev.of_node;
5142 			if (root_of_node && root_of_node->parent) {
5143 				dma_coherent =
5144 				    of_property_read_bool(root_of_node->parent,
5145 							  "dma-coherent");
5146 			cnss_pr_dbg("dma-coherent is %s\n",
5147 				    dma_coherent ? "enabled" : "disabled");
5148 			if (dma_coherent)
5149 				flag |= IOMMU_CACHE;
5150 			}
5151 		}
5152 	}
5153 
5154 	cnss_pr_dbg("IOMMU map: iova %lx, len %zu\n", iova, len);
5155 
5156 	ret = iommu_map(pci_priv->iommu_domain, iova,
5157 			rounddown(paddr, PAGE_SIZE), len, flag);
5158 	if (ret) {
5159 		cnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
5160 		return ret;
5161 	}
5162 
5163 	pci_priv->smmu_iova_ipa_current = iova + len;
5164 	*iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE));
5165 	cnss_pr_dbg("IOMMU map: iova_addr %lx\n", *iova_addr);
5166 
5167 	return 0;
5168 }
5169 EXPORT_SYMBOL(cnss_smmu_map);
5170 
/* Unmap a region previously mapped with cnss_smmu_map().
 *
 * @iova_addr/@size are rounded out to PAGE_SIZE boundaries before the
 * unmap. On success smmu_iova_ipa_current is rewound to @iova, which
 * assumes mappings are released in LIFO order — NOTE(review): confirm
 * callers never unmap out of order, or the allocator cursor will reclaim
 * still-live space.
 *
 * Returns 0 on success, -ENODEV/-ENOMEM/-EINVAL on failure.
 */
int cnss_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
	unsigned long iova;
	size_t unmapped;
	size_t len;

	if (!pci_priv)
		return -ENODEV;

	/* Page-align both ends of the requested IOVA range */
	iova = rounddown(iova_addr, PAGE_SIZE);
	len = roundup(size + iova_addr - iova, PAGE_SIZE);

	if (iova >= pci_priv->smmu_iova_ipa_start +
		    pci_priv->smmu_iova_ipa_len) {
		cnss_pr_err("Out of IOVA space to unmap, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
			    iova,
			    &pci_priv->smmu_iova_ipa_start,
			    pci_priv->smmu_iova_ipa_len);
		return -ENOMEM;
	}

	cnss_pr_dbg("IOMMU unmap: iova %lx, len %zu\n", iova, len);

	/* iommu_unmap returns the number of bytes actually unmapped */
	unmapped = iommu_unmap(pci_priv->iommu_domain, iova, len);
	if (unmapped != len) {
		cnss_pr_err("IOMMU unmap failed, unmapped = %zu, requested = %zu\n",
			    unmapped, len);
		return -EINVAL;
	}

	pci_priv->smmu_iova_ipa_current = iova;
	return 0;
}
EXPORT_SYMBOL(cnss_smmu_unmap);
5206 
/* Fill @info with a snapshot of SoC/firmware identification data:
 * BAR mapping, chip/board/SoC IDs, firmware version and build strings.
 * Returns -ENODEV when the PCI or platform context is missing.
 */
int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	/* BAR virtual address plus its physical base for the caller */
	info->va = pci_priv->bar;
	info->pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
	info->chip_id = plat_priv->chip_info.chip_id;
	info->chip_family = plat_priv->chip_info.chip_family;
	info->board_id = plat_priv->board_info.board_id;
	info->soc_id = plat_priv->soc_info.soc_id;
	info->fw_version = plat_priv->fw_version_info.fw_version;
	strlcpy(info->fw_build_timestamp,
		plat_priv->fw_version_info.fw_build_timestamp,
		sizeof(info->fw_build_timestamp));
	memcpy(&info->device_version, &plat_priv->device_version,
	       sizeof(info->device_version));
	memcpy(&info->dev_mem_info, &plat_priv->dev_mem_info,
	       sizeof(info->dev_mem_info));
	memcpy(&info->fw_build_id, &plat_priv->fw_build_id,
	       sizeof(info->fw_build_id));

	return 0;
}
EXPORT_SYMBOL(cnss_get_soc_info);
5239 
5240 int cnss_pci_get_user_msi_assignment(struct cnss_pci_data *pci_priv,
5241 				     char *user_name,
5242 				     int *num_vectors,
5243 				     u32 *user_base_data,
5244 				     u32 *base_vector)
5245 {
5246 	return cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
5247 					    user_name,
5248 					    num_vectors,
5249 					    user_base_data,
5250 					    base_vector);
5251 }
5252 
5253 static int cnss_pci_irq_set_affinity_hint(struct cnss_pci_data *pci_priv,
5254 					  unsigned int vec,
5255 					  const struct cpumask *cpumask)
5256 {
5257 	int ret;
5258 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5259 
5260 	ret = irq_set_affinity_hint(pci_irq_vector(pci_dev, vec),
5261 				    cpumask);
5262 
5263 	return ret;
5264 }
5265 
/* Allocate and configure MSI/MSI-X vectors for the WLAN PCI device.
 *
 * Resolves the MSI layout (normal or forced single-vector), allocates
 * exactly msi_config->total_vectors interrupts (with a one-MSI fallback
 * when that fails), then programs the MSI address/data the target uses.
 * Returns 0 on success or a negative errno; on failure any allocated
 * vectors are freed and msi_config is reset.
 *
 * QCA6174 uses legacy interrupts, so this is a no-op for it.
 */
static int cnss_pci_enable_msi(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	int num_vectors;
	struct cnss_msi_config *msi_config;

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return 0;

	if (cnss_pci_is_force_one_msi(pci_priv)) {
		ret = cnss_pci_get_one_msi_assignment(pci_priv);
		cnss_pr_dbg("force one msi\n");
	} else {
		ret = cnss_pci_get_msi_assignment(pci_priv);
	}
	if (ret) {
		cnss_pr_err("Failed to get MSI assignment, err = %d\n", ret);
		goto out;
	}

	msi_config = pci_priv->msi_config;
	if (!msi_config) {
		cnss_pr_err("msi_config is NULL!\n");
		ret = -EINVAL;
		goto out;
	}

	/* min == max: demand the full vector count or fail */
	num_vectors = pci_alloc_irq_vectors(pci_dev,
					    msi_config->total_vectors,
					    msi_config->total_vectors,
					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if ((num_vectors != msi_config->total_vectors) &&
	    !cnss_pci_fallback_one_msi(pci_priv, &num_vectors)) {
		cnss_pr_err("Failed to get enough MSI vectors (%d), available vectors = %d",
			    msi_config->total_vectors, num_vectors);
		/* num_vectors < 0 already holds an errno; keep it as ret */
		if (num_vectors >= 0)
			ret = -EINVAL;
		goto reset_msi_config;
	}

	/* With VT-d disabled on x86 platform, only one pci irq vector is
	 * allocated. Once suspend the irq may be migrated to CPU0 if it was
	 * affine to other CPU with one new msi vector re-allocated.
	 * The observation cause the issue about no irq handler for vector
	 * once resume.
	 * The fix is to set irq vector affinity to CPU0 before calling
	 * request_irq to avoid the irq migration.
	 */
	if (cnss_pci_is_one_msi(pci_priv)) {
		ret = cnss_pci_irq_set_affinity_hint(pci_priv,
						     0,
						     cpumask_of(0));
		if (ret) {
			cnss_pr_err("Failed to affinize irq vector to CPU0\n");
			goto free_msi_vector;
		}
	}

	if (cnss_pci_config_msi_addr(pci_priv)) {
		ret = -EINVAL;
		goto free_msi_vector;
	}

	if (cnss_pci_config_msi_data(pci_priv)) {
		ret = -EINVAL;
		goto free_msi_vector;
	}

	return 0;

free_msi_vector:
	/* Undo the CPU0 affinity hint before releasing the vectors */
	if (cnss_pci_is_one_msi(pci_priv))
		cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);
	pci_free_irq_vectors(pci_priv->pci_dev);
reset_msi_config:
	pci_priv->msi_config = NULL;
out:
	return ret;
}
5346 
5347 static void cnss_pci_disable_msi(struct cnss_pci_data *pci_priv)
5348 {
5349 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
5350 		return;
5351 
5352 	if (cnss_pci_is_one_msi(pci_priv))
5353 		cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);
5354 
5355 	pci_free_irq_vectors(pci_priv->pci_dev);
5356 }
5357 
5358 int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
5359 				 int *num_vectors, u32 *user_base_data,
5360 				 u32 *base_vector)
5361 {
5362 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5363 	struct cnss_msi_config *msi_config;
5364 	int idx;
5365 
5366 	if (!pci_priv)
5367 		return -ENODEV;
5368 
5369 	msi_config = pci_priv->msi_config;
5370 	if (!msi_config) {
5371 		cnss_pr_err("MSI is not supported.\n");
5372 		return -EINVAL;
5373 	}
5374 
5375 	for (idx = 0; idx < msi_config->total_users; idx++) {
5376 		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
5377 			*num_vectors = msi_config->users[idx].num_vectors;
5378 			*user_base_data = msi_config->users[idx].base_vector
5379 				+ pci_priv->msi_ep_base_data;
5380 			*base_vector = msi_config->users[idx].base_vector;
5381 			/*Add only single print for each user*/
5382 			if (print_optimize.msi_log_chk[idx]++)
5383 				goto skip_print;
5384 
5385 			cnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
5386 				    user_name, *num_vectors, *user_base_data,
5387 				    *base_vector);
5388 skip_print:
5389 			return 0;
5390 		}
5391 	}
5392 
5393 	cnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);
5394 
5395 	return -EINVAL;
5396 }
5397 EXPORT_SYMBOL(cnss_get_user_msi_assignment);
5398 
/* Translate an MSI vector index into its Linux IRQ number (negative
 * errno from pci_irq_vector() on failure).
 */
int cnss_get_msi_irq(struct device *dev, unsigned int vector)
{
	int irq_num = pci_irq_vector(to_pci_dev(dev), vector);

	cnss_pr_dbg("Get IRQ number %d for vector index %d\n", irq_num, vector);

	return irq_num;
}
EXPORT_SYMBOL(cnss_get_msi_irq);
5410 
5411 bool cnss_is_one_msi(struct device *dev)
5412 {
5413 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5414 
5415 	if (!pci_priv)
5416 		return false;
5417 
5418 	return cnss_pci_is_one_msi(pci_priv);
5419 }
5420 EXPORT_SYMBOL(cnss_is_one_msi);
5421 
/* Report the MSI/MSI-X doorbell address of the device.
 *
 * For MSI-X the cached msix_addr is returned (high word 0). For MSI the
 * address is read from the MSI capability in config space; the high
 * 32 bits are only valid when the device advertises 64-bit MSI support.
 */
void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
			  u32 *msi_addr_high)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv;
	u16 control;

	if (!pci_dev)
		return;

	pci_priv = cnss_get_pci_priv(pci_dev);
	if (!pci_priv)
		return;

	if (pci_dev->msix_enabled) {
		*msi_addr_low = pci_priv->msix_addr;
		*msi_addr_high = 0;
		/* Log only once — the address does not change */
		if (!print_optimize.msi_addr_chk++)
			cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
				    *msi_addr_low, *msi_addr_high);
		return;
	}

	pci_read_config_word(pci_dev, pci_dev->msi_cap + PCI_MSI_FLAGS,
			     &control);
	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
			      msi_addr_low);
	/* Return MSI high address only when device supports 64-bit MSI */
	if (control & PCI_MSI_FLAGS_64BIT)
		pci_read_config_dword(pci_dev,
				      pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
				      msi_addr_high);
	else
		*msi_addr_high = 0;
	 /* Log only once — the address is constant */
	 if (!print_optimize.msi_addr_chk++)
		cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
			    *msi_addr_low, *msi_addr_high);
}
EXPORT_SYMBOL(cnss_get_msi_address);
5462 
5463 u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv)
5464 {
5465 	int ret, num_vectors;
5466 	u32 user_base_data, base_vector;
5467 
5468 	if (!pci_priv)
5469 		return -ENODEV;
5470 
5471 	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
5472 					   WAKE_MSI_NAME, &num_vectors,
5473 					   &user_base_data, &base_vector);
5474 	if (ret) {
5475 		cnss_pr_err("WAKE MSI is not valid\n");
5476 		return 0;
5477 	}
5478 
5479 	return user_base_data;
5480 }
5481 
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
/* Kernel >= 5.18: pci_set_dma_mask() was removed; use the generic DMA
 * API on the underlying struct device instead.
 */
static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
{
	return dma_set_mask(&pci_dev->dev, mask);
}

static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
	u64 mask)
{
	return dma_set_coherent_mask(&pci_dev->dev, mask);
}
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
/* Older kernels: keep using the legacy PCI DMA wrappers. */
static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
{
	return pci_set_dma_mask(pci_dev, mask);
}

static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
	u64 mask)
{
	return pci_set_consistent_dma_mask(pci_dev, mask);
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
5505 
5506 static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
5507 {
5508 	int ret = 0;
5509 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5510 	u16 device_id;
5511 
5512 	pci_read_config_word(pci_dev, PCI_DEVICE_ID, &device_id);
5513 	if (device_id != pci_priv->pci_device_id->device)  {
5514 		cnss_pr_err("PCI device ID mismatch, config ID: 0x%x, probe ID: 0x%x\n",
5515 			    device_id, pci_priv->pci_device_id->device);
5516 		ret = -EIO;
5517 		goto out;
5518 	}
5519 
5520 	ret = pci_assign_resource(pci_dev, PCI_BAR_NUM);
5521 	if (ret) {
5522 		pr_err("Failed to assign PCI resource, err = %d\n", ret);
5523 		goto out;
5524 	}
5525 
5526 	ret = pci_enable_device(pci_dev);
5527 	if (ret) {
5528 		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
5529 		goto out;
5530 	}
5531 
5532 	ret = pci_request_region(pci_dev, PCI_BAR_NUM, "cnss");
5533 	if (ret) {
5534 		cnss_pr_err("Failed to request PCI region, err = %d\n", ret);
5535 		goto disable_device;
5536 	}
5537 
5538 	switch (device_id) {
5539 	case QCA6174_DEVICE_ID:
5540 	case QCN7605_DEVICE_ID:
5541 		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
5542 		break;
5543 	case QCA6390_DEVICE_ID:
5544 	case QCA6490_DEVICE_ID:
5545 	case KIWI_DEVICE_ID:
5546 	case MANGO_DEVICE_ID:
5547 	case PEACH_DEVICE_ID:
5548 		pci_priv->dma_bit_mask = PCI_DMA_MASK_36_BIT;
5549 		break;
5550 	default:
5551 		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
5552 		break;
5553 	}
5554 
5555 	cnss_pr_dbg("Set PCI DMA MASK (0x%llx)\n", pci_priv->dma_bit_mask);
5556 
5557 	ret = cnss_pci_set_dma_mask(pci_dev, pci_priv->dma_bit_mask);
5558 	if (ret) {
5559 		cnss_pr_err("Failed to set PCI DMA mask, err = %d\n", ret);
5560 		goto release_region;
5561 	}
5562 
5563 	ret = cnss_pci_set_coherent_dma_mask(pci_dev, pci_priv->dma_bit_mask);
5564 	if (ret) {
5565 		cnss_pr_err("Failed to set PCI coherent DMA mask, err = %d\n",
5566 			    ret);
5567 		goto release_region;
5568 	}
5569 
5570 	pci_priv->bar = pci_iomap(pci_dev, PCI_BAR_NUM, 0);
5571 	if (!pci_priv->bar) {
5572 		cnss_pr_err("Failed to do PCI IO map!\n");
5573 		ret = -EIO;
5574 		goto release_region;
5575 	}
5576 
5577 	/* Save default config space without BME enabled */
5578 	pci_save_state(pci_dev);
5579 	pci_priv->default_state = pci_store_saved_state(pci_dev);
5580 
5581 	pci_set_master(pci_dev);
5582 
5583 	return 0;
5584 
5585 release_region:
5586 	pci_release_region(pci_dev, PCI_BAR_NUM);
5587 disable_device:
5588 	pci_disable_device(pci_dev);
5589 out:
5590 	return ret;
5591 }
5592 
/* Undo cnss_pci_enable_bus(): stop bus mastering, drop the saved config
 * space states, unmap the BAR, release the region and disable the
 * device. Order mirrors the reverse of bring-up.
 */
static void cnss_pci_disable_bus(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;

	pci_clear_master(pci_dev);
	/* Frees both saved-state copies held by this driver */
	pci_load_and_free_saved_state(pci_dev, &pci_priv->saved_state);
	pci_load_and_free_saved_state(pci_dev, &pci_priv->default_state);

	if (pci_priv->bar) {
		pci_iounmap(pci_dev, pci_priv->bar);
		pci_priv->bar = NULL;
	}

	pci_release_region(pci_dev, PCI_BAR_NUM);
	if (pci_is_enabled(pci_dev))
		pci_disable_device(pci_dev);
}
5610 
/* Capture the QDSS CSR registers into plat_priv->qdss_reg and log them.
 *
 * The snapshot buffer is devm-allocated on first use (one slot per entry
 * of the NULL-terminated qdss_csr table) and persists for later dumps.
 * Stops early if any register read fails.
 */
static void cnss_pci_dump_qdss_reg(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	int i, array_size = ARRAY_SIZE(qdss_csr) - 1;
	gfp_t gfp = GFP_KERNEL;
	u32 reg_offset;

	/* May be called from atomic context (e.g. during a crash dump) */
	if (in_interrupt() || irqs_disabled())
		gfp = GFP_ATOMIC;

	if (!plat_priv->qdss_reg) {
		plat_priv->qdss_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
						   sizeof(*plat_priv->qdss_reg)
						   * array_size, gfp);
		if (!plat_priv->qdss_reg)
			return;
	}

	cnss_pr_dbg("Start to dump qdss registers\n");

	for (i = 0; qdss_csr[i].name; i++) {
		reg_offset = QDSS_APB_DEC_CSR_BASE + qdss_csr[i].offset;
		if (cnss_pci_reg_read(pci_priv, reg_offset,
				      &plat_priv->qdss_reg[i]))
			return;
		cnss_pr_dbg("%s[0x%x] = 0x%x\n", qdss_csr[i].name, reg_offset,
			    plat_priv->qdss_reg[i]);
	}
}
5640 
/* Dump Copy Engine registers for one CE (or the CE common block) to the
 * debug log. Only QCA6390/QCA6490 register layouts are known here; other
 * devices are silently skipped. Table iteration stops at the first
 * failed register read.
 */
static void cnss_pci_dump_ce_reg(struct cnss_pci_data *pci_priv,
				 enum cnss_ce_index ce)
{
	int i;
	u32 ce_base = ce * CE_REG_INTERVAL;
	u32 reg_offset, src_ring_base, dst_ring_base, cmn_base, val;

	/* Per-chip register bases */
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		src_ring_base = QCA6390_CE_SRC_RING_REG_BASE;
		dst_ring_base = QCA6390_CE_DST_RING_REG_BASE;
		cmn_base = QCA6390_CE_COMMON_REG_BASE;
		break;
	case QCA6490_DEVICE_ID:
		src_ring_base = QCA6490_CE_SRC_RING_REG_BASE;
		dst_ring_base = QCA6490_CE_DST_RING_REG_BASE;
		cmn_base = QCA6490_CE_COMMON_REG_BASE;
		break;
	default:
		return;
	}

	switch (ce) {
	case CNSS_CE_09:
	case CNSS_CE_10:
		/* Per-CE dump: source ring then destination ring registers */
		for (i = 0; ce_src[i].name; i++) {
			reg_offset = src_ring_base + ce_base + ce_src[i].offset;
			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
				return;
			cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
				    ce, ce_src[i].name, reg_offset, val);
		}

		for (i = 0; ce_dst[i].name; i++) {
			reg_offset = dst_ring_base + ce_base + ce_dst[i].offset;
			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
				return;
			cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
				    ce, ce_dst[i].name, reg_offset, val);
		}
		break;
	case CNSS_CE_COMMON:
		for (i = 0; ce_cmn[i].name; i++) {
			reg_offset = cmn_base  + ce_cmn[i].offset;
			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
				return;
			cnss_pr_dbg("CE_COMMON_%s[0x%x] = 0x%x\n",
				    ce_cmn[i].name, reg_offset, val);
		}
		break;
	default:
		cnss_pr_err("Unsupported CE[%d] registers dump\n", ce);
	}
}
5695 
5696 static void cnss_pci_dump_debug_reg(struct cnss_pci_data *pci_priv)
5697 {
5698 	if (cnss_pci_check_link_status(pci_priv))
5699 		return;
5700 
5701 	cnss_pr_dbg("Start to dump debug registers\n");
5702 
5703 	cnss_mhi_debug_reg_dump(pci_priv);
5704 	cnss_pci_soc_scratch_reg_dump(pci_priv);
5705 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_COMMON);
5706 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_09);
5707 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_10);
5708 }
5709 
5710 static int cnss_pci_assert_host_sol(struct cnss_pci_data *pci_priv)
5711 {
5712 	if (cnss_get_host_sol_value(pci_priv->plat_priv))
5713 		return -EINVAL;
5714 
5715 	cnss_pr_dbg("Assert host SOL GPIO to retry RDDM, expecting link down\n");
5716 	cnss_set_host_sol_value(pci_priv->plat_priv, 1);
5717 
5718 	return 0;
5719 }
5720 
5721 static void cnss_pci_mhi_reg_dump(struct cnss_pci_data *pci_priv)
5722 {
5723 	if (!cnss_pci_check_link_status(pci_priv))
5724 		cnss_mhi_debug_reg_dump(pci_priv);
5725 
5726 	cnss_pci_soc_scratch_reg_dump(pci_priv);
5727 	cnss_pci_dump_misc_reg(pci_priv);
5728 	cnss_pci_dump_shadow_reg(pci_priv);
5729 }
5730 
/* Force a firmware assert and kick off RDDM collection.
 *
 * Takes a runtime-PM reference so the bus stays resumed, dumps MHI/SOC
 * registers for context, then asks the device to enter RDDM. If the PCI
 * link is down, link-down recovery is triggered instead; if RDDM cannot
 * be triggered, host SOL is asserted as a retry, falling back to a full
 * recovery. Returns 0 on success or a negative errno.
 */
int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
{
	int ret;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
	    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state))
		return -EINVAL;
	/*
	 * Call pm_runtime_get_sync instead of auto_resume to get
	 * reference and make sure runtime_suspend won't get called.
	 */
	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
	if (ret < 0)
		goto runtime_pm_put;
	/*
	 * In some scenarios, cnss_pci_pm_runtime_get_sync
	 * might not resume PCI bus. For those cases do auto resume.
	 */
	cnss_auto_resume(&pci_priv->pci_dev->dev);

	/* For SMMU faults the dump is deferred until after RDDM trigger */
	if (!pci_priv->is_smmu_fault)
		cnss_pci_mhi_reg_dump(pci_priv);

	/* If link is still down here, directly trigger link down recovery */
	ret = cnss_pci_check_link_status(pci_priv);
	if (ret) {
		cnss_pci_link_down(&pci_priv->pci_dev->dev);
		cnss_pci_pm_runtime_mark_last_busy(pci_priv);
		cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
		return 0;
	}

	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
	if (ret) {
		if (pci_priv->is_smmu_fault) {
			cnss_pci_mhi_reg_dump(pci_priv);
			pci_priv->is_smmu_fault = false;
		}
		if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
		    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state)) {
			cnss_pr_dbg("MHI is not powered on, ignore RDDM failure\n");
			cnss_pci_pm_runtime_mark_last_busy(pci_priv);
			cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
			return 0;
		}
		cnss_fatal_err("Failed to trigger RDDM, err = %d\n", ret);
		/* SOL assert succeeded: expect link-down path to handle it */
		if (!cnss_pci_assert_host_sol(pci_priv)) {
			cnss_pci_pm_runtime_mark_last_busy(pci_priv);
			cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
			return 0;
		}
		cnss_pci_dump_debug_reg(pci_priv);
		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
				       CNSS_REASON_DEFAULT);
		goto runtime_pm_put;
	}

	if (pci_priv->is_smmu_fault) {
		cnss_pci_mhi_reg_dump(pci_priv);
		pci_priv->is_smmu_fault = false;
	}

	/* Arm a watchdog for RDDM completion unless a device error
	 * notification is already driving recovery elsewhere.
	 */
	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
		mod_timer(&pci_priv->dev_rddm_timer,
			  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
	}

runtime_pm_put:
	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
	return ret;
}
5811 
/* Record one dump segment in @dump_seg and register the region with the
 * minidump framework. Registration is skipped if the VA cannot be
 * translated to a physical address.
 */
static void cnss_pci_add_dump_seg(struct cnss_pci_data *pci_priv,
				  struct cnss_dump_seg *dump_seg,
				  enum cnss_fw_dump_type type, int seg_no,
				  void *va, dma_addr_t dma, size_t size)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct device *dev = &pci_priv->pci_dev->dev;
	phys_addr_t pa;

	dump_seg->address = dma;
	dump_seg->v_address = va;
	dump_seg->size = size;
	dump_seg->type = type;

	cnss_pr_dbg("Seg: %x, va: %pK, dma: %pa, size: 0x%zx\n",
		    seg_no, va, &dma, size);

	/* Minidump needs the physical address; bail if it can't be derived */
	if (cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS))
		return;

	cnss_minidump_add_region(plat_priv, type, seg_no, va, pa, size);
}
5834 
5835 static void cnss_pci_remove_dump_seg(struct cnss_pci_data *pci_priv,
5836 				     struct cnss_dump_seg *dump_seg,
5837 				     enum cnss_fw_dump_type type, int seg_no,
5838 				     void *va, dma_addr_t dma, size_t size)
5839 {
5840 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5841 	struct device *dev = &pci_priv->pci_dev->dev;
5842 	phys_addr_t pa;
5843 
5844 	cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS);
5845 	cnss_minidump_remove_region(plat_priv, type, seg_no, va, pa, size);
5846 }
5847 
5848 int cnss_pci_call_driver_uevent(struct cnss_pci_data *pci_priv,
5849 				enum cnss_driver_status status, void *data)
5850 {
5851 	struct cnss_uevent_data uevent_data;
5852 	struct cnss_wlan_driver *driver_ops;
5853 
5854 	driver_ops = pci_priv->driver_ops;
5855 	if (!driver_ops || !driver_ops->update_event) {
5856 		cnss_pr_dbg("Hang event driver ops is NULL\n");
5857 		return -EINVAL;
5858 	}
5859 
5860 	cnss_pr_dbg("Calling driver uevent: %d\n", status);
5861 
5862 	uevent_data.status = status;
5863 	uevent_data.data = data;
5864 
5865 	return driver_ops->update_event(pci_priv->pci_dev, &uevent_data);
5866 }
5867 
5868 static void cnss_pci_send_hang_event(struct cnss_pci_data *pci_priv)
5869 {
5870 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5871 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
5872 	struct cnss_hang_event hang_event;
5873 	void *hang_data_va = NULL;
5874 	u64 offset = 0;
5875 	u16 length = 0;
5876 	int i = 0;
5877 
5878 	if (!fw_mem || !plat_priv->fw_mem_seg_len)
5879 		return;
5880 
5881 	memset(&hang_event, 0, sizeof(hang_event));
5882 	switch (pci_priv->device_id) {
5883 	case QCA6390_DEVICE_ID:
5884 		offset = HST_HANG_DATA_OFFSET;
5885 		length = HANG_DATA_LENGTH;
5886 		break;
5887 	case QCA6490_DEVICE_ID:
5888 		/* Fallback to hard-coded values if hang event params not
5889 		 * present in QMI. Once all the firmware branches have the
5890 		 * fix to send params over QMI, this can be removed.
5891 		 */
5892 		if (plat_priv->hang_event_data_len) {
5893 			offset = plat_priv->hang_data_addr_offset;
5894 			length = plat_priv->hang_event_data_len;
5895 		} else {
5896 			offset = HSP_HANG_DATA_OFFSET;
5897 			length = HANG_DATA_LENGTH;
5898 		}
5899 		break;
5900 	case KIWI_DEVICE_ID:
5901 	case MANGO_DEVICE_ID:
5902 	case PEACH_DEVICE_ID:
5903 		offset = plat_priv->hang_data_addr_offset;
5904 		length = plat_priv->hang_event_data_len;
5905 		break;
5906 	default:
5907 		cnss_pr_err("Skip Hang Event Data as unsupported Device ID received: %d\n",
5908 			    pci_priv->device_id);
5909 		return;
5910 	}
5911 
5912 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
5913 		if (fw_mem[i].type == QMI_WLFW_MEM_TYPE_DDR_V01 &&
5914 		    fw_mem[i].va) {
5915 			/* The offset must be < (fw_mem size- hangdata length) */
5916 			if (!(offset <= fw_mem[i].size - length))
5917 				goto exit;
5918 
5919 			hang_data_va = fw_mem[i].va + offset;
5920 			hang_event.hang_event_data = kmemdup(hang_data_va,
5921 							     length,
5922 							     GFP_ATOMIC);
5923 			if (!hang_event.hang_event_data) {
5924 				cnss_pr_dbg("Hang data memory alloc failed\n");
5925 				return;
5926 			}
5927 			hang_event.hang_event_data_len = length;
5928 			break;
5929 		}
5930 	}
5931 
5932 	cnss_pci_call_driver_uevent(pci_priv, CNSS_HANG_EVENT, &hang_event);
5933 
5934 	kfree(hang_event.hang_event_data);
5935 	hang_event.hang_event_data = NULL;
5936 	return;
5937 exit:
5938 	cnss_pr_dbg("Invalid hang event params, offset:0x%x, length:0x%x\n",
5939 		    plat_priv->hang_data_addr_offset,
5940 		    plat_priv->hang_event_data_len);
5941 }
5942 
#ifdef CONFIG_CNSS2_SSR_DRIVER_DUMP
/* Ask the registered WLAN driver for its SSR dump regions and hand them
 * to the host ramdump collector. Logs and returns when the driver does
 * not implement collect_driver_dump or the collection fails.
 */
void cnss_pci_collect_host_dump_info(struct cnss_pci_data *pci_priv)
{
	struct cnss_ssr_driver_dump_entry ssr_entry[CNSS_HOST_DUMP_TYPE_MAX] = {0};
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_wlan_driver *ops = pci_priv->driver_ops;
	size_t num_entries_loaded = 0;
	int idx;
	int ret = -1;

	if (ops && ops->collect_driver_dump)
		ret = ops->collect_driver_dump(pci_priv->pci_dev, ssr_entry,
					       &num_entries_loaded);

	if (ret) {
		cnss_pr_info("Host SSR elf dump collection feature disabled\n");
		return;
	}

	for (idx = 0; idx < num_entries_loaded; idx++)
		cnss_pr_info("Idx:%d, ptr: %p, name: %s, size: %d\n",
			     idx, ssr_entry[idx].buffer_pointer,
			     ssr_entry[idx].region_name,
			     ssr_entry[idx].buffer_size);

	cnss_do_host_ramdump(plat_priv, ssr_entry, num_entries_loaded);
}
#endif
5973 
/* Collect the full crash dump from the device: hang event, RDDM image
 * download over MHI, QDSS/CE/misc register dumps, and registration of
 * FW image, RDDM image and contiguous remote-heap segments as ramdump
 * entries.
 *
 * @in_panic: true when called from the panic path — skips bus resume
 * (no sleeping) and halves the MHI timeout to beat the watchdog.
 */
void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_dump_data *dump_data =
		&plat_priv->ramdump_info_v2.dump_data;
	struct cnss_dump_seg *dump_seg =
		plat_priv->ramdump_info_v2.dump_data_vaddr;
	struct image_info *fw_image, *rddm_image;
	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
	int ret, i, j;

	/* Notify the WLAN driver of the hang unless we are in panic */
	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
	    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
		cnss_pci_send_hang_event(pci_priv);

	if (test_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state)) {
		cnss_pr_dbg("RAM dump is already collected, skip\n");
		return;
	}

	if (!cnss_is_device_powered_on(plat_priv)) {
		cnss_pr_dbg("Device is already powered off, skip\n");
		return;
	}

	if (!in_panic) {
		/* Outside panic we may resume the bus if the link check
		 * failed with -EACCES; any other failure aborts.
		 */
		mutex_lock(&pci_priv->bus_lock);
		ret = cnss_pci_check_link_status(pci_priv);
		if (ret) {
			if (ret != -EACCES) {
				mutex_unlock(&pci_priv->bus_lock);
				return;
			}
			if (cnss_pci_resume_bus(pci_priv)) {
				mutex_unlock(&pci_priv->bus_lock);
				return;
			}
		}
		mutex_unlock(&pci_priv->bus_lock);
	} else {
		if (cnss_pci_check_link_status(pci_priv))
			return;
		/* Inside panic handler, reduce timeout for RDDM to avoid
		 * unnecessary hypervisor watchdog bite.
		 */
		pci_priv->mhi_ctrl->timeout_ms /= 2;
	}

	cnss_mhi_debug_reg_dump(pci_priv);
	cnss_pci_soc_scratch_reg_dump(pci_priv);
	cnss_pci_dump_misc_reg(pci_priv);

	cnss_rddm_trigger_debug(pci_priv);
	ret = mhi_download_rddm_image(pci_priv->mhi_ctrl, in_panic);
	if (ret) {
		cnss_fatal_err("Failed to download RDDM image, err = %d\n",
			       ret);
		/* Host SOL asserted: the retry is driven by link-down flow */
		if (!cnss_pci_assert_host_sol(pci_priv))
			return;
		cnss_rddm_trigger_check(pci_priv);
		cnss_pci_dump_debug_reg(pci_priv);
		return;
	}
	cnss_rddm_trigger_check(pci_priv);
	fw_image = pci_priv->mhi_ctrl->fbc_image;
	rddm_image = pci_priv->mhi_ctrl->rddm_image;
	dump_data->nentries = 0;

	if (plat_priv->qdss_mem_seg_len)
		cnss_pci_dump_qdss_reg(pci_priv);
	cnss_mhi_dump_sfr(pci_priv);

	if (!dump_seg) {
		cnss_pr_warn("FW image dump collection not setup");
		goto skip_dump;
	}

	cnss_pr_dbg("Collect FW image dump segment, nentries %d\n",
		    fw_image->entries);

	for (i = 0; i < fw_image->entries; i++) {
		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
				      fw_image->mhi_buf[i].buf,
				      fw_image->mhi_buf[i].dma_addr,
				      fw_image->mhi_buf[i].len);
		dump_seg++;
	}

	dump_data->nentries += fw_image->entries;

	cnss_pr_dbg("Collect RDDM image dump segment, nentries %d\n",
		    rddm_image->entries);

	for (i = 0; i < rddm_image->entries; i++) {
		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
				      rddm_image->mhi_buf[i].buf,
				      rddm_image->mhi_buf[i].dma_addr,
				      rddm_image->mhi_buf[i].len);
		dump_seg++;
	}

	dump_data->nentries += rddm_image->entries;

	/* Remote heap segments are only dumpable when DMA-contiguous */
	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
			if (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
				cnss_pr_dbg("Collect remote heap dump segment\n");
				cnss_pci_add_dump_seg(pci_priv, dump_seg,
						      CNSS_FW_REMOTE_HEAP, j,
						      fw_mem[i].va,
						      fw_mem[i].pa,
						      fw_mem[i].size);
				dump_seg++;
				dump_data->nentries++;
				j++;
			} else {
				cnss_pr_dbg("Skip remote heap dumps as it is non-contiguous\n");
			}
		}
	}

	if (dump_data->nentries > 0)
		plat_priv->ramdump_info_v2.dump_data_valid = true;

	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_DONE);

skip_dump:
	complete(&plat_priv->rddm_complete);
}
6103 
6104 void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
6105 {
6106 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6107 	struct cnss_dump_seg *dump_seg =
6108 		plat_priv->ramdump_info_v2.dump_data_vaddr;
6109 	struct image_info *fw_image, *rddm_image;
6110 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
6111 	int i, j;
6112 
6113 	if (!dump_seg)
6114 		return;
6115 
6116 	fw_image = pci_priv->mhi_ctrl->fbc_image;
6117 	rddm_image = pci_priv->mhi_ctrl->rddm_image;
6118 
6119 	for (i = 0; i < fw_image->entries; i++) {
6120 		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
6121 					 fw_image->mhi_buf[i].buf,
6122 					 fw_image->mhi_buf[i].dma_addr,
6123 					 fw_image->mhi_buf[i].len);
6124 		dump_seg++;
6125 	}
6126 
6127 	for (i = 0; i < rddm_image->entries; i++) {
6128 		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
6129 					 rddm_image->mhi_buf[i].buf,
6130 					 rddm_image->mhi_buf[i].dma_addr,
6131 					 rddm_image->mhi_buf[i].len);
6132 		dump_seg++;
6133 	}
6134 
6135 	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
6136 		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR &&
6137 		    (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
6138 			cnss_pci_remove_dump_seg(pci_priv, dump_seg,
6139 						 CNSS_FW_REMOTE_HEAP, j,
6140 						 fw_mem[i].va, fw_mem[i].pa,
6141 						 fw_mem[i].size);
6142 			dump_seg++;
6143 			j++;
6144 		}
6145 	}
6146 
6147 	plat_priv->ramdump_info_v2.dump_data.nentries = 0;
6148 	plat_priv->ramdump_info_v2.dump_data_valid = false;
6149 }
6150 
/* Handle a device/firmware crash reported on the PCI bus.
 *
 * When recovery is enabled, host-side dump info is collected first,
 * then recovery runs synchronously in the caller's context (see the
 * comment below for why no work item is scheduled).
 */
void cnss_pci_device_crashed(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return;
	}

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return;
	}

	if (plat_priv->recovery_enabled)
		cnss_pci_collect_host_dump_info(pci_priv);

	/* Call recovery handler in the DRIVER_RECOVERY event context
	 * instead of scheduling work. In that way complete recovery
	 * will be done as part of DRIVER_RECOVERY event and get
	 * serialized with other events.
	 */
	cnss_recovery_handler(plat_priv);
}
6176 
6177 static int cnss_mhi_pm_runtime_get(struct mhi_controller *mhi_ctrl)
6178 {
6179 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6180 
6181 	return cnss_pci_pm_runtime_get(pci_priv, RTPM_ID_MHI);
6182 }
6183 
6184 static void cnss_mhi_pm_runtime_put_noidle(struct mhi_controller *mhi_ctrl)
6185 {
6186 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6187 
6188 	cnss_pci_pm_runtime_put_noidle(pci_priv, RTPM_ID_MHI);
6189 }
6190 
6191 void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv,
6192 				 char *prefix_name, char *name)
6193 {
6194 	struct cnss_plat_data *plat_priv;
6195 
6196 	if (!pci_priv)
6197 		return;
6198 
6199 	plat_priv = pci_priv->plat_priv;
6200 
6201 	if (!plat_priv->use_fw_path_with_prefix) {
6202 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
6203 		return;
6204 	}
6205 
6206 	switch (pci_priv->device_id) {
6207 	case QCN7605_DEVICE_ID:
6208 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6209 			  QCN7605_PATH_PREFIX "%s", name);
6210 		break;
6211 	case QCA6390_DEVICE_ID:
6212 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6213 			  QCA6390_PATH_PREFIX "%s", name);
6214 		break;
6215 	case QCA6490_DEVICE_ID:
6216 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6217 			  QCA6490_PATH_PREFIX "%s", name);
6218 		break;
6219 	case KIWI_DEVICE_ID:
6220 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6221 			  KIWI_PATH_PREFIX "%s", name);
6222 		break;
6223 	case MANGO_DEVICE_ID:
6224 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6225 			  MANGO_PATH_PREFIX "%s", name);
6226 		break;
6227 	case PEACH_DEVICE_ID:
6228 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6229 			  PEACH_PATH_PREFIX "%s", name);
6230 		break;
6231 	default:
6232 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
6233 		break;
6234 	}
6235 
6236 	cnss_pr_dbg("FW name added with prefix: %s\n", prefix_name);
6237 }
6238 
/* Select the firmware image (and fallback image) file names for the
 * attached chip, based on device ID and silicon major version, and
 * store them in plat_priv->firmware_name / plat_priv->fw_fallback_name.
 *
 * Return: 0 on success, -EINVAL for an unsupported device/version combo.
 */
static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		/* Only FW v2 silicon is supported for QCA6390 */
		if (plat_priv->device_version.major_version < FW_V2_NUMBER) {
			cnss_pr_dbg("Device ID:version (0x%lx:%d) is not supported\n",
				    pci_priv->device_id,
				    plat_priv->device_version.major_version);
			return -EINVAL;
		}
		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
					    FW_V2_FILE_NAME);
		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
			 FW_V2_FILE_NAME);
		break;
	case QCA6490_DEVICE_ID:
		/* v2 silicon uses the v2 image; anything else gets default */
		switch (plat_priv->device_version.major_version) {
		case FW_V2_NUMBER:
				cnss_pci_add_fw_prefix_name(pci_priv,
							    plat_priv->firmware_name,
							    FW_V2_FILE_NAME);
				snprintf(plat_priv->fw_fallback_name,
					 MAX_FIRMWARE_NAME_LEN,
					 FW_V2_FILE_NAME);
			break;
		default:
			cnss_pci_add_fw_prefix_name(pci_priv,
						    plat_priv->firmware_name,
						    DEFAULT_FW_FILE_NAME);
			snprintf(plat_priv->fw_fallback_name,
				 MAX_FIRMWARE_NAME_LEN,
				 DEFAULT_FW_FILE_NAME);
			break;
		}
		break;
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		switch (plat_priv->device_version.major_version) {
		case FW_V2_NUMBER:
			/*
			 * kiwiv2 using seprate fw binary for MM and FTM mode,
			 * platform driver loads corresponding binary according
			 * to current mode indicated by wlan driver. Otherwise
			 * use default binary.
			 * Mission mode using same binary name as before,
			 * if seprate binary is not there, fall back to default.
			 */
			if (plat_priv->driver_mode == CNSS_MISSION) {
				cnss_pci_add_fw_prefix_name(pci_priv,
							    plat_priv->firmware_name,
							    FW_V2_FILE_NAME);
				cnss_pci_add_fw_prefix_name(pci_priv,
							    plat_priv->fw_fallback_name,
							    FW_V2_FILE_NAME);
			} else if (plat_priv->driver_mode == CNSS_FTM) {
				cnss_pci_add_fw_prefix_name(pci_priv,
							    plat_priv->firmware_name,
							    FW_V2_FTM_FILE_NAME);
				cnss_pci_add_fw_prefix_name(pci_priv,
							    plat_priv->fw_fallback_name,
							    FW_V2_FILE_NAME);
			} else {
				/*
				 * Since during cold boot calibration phase,
				 * wlan driver has not registered, so default
				 * fw binary will be used.
				 */
				cnss_pci_add_fw_prefix_name(pci_priv,
							    plat_priv->firmware_name,
							    FW_V2_FILE_NAME);
				snprintf(plat_priv->fw_fallback_name,
					 MAX_FIRMWARE_NAME_LEN,
					 FW_V2_FILE_NAME);
			}
			break;
		default:
			cnss_pci_add_fw_prefix_name(pci_priv,
						    plat_priv->firmware_name,
						    DEFAULT_FW_FILE_NAME);
			snprintf(plat_priv->fw_fallback_name,
				 MAX_FIRMWARE_NAME_LEN,
				 DEFAULT_FW_FILE_NAME);
			break;
		}
		break;
	default:
		/* Unknown chips just load the generic image name */
		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
					    DEFAULT_FW_FILE_NAME);
		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
			 DEFAULT_FW_FILE_NAME);
		break;
	}

	cnss_pr_dbg("FW name is %s, FW fallback name is %s\n",
		    plat_priv->firmware_name, plat_priv->fw_fallback_name);

	return 0;
}
6340 
6341 static char *cnss_mhi_notify_status_to_str(enum mhi_callback status)
6342 {
6343 	switch (status) {
6344 	case MHI_CB_IDLE:
6345 		return "IDLE";
6346 	case MHI_CB_EE_RDDM:
6347 		return "RDDM";
6348 	case MHI_CB_SYS_ERROR:
6349 		return "SYS_ERROR";
6350 	case MHI_CB_FATAL_ERROR:
6351 		return "FATAL_ERROR";
6352 	case MHI_CB_EE_MISSION_MODE:
6353 		return "MISSION_MODE";
6354 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
6355 	case MHI_CB_FALLBACK_IMG:
6356 		return "FW_FALLBACK";
6357 #endif
6358 	default:
6359 		return "UNKNOWN";
6360 	}
6361 };
6362 
/* Timer callback fired when the device has not signaled RDDM (ramdump
 * readiness) within DEV_RDDM_TIMEOUT of a detected error (armed by
 * cnss_pci_handle_mhi_sys_err()). Schedules recovery with a reason
 * that matches the execution environment the device is actually in.
 */
static void cnss_dev_rddm_timeout_hdlr(struct timer_list *t)
{
	struct cnss_pci_data *pci_priv =
		from_timer(pci_priv, t, dev_rddm_timer);
	enum mhi_ee_type mhi_ee;

	if (!pci_priv)
		return;

	cnss_fatal_err("Timeout waiting for RDDM notification\n");

	/* NOTE(review): a zero return from cnss_pci_assert_host_sol()
	 * ends handling here — presumably the SOL assert itself prompts
	 * the device to enter RDDM; confirm against its implementation.
	 */
	if (!cnss_pci_assert_host_sol(pci_priv))
		return;

	mhi_ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
	/* Device in PBL means it reset underneath us; ramdump is gone */
	if (mhi_ee == MHI_EE_PBL)
		cnss_pr_err("Unable to collect ramdumps due to abrupt reset\n");

	if (mhi_ee == MHI_EE_RDDM) {
		cnss_pr_info("Device MHI EE is RDDM, try to collect dump\n");
		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
				       CNSS_REASON_RDDM);
	} else {
		/* No dump available; capture debug registers instead */
		cnss_mhi_debug_reg_dump(pci_priv);
		cnss_pci_soc_scratch_reg_dump(pci_priv);
		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
				       CNSS_REASON_TIMEOUT);
	}
}
6392 
/* Periodic timer callback used while MHI power-on is in progress.
 *
 * If the device is still reachable but has not reached MHI POWER_ON,
 * dump MHI/PBL/SBL debug data and re-arm the timer for another
 * BOOT_DEBUG_TIMEOUT_MS interval.
 */
static void cnss_boot_debug_timeout_hdlr(struct timer_list *t)
{
	struct cnss_pci_data *pci_priv =
		from_timer(pci_priv, t, boot_debug_timer);

	if (!pci_priv)
		return;

	/* Link down: registers are unreadable, nothing useful to dump */
	if (cnss_pci_check_link_status(pci_priv))
		return;

	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
		return;

	/* Boot completed; stop re-arming silently */
	if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
		return;

	/* RDDM cookie present — presumably the RDDM path takes over the
	 * dump collection, so skip here (TODO confirm cookie semantics).
	 */
	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE))
		return;

	cnss_pr_dbg("Dump MHI/PBL/SBL debug data every %ds during MHI power on\n",
		    BOOT_DEBUG_TIMEOUT_MS / 1000);
	cnss_mhi_debug_reg_dump(pci_priv);
	cnss_pci_soc_scratch_reg_dump(pci_priv);
	cnss_pci_dump_bl_sram_mem(pci_priv);

	mod_timer(&pci_priv->boot_debug_timer,
		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
}
6422 
/* Common handling for an MHI SYS_ERROR-style fault: mark firmware as
 * down, stop the boot timer, and arm the RDDM timeout so a forced dump
 * is attempted if the device never signals RDDM readiness on its own.
 *
 * Return: always 0.
 */
static int cnss_pci_handle_mhi_sys_err(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	/* QMI traffic is expected to fail while firmware is down */
	cnss_ignore_qmi_failure(true);
	set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
	del_timer(&plat_priv->fw_boot_timer);
	mod_timer(&pci_priv->dev_rddm_timer,
		  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
	cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);

	return 0;
}
6436 
/* Device-SOL interrupt: treated identically to an MHI SYS_ERROR. */
int cnss_pci_handle_dev_sol_irq(struct cnss_pci_data *pci_priv)
{
	return cnss_pci_handle_mhi_sys_err(pci_priv);
}
6441 
/* MHI status callback: translate MHI controller events into CNSS
 * firmware-state updates and, where appropriate, recovery scheduling.
 */
static void cnss_mhi_notify_status(struct mhi_controller *mhi_ctrl,
				   enum mhi_callback reason)
{
	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
	struct cnss_plat_data *plat_priv;
	enum cnss_recovery_reason cnss_reason;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL");
		return;
	}

	plat_priv = pci_priv->plat_priv;

	/* IDLE fires too often to be worth logging */
	if (reason != MHI_CB_IDLE)
		cnss_pr_dbg("MHI status cb is called with reason %s(%d)\n",
			    cnss_mhi_notify_status_to_str(reason), reason);

	switch (reason) {
	case MHI_CB_IDLE:
	case MHI_CB_EE_MISSION_MODE:
		/* Informational only; no recovery action */
		return;
	case MHI_CB_FATAL_ERROR:
		cnss_ignore_qmi_failure(true);
		set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
		del_timer(&plat_priv->fw_boot_timer);
		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
		cnss_reason = CNSS_REASON_DEFAULT;
		break;
	case MHI_CB_SYS_ERROR:
		/* Arms the RDDM timer; recovery is scheduled later */
		cnss_pci_handle_mhi_sys_err(pci_priv);
		return;
	case MHI_CB_EE_RDDM:
		/* Device reached RDDM by itself; cancel the forced timer */
		cnss_ignore_qmi_failure(true);
		set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
		del_timer(&plat_priv->fw_boot_timer);
		del_timer(&pci_priv->dev_rddm_timer);
		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
		cnss_reason = CNSS_REASON_RDDM;
		break;
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
	case MHI_CB_FALLBACK_IMG:
		/* for kiwi_v2 binary fallback is used, skip path fallback here */
		if (!(pci_priv->device_id == KIWI_DEVICE_ID &&
		      plat_priv->device_version.major_version == FW_V2_NUMBER)) {
			plat_priv->use_fw_path_with_prefix = false;
			cnss_pci_update_fw_name(pci_priv);
		}
		return;
#endif
	default:
		cnss_pr_err("Unsupported MHI status cb reason: %d\n", reason);
		return;
	}

	cnss_schedule_recovery(&pci_priv->pci_dev->dev, cnss_reason);
}
6499 
6500 static int cnss_pci_get_mhi_msi(struct cnss_pci_data *pci_priv)
6501 {
6502 	int ret, num_vectors, i;
6503 	u32 user_base_data, base_vector;
6504 	int *irq;
6505 	unsigned int msi_data;
6506 	bool is_one_msi = false;
6507 
6508 	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
6509 					   MHI_MSI_NAME, &num_vectors,
6510 					   &user_base_data, &base_vector);
6511 	if (ret)
6512 		return ret;
6513 
6514 	if (cnss_pci_is_one_msi(pci_priv)) {
6515 		is_one_msi = true;
6516 		num_vectors = cnss_pci_get_one_msi_mhi_irq_array_size(pci_priv);
6517 	}
6518 	cnss_pr_dbg("Number of assigned MSI for MHI is %d, base vector is %d\n",
6519 		    num_vectors, base_vector);
6520 
6521 	irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
6522 	if (!irq)
6523 		return -ENOMEM;
6524 
6525 	for (i = 0; i < num_vectors; i++) {
6526 		msi_data = base_vector;
6527 		if (!is_one_msi)
6528 			msi_data += i;
6529 		irq[i] = cnss_get_msi_irq(&pci_priv->pci_dev->dev, msi_data);
6530 	}
6531 
6532 	pci_priv->mhi_ctrl->irq = irq;
6533 	pci_priv->mhi_ctrl->nr_irqs = num_vectors;
6534 
6535 	return 0;
6536 }
6537 
/* MHI callback invoked when the device requests a PCIe link bandwidth
 * change. Applies the requested speed first, then the full bandwidth
 * (speed + width), and caches the values as the new defaults on
 * success.
 *
 * Return: 0 on success, negative errno if bandwidth could not be set.
 */
static int cnss_mhi_bw_scale(struct mhi_controller *mhi_ctrl,
			     struct mhi_link_info *link_info)
{
	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	int ret = 0;

	cnss_pr_dbg("Setting link speed:0x%x, width:0x%x\n",
		    link_info->target_link_speed,
		    link_info->target_link_width);

	/* It has to set target link speed here before setting link bandwidth
	 * when device requests link speed change. This can avoid setting link
	 * bandwidth getting rejected if requested link speed is higher than
	 * current one.
	 */
	ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num,
					  link_info->target_link_speed);
	if (ret)
		/* Non-fatal: still attempt the bandwidth change below */
		cnss_pr_err("Failed to set target link speed to 0x%x, err = %d\n",
			    link_info->target_link_speed, ret);

	ret = cnss_pci_set_link_bandwidth(pci_priv,
					  link_info->target_link_speed,
					  link_info->target_link_width);

	if (ret) {
		cnss_pr_err("Failed to set link bandwidth, err = %d\n", ret);
		return ret;
	}

	pci_priv->def_link_speed = link_info->target_link_speed;
	pci_priv->def_link_width = link_info->target_link_width;

	return 0;
}
6574 
6575 static int cnss_mhi_read_reg(struct mhi_controller *mhi_ctrl,
6576 			     void __iomem *addr, u32 *out)
6577 {
6578 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6579 
6580 	u32 tmp = readl_relaxed(addr);
6581 
6582 	/* Unexpected value, query the link status */
6583 	if (PCI_INVALID_READ(tmp) &&
6584 	    cnss_pci_check_link_status(pci_priv))
6585 		return -EIO;
6586 
6587 	*out = tmp;
6588 
6589 	return 0;
6590 }
6591 
/* MHI core callback for device register writes over PCI (relaxed,
 * no ordering barrier).
 */
static void cnss_mhi_write_reg(struct mhi_controller *mhi_ctrl,
			       void __iomem *addr, u32 val)
{
	writel_relaxed(val, addr);
}
6597 
6598 static int cnss_get_mhi_soc_info(struct cnss_plat_data *plat_priv,
6599 				 struct mhi_controller *mhi_ctrl)
6600 {
6601 	int ret = 0;
6602 
6603 	ret = mhi_get_soc_info(mhi_ctrl);
6604 	if (ret)
6605 		goto exit;
6606 
6607 	plat_priv->device_version.family_number = mhi_ctrl->family_number;
6608 	plat_priv->device_version.device_number = mhi_ctrl->device_number;
6609 	plat_priv->device_version.major_version = mhi_ctrl->major_version;
6610 	plat_priv->device_version.minor_version = mhi_ctrl->minor_version;
6611 
6612 	cnss_pr_dbg("Get device version info, family number: 0x%x, device number: 0x%x, major version: 0x%x, minor version: 0x%x\n",
6613 		    plat_priv->device_version.family_number,
6614 		    plat_priv->device_version.device_number,
6615 		    plat_priv->device_version.major_version,
6616 		    plat_priv->device_version.minor_version);
6617 
6618 	/* Only keep lower 4 bits as real device major version */
6619 	plat_priv->device_version.major_version &= DEVICE_MAJOR_VERSION_MASK;
6620 
6621 exit:
6622 	return ret;
6623 }
6624 
6625 static bool cnss_is_tme_supported(struct cnss_pci_data *pci_priv)
6626 {
6627 	if (!pci_priv) {
6628 		cnss_pr_dbg("pci_priv is NULL");
6629 		return false;
6630 	}
6631 
6632 	switch (pci_priv->device_id) {
6633 	case PEACH_DEVICE_ID:
6634 		return true;
6635 	default:
6636 		return false;
6637 	}
6638 }
6639 
6640 static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
6641 {
6642 	int ret = 0;
6643 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6644 	struct pci_dev *pci_dev = pci_priv->pci_dev;
6645 	struct mhi_controller *mhi_ctrl;
6646 	phys_addr_t bar_start;
6647 	const struct mhi_controller_config *cnss_mhi_config =
6648 						&cnss_mhi_config_default;
6649 
6650 	ret = cnss_qmi_init(plat_priv);
6651 	if (ret)
6652 		return -EINVAL;
6653 
6654 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
6655 		return 0;
6656 
6657 	mhi_ctrl = mhi_alloc_controller();
6658 	if (!mhi_ctrl) {
6659 		cnss_pr_err("Invalid MHI controller context\n");
6660 		return -EINVAL;
6661 	}
6662 
6663 	pci_priv->mhi_ctrl = mhi_ctrl;
6664 	mhi_ctrl->cntrl_dev = &pci_dev->dev;
6665 
6666 	mhi_ctrl->fw_image = plat_priv->firmware_name;
6667 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
6668 	mhi_ctrl->fallback_fw_image = plat_priv->fw_fallback_name;
6669 #endif
6670 
6671 	mhi_ctrl->regs = pci_priv->bar;
6672 	mhi_ctrl->reg_len = pci_resource_len(pci_priv->pci_dev, PCI_BAR_NUM);
6673 	bar_start = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
6674 	cnss_pr_dbg("BAR starts at %pa, length is %x\n",
6675 		    &bar_start, mhi_ctrl->reg_len);
6676 
6677 	ret = cnss_pci_get_mhi_msi(pci_priv);
6678 	if (ret) {
6679 		cnss_pr_err("Failed to get MSI for MHI, err = %d\n", ret);
6680 		goto free_mhi_ctrl;
6681 	}
6682 
6683 	if (cnss_pci_is_one_msi(pci_priv))
6684 		mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
6685 
6686 	if (pci_priv->smmu_s1_enable) {
6687 		mhi_ctrl->iova_start = pci_priv->smmu_iova_start;
6688 		mhi_ctrl->iova_stop = pci_priv->smmu_iova_start +
6689 					pci_priv->smmu_iova_len;
6690 	} else {
6691 		mhi_ctrl->iova_start = 0;
6692 		mhi_ctrl->iova_stop = pci_priv->dma_bit_mask;
6693 	}
6694 
6695 	mhi_ctrl->status_cb = cnss_mhi_notify_status;
6696 	mhi_ctrl->runtime_get = cnss_mhi_pm_runtime_get;
6697 	mhi_ctrl->runtime_put = cnss_mhi_pm_runtime_put_noidle;
6698 	mhi_ctrl->read_reg = cnss_mhi_read_reg;
6699 	mhi_ctrl->write_reg = cnss_mhi_write_reg;
6700 
6701 	mhi_ctrl->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size;
6702 	if (!mhi_ctrl->rddm_size)
6703 		mhi_ctrl->rddm_size = RAMDUMP_SIZE_DEFAULT;
6704 
6705 	if (plat_priv->device_id == QCN7605_DEVICE_ID)
6706 		mhi_ctrl->sbl_size = SZ_256K;
6707 	else
6708 		mhi_ctrl->sbl_size = SZ_512K;
6709 
6710 	mhi_ctrl->seg_len = SZ_512K;
6711 	mhi_ctrl->fbc_download = true;
6712 
6713 	ret = cnss_get_mhi_soc_info(plat_priv, mhi_ctrl);
6714 	if (ret)
6715 		goto free_mhi_irq;
6716 
6717 	/* Satellite config only supported on KIWI V2 and later chipset */
6718 	if (plat_priv->device_id <= QCA6490_DEVICE_ID ||
6719 			(plat_priv->device_id == KIWI_DEVICE_ID &&
6720 			 plat_priv->device_version.major_version == 1)) {
6721 		if (plat_priv->device_id == QCN7605_DEVICE_ID)
6722 			cnss_mhi_config = &cnss_mhi_config_genoa;
6723 		else
6724 			cnss_mhi_config = &cnss_mhi_config_no_satellite;
6725 	}
6726 
6727 	mhi_ctrl->tme_supported_image = cnss_is_tme_supported(pci_priv);
6728 
6729 	ret = mhi_register_controller(mhi_ctrl, cnss_mhi_config);
6730 	if (ret) {
6731 		cnss_pr_err("Failed to register to MHI bus, err = %d\n", ret);
6732 		goto free_mhi_irq;
6733 	}
6734 
6735 	/* MHI satellite driver only needs to connect when DRV is supported */
6736 	if (cnss_pci_get_drv_supported(pci_priv))
6737 		cnss_mhi_controller_set_base(pci_priv, bar_start);
6738 
6739 	cnss_get_bwscal_info(plat_priv);
6740 	cnss_pr_dbg("no_bwscale: %d\n", plat_priv->no_bwscale);
6741 
6742 	/* BW scale CB needs to be set after registering MHI per requirement */
6743 	if (!plat_priv->no_bwscale)
6744 		cnss_mhi_controller_set_bw_scale_cb(pci_priv,
6745 						    cnss_mhi_bw_scale);
6746 
6747 	ret = cnss_pci_update_fw_name(pci_priv);
6748 	if (ret)
6749 		goto unreg_mhi;
6750 
6751 	return 0;
6752 
6753 unreg_mhi:
6754 	mhi_unregister_controller(mhi_ctrl);
6755 free_mhi_irq:
6756 	kfree(mhi_ctrl->irq);
6757 free_mhi_ctrl:
6758 	mhi_free_controller(mhi_ctrl);
6759 
6760 	return ret;
6761 }
6762 
/* Undo cnss_pci_register_mhi(): unregister the controller from the MHI
 * bus and free the IRQ array plus the controller itself. QCA6174 never
 * registered an MHI controller, so it is skipped.
 */
static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
{
	struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return;

	mhi_unregister_controller(mhi_ctrl);
	kfree(mhi_ctrl->irq);
	mhi_ctrl->irq = NULL;
	mhi_free_controller(mhi_ctrl);
	pci_priv->mhi_ctrl = NULL;
}
6776 
/* Install the per-chip register access sequence tables used by the
 * misc register dump helpers, and perform any one-time register
 * programming the chip needs. Devices not listed get no misc register
 * access configured.
 */
static void cnss_pci_config_regs(struct cnss_pci_data *pci_priv)
{
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6390;
		pci_priv->wcss_reg = wcss_reg_access_seq;
		pci_priv->pcie_reg = pcie_reg_access_seq;
		pci_priv->wlaon_reg = wlaon_reg_access_seq;
		pci_priv->syspm_reg = syspm_reg_access_seq;

		/* Configure WDOG register with specific value so that we can
		 * know if HW is in the process of WDOG reset recovery or not
		 * when reading the registers.
		 */
		cnss_pci_reg_write
		(pci_priv,
		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG,
		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL);
		break;
	case QCA6490_DEVICE_ID:
		/* QCA6490 only exposes the WLAON sequence */
		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6490;
		pci_priv->wlaon_reg = wlaon_reg_access_seq;
		break;
	default:
		return;
	}
}
6804 
6805 #if !IS_ENABLED(CONFIG_ARCH_QCOM)
/* Stub for non-QCOM platforms (see #if above): there is no
 * reserved-memory region to attach, so report success.
 */
static int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
{
	return 0;
}
6810 
/* IRQ handler for the WLAN PCI wake GPIO.
 *
 * Aborts any in-flight system suspend and, when the device is runtime
 * suspended/suspending (or being monitored for wake while
 * auto-suspended), requests a runtime resume so the wake event can be
 * serviced.
 */
static irqreturn_t cnss_pci_wake_handler(int irq, void *data)
{
	struct cnss_pci_data *pci_priv = data;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	enum rpm_status status;
	struct device *dev;

	/* Counter is diagnostic only (printed below) */
	pci_priv->wake_counter++;
	cnss_pr_dbg("WLAN PCI wake IRQ (%u) is asserted #%u\n",
		    pci_priv->wake_irq, pci_priv->wake_counter);

	/* Make sure abort current suspend */
	cnss_pm_stay_awake(plat_priv);
	cnss_pm_relax(plat_priv);
	/* Above two pm* API calls will abort system suspend only when
	 * plat_dev->dev->ws is initiated by device_init_wakeup() API, and
	 * calling pm_system_wakeup() is just to guarantee system suspend
	 * can be aborted if it is not initiated in any case.
	 */
	pm_system_wakeup();

	dev = &pci_priv->pci_dev->dev;
	status = dev->power.runtime_status;

	/* Resume when either (a) wake interrupts were being monitored
	 * while auto-suspended, or (b) runtime PM is asleep or going to
	 * sleep.
	 */
	if ((cnss_pci_get_monitor_wake_intr(pci_priv) &&
	     cnss_pci_get_auto_suspended(pci_priv)) ||
	    (status == RPM_SUSPENDING || status == RPM_SUSPENDED)) {
		cnss_pci_set_monitor_wake_intr(pci_priv, false);
		cnss_pci_pm_request_resume(pci_priv);
	}

	return IRQ_HANDLED;
}
6844 
6845 /**
6846  * cnss_pci_wake_gpio_init() - Setup PCI wake GPIO for WLAN
6847  * @pci_priv: driver PCI bus context pointer
6848  *
6849  * This function initializes WLAN PCI wake GPIO and corresponding
6850  * interrupt. It should be used in non-MSM platforms whose PCIe
6851  * root complex driver doesn't handle the GPIO.
6852  *
6853  * Return: 0 for success or skip, negative value for error
6854  */
static int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct device *dev = &plat_priv->plat_dev->dev;
	int ret = 0;

	/* The GPIO is optional: absence in DT is a silent, successful
	 * skip (ret stays 0, per the "success or skip" contract above).
	 */
	pci_priv->wake_gpio = of_get_named_gpio(dev->of_node,
						"wlan-pci-wake-gpio", 0);
	if (pci_priv->wake_gpio < 0)
		goto out;

	cnss_pr_dbg("Get PCI wake GPIO (%d) from device node\n",
		    pci_priv->wake_gpio);

	ret = gpio_request(pci_priv->wake_gpio, "wlan_pci_wake_gpio");
	if (ret) {
		cnss_pr_err("Failed to request PCI wake GPIO, err = %d\n",
			    ret);
		goto out;
	}

	gpio_direction_input(pci_priv->wake_gpio);
	pci_priv->wake_irq = gpio_to_irq(pci_priv->wake_gpio);

	/* Falling edge trigger — NOTE(review): wake line appears to be
	 * active-low; confirm against board schematics.
	 */
	ret = request_irq(pci_priv->wake_irq, cnss_pci_wake_handler,
			  IRQF_TRIGGER_FALLING, "wlan_pci_wake_irq", pci_priv);
	if (ret) {
		cnss_pr_err("Failed to request PCI wake IRQ, err = %d\n", ret);
		goto free_gpio;
	}

	/* Allow this IRQ to wake the system from suspend */
	ret = enable_irq_wake(pci_priv->wake_irq);
	if (ret) {
		cnss_pr_err("Failed to enable PCI wake IRQ, err = %d\n", ret);
		goto free_irq;
	}

	return 0;

free_irq:
	free_irq(pci_priv->wake_irq, pci_priv);
free_gpio:
	gpio_free(pci_priv->wake_gpio);
out:
	return ret;
}
6901 
/* Tear down the wake GPIO/IRQ set up by cnss_pci_wake_gpio_init().
 * Safe to call when no GPIO was found in DT (wake_gpio < 0).
 */
static void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
{
	if (pci_priv->wake_gpio < 0)
		return;

	disable_irq_wake(pci_priv->wake_irq);
	free_irq(pci_priv->wake_irq, pci_priv);
	gpio_free(pci_priv->wake_gpio);
}
6911 #endif
6912 
6913 #ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
/* Dual-WLAN-card build: called once PCIe enumeration has completely
 * finished. Marks enumeration done, then suspends the link of an
 * already-probed device that had to stay up during enumeration.
 *
 * Return: 0 on success, -ENODEV for an unexpected device ID.
 */
static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
{
	int ret = 0;

	/* in the dual wlan card case, if call pci_register_driver after
	 * finishing the first pcie device enumeration, it will cause
	 * the cnss_pci_probe called in advance with the second wlan card,
	 * and the sequence like this:
	 * enter msm_pcie_enumerate -> pci_bus_add_devices -> cnss_pci_probe
	 * -> exit msm_pcie_enumerate.
	 * But the correct sequence we expected is like this:
	 * enter msm_pcie_enumerate -> pci_bus_add_devices  ->
	 * exit msm_pcie_enumerate -> cnss_pci_probe.
	 * And this unexpected sequence will make the second wlan card do
	 * pcie link suspend while the pcie enumeration not finished.
	 * So need to add below logical to avoid doing pcie link suspend
	 * if the enumeration has not finish.
	 */
	plat_priv->enumerate_done = true;

	/* Now enumeration is finished, try to suspend PCIe link */
	if (plat_priv->bus_priv) {
		struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
		struct pci_dev *pci_dev = pci_priv->pci_dev;

		switch (pci_dev->device) {
		case QCA6390_DEVICE_ID:
			cnss_pci_set_wlaon_pwr_ctrl(pci_priv,
						    false,
						    true,
						    false);

			cnss_pci_suspend_pwroff(pci_dev);
			break;
		default:
			cnss_pr_err("Unknown PCI device found: 0x%x\n",
				    pci_dev->device);
			ret = -ENODEV;
		}
	}

	return ret;
}
6957 #else
/* Dual WLAN card support disabled: no deferred link suspend needed. */
static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
{
	return 0;
}
6962 #endif
6963 
6964 /* Setting to use this cnss_pm_domain ops will let PM framework override the
6965  * ops from dev->bus->pm which is pci_dev_pm_ops from pci-driver.c. This ops
6966  * has to take care everything device driver needed which is currently done
6967  * from pci_dev_pm_ops.
6968  */
static struct dev_pm_domain cnss_pm_domain = {
	.ops = {
		/* System sleep: full suspend/resume */
		SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
		/* Noirq phase of system sleep */
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
					      cnss_pci_resume_noirq)
		/* Runtime PM */
		SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend,
				   cnss_pci_runtime_resume,
				   cnss_pci_runtime_idle)
	}
};
6979 
6980 static int cnss_pci_get_dev_cfg_node(struct cnss_plat_data *plat_priv)
6981 {
6982 	struct device_node *child;
6983 	u32 id, i;
6984 	int id_n, ret;
6985 
6986 	if (plat_priv->dt_type != CNSS_DTT_MULTIEXCHG)
6987 		return 0;
6988 
6989 	if (!plat_priv->device_id) {
6990 		cnss_pr_err("Invalid device id\n");
6991 		return -EINVAL;
6992 	}
6993 
6994 	for_each_available_child_of_node(plat_priv->plat_dev->dev.of_node,
6995 					 child) {
6996 		if (strcmp(child->name, "chip_cfg"))
6997 			continue;
6998 
6999 		id_n = of_property_count_u32_elems(child, "supported-ids");
7000 		if (id_n <= 0) {
7001 			cnss_pr_err("Device id is NOT set\n");
7002 			return -EINVAL;
7003 		}
7004 
7005 		for (i = 0; i < id_n; i++) {
7006 			ret = of_property_read_u32_index(child,
7007 							 "supported-ids",
7008 							 i, &id);
7009 			if (ret) {
7010 				cnss_pr_err("Failed to read supported ids\n");
7011 				return -EINVAL;
7012 			}
7013 
7014 			if (id == plat_priv->device_id) {
7015 				plat_priv->dev_node = child;
7016 				cnss_pr_dbg("got node[%s@%d] for device[0x%x]\n",
7017 					    child->name, i, id);
7018 				return 0;
7019 			}
7020 		}
7021 	}
7022 
7023 	return -EINVAL;
7024 }
7025 
7026 #ifdef CONFIG_CNSS2_CONDITIONAL_POWEROFF
7027 static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
7028 {
7029 	bool suspend_pwroff;
7030 
7031 	switch (pci_dev->device) {
7032 	case QCA6390_DEVICE_ID:
7033 	case QCA6490_DEVICE_ID:
7034 		suspend_pwroff = false;
7035 		break;
7036 	default:
7037 		suspend_pwroff = true;
7038 	}
7039 
7040 	return suspend_pwroff;
7041 }
7042 #else
/* Conditional power-off disabled: always allow suspend + power off. */
static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
{
	return true;
}
7047 #endif
7048 
7049 #ifdef CONFIG_CNSS2_ENUM_WITH_LOW_SPEED
/* Cap the PCIe RC at Gen1 (2.5 GT/s) — used when the platform is
 * configured to enumerate at low speed. Failure is logged but not
 * propagated.
 */
static void
cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
{
	int ret;

	ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
					  PCI_EXP_LNKSTA_CLS_2_5GB);
	if (ret)
		cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen1, err = %d\n",
			     rc_num, ret);
}
7061 
7062 static void
7063 cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
7064 {
7065 	int ret;
7066 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
7067 
7068 	/* if not Genoa, do not restore rc speed */
7069 	if (pci_priv->device_id != QCN7605_DEVICE_ID) {
7070 		/* The request 0 will reset maximum GEN speed to default */
7071 		ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num, 0);
7072 		if (ret)
7073 			cnss_pr_err("Failed to reset max PCIe RC%x link speed to default, err = %d\n",
7074 				     plat_priv->rc_num, ret);
7075 	}
7076 }
7077 
/* Force a PCI link retrain by cycling the link through suspend/resume,
 * then log the resulting link status. Used when the device stays powered
 * after probe and the link speed needs to be re-established.
 */
static void
cnss_pci_link_retrain_trigger(struct cnss_pci_data *pci_priv)
{
	int ret;

	/* suspend/resume will trigger retrain to re-establish link speed */
	ret = cnss_suspend_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);

	ret = cnss_resume_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);

	cnss_pci_get_link_status(pci_priv);
}
7094 #else
/* No-op stubs used when CONFIG_CNSS2_ENUM_WITH_LOW_SPEED is disabled:
 * the RC link speed is never downgraded, so nothing needs restoring or
 * retraining.
 */
static void
cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
{
}

static void
cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
{
}

static void
cnss_pci_link_retrain_trigger(struct cnss_pci_data *pci_priv)
{
}
7109 #endif
7110 
7111 static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev)
7112 {
7113 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
7114 	int rc_num = pci_dev->bus->domain_nr;
7115 	struct cnss_plat_data *plat_priv;
7116 	int ret = 0;
7117 	bool suspend_pwroff = cnss_should_suspend_pwroff(pci_dev);
7118 
7119 	plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);
7120 
7121 	if (suspend_pwroff) {
7122 		ret = cnss_suspend_pci_link(pci_priv);
7123 		if (ret)
7124 			cnss_pr_err("Failed to suspend PCI link, err = %d\n",
7125 				    ret);
7126 		cnss_power_off_device(plat_priv);
7127 	} else {
7128 		cnss_pr_dbg("bus suspend and dev power off disabled for device [0x%x]\n",
7129 			    pci_dev->device);
7130 		cnss_pci_link_retrain_trigger(pci_priv);
7131 	}
7132 }
7133 
/* cnss_pci_probe() - PCI probe callback for all supported cnss WLAN chips.
 *
 * Matches the device to its platform context via the PCIe RC (domain)
 * number, allocates and cross-links the per-device pci_priv, then brings
 * the device up step by step: device cfg node, power, subsys, ramdump,
 * SMMU, PCI event notifier, bus, MSI, MHI, and per-chip setup. The goto
 * ladder at the bottom unwinds exactly the steps already completed.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int cnss_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	int ret = 0;
	struct cnss_pci_data *pci_priv;
	struct device *dev = &pci_dev->dev;
	int rc_num = pci_dev->bus->domain_nr;
	struct cnss_plat_data *plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);

	cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x rc_num %d\n",
		    id->vendor, pci_dev->device, rc_num);
	if (!plat_priv) {
		cnss_pr_err("Find match plat_priv with rc number failure\n");
		ret = -ENODEV;
		goto out;
	}

	/* devm allocation: released automatically on probe failure/unbind */
	pci_priv = devm_kzalloc(dev, sizeof(*pci_priv), GFP_KERNEL);
	if (!pci_priv) {
		ret = -ENOMEM;
		goto out;
	}

	/* Cross-link the PCI context with the platform context */
	pci_priv->pci_link_state = PCI_LINK_UP;
	pci_priv->plat_priv = plat_priv;
	pci_priv->pci_dev = pci_dev;
	pci_priv->pci_device_id = id;
	pci_priv->device_id = pci_dev->device;
	cnss_set_pci_priv(pci_dev, pci_priv);
	plat_priv->device_id = pci_dev->device;
	plat_priv->bus_priv = pci_priv;
	mutex_init(&pci_priv->bus_lock);
	if (plat_priv->use_pm_domain)
		dev->pm_domain = &cnss_pm_domain;

	/* Undo any pre-enumeration RC link-speed downgrade (skipped for
	 * QCN7605 inside the helper).
	 */
	cnss_pci_restore_rc_speed(pci_priv);

	ret = cnss_pci_get_dev_cfg_node(plat_priv);
	if (ret) {
		cnss_pr_err("Failed to get device cfg node, err = %d\n", ret);
		goto reset_ctx;
	}

	cnss_get_sleep_clk_supported(plat_priv);

	ret = cnss_dev_specific_power_on(plat_priv);
	if (ret < 0)
		goto reset_ctx;

	cnss_pci_of_reserved_mem_device_init(pci_priv);

	ret = cnss_register_subsys(plat_priv);
	if (ret)
		goto reset_ctx;

	ret = cnss_register_ramdump(plat_priv);
	if (ret)
		goto unregister_subsys;

	ret = cnss_pci_init_smmu(pci_priv);
	if (ret)
		goto unregister_ramdump;

	/* update drv support flag */
	cnss_pci_update_drv_supported(pci_priv);

	cnss_update_supported_link_info(pci_priv);

	ret = cnss_reg_pci_event(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
		goto deinit_smmu;
	}

	ret = cnss_pci_enable_bus(pci_priv);
	if (ret)
		goto dereg_pci_event;

	ret = cnss_pci_enable_msi(pci_priv);
	if (ret)
		goto disable_bus;

	ret = cnss_pci_register_mhi(pci_priv);
	if (ret)
		goto disable_msi;

	/* Per-chip post-bringup setup */
	switch (pci_dev->device) {
	case QCA6174_DEVICE_ID:
		pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET,
				     &pci_priv->revision_id);
		break;
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		/* With dual WLAN enabled, only touch WLAON power control
		 * once enumerate_done is set; otherwise always do it.
		 */
		if ((cnss_is_dual_wlan_enabled() &&
		     plat_priv->enumerate_done) || !cnss_is_dual_wlan_enabled())
			cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false,
						    false);

		timer_setup(&pci_priv->dev_rddm_timer,
			    cnss_dev_rddm_timeout_hdlr, 0);
		timer_setup(&pci_priv->boot_debug_timer,
			    cnss_boot_debug_timeout_hdlr, 0);
		INIT_DELAYED_WORK(&pci_priv->time_sync_work,
				  cnss_pci_time_sync_work_hdlr);
		cnss_pci_get_link_status(pci_priv);
		cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, false);
		cnss_pci_wake_gpio_init(pci_priv);
		break;
	default:
		cnss_pr_err("Unknown PCI device found: 0x%x\n",
			    pci_dev->device);
		ret = -ENODEV;
		goto unreg_mhi;
	}

	cnss_pci_config_regs(pci_priv);
	/* NOTE(review): the EMULATION_HW path returns 0 through "out"
	 * without setting CNSS_PCI_PROBE_DONE — confirm this is intended.
	 */
	if (EMULATION_HW)
		goto out;
	if (cnss_is_dual_wlan_enabled() && !plat_priv->enumerate_done)
		goto probe_done;
	cnss_pci_suspend_pwroff(pci_dev);

probe_done:
	set_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);

	return 0;

unreg_mhi:
	cnss_pci_unregister_mhi(pci_priv);
disable_msi:
	cnss_pci_disable_msi(pci_priv);
disable_bus:
	cnss_pci_disable_bus(pci_priv);
dereg_pci_event:
	cnss_dereg_pci_event(pci_priv);
deinit_smmu:
	cnss_pci_deinit_smmu(pci_priv);
unregister_ramdump:
	cnss_unregister_ramdump(plat_priv);
unregister_subsys:
	cnss_unregister_subsys(plat_priv);
reset_ctx:
	plat_priv->bus_priv = NULL;
out:
	return ret;
}
7285 
7286 static void cnss_pci_remove(struct pci_dev *pci_dev)
7287 {
7288 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
7289 	struct cnss_plat_data *plat_priv =
7290 		cnss_bus_dev_to_plat_priv(&pci_dev->dev);
7291 
7292 	clear_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);
7293 	cnss_pci_unregister_driver_hdlr(pci_priv);
7294 	cnss_pci_free_aux_mem(pci_priv);
7295 	cnss_pci_free_tme_lite_mem(pci_priv);
7296 	cnss_pci_free_m3_mem(pci_priv);
7297 	cnss_pci_free_fw_mem(pci_priv);
7298 	cnss_pci_free_qdss_mem(pci_priv);
7299 
7300 	switch (pci_dev->device) {
7301 	case QCA6290_DEVICE_ID:
7302 	case QCA6390_DEVICE_ID:
7303 	case QCN7605_DEVICE_ID:
7304 	case QCA6490_DEVICE_ID:
7305 	case KIWI_DEVICE_ID:
7306 	case MANGO_DEVICE_ID:
7307 	case PEACH_DEVICE_ID:
7308 		cnss_pci_wake_gpio_deinit(pci_priv);
7309 		del_timer(&pci_priv->boot_debug_timer);
7310 		del_timer(&pci_priv->dev_rddm_timer);
7311 		break;
7312 	default:
7313 		break;
7314 	}
7315 
7316 	cnss_pci_unregister_mhi(pci_priv);
7317 	cnss_pci_disable_msi(pci_priv);
7318 	cnss_pci_disable_bus(pci_priv);
7319 	cnss_dereg_pci_event(pci_priv);
7320 	cnss_pci_deinit_smmu(pci_priv);
7321 	if (plat_priv) {
7322 		cnss_unregister_ramdump(plat_priv);
7323 		cnss_unregister_subsys(plat_priv);
7324 		plat_priv->bus_priv = NULL;
7325 	} else {
7326 		cnss_pr_err("Plat_priv is null, Unable to unregister ramdump,subsys\n");
7327 	}
7328 }
7329 
/* PCI vendor/device IDs this driver binds to; one entry per supported
 * chip, exported to the module loader via MODULE_DEVICE_TABLE().
 */
static const struct pci_device_id cnss_pci_id_table[] = {
	{ QCA6174_VENDOR_ID, QCA6174_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6390_VENDOR_ID, QCA6390_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCN7605_VENDOR_ID, QCN7605_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6490_VENDOR_ID, QCA6490_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ KIWI_VENDOR_ID, KIWI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ MANGO_VENDOR_ID, MANGO_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ PEACH_VENDOR_ID, PEACH_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 } /* sentinel */
};
MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);
7342 
/* Power-management callbacks: system sleep (suspend/resume plus the
 * noirq phases) and runtime PM (suspend/resume/idle).
 */
static const struct dev_pm_ops cnss_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
				      cnss_pci_resume_noirq)
	SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend, cnss_pci_runtime_resume,
			   cnss_pci_runtime_idle)
};
7350 
/* PCI driver descriptor registered by cnss_pci_init(). */
static struct pci_driver cnss_pci_driver = {
	.name     = "cnss_pci",
	.id_table = cnss_pci_id_table,
	.probe    = cnss_pci_probe,
	.remove   = cnss_pci_remove,
	.driver = {
		.pm = &cnss_pm_ops,
	},
};
7360 
7361 static int cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
7362 {
7363 	int ret, retry = 0;
7364 
7365 	/* Always set initial target PCIe link speed to Gen2 for QCA6490 device
7366 	 * since there may be link issues if it boots up with Gen3 link speed.
7367 	 * Device is able to change it later at any time. It will be rejected
7368 	 * if requested speed is higher than the one specified in PCIe DT.
7369 	 */
7370 	if (plat_priv->device_id == QCA6490_DEVICE_ID) {
7371 		ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
7372 						  PCI_EXP_LNKSTA_CLS_5_0GB);
7373 		if (ret && ret != -EPROBE_DEFER)
7374 			cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen2, err = %d\n",
7375 				    rc_num, ret);
7376 	} else {
7377 		cnss_pci_downgrade_rc_speed(plat_priv, rc_num);
7378 	}
7379 
7380 	cnss_pr_dbg("Trying to enumerate with PCIe RC%x\n", rc_num);
7381 retry:
7382 	ret = _cnss_pci_enumerate(plat_priv, rc_num);
7383 	if (ret) {
7384 		if (ret == -EPROBE_DEFER) {
7385 			cnss_pr_dbg("PCIe RC driver is not ready, defer probe\n");
7386 			goto out;
7387 		}
7388 		cnss_pr_err("Failed to enable PCIe RC%x, err = %d\n",
7389 			    rc_num, ret);
7390 		if (retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
7391 			cnss_pr_dbg("Retry PCI link training #%d\n", retry);
7392 			goto retry;
7393 		} else {
7394 			goto out;
7395 		}
7396 	}
7397 
7398 	plat_priv->rc_num = rc_num;
7399 
7400 out:
7401 	return ret;
7402 }
7403 
/* cnss_pci_init() - enumerate the PCIe RC(s) listed in DT and register
 * the cnss PCI driver.
 *
 * Reads "qcom,wlan-rc-num" from the platform DT node and tries each RC
 * number in order until one enumerates. Driver registration is guarded
 * by the global cnss_driver_registered flag so it happens only once.
 */
int cnss_pci_init(struct cnss_plat_data *plat_priv)
{
	struct device *dev = &plat_priv->plat_dev->dev;
	const __be32 *prop;
	int ret = 0, prop_len = 0, rc_count, i;

	prop = of_get_property(dev->of_node, "qcom,wlan-rc-num", &prop_len);
	if (!prop || !prop_len) {
		/* NOTE(review): ret is still 0 here, so a missing DT
		 * property is logged as an error but reported as success
		 * to the caller — confirm this is intended.
		 */
		cnss_pr_err("Failed to get PCIe RC number from DT\n");
		goto out;
	}

	/* Try each RC in the property; stop at the first success, bail
	 * out on -EPROBE_DEFER or when the last RC also fails.
	 */
	rc_count = prop_len / sizeof(__be32);
	for (i = 0; i < rc_count; i++) {
		ret = cnss_pci_enumerate(plat_priv, be32_to_cpup(&prop[i]));
		if (!ret)
			break;
		else if (ret == -EPROBE_DEFER || (ret && i == rc_count - 1))
			goto out;
	}

	ret = cnss_try_suspend(plat_priv);
	if (ret) {
		cnss_pr_err("Failed to suspend, ret: %d\n", ret);
		goto out;
	}

	if (!cnss_driver_registered) {
		ret = pci_register_driver(&cnss_pci_driver);
		if (ret) {
			cnss_pr_err("Failed to register to PCI framework, err = %d\n",
				    ret);
			goto out;
		}
		/* probe is expected to have populated bus_priv by now */
		if (!plat_priv->bus_priv) {
			cnss_pr_err("Failed to probe PCI driver\n");
			ret = -ENODEV;
			goto unreg_pci;
		}
		cnss_driver_registered = true;
	}

	return 0;

unreg_pci:
	pci_unregister_driver(&cnss_pci_driver);
out:
	return ret;
}
7453 
7454 void cnss_pci_deinit(struct cnss_plat_data *plat_priv)
7455 {
7456 	if (cnss_driver_registered) {
7457 		pci_unregister_driver(&cnss_pci_driver);
7458 		cnss_driver_registered = false;
7459 	}
7460 }
7461