xref: /wlan-dirver/platform/cnss2/pci.c (revision 2dec17eda30e5411288154c9bcadb76fd03934f7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/completion.h>
8 #include <linux/io.h>
9 #include <linux/irq.h>
10 #include <linux/memblock.h>
11 #include <linux/module.h>
12 #include <linux/msi.h>
13 #include <linux/of.h>
14 #include <linux/of_gpio.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/suspend.h>
17 #include <linux/version.h>
18 #include <linux/sched.h>
19 #include "main.h"
20 #include "bus.h"
21 #include "debug.h"
22 #include "pci.h"
23 #include "pci_platform.h"
24 #include "reg.h"
25 
26 #define PCI_LINK_UP			1
27 #define PCI_LINK_DOWN			0
28 
29 #define SAVE_PCI_CONFIG_SPACE		1
30 #define RESTORE_PCI_CONFIG_SPACE	0
31 
32 #define PCI_BAR_NUM			0
33 #define PCI_INVALID_READ(val)		((val) == U32_MAX)
34 
35 #define PCI_DMA_MASK_32_BIT		DMA_BIT_MASK(32)
36 #define PCI_DMA_MASK_36_BIT		DMA_BIT_MASK(36)
37 #define PCI_DMA_MASK_64_BIT		DMA_BIT_MASK(64)
38 
39 #define MHI_NODE_NAME			"qcom,mhi"
40 #define MHI_MSI_NAME			"MHI"
41 
42 #define QCA6390_PATH_PREFIX		"qca6390/"
43 #define QCA6490_PATH_PREFIX		"qca6490/"
44 #define QCN7605_PATH_PREFIX             "qcn7605/"
45 #define KIWI_PATH_PREFIX		"kiwi/"
46 #define MANGO_PATH_PREFIX		"mango/"
47 #define PEACH_PATH_PREFIX		"peach/"
48 #define DEFAULT_PHY_M3_FILE_NAME	"m3.bin"
49 #define DEFAULT_AUX_FILE_NAME		"aux_ucode.elf"
50 #define DEFAULT_PHY_UCODE_FILE_NAME	"phy_ucode.elf"
51 #define TME_PATCH_FILE_NAME		"tmel_patch.elf"
52 #define PHY_UCODE_V2_FILE_NAME		"phy_ucode20.elf"
53 #define DEFAULT_FW_FILE_NAME		"amss.bin"
54 #define FW_V2_FILE_NAME			"amss20.bin"
55 #define FW_V2_FTM_FILE_NAME		"amss20_ftm.bin"
56 #define DEVICE_MAJOR_VERSION_MASK	0xF
57 
58 #define WAKE_MSI_NAME			"WAKE"
59 
60 #define DEV_RDDM_TIMEOUT		5000
61 #define WAKE_EVENT_TIMEOUT		5000
62 
63 #ifdef CONFIG_CNSS_EMULATION
64 #define EMULATION_HW			1
65 #else
66 #define EMULATION_HW			0
67 #endif
68 
69 #define RAMDUMP_SIZE_DEFAULT		0x420000
70 #define CNSS_256KB_SIZE			0x40000
71 #define DEVICE_RDDM_COOKIE		0xCAFECACE
72 
73 static bool cnss_driver_registered;
74 
75 static DEFINE_SPINLOCK(pci_link_down_lock);
76 static DEFINE_SPINLOCK(pci_reg_window_lock);
77 static DEFINE_SPINLOCK(time_sync_lock);
78 
79 #define MHI_TIMEOUT_OVERWRITE_MS	(plat_priv->ctrl_params.mhi_timeout)
80 #define MHI_M2_TIMEOUT_MS		(plat_priv->ctrl_params.mhi_m2_timeout)
81 
82 #define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US	1000
83 #define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US	2000
84 
85 #define RDDM_LINK_RECOVERY_RETRY		20
86 #define RDDM_LINK_RECOVERY_RETRY_DELAY_MS	20
87 
88 #define FORCE_WAKE_DELAY_MIN_US			4000
89 #define FORCE_WAKE_DELAY_MAX_US			6000
90 #define FORCE_WAKE_DELAY_TIMEOUT_US		60000
91 
92 #define REG_RETRY_MAX_TIMES		3
93 
94 #define MHI_SUSPEND_RETRY_MAX_TIMES		3
95 #define MHI_SUSPEND_RETRY_DELAY_US		5000
96 
97 #define BOOT_DEBUG_TIMEOUT_MS			7000
98 
99 #define HANG_DATA_LENGTH		384
100 #define HST_HANG_DATA_OFFSET		((3 * 1024 * 1024) - HANG_DATA_LENGTH)
101 #define HSP_HANG_DATA_OFFSET		((2 * 1024 * 1024) - HANG_DATA_LENGTH)
102 #define GNO_HANG_DATA_OFFSET		(0x7d000 - HANG_DATA_LENGTH)
103 
104 #define AFC_SLOT_SIZE                   0x1000
105 #define AFC_MAX_SLOT                    2
106 #define AFC_MEM_SIZE                    (AFC_SLOT_SIZE * AFC_MAX_SLOT)
107 #define AFC_AUTH_STATUS_OFFSET          1
108 #define AFC_AUTH_SUCCESS                1
109 #define AFC_AUTH_ERROR                  0
110 
111 static const struct mhi_channel_config cnss_mhi_channels[] = {
112 	{
113 		.num = 0,
114 		.name = "LOOPBACK",
115 		.num_elements = 32,
116 		.event_ring = 1,
117 		.dir = DMA_TO_DEVICE,
118 		.ee_mask = 0x4,
119 		.pollcfg = 0,
120 		.doorbell = MHI_DB_BRST_DISABLE,
121 		.lpm_notify = false,
122 		.offload_channel = false,
123 		.doorbell_mode_switch = false,
124 		.auto_queue = false,
125 	},
126 	{
127 		.num = 1,
128 		.name = "LOOPBACK",
129 		.num_elements = 32,
130 		.event_ring = 1,
131 		.dir = DMA_FROM_DEVICE,
132 		.ee_mask = 0x4,
133 		.pollcfg = 0,
134 		.doorbell = MHI_DB_BRST_DISABLE,
135 		.lpm_notify = false,
136 		.offload_channel = false,
137 		.doorbell_mode_switch = false,
138 		.auto_queue = false,
139 	},
140 	{
141 		.num = 4,
142 		.name = "DIAG",
143 		.num_elements = 64,
144 		.event_ring = 1,
145 		.dir = DMA_TO_DEVICE,
146 		.ee_mask = 0x4,
147 		.pollcfg = 0,
148 		.doorbell = MHI_DB_BRST_DISABLE,
149 		.lpm_notify = false,
150 		.offload_channel = false,
151 		.doorbell_mode_switch = false,
152 		.auto_queue = false,
153 	},
154 	{
155 		.num = 5,
156 		.name = "DIAG",
157 		.num_elements = 64,
158 		.event_ring = 1,
159 		.dir = DMA_FROM_DEVICE,
160 		.ee_mask = 0x4,
161 		.pollcfg = 0,
162 		.doorbell = MHI_DB_BRST_DISABLE,
163 		.lpm_notify = false,
164 		.offload_channel = false,
165 		.doorbell_mode_switch = false,
166 		.auto_queue = false,
167 	},
168 	{
169 		.num = 20,
170 		.name = "IPCR",
171 		.num_elements = 64,
172 		.event_ring = 1,
173 		.dir = DMA_TO_DEVICE,
174 		.ee_mask = 0x4,
175 		.pollcfg = 0,
176 		.doorbell = MHI_DB_BRST_DISABLE,
177 		.lpm_notify = false,
178 		.offload_channel = false,
179 		.doorbell_mode_switch = false,
180 		.auto_queue = false,
181 	},
182 	{
183 		.num = 21,
184 		.name = "IPCR",
185 		.num_elements = 64,
186 		.event_ring = 1,
187 		.dir = DMA_FROM_DEVICE,
188 		.ee_mask = 0x4,
189 		.pollcfg = 0,
190 		.doorbell = MHI_DB_BRST_DISABLE,
191 		.lpm_notify = false,
192 		.offload_channel = false,
193 		.doorbell_mode_switch = false,
194 		.auto_queue = true,
195 	},
196 /* All MHI satellite config to be at the end of data struct */
197 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
198 	{
199 		.num = 50,
200 		.name = "ADSP_0",
201 		.num_elements = 64,
202 		.event_ring = 3,
203 		.dir = DMA_BIDIRECTIONAL,
204 		.ee_mask = 0x4,
205 		.pollcfg = 0,
206 		.doorbell = MHI_DB_BRST_DISABLE,
207 		.lpm_notify = false,
208 		.offload_channel = true,
209 		.doorbell_mode_switch = false,
210 		.auto_queue = false,
211 	},
212 	{
213 		.num = 51,
214 		.name = "ADSP_1",
215 		.num_elements = 64,
216 		.event_ring = 3,
217 		.dir = DMA_BIDIRECTIONAL,
218 		.ee_mask = 0x4,
219 		.pollcfg = 0,
220 		.doorbell = MHI_DB_BRST_DISABLE,
221 		.lpm_notify = false,
222 		.offload_channel = true,
223 		.doorbell_mode_switch = false,
224 		.auto_queue = false,
225 	},
226 	{
227 		.num = 70,
228 		.name = "ADSP_2",
229 		.num_elements = 64,
230 		.event_ring = 3,
231 		.dir = DMA_BIDIRECTIONAL,
232 		.ee_mask = 0x4,
233 		.pollcfg = 0,
234 		.doorbell = MHI_DB_BRST_DISABLE,
235 		.lpm_notify = false,
236 		.offload_channel = true,
237 		.doorbell_mode_switch = false,
238 		.auto_queue = false,
239 	},
240 	{
241 		.num = 71,
242 		.name = "ADSP_3",
243 		.num_elements = 64,
244 		.event_ring = 3,
245 		.dir = DMA_BIDIRECTIONAL,
246 		.ee_mask = 0x4,
247 		.pollcfg = 0,
248 		.doorbell = MHI_DB_BRST_DISABLE,
249 		.lpm_notify = false,
250 		.offload_channel = true,
251 		.doorbell_mode_switch = false,
252 		.auto_queue = false,
253 	},
254 #endif
255 };
256 
257 static const struct mhi_channel_config cnss_mhi_channels_genoa[] = {
258 	{
259 		.num = 0,
260 		.name = "LOOPBACK",
261 		.num_elements = 32,
262 		.event_ring = 1,
263 		.dir = DMA_TO_DEVICE,
264 		.ee_mask = 0x4,
265 		.pollcfg = 0,
266 		.doorbell = MHI_DB_BRST_DISABLE,
267 		.lpm_notify = false,
268 		.offload_channel = false,
269 		.doorbell_mode_switch = false,
270 		.auto_queue = false,
271 	},
272 	{
273 		.num = 1,
274 		.name = "LOOPBACK",
275 		.num_elements = 32,
276 		.event_ring = 1,
277 		.dir = DMA_FROM_DEVICE,
278 		.ee_mask = 0x4,
279 		.pollcfg = 0,
280 		.doorbell = MHI_DB_BRST_DISABLE,
281 		.lpm_notify = false,
282 		.offload_channel = false,
283 		.doorbell_mode_switch = false,
284 		.auto_queue = false,
285 	},
286 	{
287 		.num = 4,
288 		.name = "DIAG",
289 		.num_elements = 64,
290 		.event_ring = 1,
291 		.dir = DMA_TO_DEVICE,
292 		.ee_mask = 0x4,
293 		.pollcfg = 0,
294 		.doorbell = MHI_DB_BRST_DISABLE,
295 		.lpm_notify = false,
296 		.offload_channel = false,
297 		.doorbell_mode_switch = false,
298 		.auto_queue = false,
299 	},
300 	{
301 		.num = 5,
302 		.name = "DIAG",
303 		.num_elements = 64,
304 		.event_ring = 1,
305 		.dir = DMA_FROM_DEVICE,
306 		.ee_mask = 0x4,
307 		.pollcfg = 0,
308 		.doorbell = MHI_DB_BRST_DISABLE,
309 		.lpm_notify = false,
310 		.offload_channel = false,
311 		.doorbell_mode_switch = false,
312 		.auto_queue = false,
313 	},
314 	{
315 		.num = 16,
316 		.name = "IPCR",
317 		.num_elements = 64,
318 		.event_ring = 1,
319 		.dir = DMA_TO_DEVICE,
320 		.ee_mask = 0x4,
321 		.pollcfg = 0,
322 		.doorbell = MHI_DB_BRST_DISABLE,
323 		.lpm_notify = false,
324 		.offload_channel = false,
325 		.doorbell_mode_switch = false,
326 		.auto_queue = false,
327 	},
328 	{
329 		.num = 17,
330 		.name = "IPCR",
331 		.num_elements = 64,
332 		.event_ring = 1,
333 		.dir = DMA_FROM_DEVICE,
334 		.ee_mask = 0x4,
335 		.pollcfg = 0,
336 		.doorbell = MHI_DB_BRST_DISABLE,
337 		.lpm_notify = false,
338 		.offload_channel = false,
339 		.doorbell_mode_switch = false,
340 		.auto_queue = true,
341 	},
342 };
343 
344 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0))
345 static struct mhi_event_config cnss_mhi_events[] = {
346 #else
347 static const struct mhi_event_config cnss_mhi_events[] = {
348 #endif
349 	{
350 		.num_elements = 32,
351 		.irq_moderation_ms = 0,
352 		.irq = 1,
353 		.mode = MHI_DB_BRST_DISABLE,
354 		.data_type = MHI_ER_CTRL,
355 		.priority = 0,
356 		.hardware_event = false,
357 		.client_managed = false,
358 		.offload_channel = false,
359 	},
360 	{
361 		.num_elements = 256,
362 		.irq_moderation_ms = 0,
363 		.irq = 2,
364 		.mode = MHI_DB_BRST_DISABLE,
365 		.priority = 1,
366 		.hardware_event = false,
367 		.client_managed = false,
368 		.offload_channel = false,
369 	},
370 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
371 	{
372 		.num_elements = 32,
373 		.irq_moderation_ms = 0,
374 		.irq = 1,
375 		.mode = MHI_DB_BRST_DISABLE,
376 		.data_type = MHI_ER_BW_SCALE,
377 		.priority = 2,
378 		.hardware_event = false,
379 		.client_managed = false,
380 		.offload_channel = false,
381 	},
382 #endif
383 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
384 	{
385 		.num_elements = 256,
386 		.irq_moderation_ms = 0,
387 		.irq = 2,
388 		.mode = MHI_DB_BRST_DISABLE,
389 		.data_type = MHI_ER_DATA,
390 		.priority = 1,
391 		.hardware_event = false,
392 		.client_managed = true,
393 		.offload_channel = true,
394 	},
395 #endif
396 };
397 
398 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
399 #define CNSS_MHI_SATELLITE_CH_CFG_COUNT 4
400 #define CNSS_MHI_SATELLITE_EVT_COUNT 1
401 #else
402 #define CNSS_MHI_SATELLITE_CH_CFG_COUNT 0
403 #define CNSS_MHI_SATELLITE_EVT_COUNT 0
404 #endif
405 
406 static const struct mhi_controller_config cnss_mhi_config_default = {
407 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
408 	.max_channels = 72,
409 #else
410 	.max_channels = 32,
411 #endif
412 	.timeout_ms = 10000,
413 	.use_bounce_buf = false,
414 	.buf_len = 0x8000,
415 	.num_channels = ARRAY_SIZE(cnss_mhi_channels),
416 	.ch_cfg = cnss_mhi_channels,
417 	.num_events = ARRAY_SIZE(cnss_mhi_events),
418 	.event_cfg = cnss_mhi_events,
419 	.m2_no_db = true,
420 };
421 
422 static const struct mhi_controller_config cnss_mhi_config_genoa = {
423 	.max_channels = 32,
424 	.timeout_ms = 10000,
425 	.use_bounce_buf = false,
426 	.buf_len = 0x8000,
427 	.num_channels = ARRAY_SIZE(cnss_mhi_channels_genoa),
428 	.ch_cfg = cnss_mhi_channels_genoa,
429 	.num_events = ARRAY_SIZE(cnss_mhi_events) -
430 		CNSS_MHI_SATELLITE_EVT_COUNT,
431 	.event_cfg = cnss_mhi_events,
432 	.m2_no_db = true,
433 #if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
434 	.bhie_offset = 0x0324,
435 #endif
436 };
437 
438 static const struct mhi_controller_config cnss_mhi_config_no_satellite = {
439 	.max_channels = 32,
440 	.timeout_ms = 10000,
441 	.use_bounce_buf = false,
442 	.buf_len = 0x8000,
443 	.num_channels = ARRAY_SIZE(cnss_mhi_channels) -
444 			CNSS_MHI_SATELLITE_CH_CFG_COUNT,
445 	.ch_cfg = cnss_mhi_channels,
446 	.num_events = ARRAY_SIZE(cnss_mhi_events) -
447 			CNSS_MHI_SATELLITE_EVT_COUNT,
448 	.event_cfg = cnss_mhi_events,
449 	.m2_no_db = true,
450 };
451 
452 static struct cnss_pci_reg ce_src[] = {
453 	{ "SRC_RING_BASE_LSB", CE_SRC_RING_BASE_LSB_OFFSET },
454 	{ "SRC_RING_BASE_MSB", CE_SRC_RING_BASE_MSB_OFFSET },
455 	{ "SRC_RING_ID", CE_SRC_RING_ID_OFFSET },
456 	{ "SRC_RING_MISC", CE_SRC_RING_MISC_OFFSET },
457 	{ "SRC_CTRL", CE_SRC_CTRL_OFFSET },
458 	{ "SRC_R0_CE_CH_SRC_IS", CE_SRC_R0_CE_CH_SRC_IS_OFFSET },
459 	{ "SRC_RING_HP", CE_SRC_RING_HP_OFFSET },
460 	{ "SRC_RING_TP", CE_SRC_RING_TP_OFFSET },
461 	{ NULL },
462 };
463 
464 static struct cnss_pci_reg ce_dst[] = {
465 	{ "DEST_RING_BASE_LSB", CE_DEST_RING_BASE_LSB_OFFSET },
466 	{ "DEST_RING_BASE_MSB", CE_DEST_RING_BASE_MSB_OFFSET },
467 	{ "DEST_RING_ID", CE_DEST_RING_ID_OFFSET },
468 	{ "DEST_RING_MISC", CE_DEST_RING_MISC_OFFSET },
469 	{ "DEST_CTRL", CE_DEST_CTRL_OFFSET },
470 	{ "CE_CH_DST_IS", CE_CH_DST_IS_OFFSET },
471 	{ "CE_CH_DEST_CTRL2", CE_CH_DEST_CTRL2_OFFSET },
472 	{ "DEST_RING_HP", CE_DEST_RING_HP_OFFSET },
473 	{ "DEST_RING_TP", CE_DEST_RING_TP_OFFSET },
474 	{ "STATUS_RING_BASE_LSB", CE_STATUS_RING_BASE_LSB_OFFSET },
475 	{ "STATUS_RING_BASE_MSB", CE_STATUS_RING_BASE_MSB_OFFSET },
476 	{ "STATUS_RING_ID", CE_STATUS_RING_ID_OFFSET },
477 	{ "STATUS_RING_MISC", CE_STATUS_RING_MISC_OFFSET },
478 	{ "STATUS_RING_HP", CE_STATUS_RING_HP_OFFSET },
479 	{ "STATUS_RING_TP", CE_STATUS_RING_TP_OFFSET },
480 	{ NULL },
481 };
482 
483 static struct cnss_pci_reg ce_cmn[] = {
484 	{ "GXI_ERR_INTS", CE_COMMON_GXI_ERR_INTS },
485 	{ "GXI_ERR_STATS", CE_COMMON_GXI_ERR_STATS },
486 	{ "GXI_WDOG_STATUS", CE_COMMON_GXI_WDOG_STATUS },
487 	{ "TARGET_IE_0", CE_COMMON_TARGET_IE_0 },
488 	{ "TARGET_IE_1", CE_COMMON_TARGET_IE_1 },
489 	{ NULL },
490 };
491 
492 static struct cnss_pci_reg qdss_csr[] = {
493 	{ "QDSSCSR_ETRIRQCTRL", QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET },
494 	{ "QDSSCSR_PRESERVEETF", QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET },
495 	{ "QDSSCSR_PRESERVEETR0", QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET },
496 	{ "QDSSCSR_PRESERVEETR1", QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET },
497 	{ NULL },
498 };
499 
500 static struct cnss_pci_reg pci_scratch[] = {
501 	{ "PCIE_SCRATCH_0", PCIE_SCRATCH_0_SOC_PCIE_REG },
502 	{ "PCIE_SCRATCH_1", PCIE_SCRATCH_1_SOC_PCIE_REG },
503 	{ "PCIE_SCRATCH_2", PCIE_SCRATCH_2_SOC_PCIE_REG },
504 	{ NULL },
505 };
506 
507 static struct cnss_pci_reg pci_bhi_debug[] = {
508 	{ "PCIE_BHIE_DEBUG_0", PCIE_PCIE_BHIE_DEBUG_0 },
509 	{ "PCIE_BHIE_DEBUG_1", PCIE_PCIE_BHIE_DEBUG_1 },
510 	{ "PCIE_BHIE_DEBUG_2", PCIE_PCIE_BHIE_DEBUG_2 },
511 	{ "PCIE_BHIE_DEBUG_3", PCIE_PCIE_BHIE_DEBUG_3 },
512 	{ "PCIE_BHIE_DEBUG_4", PCIE_PCIE_BHIE_DEBUG_4 },
513 	{ "PCIE_BHIE_DEBUG_5", PCIE_PCIE_BHIE_DEBUG_5 },
514 	{ "PCIE_BHIE_DEBUG_6", PCIE_PCIE_BHIE_DEBUG_6 },
515 	{ "PCIE_BHIE_DEBUG_7", PCIE_PCIE_BHIE_DEBUG_7 },
516 	{ "PCIE_BHIE_DEBUG_8", PCIE_PCIE_BHIE_DEBUG_8 },
517 	{ "PCIE_BHIE_DEBUG_9", PCIE_PCIE_BHIE_DEBUG_9 },
518 	{ "PCIE_BHIE_DEBUG_10", PCIE_PCIE_BHIE_DEBUG_10 },
519 	{ NULL },
520 };
521 
522 /* First field of the structure is the device bit mask. Use
523  * enum cnss_pci_reg_mask as reference for the value.
524  */
525 static struct cnss_misc_reg wcss_reg_access_seq[] = {
526 	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
527 	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x802},
528 	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
529 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_PLL_MODE, 0},
530 	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x805},
531 	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
532 	{1, 0, QCA6390_WCSS_WFSS_PMM_WFSS_PMM_R0_PMM_CTRL, 0},
533 	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_CX_CSR, 0},
534 	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_RAW_STAT, 0},
535 	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_EN, 0},
536 	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_STS, 0},
537 	{1, 1, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_CTL, 0xD},
538 	{1, 0, QCA6390_WCSS_PMM_TOP_TESTBUS_STS, 0},
539 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
540 	{1, 1, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
541 	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x8},
542 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
543 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_STS, 0},
544 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_CTL, 0},
545 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_0, 0},
546 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_9, 0},
547 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS0, 0},
548 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS1, 0},
549 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS2, 0},
550 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS3, 0},
551 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS4, 0},
552 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS5, 0},
553 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS6, 0},
554 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE0, 0},
555 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE1, 0},
556 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE2, 0},
557 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE3, 0},
558 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE4, 0},
559 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE5, 0},
560 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE6, 0},
561 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING0, 0},
562 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING1, 0},
563 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING2, 0},
564 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING3, 0},
565 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING4, 0},
566 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING5, 0},
567 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING6, 0},
568 	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30040},
569 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
570 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
571 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
572 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
573 	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30105},
574 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
575 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
576 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
577 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
578 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
579 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
580 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
581 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
582 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
583 	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_NOC_CBCR, 0},
584 	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_AHB_CBCR, 0},
585 	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_GDSCR, 0},
586 	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN1_GDSCR, 0},
587 	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN2_GDSCR, 0},
588 	{1, 0, QCA6390_WCSS_PMM_TOP_PMM_INT_CLR, 0},
589 	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_STICKY_EN, 0},
590 };
591 
592 static struct cnss_misc_reg pcie_reg_access_seq[] = {
593 	{1, 0, QCA6390_PCIE_PCIE_WCSS_STATUS_FOR_DEBUG_LOW_PCIE_LOCAL_REG, 0},
594 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
595 	{1, 1, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0x18},
596 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
597 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
598 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_STATUS_SOC_PCIE_REG, 0},
599 	{1, 0, QCA6390_PCIE_SOC_COMMIT_REPLAY_SOC_PCIE_REG, 0},
600 	{1, 0, QCA6390_TLMM_GPIO_IN_OUT57, 0},
601 	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG57, 0},
602 	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS57, 0},
603 	{1, 0, QCA6390_TLMM_GPIO_IN_OUT59, 0},
604 	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG59, 0},
605 	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS59, 0},
606 	{1, 0, QCA6390_PCIE_PCIE_PARF_LTSSM, 0},
607 	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS, 0},
608 	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS_1, 0},
609 	{1, 0, QCA6390_PCIE_PCIE_PARF_INT_STATUS, 0},
610 	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_STATUS, 0},
611 	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_MASK, 0},
612 	{1, 0, QCA6390_PCIE_PCIE_PARF_BDF_TO_SID_CFG, 0},
613 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
614 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_3, 0},
615 	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_CLOCK_RESET_CTRL, 0},
616 	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_BASE_ADDR_LOWER, 0},
617 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS, 0},
618 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_CFG, 0},
619 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
620 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1SUB, 0},
621 	{1, 0, QCA6390_PCIE_PCIE_CORE_CONFIG, 0},
622 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
623 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L2, 0},
624 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1, 0},
625 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1, 0},
626 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
627 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_HIGH, 0},
628 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_LOW, 0},
629 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_HIGH, 0},
630 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_LOW, 0},
631 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_STATUS_REG2, 0},
632 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_STATUS_REG2, 0},
633 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN2_CFG_REG1, 0},
634 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN1_CFG_REG1, 0},
635 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_APS_STATUS_REG1, 0},
636 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_APS_STATUS_REG1, 0},
637 	{1, 0, QCA6390_PCIE_PCIE_BHI_EXECENV_REG, 0},
638 };
639 
640 static struct cnss_misc_reg wlaon_reg_access_seq[] = {
641 	{3, 0, WLAON_SOC_POWER_CTRL, 0},
642 	{3, 0, WLAON_SOC_PWR_WDG_BARK_THRSHD, 0},
643 	{3, 0, WLAON_SOC_PWR_WDG_BITE_THRSHD, 0},
644 	{3, 0, WLAON_SW_COLD_RESET, 0},
645 	{3, 0, WLAON_RFA_MEM_SLP_NRET_N_OVERRIDE, 0},
646 	{3, 0, WLAON_GDSC_DELAY_SETTING, 0},
647 	{3, 0, WLAON_GDSC_DELAY_SETTING2, 0},
648 	{3, 0, WLAON_WL_PWR_STATUS_REG, 0},
649 	{3, 0, WLAON_WL_AON_DBG_CFG_REG, 0},
650 	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP0_REG, 0},
651 	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP1_REG, 0},
652 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL0, 0},
653 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL1, 0},
654 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL2, 0},
655 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL3, 0},
656 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL4, 0},
657 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5, 0},
658 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5_1, 0},
659 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6, 0},
660 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6_1, 0},
661 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL7, 0},
662 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8, 0},
663 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8_1, 0},
664 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9, 0},
665 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9_1, 0},
666 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL10, 0},
667 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL11, 0},
668 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL12, 0},
669 	{2, 0, WLAON_WL_AON_APM_OVERRIDE_REG, 0},
670 	{2, 0, WLAON_WL_AON_CXPC_REG, 0},
671 	{2, 0, WLAON_WL_AON_APM_STATUS0, 0},
672 	{2, 0, WLAON_WL_AON_APM_STATUS1, 0},
673 	{2, 0, WLAON_WL_AON_APM_STATUS2, 0},
674 	{2, 0, WLAON_WL_AON_APM_STATUS3, 0},
675 	{2, 0, WLAON_WL_AON_APM_STATUS4, 0},
676 	{2, 0, WLAON_WL_AON_APM_STATUS5, 0},
677 	{2, 0, WLAON_WL_AON_APM_STATUS6, 0},
678 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL1, 0},
679 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL6, 0},
680 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL7, 0},
681 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL3, 0},
682 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL4, 0},
683 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL5, 0},
684 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL8, 0},
685 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL2, 0},
686 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL9, 0},
687 	{3, 0, WLAON_RTC_CLK_CAL_CTRL1, 0},
688 	{3, 0, WLAON_RTC_CLK_CAL_CTRL2, 0},
689 	{3, 0, WLAON_RTC_CLK_CAL_CTRL3, 0},
690 	{3, 0, WLAON_RTC_CLK_CAL_CTRL4, 0},
691 	{3, 0, WLAON_RTC_CLK_CAL_CTRL5, 0},
692 	{3, 0, WLAON_RTC_CLK_CAL_CTRL6, 0},
693 	{3, 0, WLAON_RTC_CLK_CAL_CTRL7, 0},
694 	{3, 0, WLAON_RTC_CLK_CAL_CTRL8, 0},
695 	{3, 0, WLAON_RTC_CLK_CAL_CTRL9, 0},
696 	{3, 0, WLAON_WCSSAON_CONFIG_REG, 0},
697 	{3, 0, WLAON_WLAN_OEM_DEBUG_REG, 0},
698 	{3, 0, WLAON_WLAN_RAM_DUMP_REG, 0},
699 	{3, 0, WLAON_QDSS_WCSS_REG, 0},
700 	{3, 0, WLAON_QDSS_WCSS_ACK, 0},
701 	{3, 0, WLAON_WL_CLK_CNTL_KDF_REG, 0},
702 	{3, 0, WLAON_WL_CLK_CNTL_PMU_HFRC_REG, 0},
703 	{3, 0, WLAON_QFPROM_PWR_CTRL_REG, 0},
704 	{3, 0, WLAON_DLY_CONFIG, 0},
705 	{3, 0, WLAON_WLAON_Q6_IRQ_REG, 0},
706 	{3, 0, WLAON_PCIE_INTF_SW_CFG_REG, 0},
707 	{3, 0, WLAON_PCIE_INTF_STICKY_SW_CFG_REG, 0},
708 	{3, 0, WLAON_PCIE_INTF_PHY_SW_CFG_REG, 0},
709 	{3, 0, WLAON_PCIE_INTF_PHY_NOCSR_SW_CFG_REG, 0},
710 	{3, 0, WLAON_Q6_COOKIE_BIT, 0},
711 	{3, 0, WLAON_WARM_SW_ENTRY, 0},
712 	{3, 0, WLAON_RESET_DBG_SW_ENTRY, 0},
713 	{3, 0, WLAON_WL_PMUNOC_CFG_REG, 0},
714 	{3, 0, WLAON_RESET_CAUSE_CFG_REG, 0},
715 	{3, 0, WLAON_SOC_WCSSAON_WAKEUP_IRQ_7_EN_REG, 0},
716 	{3, 0, WLAON_DEBUG, 0},
717 	{3, 0, WLAON_SOC_PARAMETERS, 0},
718 	{3, 0, WLAON_WLPM_SIGNAL, 0},
719 	{3, 0, WLAON_SOC_RESET_CAUSE_REG, 0},
720 	{3, 0, WLAON_WAKEUP_PCIE_SOC_REG, 0},
721 	{3, 0, WLAON_PBL_STACK_CANARY, 0},
722 	{3, 0, WLAON_MEM_TOT_NUM_GRP_REG, 0},
723 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP0_REG, 0},
724 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP1_REG, 0},
725 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP2_REG, 0},
726 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP3_REG, 0},
727 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP0_REG, 0},
728 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP1_REG, 0},
729 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP2_REG, 0},
730 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP3_REG, 0},
731 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP0_REG, 0},
732 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP1_REG, 0},
733 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP2_REG, 0},
734 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP3_REG, 0},
735 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP0_REG, 0},
736 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP1_REG, 0},
737 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP2_REG, 0},
738 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP3_REG, 0},
739 	{3, 0, WLAON_MEM_CNT_SEL_REG, 0},
740 	{3, 0, WLAON_MEM_NO_EXTBHS_REG, 0},
741 	{3, 0, WLAON_MEM_DEBUG_REG, 0},
742 	{3, 0, WLAON_MEM_DEBUG_BUS_REG, 0},
743 	{3, 0, WLAON_MEM_REDUN_CFG_REG, 0},
744 	{3, 0, WLAON_WL_AON_SPARE2, 0},
745 	{3, 0, WLAON_VSEL_CFG_FOR_WL_RET_DISABLE_REG, 0},
746 	{3, 0, WLAON_BTFM_WLAN_IPC_STATUS_REG, 0},
747 	{3, 0, WLAON_MPM_COUNTER_CHICKEN_BITS, 0},
748 	{3, 0, WLAON_WLPM_CHICKEN_BITS, 0},
749 	{3, 0, WLAON_PCIE_PHY_PWR_REG, 0},
750 	{3, 0, WLAON_WL_CLK_CNTL_PMU_LPO2M_REG, 0},
751 	{3, 0, WLAON_WL_SS_ROOT_CLK_SWITCH_REG, 0},
752 	{3, 0, WLAON_POWERCTRL_PMU_REG, 0},
753 	{3, 0, WLAON_POWERCTRL_MEM_REG, 0},
754 	{3, 0, WLAON_PCIE_PWR_CTRL_REG, 0},
755 	{3, 0, WLAON_SOC_PWR_PROFILE_REG, 0},
756 	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG, 0},
757 	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG, 0},
758 	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG, 0},
759 	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG, 0},
760 	{3, 0, WLAON_MEM_SVS_CFG_REG, 0},
761 	{3, 0, WLAON_CMN_AON_MISC_REG, 0},
762 	{3, 0, WLAON_INTR_STATUS, 0},
763 	{2, 0, WLAON_INTR_ENABLE, 0},
764 	{2, 0, WLAON_NOC_DBG_BUS_SEL_REG, 0},
765 	{2, 0, WLAON_NOC_DBG_BUS_REG, 0},
766 	{2, 0, WLAON_WL_CTRL_MISC_REG, 0},
767 	{2, 0, WLAON_DBG_STATUS0, 0},
768 	{2, 0, WLAON_DBG_STATUS1, 0},
769 	{2, 0, WLAON_TIMERSYNC_OFFSET_L, 0},
770 	{2, 0, WLAON_TIMERSYNC_OFFSET_H, 0},
771 	{2, 0, WLAON_PMU_LDO_SETTLE_REG, 0},
772 };
773 
774 static struct cnss_misc_reg syspm_reg_access_seq[] = {
775 	{1, 0, QCA6390_SYSPM_SYSPM_PWR_STATUS, 0},
776 	{1, 0, QCA6390_SYSPM_DBG_BTFM_AON_REG, 0},
777 	{1, 0, QCA6390_SYSPM_DBG_BUS_SEL_REG, 0},
778 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
779 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
780 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
781 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
782 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
783 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
784 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
785 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
786 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
787 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
788 };
789 
790 static struct cnss_print_optimize print_optimize;
791 
792 #define WCSS_REG_SIZE ARRAY_SIZE(wcss_reg_access_seq)
793 #define PCIE_REG_SIZE ARRAY_SIZE(pcie_reg_access_seq)
794 #define WLAON_REG_SIZE ARRAY_SIZE(wlaon_reg_access_seq)
795 #define SYSPM_REG_SIZE ARRAY_SIZE(syspm_reg_access_seq)
796 
797 static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv);
798 static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev);
799 static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev);
800 static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
801 				       enum cnss_bus_event_type type,
802 				       void *data);
803 
804 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
805 static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
806 {
807 	mhi_debug_reg_dump(pci_priv->mhi_ctrl);
808 }
809 
810 static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
811 {
812 	mhi_dump_sfr(pci_priv->mhi_ctrl);
813 }
814 
815 static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
816 				      u32 cookie)
817 {
818 	return mhi_scan_rddm_cookie(pci_priv->mhi_ctrl, cookie);
819 }
820 
821 static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
822 				    bool notify_clients)
823 {
824 	return mhi_pm_fast_suspend(pci_priv->mhi_ctrl, notify_clients);
825 }
826 
827 static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
828 				   bool notify_clients)
829 {
830 	return mhi_pm_fast_resume(pci_priv->mhi_ctrl, notify_clients);
831 }
832 
833 static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
834 				       u32 timeout)
835 {
836 	return mhi_set_m2_timeout_ms(pci_priv->mhi_ctrl, timeout);
837 }
838 
839 static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
840 					   int timeout_us, bool in_panic)
841 {
842 	return mhi_device_get_sync_atomic(pci_priv->mhi_ctrl->mhi_dev,
843 					  timeout_us, in_panic);
844 }
845 
846 #ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
847 static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv)
848 {
849 	return mhi_host_notify_db_disable_trace(pci_priv->mhi_ctrl);
850 }
851 #endif
852 
853 static void
854 cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
855 				    int (*cb)(struct mhi_controller *mhi_ctrl,
856 					      struct mhi_link_info *link_info))
857 {
858 	mhi_controller_set_bw_scale_cb(pci_priv->mhi_ctrl, cb);
859 }
860 
861 static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
862 {
863 	return mhi_force_reset(pci_priv->mhi_ctrl);
864 }
865 
866 void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
867 				  phys_addr_t base)
868 {
869 	return mhi_controller_set_base(pci_priv->mhi_ctrl, base);
870 }
871 #else
872 static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
873 {
874 }
875 
876 static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
877 {
878 }
879 
880 static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
881 				      u32 cookie)
882 {
883 	return false;
884 }
885 
886 static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
887 				    bool notify_clients)
888 {
889 	return -EOPNOTSUPP;
890 }
891 
892 static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
893 				   bool notify_clients)
894 {
895 	return -EOPNOTSUPP;
896 }
897 
898 static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
899 				       u32 timeout)
900 {
901 }
902 
903 static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
904 					   int timeout_us, bool in_panic)
905 {
906 	return -EOPNOTSUPP;
907 }
908 
909 #ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
910 static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv)
911 {
912 	return -EOPNOTSUPP;
913 }
914 #endif
915 
916 static void
917 cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
918 				    int (*cb)(struct mhi_controller *mhi_ctrl,
919 					      struct mhi_link_info *link_info))
920 {
921 }
922 
923 static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
924 {
925 	return -EOPNOTSUPP;
926 }
927 
928 void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
929 				  phys_addr_t base)
930 {
931 }
932 #endif /* CONFIG_MHI_BUS_MISC */
933 
934 #ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
935 #define CNSS_MHI_WAKE_TIMEOUT		500000
936 
937 static void cnss_record_smmu_fault_timestamp(struct cnss_pci_data *pci_priv,
938 					     enum cnss_smmu_fault_time id)
939 {
940 	if (id >= SMMU_CB_MAX)
941 		return;
942 
943 	pci_priv->smmu_fault_timestamp[id] = sched_clock();
944 }
945 
946 static void cnss_pci_smmu_fault_handler_irq(struct iommu_domain *domain,
947 					    void *handler_token)
948 {
949 	struct cnss_pci_data *pci_priv = handler_token;
950 	int ret = 0;
951 
952 	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_ENTRY);
953 	ret = cnss_mhi_device_get_sync_atomic(pci_priv,
954 					      CNSS_MHI_WAKE_TIMEOUT, true);
955 	if (ret < 0) {
956 		cnss_pr_err("Failed to bring mhi in M0 state, ret %d\n", ret);
957 		return;
958 	}
959 
960 	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_DOORBELL_RING);
961 	ret = cnss_mhi_host_notify_db_disable_trace(pci_priv);
962 	if (ret < 0)
963 		cnss_pr_err("Fail to notify wlan fw to stop trace collection, ret %d\n", ret);
964 
965 	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_EXIT);
966 }
967 
968 void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
969 {
970 	qcom_iommu_set_fault_handler_irq(pci_priv->iommu_domain,
971 					 cnss_pci_smmu_fault_handler_irq, pci_priv);
972 }
973 #else
974 void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
975 {
976 }
977 #endif
978 
979 int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv)
980 {
981 	u16 device_id;
982 
983 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
984 		cnss_pr_dbg("%ps: PCIe link is in suspend state\n",
985 			    (void *)_RET_IP_);
986 		return -EACCES;
987 	}
988 
989 	if (pci_priv->pci_link_down_ind) {
990 		cnss_pr_err("%ps: PCIe link is down\n", (void *)_RET_IP_);
991 		return -EIO;
992 	}
993 
994 	pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id);
995 	if (device_id != pci_priv->device_id)  {
996 		cnss_fatal_err("%ps: PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
997 			       (void *)_RET_IP_, device_id,
998 			       pci_priv->device_id);
999 		return -EIO;
1000 	}
1001 
1002 	return 0;
1003 }
1004 
1005 static void cnss_pci_select_window(struct cnss_pci_data *pci_priv, u32 offset)
1006 {
1007 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1008 
1009 	u32 window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
1010 	u32 window_enable = WINDOW_ENABLE_BIT | window;
1011 	u32 val;
1012 
1013 	if (plat_priv->device_id == QCN7605_DEVICE_ID)
1014 		window_enable = QCN7605_WINDOW_ENABLE_BIT | window;
1015 
1016 	if (plat_priv->device_id == PEACH_DEVICE_ID) {
1017 		writel_relaxed(window_enable, pci_priv->bar +
1018 			       PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
1019 	} else {
1020 		writel_relaxed(window_enable, pci_priv->bar +
1021 			       QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
1022 	}
1023 
1024 	if (window != pci_priv->remap_window) {
1025 		pci_priv->remap_window = window;
1026 		cnss_pr_dbg("Config PCIe remap window register to 0x%x\n",
1027 			    window_enable);
1028 	}
1029 
1030 	/* Read it back to make sure the write has taken effect */
1031 	if (plat_priv->device_id == PEACH_DEVICE_ID) {
1032 		val = readl_relaxed(pci_priv->bar +
1033 			PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
1034 	} else {
1035 		val = readl_relaxed(pci_priv->bar +
1036 			QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
1037 	}
1038 	if (val != window_enable) {
1039 		cnss_pr_err("Failed to config window register to 0x%x, current value: 0x%x\n",
1040 			    window_enable, val);
1041 		if (!cnss_pci_check_link_status(pci_priv) &&
1042 		    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
1043 			CNSS_ASSERT(0);
1044 	}
1045 }
1046 
1047 static int cnss_pci_reg_read(struct cnss_pci_data *pci_priv,
1048 			     u32 offset, u32 *val)
1049 {
1050 	int ret;
1051 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1052 
1053 	if (!in_interrupt() && !irqs_disabled()) {
1054 		ret = cnss_pci_check_link_status(pci_priv);
1055 		if (ret)
1056 			return ret;
1057 	}
1058 
1059 	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
1060 	    offset < MAX_UNWINDOWED_ADDRESS) {
1061 		*val = readl_relaxed(pci_priv->bar + offset);
1062 		return 0;
1063 	}
1064 
1065 	/* If in panic, assumption is kernel panic handler will hold all threads
1066 	 * and interrupts. Further pci_reg_window_lock could be held before
1067 	 * panic. So only lock during normal operation.
1068 	 */
1069 	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
1070 		cnss_pci_select_window(pci_priv, offset);
1071 		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
1072 				     (offset & WINDOW_RANGE_MASK));
1073 	} else {
1074 		spin_lock_bh(&pci_reg_window_lock);
1075 		cnss_pci_select_window(pci_priv, offset);
1076 		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
1077 				     (offset & WINDOW_RANGE_MASK));
1078 		spin_unlock_bh(&pci_reg_window_lock);
1079 	}
1080 
1081 	return 0;
1082 }
1083 
1084 static int cnss_pci_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
1085 			      u32 val)
1086 {
1087 	int ret;
1088 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1089 
1090 	if (!in_interrupt() && !irqs_disabled()) {
1091 		ret = cnss_pci_check_link_status(pci_priv);
1092 		if (ret)
1093 			return ret;
1094 	}
1095 
1096 	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
1097 	    offset < MAX_UNWINDOWED_ADDRESS) {
1098 		writel_relaxed(val, pci_priv->bar + offset);
1099 		return 0;
1100 	}
1101 
1102 	/* Same constraint as PCI register read in panic */
1103 	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
1104 		cnss_pci_select_window(pci_priv, offset);
1105 		writel_relaxed(val, pci_priv->bar + WINDOW_START +
1106 			  (offset & WINDOW_RANGE_MASK));
1107 	} else {
1108 		spin_lock_bh(&pci_reg_window_lock);
1109 		cnss_pci_select_window(pci_priv, offset);
1110 		writel_relaxed(val, pci_priv->bar + WINDOW_START +
1111 			  (offset & WINDOW_RANGE_MASK));
1112 		spin_unlock_bh(&pci_reg_window_lock);
1113 	}
1114 
1115 	return 0;
1116 }
1117 
1118 static int cnss_pci_force_wake_get(struct cnss_pci_data *pci_priv)
1119 {
1120 	struct device *dev = &pci_priv->pci_dev->dev;
1121 	int ret;
1122 
1123 	ret = cnss_pci_force_wake_request_sync(dev,
1124 					       FORCE_WAKE_DELAY_TIMEOUT_US);
1125 	if (ret) {
1126 		if (ret != -EAGAIN)
1127 			cnss_pr_err("Failed to request force wake\n");
1128 		return ret;
1129 	}
1130 
1131 	/* If device's M1 state-change event races here, it can be ignored,
1132 	 * as the device is expected to immediately move from M2 to M0
1133 	 * without entering low power state.
1134 	 */
1135 	if (cnss_pci_is_device_awake(dev) != true)
1136 		cnss_pr_warn("MHI not in M0, while reg still accessible\n");
1137 
1138 	return 0;
1139 }
1140 
1141 static int cnss_pci_force_wake_put(struct cnss_pci_data *pci_priv)
1142 {
1143 	struct device *dev = &pci_priv->pci_dev->dev;
1144 	int ret;
1145 
1146 	ret = cnss_pci_force_wake_release(dev);
1147 	if (ret && ret != -EAGAIN)
1148 		cnss_pr_err("Failed to release force wake\n");
1149 
1150 	return ret;
1151 }
1152 
1153 #if IS_ENABLED(CONFIG_INTERCONNECT)
1154 /**
1155  * cnss_setup_bus_bandwidth() - Setup interconnect vote for given bandwidth
1156  * @plat_priv: Platform private data struct
1157  * @bw: bandwidth
1158  * @save: toggle flag to save bandwidth to current_bw_vote
1159  *
1160  * Setup bandwidth votes for configured interconnect paths
1161  *
1162  * Return: 0 for success
1163  */
1164 static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
1165 				    u32 bw, bool save)
1166 {
1167 	int ret = 0;
1168 	struct cnss_bus_bw_info *bus_bw_info;
1169 
1170 	if (!plat_priv->icc.path_count)
1171 		return -EOPNOTSUPP;
1172 
1173 	if (bw >= plat_priv->icc.bus_bw_cfg_count) {
1174 		cnss_pr_err("Invalid bus bandwidth Type: %d", bw);
1175 		return -EINVAL;
1176 	}
1177 
1178 	cnss_pr_buf("Bandwidth vote to %d, save %d\n", bw, save);
1179 
1180 	list_for_each_entry(bus_bw_info, &plat_priv->icc.list_head, list) {
1181 		ret = icc_set_bw(bus_bw_info->icc_path,
1182 				 bus_bw_info->cfg_table[bw].avg_bw,
1183 				 bus_bw_info->cfg_table[bw].peak_bw);
1184 		if (ret) {
1185 			cnss_pr_err("Could not set BW Cfg: %d, err = %d ICC Path: %s Val: %d %d\n",
1186 				    bw, ret, bus_bw_info->icc_name,
1187 				    bus_bw_info->cfg_table[bw].avg_bw,
1188 				    bus_bw_info->cfg_table[bw].peak_bw);
1189 			break;
1190 		}
1191 	}
1192 	if (ret == 0 && save)
1193 		plat_priv->icc.current_bw_vote = bw;
1194 	return ret;
1195 }
1196 
1197 int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
1198 {
1199 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1200 
1201 	if (!plat_priv)
1202 		return -ENODEV;
1203 
1204 	if (bandwidth < 0)
1205 		return -EINVAL;
1206 
1207 	return cnss_setup_bus_bandwidth(plat_priv, (u32)bandwidth, true);
1208 }
1209 #else
1210 static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
1211 				    u32 bw, bool save)
1212 {
1213 	return 0;
1214 }
1215 
1216 int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
1217 {
1218 	return 0;
1219 }
1220 #endif
1221 EXPORT_SYMBOL(cnss_request_bus_bandwidth);
1222 
1223 int cnss_pci_debug_reg_read(struct cnss_pci_data *pci_priv, u32 offset,
1224 			    u32 *val, bool raw_access)
1225 {
1226 	int ret = 0;
1227 	bool do_force_wake_put = true;
1228 
1229 	if (raw_access) {
1230 		ret = cnss_pci_reg_read(pci_priv, offset, val);
1231 		goto out;
1232 	}
1233 
1234 	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
1235 	if (ret)
1236 		goto out;
1237 
1238 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
1239 	if (ret < 0)
1240 		goto runtime_pm_put;
1241 
1242 	ret = cnss_pci_force_wake_get(pci_priv);
1243 	if (ret)
1244 		do_force_wake_put = false;
1245 
1246 	ret = cnss_pci_reg_read(pci_priv, offset, val);
1247 	if (ret) {
1248 		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
1249 			    offset, ret);
1250 		goto force_wake_put;
1251 	}
1252 
1253 force_wake_put:
1254 	if (do_force_wake_put)
1255 		cnss_pci_force_wake_put(pci_priv);
1256 runtime_pm_put:
1257 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
1258 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
1259 out:
1260 	return ret;
1261 }
1262 
1263 int cnss_pci_debug_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
1264 			     u32 val, bool raw_access)
1265 {
1266 	int ret = 0;
1267 	bool do_force_wake_put = true;
1268 
1269 	if (raw_access) {
1270 		ret = cnss_pci_reg_write(pci_priv, offset, val);
1271 		goto out;
1272 	}
1273 
1274 	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
1275 	if (ret)
1276 		goto out;
1277 
1278 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
1279 	if (ret < 0)
1280 		goto runtime_pm_put;
1281 
1282 	ret = cnss_pci_force_wake_get(pci_priv);
1283 	if (ret)
1284 		do_force_wake_put = false;
1285 
1286 	ret = cnss_pci_reg_write(pci_priv, offset, val);
1287 	if (ret) {
1288 		cnss_pr_err("Failed to write 0x%x to register offset 0x%x, err = %d\n",
1289 			    val, offset, ret);
1290 		goto force_wake_put;
1291 	}
1292 
1293 force_wake_put:
1294 	if (do_force_wake_put)
1295 		cnss_pci_force_wake_put(pci_priv);
1296 runtime_pm_put:
1297 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
1298 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
1299 out:
1300 	return ret;
1301 }
1302 
1303 static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
1304 {
1305 	struct pci_dev *pci_dev = pci_priv->pci_dev;
1306 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1307 	bool link_down_or_recovery;
1308 
1309 	if (!plat_priv)
1310 		return -ENODEV;
1311 
1312 	link_down_or_recovery = pci_priv->pci_link_down_ind ||
1313 		(test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));
1314 
1315 	if (save) {
1316 		if (link_down_or_recovery) {
1317 			pci_priv->saved_state = NULL;
1318 		} else {
1319 			pci_save_state(pci_dev);
1320 			pci_priv->saved_state = pci_store_saved_state(pci_dev);
1321 		}
1322 	} else {
1323 		if (link_down_or_recovery) {
1324 			pci_load_saved_state(pci_dev, pci_priv->default_state);
1325 			pci_restore_state(pci_dev);
1326 		} else if (pci_priv->saved_state) {
1327 			pci_load_and_free_saved_state(pci_dev,
1328 						      &pci_priv->saved_state);
1329 			pci_restore_state(pci_dev);
1330 		}
1331 	}
1332 
1333 	return 0;
1334 }
1335 
1336 static int cnss_update_supported_link_info(struct cnss_pci_data *pci_priv)
1337 {
1338 	int ret = 0;
1339 	struct pci_dev *root_port;
1340 	struct device_node *root_of_node;
1341 	struct cnss_plat_data *plat_priv;
1342 
1343 	if (!pci_priv)
1344 		return -EINVAL;
1345 
1346 	if (pci_priv->device_id != KIWI_DEVICE_ID)
1347 		return ret;
1348 
1349 	plat_priv = pci_priv->plat_priv;
1350 	root_port = pcie_find_root_port(pci_priv->pci_dev);
1351 
1352 	if (!root_port) {
1353 		cnss_pr_err("PCIe root port is null\n");
1354 		return -EINVAL;
1355 	}
1356 
1357 	root_of_node = root_port->dev.of_node;
1358 	if (root_of_node && root_of_node->parent) {
1359 		ret = of_property_read_u32(root_of_node->parent,
1360 					   "qcom,target-link-speed",
1361 					   &plat_priv->supported_link_speed);
1362 		if (!ret)
1363 			cnss_pr_dbg("Supported PCIe Link Speed: %d\n",
1364 				    plat_priv->supported_link_speed);
1365 		else
1366 			plat_priv->supported_link_speed = 0;
1367 	}
1368 
1369 	return ret;
1370 }
1371 
1372 static int cnss_pci_get_link_status(struct cnss_pci_data *pci_priv)
1373 {
1374 	u16 link_status;
1375 	int ret;
1376 
1377 	ret = pcie_capability_read_word(pci_priv->pci_dev, PCI_EXP_LNKSTA,
1378 					&link_status);
1379 	if (ret)
1380 		return ret;
1381 
1382 	cnss_pr_dbg("Get PCI link status register: %u\n", link_status);
1383 
1384 	pci_priv->def_link_speed = link_status & PCI_EXP_LNKSTA_CLS;
1385 	pci_priv->def_link_width =
1386 		(link_status & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
1387 	pci_priv->cur_link_speed = pci_priv->def_link_speed;
1388 
1389 	cnss_pr_dbg("Default PCI link speed is 0x%x, link width is 0x%x\n",
1390 		    pci_priv->def_link_speed, pci_priv->def_link_width);
1391 
1392 	return 0;
1393 }
1394 
1395 static void cnss_pci_soc_scratch_reg_dump(struct cnss_pci_data *pci_priv)
1396 {
1397 	u32 reg_offset, val;
1398 	int i;
1399 
1400 	switch (pci_priv->device_id) {
1401 	case QCA6390_DEVICE_ID:
1402 	case QCA6490_DEVICE_ID:
1403 	case KIWI_DEVICE_ID:
1404 	case MANGO_DEVICE_ID:
1405 	case PEACH_DEVICE_ID:
1406 		break;
1407 	default:
1408 		return;
1409 	}
1410 
1411 	if (in_interrupt() || irqs_disabled())
1412 		return;
1413 
1414 	if (cnss_pci_check_link_status(pci_priv))
1415 		return;
1416 
1417 	cnss_pr_dbg("Start to dump SOC Scratch registers\n");
1418 
1419 	for (i = 0; pci_scratch[i].name; i++) {
1420 		reg_offset = pci_scratch[i].offset;
1421 		if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
1422 			return;
1423 		cnss_pr_dbg("PCIE_SOC_REG_%s = 0x%x\n",
1424 			    pci_scratch[i].name, val);
1425 	}
1426 }
1427 
1428 static void cnss_pci_soc_reset_cause_reg_dump(struct cnss_pci_data *pci_priv)
1429 {
1430 	u32 val;
1431 
1432 	switch (pci_priv->device_id) {
1433 	case PEACH_DEVICE_ID:
1434 		break;
1435 	default:
1436 		return;
1437 	}
1438 
1439 	if (in_interrupt() || irqs_disabled())
1440 		return;
1441 
1442 	if (cnss_pci_check_link_status(pci_priv))
1443 		return;
1444 
1445 	cnss_pr_dbg("Start to dump SOC Reset Cause registers\n");
1446 
1447 	if (cnss_pci_reg_read(pci_priv, WLAON_SOC_RESET_CAUSE_SHADOW_REG,
1448 			      &val))
1449 		return;
1450 	cnss_pr_dbg("WLAON_SOC_RESET_CAUSE_SHADOW_REG = 0x%x\n",
1451 		     val);
1452 
1453 }
1454 
1455 static void cnss_pci_bhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
1456 {
1457 	u32 reg_offset, val;
1458 	int i;
1459 
1460 	switch (pci_priv->device_id) {
1461 	case PEACH_DEVICE_ID:
1462 		break;
1463 	default:
1464 		return;
1465 	}
1466 
1467 	if (cnss_pci_check_link_status(pci_priv))
1468 		return;
1469 
1470 	cnss_pr_dbg("Start to dump PCIE BHIE DEBUG registers\n");
1471 
1472 	for (i = 0; pci_bhi_debug[i].name; i++) {
1473 		reg_offset = pci_bhi_debug[i].offset;
1474 		if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
1475 			return;
1476 		cnss_pr_dbg("PCIE__%s = 0x%x\n",
1477 			     pci_bhi_debug[i].name, val);
1478 	}
1479 }
1480 
1481 int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
1482 {
1483 	int ret = 0;
1484 
1485 	if (!pci_priv)
1486 		return -ENODEV;
1487 
1488 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
1489 		cnss_pr_info("PCI link is already suspended\n");
1490 		goto out;
1491 	}
1492 
1493 	pci_clear_master(pci_priv->pci_dev);
1494 
1495 	ret = cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
1496 	if (ret)
1497 		goto out;
1498 
1499 	pci_disable_device(pci_priv->pci_dev);
1500 
1501 	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
1502 		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D3hot);
1503 		if (ret)
1504 			cnss_pr_err("Failed to set D3Hot, err =  %d\n", ret);
1505 	}
1506 
1507 	/* Always do PCIe L2 suspend during power off/PCIe link recovery */
1508 	pci_priv->drv_connected_last = 0;
1509 
1510 	ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
1511 	if (ret)
1512 		goto out;
1513 
1514 	pci_priv->pci_link_state = PCI_LINK_DOWN;
1515 
1516 	return 0;
1517 out:
1518 	return ret;
1519 }
1520 
1521 int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
1522 {
1523 	int ret = 0;
1524 
1525 	if (!pci_priv)
1526 		return -ENODEV;
1527 
1528 	if (pci_priv->pci_link_state == PCI_LINK_UP) {
1529 		cnss_pr_info("PCI link is already resumed\n");
1530 		goto out;
1531 	}
1532 
1533 	ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
1534 	if (ret) {
1535 		ret = -EAGAIN;
1536 		cnss_pci_update_link_event(pci_priv,
1537 					   BUS_EVENT_PCI_LINK_RESUME_FAIL, NULL);
1538 		goto out;
1539 	}
1540 
1541 	pci_priv->pci_link_state = PCI_LINK_UP;
1542 
1543 	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
1544 		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D0);
1545 		if (ret) {
1546 			cnss_pr_err("Failed to set D0, err = %d\n", ret);
1547 			goto out;
1548 		}
1549 	}
1550 
1551 	ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
1552 	if (ret)
1553 		goto out;
1554 
1555 	ret = pci_enable_device(pci_priv->pci_dev);
1556 	if (ret) {
1557 		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
1558 		goto out;
1559 	}
1560 
1561 	pci_set_master(pci_priv->pci_dev);
1562 
1563 	if (pci_priv->pci_link_down_ind)
1564 		pci_priv->pci_link_down_ind = false;
1565 
1566 	return 0;
1567 out:
1568 	return ret;
1569 }
1570 
1571 static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
1572 				       enum cnss_bus_event_type type,
1573 				       void *data)
1574 {
1575 	struct cnss_bus_event bus_event;
1576 
1577 	bus_event.etype = type;
1578 	bus_event.event_data = data;
1579 	cnss_pci_call_driver_uevent(pci_priv, CNSS_BUS_EVENT, &bus_event);
1580 }
1581 
1582 void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv)
1583 {
1584 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1585 	struct pci_dev *pci_dev = pci_priv->pci_dev;
1586 	unsigned long flags;
1587 
1588 	if (test_bit(ENABLE_PCI_LINK_DOWN_PANIC,
1589 		     &plat_priv->ctrl_params.quirks))
1590 		panic("cnss: PCI link is down\n");
1591 
1592 	spin_lock_irqsave(&pci_link_down_lock, flags);
1593 	if (pci_priv->pci_link_down_ind) {
1594 		cnss_pr_dbg("PCI link down recovery is in progress, ignore\n");
1595 		spin_unlock_irqrestore(&pci_link_down_lock, flags);
1596 		return;
1597 	}
1598 	pci_priv->pci_link_down_ind = true;
1599 	spin_unlock_irqrestore(&pci_link_down_lock, flags);
1600 
1601 	if (pci_priv->mhi_ctrl) {
1602 		/* Notify MHI about link down*/
1603 		mhi_report_error(pci_priv->mhi_ctrl);
1604 	}
1605 
1606 	if (pci_dev->device == QCA6174_DEVICE_ID)
1607 		disable_irq_nosync(pci_dev->irq);
1608 
1609 	/* Notify bus related event. Now for all supported chips.
1610 	 * Here PCIe LINK_DOWN notification taken care.
1611 	 * uevent buffer can be extended later, to cover more bus info.
1612 	 */
1613 	cnss_pci_update_link_event(pci_priv, BUS_EVENT_PCI_LINK_DOWN, NULL);
1614 
1615 	cnss_fatal_err("PCI link down, schedule recovery\n");
1616 	reinit_completion(&pci_priv->wake_event_complete);
1617 	cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
1618 }
1619 
1620 int cnss_pci_link_down(struct device *dev)
1621 {
1622 	struct pci_dev *pci_dev = to_pci_dev(dev);
1623 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1624 	struct cnss_plat_data *plat_priv = NULL;
1625 	int ret;
1626 
1627 	if (!pci_priv) {
1628 		cnss_pr_err("pci_priv is NULL\n");
1629 		return -EINVAL;
1630 	}
1631 
1632 	plat_priv = pci_priv->plat_priv;
1633 	if (!plat_priv) {
1634 		cnss_pr_err("plat_priv is NULL\n");
1635 		return -ENODEV;
1636 	}
1637 
1638 	if (pci_priv->pci_link_down_ind) {
1639 		cnss_pr_dbg("PCI link down recovery is already in progress\n");
1640 		return -EBUSY;
1641 	}
1642 
1643 	if (pci_priv->drv_connected_last &&
1644 	    of_property_read_bool(plat_priv->plat_dev->dev.of_node,
1645 				  "cnss-enable-self-recovery"))
1646 		plat_priv->ctrl_params.quirks |= BIT(LINK_DOWN_SELF_RECOVERY);
1647 
1648 	cnss_pr_err("PCI link down is detected by drivers\n");
1649 
1650 	ret = cnss_pci_assert_perst(pci_priv);
1651 	if (ret)
1652 		cnss_pci_handle_linkdown(pci_priv);
1653 
1654 	return ret;
1655 }
1656 EXPORT_SYMBOL(cnss_pci_link_down);
1657 
1658 int cnss_pci_get_reg_dump(struct device *dev, uint8_t *buffer, uint32_t len)
1659 {
1660 	struct pci_dev *pci_dev = to_pci_dev(dev);
1661 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1662 
1663 	if (!pci_priv) {
1664 		cnss_pr_err("pci_priv is NULL\n");
1665 		return -ENODEV;
1666 	}
1667 
1668 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
1669 		cnss_pr_dbg("No PCIe reg dump since PCIe is suspended(D3)\n");
1670 		return -EACCES;
1671 	}
1672 
1673 	cnss_pr_dbg("Start to get PCIe reg dump\n");
1674 
1675 	return _cnss_pci_get_reg_dump(pci_priv, buffer, len);
1676 }
1677 EXPORT_SYMBOL(cnss_pci_get_reg_dump);
1678 
1679 int cnss_pcie_is_device_down(struct cnss_pci_data *pci_priv)
1680 {
1681 	struct cnss_plat_data *plat_priv;
1682 
1683 	if (!pci_priv) {
1684 		cnss_pr_err("pci_priv is NULL\n");
1685 		return -ENODEV;
1686 	}
1687 
1688 	plat_priv = pci_priv->plat_priv;
1689 	if (!plat_priv) {
1690 		cnss_pr_err("plat_priv is NULL\n");
1691 		return -ENODEV;
1692 	}
1693 
1694 	return test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) |
1695 		pci_priv->pci_link_down_ind;
1696 }
1697 
1698 int cnss_pci_is_device_down(struct device *dev)
1699 {
1700 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
1701 
1702 	return cnss_pcie_is_device_down(pci_priv);
1703 }
1704 EXPORT_SYMBOL(cnss_pci_is_device_down);
1705 
1706 void cnss_pci_lock_reg_window(struct device *dev, unsigned long *flags)
1707 {
1708 	spin_lock_bh(&pci_reg_window_lock);
1709 }
1710 EXPORT_SYMBOL(cnss_pci_lock_reg_window);
1711 
1712 void cnss_pci_unlock_reg_window(struct device *dev, unsigned long *flags)
1713 {
1714 	spin_unlock_bh(&pci_reg_window_lock);
1715 }
1716 EXPORT_SYMBOL(cnss_pci_unlock_reg_window);
1717 
1718 int cnss_get_pci_slot(struct device *dev)
1719 {
1720 	struct pci_dev *pci_dev = to_pci_dev(dev);
1721 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1722 	struct cnss_plat_data *plat_priv = NULL;
1723 
1724 	if (!pci_priv) {
1725 		cnss_pr_err("pci_priv is NULL\n");
1726 		return -EINVAL;
1727 	}
1728 
1729 	plat_priv = pci_priv->plat_priv;
1730 	if (!plat_priv) {
1731 		cnss_pr_err("plat_priv is NULL\n");
1732 		return -ENODEV;
1733 	}
1734 
1735 	return plat_priv->rc_num;
1736 }
1737 EXPORT_SYMBOL(cnss_get_pci_slot);
1738 
1739 /**
1740  * cnss_pci_dump_bl_sram_mem - Dump WLAN device bootloader debug log
1741  * @pci_priv: driver PCI bus context pointer
1742  *
1743  * Dump primary and secondary bootloader debug log data. For SBL check the
1744  * log struct address and size for validity.
1745  *
1746  * Return: None
1747  */
1748 static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
1749 {
1750 	enum mhi_ee_type ee;
1751 	u32 mem_addr, val, pbl_log_max_size, sbl_log_max_size;
1752 	u32 pbl_log_sram_start;
1753 	u32 pbl_stage, sbl_log_start, sbl_log_size;
1754 	u32 pbl_wlan_boot_cfg, pbl_bootstrap_status;
1755 	u32 pbl_bootstrap_status_reg = PBL_BOOTSTRAP_STATUS;
1756 	u32 sbl_log_def_start = SRAM_START;
1757 	u32 sbl_log_def_end = SRAM_END;
1758 	int i;
1759 
1760 	cnss_pci_soc_reset_cause_reg_dump(pci_priv);
1761 
1762 	switch (pci_priv->device_id) {
1763 	case QCA6390_DEVICE_ID:
1764 		pbl_log_sram_start = QCA6390_DEBUG_PBL_LOG_SRAM_START;
1765 		pbl_log_max_size = QCA6390_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1766 		sbl_log_max_size = QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1767 		break;
1768 	case QCA6490_DEVICE_ID:
1769 		pbl_log_sram_start = QCA6490_DEBUG_PBL_LOG_SRAM_START;
1770 		pbl_log_max_size = QCA6490_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1771 		sbl_log_max_size = QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1772 		break;
1773 	case KIWI_DEVICE_ID:
1774 		pbl_bootstrap_status_reg = KIWI_PBL_BOOTSTRAP_STATUS;
1775 		pbl_log_sram_start = KIWI_DEBUG_PBL_LOG_SRAM_START;
1776 		pbl_log_max_size = KIWI_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1777 		sbl_log_max_size = KIWI_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1778 		break;
1779 	case MANGO_DEVICE_ID:
1780 		pbl_bootstrap_status_reg = MANGO_PBL_BOOTSTRAP_STATUS;
1781 		pbl_log_sram_start = MANGO_DEBUG_PBL_LOG_SRAM_START;
1782 		pbl_log_max_size = MANGO_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1783 		sbl_log_max_size = MANGO_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1784 		break;
1785 	case PEACH_DEVICE_ID:
1786 		pbl_bootstrap_status_reg = PEACH_PBL_BOOTSTRAP_STATUS;
1787 		pbl_log_sram_start = PEACH_DEBUG_PBL_LOG_SRAM_START;
1788 		pbl_log_max_size = PEACH_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1789 		sbl_log_max_size = PEACH_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1790 		break;
1791 	default:
1792 		return;
1793 	}
1794 
1795 	if (cnss_pci_check_link_status(pci_priv))
1796 		return;
1797 
1798 	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
1799 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
1800 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
1801 	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
1802 	cnss_pci_reg_read(pci_priv, pbl_bootstrap_status_reg,
1803 			  &pbl_bootstrap_status);
1804 	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size:0x%08x\n",
1805 		    pbl_stage, sbl_log_start, sbl_log_size);
1806 	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x PBL_BOOTSTRAP_STATUS: 0x%08x\n",
1807 		    pbl_wlan_boot_cfg, pbl_bootstrap_status);
1808 
1809 	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
1810 	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
1811 		cnss_pr_err("Avoid Dumping PBL log data in Mission mode\n");
1812 		return;
1813 	}
1814 
1815 	cnss_pr_dbg("Dumping PBL log data\n");
1816 	for (i = 0; i < pbl_log_max_size; i += sizeof(val)) {
1817 		mem_addr = pbl_log_sram_start + i;
1818 		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
1819 			break;
1820 		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
1821 	}
1822 
1823 	sbl_log_size = (sbl_log_size > sbl_log_max_size ?
1824 			sbl_log_max_size : sbl_log_size);
1825 	if (sbl_log_start < sbl_log_def_start ||
1826 	    sbl_log_start > sbl_log_def_end ||
1827 	    (sbl_log_start + sbl_log_size) > sbl_log_def_end) {
1828 		cnss_pr_err("Invalid SBL log data\n");
1829 		return;
1830 	}
1831 
1832 	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
1833 	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
1834 		cnss_pr_err("Avoid Dumping SBL log data in Mission mode\n");
1835 		return;
1836 	}
1837 
1838 	cnss_pr_dbg("Dumping SBL log data\n");
1839 	for (i = 0; i < sbl_log_size; i += sizeof(val)) {
1840 		mem_addr = sbl_log_start + i;
1841 		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
1842 			break;
1843 		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
1844 	}
1845 }
1846 
1847 #ifdef CONFIG_DISABLE_CNSS_SRAM_DUMP
1848 static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
1849 {
1850 }
1851 #else
1852 static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
1853 {
1854 	struct cnss_plat_data *plat_priv;
1855 	u32 i, mem_addr;
1856 	u32 *dump_ptr;
1857 
1858 	plat_priv = pci_priv->plat_priv;
1859 
1860 	if (plat_priv->device_id != QCA6490_DEVICE_ID ||
1861 	    cnss_get_host_build_type() != QMI_HOST_BUILD_TYPE_PRIMARY_V01)
1862 		return;
1863 
1864 	if (!plat_priv->sram_dump) {
1865 		cnss_pr_err("SRAM dump memory is not allocated\n");
1866 		return;
1867 	}
1868 
1869 	if (cnss_pci_check_link_status(pci_priv))
1870 		return;
1871 
1872 	cnss_pr_dbg("Dumping SRAM at 0x%lx\n", plat_priv->sram_dump);
1873 
1874 	for (i = 0; i < SRAM_DUMP_SIZE; i += sizeof(u32)) {
1875 		mem_addr = SRAM_START + i;
1876 		dump_ptr = (u32 *)(plat_priv->sram_dump + i);
1877 		if (cnss_pci_reg_read(pci_priv, mem_addr, dump_ptr)) {
1878 			cnss_pr_err("SRAM Dump failed at 0x%x\n", mem_addr);
1879 			break;
1880 		}
1881 		/* Relinquish CPU after dumping 256KB chunks*/
1882 		if (!(i % CNSS_256KB_SIZE))
1883 			cond_resched();
1884 	}
1885 }
1886 #endif
1887 
1888 static int cnss_pci_handle_mhi_poweron_timeout(struct cnss_pci_data *pci_priv)
1889 {
1890 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1891 
1892 	cnss_fatal_err("MHI power up returns timeout\n");
1893 
1894 	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE) ||
1895 	    cnss_get_dev_sol_value(plat_priv) > 0) {
1896 		/* Wait for RDDM if RDDM cookie is set or device SOL GPIO is
1897 		 * high. If RDDM times out, PBL/SBL error region may have been
1898 		 * erased so no need to dump them either.
1899 		 */
1900 		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
1901 		    !pci_priv->pci_link_down_ind) {
1902 			mod_timer(&pci_priv->dev_rddm_timer,
1903 				  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
1904 		}
1905 	} else {
1906 		cnss_pr_dbg("RDDM cookie is not set and device SOL is low\n");
1907 		cnss_mhi_debug_reg_dump(pci_priv);
1908 		cnss_pci_bhi_debug_reg_dump(pci_priv);
1909 		cnss_pci_soc_scratch_reg_dump(pci_priv);
1910 		/* Dump PBL/SBL error log if RDDM cookie is not set */
1911 		cnss_pci_dump_bl_sram_mem(pci_priv);
1912 		cnss_pci_dump_sram(pci_priv);
1913 		return -ETIMEDOUT;
1914 	}
1915 
1916 	return 0;
1917 }
1918 
1919 static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
1920 {
1921 	switch (mhi_state) {
1922 	case CNSS_MHI_INIT:
1923 		return "INIT";
1924 	case CNSS_MHI_DEINIT:
1925 		return "DEINIT";
1926 	case CNSS_MHI_POWER_ON:
1927 		return "POWER_ON";
1928 	case CNSS_MHI_POWERING_OFF:
1929 		return "POWERING_OFF";
1930 	case CNSS_MHI_POWER_OFF:
1931 		return "POWER_OFF";
1932 	case CNSS_MHI_FORCE_POWER_OFF:
1933 		return "FORCE_POWER_OFF";
1934 	case CNSS_MHI_SUSPEND:
1935 		return "SUSPEND";
1936 	case CNSS_MHI_RESUME:
1937 		return "RESUME";
1938 	case CNSS_MHI_TRIGGER_RDDM:
1939 		return "TRIGGER_RDDM";
1940 	case CNSS_MHI_RDDM_DONE:
1941 		return "RDDM_DONE";
1942 	default:
1943 		return "UNKNOWN";
1944 	}
1945 };
1946 
1947 static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
1948 					enum cnss_mhi_state mhi_state)
1949 {
1950 	switch (mhi_state) {
1951 	case CNSS_MHI_INIT:
1952 		if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
1953 			return 0;
1954 		break;
1955 	case CNSS_MHI_DEINIT:
1956 	case CNSS_MHI_POWER_ON:
1957 		if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
1958 		    !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
1959 			return 0;
1960 		break;
1961 	case CNSS_MHI_FORCE_POWER_OFF:
1962 		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
1963 			return 0;
1964 		break;
1965 	case CNSS_MHI_POWER_OFF:
1966 	case CNSS_MHI_SUSPEND:
1967 		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
1968 		    !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
1969 			return 0;
1970 		break;
1971 	case CNSS_MHI_RESUME:
1972 		if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
1973 			return 0;
1974 		break;
1975 	case CNSS_MHI_TRIGGER_RDDM:
1976 		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
1977 		    !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
1978 			return 0;
1979 		break;
1980 	case CNSS_MHI_RDDM_DONE:
1981 		return 0;
1982 	default:
1983 		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
1984 			    cnss_mhi_state_to_str(mhi_state), mhi_state);
1985 	}
1986 
1987 	cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
1988 		    cnss_mhi_state_to_str(mhi_state), mhi_state,
1989 		    pci_priv->mhi_state);
1990 	if (mhi_state != CNSS_MHI_TRIGGER_RDDM)
1991 		CNSS_ASSERT(0);
1992 
1993 	return -EINVAL;
1994 }
1995 
1996 static int cnss_rddm_trigger_debug(struct cnss_pci_data *pci_priv)
1997 {
1998 	int read_val, ret;
1999 
2000 	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
2001 		return -EOPNOTSUPP;
2002 
2003 	if (cnss_pci_check_link_status(pci_priv))
2004 		return -EINVAL;
2005 
2006 	cnss_pr_err("Write GCC Spare with ACE55 Pattern");
2007 	cnss_pci_reg_write(pci_priv, GCC_GCC_SPARE_REG_1, 0xACE55);
2008 	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
2009 	cnss_pr_err("Read back GCC Spare: 0x%x, ret: %d", read_val, ret);
2010 	ret = cnss_pci_reg_read(pci_priv, GCC_PRE_ARES_DEBUG_TIMER_VAL,
2011 				&read_val);
2012 	cnss_pr_err("Warm reset allowed check: 0x%x, ret: %d", read_val, ret);
2013 	return ret;
2014 }
2015 
2016 static int cnss_rddm_trigger_check(struct cnss_pci_data *pci_priv)
2017 {
2018 	int read_val, ret;
2019 	u32 pbl_stage, sbl_log_start, sbl_log_size, pbl_wlan_boot_cfg;
2020 
2021 	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
2022 		return -EOPNOTSUPP;
2023 
2024 	if (cnss_pci_check_link_status(pci_priv))
2025 		return -EINVAL;
2026 
2027 	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
2028 	cnss_pr_err("Read GCC spare to check reset status: 0x%x, ret: %d",
2029 		    read_val, ret);
2030 
2031 	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
2032 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
2033 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
2034 	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
2035 	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size:0x%08x \n",
2036 		    pbl_stage, sbl_log_start, sbl_log_size);
2037 	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x\n", pbl_wlan_boot_cfg);
2038 
2039 	return ret;
2040 }
2041 
2042 static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
2043 				       enum cnss_mhi_state mhi_state)
2044 {
2045 	switch (mhi_state) {
2046 	case CNSS_MHI_INIT:
2047 		set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
2048 		break;
2049 	case CNSS_MHI_DEINIT:
2050 		clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
2051 		break;
2052 	case CNSS_MHI_POWER_ON:
2053 		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
2054 		break;
2055 	case CNSS_MHI_POWERING_OFF:
2056 		set_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
2057 		break;
2058 	case CNSS_MHI_POWER_OFF:
2059 	case CNSS_MHI_FORCE_POWER_OFF:
2060 		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
2061 		clear_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
2062 		clear_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
2063 		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
2064 		break;
2065 	case CNSS_MHI_SUSPEND:
2066 		set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
2067 		break;
2068 	case CNSS_MHI_RESUME:
2069 		clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
2070 		break;
2071 	case CNSS_MHI_TRIGGER_RDDM:
2072 		set_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
2073 		break;
2074 	case CNSS_MHI_RDDM_DONE:
2075 		set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
2076 		break;
2077 	default:
2078 		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
2079 	}
2080 }
2081 
2082 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
2083 static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
2084 {
2085 	return mhi_pm_resume_force(pci_priv->mhi_ctrl);
2086 }
2087 #else
2088 static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
2089 {
2090 	return mhi_pm_resume(pci_priv->mhi_ctrl);
2091 }
2092 #endif
2093 
2094 static int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
2095 				  enum cnss_mhi_state mhi_state)
2096 {
2097 	int ret = 0, retry = 0;
2098 
2099 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
2100 		return 0;
2101 
2102 	if (mhi_state < 0) {
2103 		cnss_pr_err("Invalid MHI state (%d)\n", mhi_state);
2104 		return -EINVAL;
2105 	}
2106 
2107 	ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state);
2108 	if (ret)
2109 		goto out;
2110 
2111 	cnss_pr_vdbg("Setting MHI state: %s(%d)\n",
2112 		     cnss_mhi_state_to_str(mhi_state), mhi_state);
2113 
2114 	switch (mhi_state) {
2115 	case CNSS_MHI_INIT:
2116 		ret = mhi_prepare_for_power_up(pci_priv->mhi_ctrl);
2117 		break;
2118 	case CNSS_MHI_DEINIT:
2119 		mhi_unprepare_after_power_down(pci_priv->mhi_ctrl);
2120 		ret = 0;
2121 		break;
2122 	case CNSS_MHI_POWER_ON:
2123 		ret = mhi_sync_power_up(pci_priv->mhi_ctrl);
2124 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
2125 		/* Only set img_pre_alloc when power up succeeds */
2126 		if (!ret && !pci_priv->mhi_ctrl->img_pre_alloc) {
2127 			cnss_pr_dbg("Notify MHI to use already allocated images\n");
2128 			pci_priv->mhi_ctrl->img_pre_alloc = true;
2129 		}
2130 #endif
2131 		break;
2132 	case CNSS_MHI_POWER_OFF:
2133 		mhi_power_down(pci_priv->mhi_ctrl, true);
2134 		ret = 0;
2135 		break;
2136 	case CNSS_MHI_FORCE_POWER_OFF:
2137 		mhi_power_down(pci_priv->mhi_ctrl, false);
2138 		ret = 0;
2139 		break;
2140 	case CNSS_MHI_SUSPEND:
2141 retry_mhi_suspend:
2142 		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
2143 		if (pci_priv->drv_connected_last)
2144 			ret = cnss_mhi_pm_fast_suspend(pci_priv, true);
2145 		else
2146 			ret = mhi_pm_suspend(pci_priv->mhi_ctrl);
2147 		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
2148 		if (ret == -EBUSY && retry++ < MHI_SUSPEND_RETRY_MAX_TIMES) {
2149 			cnss_pr_vdbg("Retry MHI suspend #%d\n", retry);
2150 			usleep_range(MHI_SUSPEND_RETRY_DELAY_US,
2151 				     MHI_SUSPEND_RETRY_DELAY_US + 1000);
2152 			goto retry_mhi_suspend;
2153 		}
2154 		break;
2155 	case CNSS_MHI_RESUME:
2156 		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
2157 		if (pci_priv->drv_connected_last) {
2158 			ret = cnss_pci_prevent_l1(&pci_priv->pci_dev->dev);
2159 			if (ret) {
2160 				mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
2161 				break;
2162 			}
2163 			ret = cnss_mhi_pm_fast_resume(pci_priv, true);
2164 			cnss_pci_allow_l1(&pci_priv->pci_dev->dev);
2165 		} else {
2166 			if (pci_priv->device_id == QCA6390_DEVICE_ID)
2167 				ret = cnss_mhi_pm_force_resume(pci_priv);
2168 			else
2169 				ret = mhi_pm_resume(pci_priv->mhi_ctrl);
2170 		}
2171 		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
2172 		break;
2173 	case CNSS_MHI_TRIGGER_RDDM:
2174 		cnss_rddm_trigger_debug(pci_priv);
2175 		ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
2176 		if (ret) {
2177 			cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
2178 
2179 			cnss_pr_dbg("Sending host reset req\n");
2180 			ret = cnss_mhi_force_reset(pci_priv);
2181 			cnss_rddm_trigger_check(pci_priv);
2182 		}
2183 		break;
2184 	case CNSS_MHI_RDDM_DONE:
2185 		break;
2186 	default:
2187 		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
2188 		ret = -EINVAL;
2189 	}
2190 
2191 	if (ret)
2192 		goto out;
2193 
2194 	cnss_pci_set_mhi_state_bit(pci_priv, mhi_state);
2195 
2196 	return 0;
2197 
2198 out:
2199 	cnss_pr_err("Failed to set MHI state: %s(%d), err = %d\n",
2200 		    cnss_mhi_state_to_str(mhi_state), mhi_state, ret);
2201 	return ret;
2202 }
2203 
2204 static int cnss_pci_config_msi_addr(struct cnss_pci_data *pci_priv)
2205 {
2206 	int ret = 0;
2207 	struct pci_dev *pci_dev = pci_priv->pci_dev;
2208 	struct cnss_plat_data *plat_priv;
2209 
2210 	if (!pci_dev)
2211 		return -ENODEV;
2212 
2213 	if (!pci_dev->msix_enabled)
2214 		return ret;
2215 
2216 	plat_priv = pci_priv->plat_priv;
2217 	if (!plat_priv) {
2218 		cnss_pr_err("plat_priv is NULL\n");
2219 		return -ENODEV;
2220 	}
2221 
2222 	ret = of_property_read_u32(plat_priv->plat_dev->dev.of_node,
2223 				   "msix-match-addr",
2224 				   &pci_priv->msix_addr);
2225 	cnss_pr_dbg("MSI-X Match address is 0x%X\n",
2226 		    pci_priv->msix_addr);
2227 
2228 	return ret;
2229 }
2230 
2231 static int cnss_pci_config_msi_data(struct cnss_pci_data *pci_priv)
2232 {
2233 	struct msi_desc *msi_desc;
2234 	struct cnss_msi_config *msi_config;
2235 	struct pci_dev *pci_dev = pci_priv->pci_dev;
2236 
2237 	msi_config = pci_priv->msi_config;
2238 
2239 	if (pci_dev->msix_enabled) {
2240 		pci_priv->msi_ep_base_data = msi_config->users[0].base_vector;
2241 		cnss_pr_dbg("MSI-X base data is %d\n",
2242 			    pci_priv->msi_ep_base_data);
2243 		return 0;
2244 	}
2245 
2246 	msi_desc = irq_get_msi_desc(pci_dev->irq);
2247 	if (!msi_desc) {
2248 		cnss_pr_err("msi_desc is NULL!\n");
2249 		return -EINVAL;
2250 	}
2251 
2252 	pci_priv->msi_ep_base_data = msi_desc->msg.data;
2253 	cnss_pr_dbg("MSI base data is %d\n", pci_priv->msi_ep_base_data);
2254 
2255 	return 0;
2256 }
2257 
2258 #ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
2259 #define PLC_PCIE_NAME_LEN		14
2260 
2261 static struct cnss_plat_data *
2262 cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
2263 {
2264 	int plat_env_count = cnss_get_plat_env_count();
2265 	struct cnss_plat_data *plat_env;
2266 	struct cnss_pci_data *pci_priv;
2267 	int i = 0;
2268 
2269 	if (!driver_ops) {
2270 		cnss_pr_err("No cnss driver\n");
2271 		return NULL;
2272 	}
2273 
2274 	for (i = 0; i < plat_env_count; i++) {
2275 		plat_env = cnss_get_plat_env(i);
2276 		if (!plat_env)
2277 			continue;
2278 		if (driver_ops->name && plat_env->pld_bus_ops_name) {
2279 			/* driver_ops->name = PLD_PCIE_OPS_NAME
2280 			 * #ifdef MULTI_IF_NAME
2281 			 * #define PLD_PCIE_OPS_NAME "pld_pcie_" MULTI_IF_NAME
2282 			 * #else
2283 			 * #define PLD_PCIE_OPS_NAME "pld_pcie"
2284 			 * #endif
2285 			 */
2286 			if (memcmp(driver_ops->name,
2287 				   plat_env->pld_bus_ops_name,
2288 				   PLC_PCIE_NAME_LEN) == 0)
2289 				return plat_env;
2290 		}
2291 	}
2292 
2293 	cnss_pr_vdbg("Invalid cnss driver name from ko %s\n", driver_ops->name);
2294 	/* in the dual wlan card case, the pld_bus_ops_name from dts
2295 	 * and driver_ops-> name from ko should match, otherwise
2296 	 * wlanhost driver don't know which plat_env it can use;
2297 	 * if doesn't find the match one, then get first available
2298 	 * instance insteadly.
2299 	 */
2300 
2301 	for (i = 0; i < plat_env_count; i++) {
2302 		plat_env = cnss_get_plat_env(i);
2303 
2304 		if (!plat_env)
2305 			continue;
2306 
2307 		pci_priv = plat_env->bus_priv;
2308 		if (!pci_priv) {
2309 			cnss_pr_err("pci_priv is NULL\n");
2310 			continue;
2311 		}
2312 
2313 		if (driver_ops == pci_priv->driver_ops)
2314 			return plat_env;
2315 	}
2316 	/* Doesn't find the existing instance,
2317 	 * so return the fist empty instance
2318 	 */
2319 	for (i = 0; i < plat_env_count; i++) {
2320 		plat_env = cnss_get_plat_env(i);
2321 
2322 		if (!plat_env)
2323 			continue;
2324 		pci_priv = plat_env->bus_priv;
2325 		if (!pci_priv) {
2326 			cnss_pr_err("pci_priv is NULL\n");
2327 			continue;
2328 		}
2329 
2330 		if (!pci_priv->driver_ops)
2331 			return plat_env;
2332 	}
2333 
2334 	return NULL;
2335 }
2336 
2337 static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
2338 {
2339 	int ret = 0;
2340 	u32 scratch = QCA6390_PCIE_SOC_PCIE_REG_PCIE_SCRATCH_2_SOC_PCIE_REG;
2341 	struct cnss_plat_data *plat_priv;
2342 
2343 	if (!pci_priv) {
2344 		cnss_pr_err("pci_priv is NULL\n");
2345 		return -ENODEV;
2346 	}
2347 
2348 	plat_priv = pci_priv->plat_priv;
2349 	/**
2350 	 * in the single wlan chipset case, plat_priv->qrtr_node_id always is 0,
2351 	 * wlan fw will use the hardcode 7 as the qrtr node id.
2352 	 * in the dual Hastings case, we will read qrtr node id
2353 	 * from device tree and pass to get plat_priv->qrtr_node_id,
2354 	 * which always is not zero. And then store this new value
2355 	 * to pcie register, wlan fw will read out this qrtr node id
2356 	 * from this register and overwrite to the hardcode one
2357 	 * while do initialization for ipc router.
2358 	 * without this change, two Hastings will use the same
2359 	 * qrtr node instance id, which will mess up qmi message
2360 	 * exchange. According to qrtr spec, every node should
2361 	 * have unique qrtr node id
2362 	 */
2363 	if (plat_priv->device_id == QCA6390_DEVICE_ID &&
2364 	    plat_priv->qrtr_node_id) {
2365 		u32 val;
2366 
2367 		cnss_pr_dbg("write 0x%x to SCRATCH REG\n",
2368 			    plat_priv->qrtr_node_id);
2369 		ret = cnss_pci_reg_write(pci_priv, scratch,
2370 					 plat_priv->qrtr_node_id);
2371 		if (ret) {
2372 			cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
2373 				    scratch, ret);
2374 			goto out;
2375 		}
2376 
2377 		ret = cnss_pci_reg_read(pci_priv, scratch, &val);
2378 		if (ret) {
2379 			cnss_pr_err("Failed to read SCRATCH REG");
2380 			goto out;
2381 		}
2382 
2383 		if (val != plat_priv->qrtr_node_id) {
2384 			cnss_pr_err("qrtr node id write to register doesn't match with readout value");
2385 			return -ERANGE;
2386 		}
2387 	}
2388 out:
2389 	return ret;
2390 }
2391 #else
2392 static struct cnss_plat_data *
2393 cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
2394 {
2395 	return cnss_bus_dev_to_plat_priv(NULL);
2396 }
2397 
2398 static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
2399 {
2400 	return 0;
2401 }
2402 #endif
2403 
2404 int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
2405 {
2406 	int ret = 0;
2407 	struct cnss_plat_data *plat_priv;
2408 	unsigned int timeout = 0;
2409 	int retry = 0;
2410 
2411 	if (!pci_priv) {
2412 		cnss_pr_err("pci_priv is NULL\n");
2413 		return -ENODEV;
2414 	}
2415 
2416 	plat_priv = pci_priv->plat_priv;
2417 	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
2418 		return 0;
2419 
2420 	if (MHI_TIMEOUT_OVERWRITE_MS)
2421 		pci_priv->mhi_ctrl->timeout_ms = MHI_TIMEOUT_OVERWRITE_MS;
2422 	cnss_mhi_set_m2_timeout_ms(pci_priv, MHI_M2_TIMEOUT_MS);
2423 
2424 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
2425 	if (ret)
2426 		return ret;
2427 
2428 	timeout = pci_priv->mhi_ctrl->timeout_ms;
2429 	/* For non-perf builds the timeout is 10 (default) * 6 seconds */
2430 	if (cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
2431 		pci_priv->mhi_ctrl->timeout_ms *= 6;
2432 	else /* For perf builds the timeout is 10 (default) * 3 seconds */
2433 		pci_priv->mhi_ctrl->timeout_ms *= 3;
2434 
2435 retry:
2436 	ret = cnss_pci_store_qrtr_node_id(pci_priv);
2437 	if (ret) {
2438 		if (retry++ < REG_RETRY_MAX_TIMES)
2439 			goto retry;
2440 		else
2441 			return ret;
2442 	}
2443 
2444 	/* Start the timer to dump MHI/PBL/SBL debug data periodically */
2445 	mod_timer(&pci_priv->boot_debug_timer,
2446 		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
2447 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON);
2448 	del_timer_sync(&pci_priv->boot_debug_timer);
2449 	if (ret == 0)
2450 		cnss_wlan_adsp_pc_enable(pci_priv, false);
2451 
2452 	pci_priv->mhi_ctrl->timeout_ms = timeout;
2453 
2454 	if (ret == -ETIMEDOUT) {
2455 		/* This is a special case needs to be handled that if MHI
2456 		 * power on returns -ETIMEDOUT, controller needs to take care
2457 		 * the cleanup by calling MHI power down. Force to set the bit
2458 		 * for driver internal MHI state to make sure it can be handled
2459 		 * properly later.
2460 		 */
2461 		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
2462 		ret = cnss_pci_handle_mhi_poweron_timeout(pci_priv);
2463 	} else if (!ret) {
2464 		/* kernel may allocate a dummy vector before request_irq and
2465 		 * then allocate a real vector when request_irq is called.
2466 		 * So get msi_data here again to avoid spurious interrupt
2467 		 * as msi_data will configured to srngs.
2468 		 */
2469 		if (cnss_pci_is_one_msi(pci_priv))
2470 			ret = cnss_pci_config_msi_data(pci_priv);
2471 	}
2472 
2473 	return ret;
2474 }
2475 
2476 static void cnss_pci_power_off_mhi(struct cnss_pci_data *pci_priv)
2477 {
2478 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2479 
2480 	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
2481 		return;
2482 
2483 	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state)) {
2484 		cnss_pr_dbg("MHI is already powered off\n");
2485 		return;
2486 	}
2487 	cnss_wlan_adsp_pc_enable(pci_priv, true);
2488 	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
2489 	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_POWERING_OFF);
2490 
2491 	if (!pci_priv->pci_link_down_ind)
2492 		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
2493 	else
2494 		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_FORCE_POWER_OFF);
2495 }
2496 
2497 static void cnss_pci_deinit_mhi(struct cnss_pci_data *pci_priv)
2498 {
2499 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2500 
2501 	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
2502 		return;
2503 
2504 	if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state)) {
2505 		cnss_pr_dbg("MHI is already deinited\n");
2506 		return;
2507 	}
2508 
2509 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
2510 }
2511 
2512 static void cnss_pci_set_wlaon_pwr_ctrl(struct cnss_pci_data *pci_priv,
2513 					bool set_vddd4blow, bool set_shutdown,
2514 					bool do_force_wake)
2515 {
2516 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2517 	int ret;
2518 	u32 val;
2519 
2520 	if (!plat_priv->set_wlaon_pwr_ctrl)
2521 		return;
2522 
2523 	if (pci_priv->pci_link_state == PCI_LINK_DOWN ||
2524 	    pci_priv->pci_link_down_ind)
2525 		return;
2526 
2527 	if (do_force_wake)
2528 		if (cnss_pci_force_wake_get(pci_priv))
2529 			return;
2530 
2531 	ret = cnss_pci_reg_read(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, &val);
2532 	if (ret) {
2533 		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
2534 			    WLAON_QFPROM_PWR_CTRL_REG, ret);
2535 		goto force_wake_put;
2536 	}
2537 
2538 	cnss_pr_dbg("Read register offset 0x%x, val = 0x%x\n",
2539 		    WLAON_QFPROM_PWR_CTRL_REG, val);
2540 
2541 	if (set_vddd4blow)
2542 		val |= QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;
2543 	else
2544 		val &= ~QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;
2545 
2546 	if (set_shutdown)
2547 		val |= QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;
2548 	else
2549 		val &= ~QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;
2550 
2551 	ret = cnss_pci_reg_write(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, val);
2552 	if (ret) {
2553 		cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
2554 			    WLAON_QFPROM_PWR_CTRL_REG, ret);
2555 		goto force_wake_put;
2556 	}
2557 
2558 	cnss_pr_dbg("Write val 0x%x to register offset 0x%x\n", val,
2559 		    WLAON_QFPROM_PWR_CTRL_REG);
2560 
2561 	if (set_shutdown)
2562 		usleep_range(WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US,
2563 			     WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US);
2564 
2565 force_wake_put:
2566 	if (do_force_wake)
2567 		cnss_pci_force_wake_put(pci_priv);
2568 }
2569 
2570 static int cnss_pci_get_device_timestamp(struct cnss_pci_data *pci_priv,
2571 					 u64 *time_us)
2572 {
2573 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2574 	u32 low, high;
2575 	u64 device_ticks;
2576 
2577 	if (!plat_priv->device_freq_hz) {
2578 		cnss_pr_err("Device time clock frequency is not valid\n");
2579 		return -EINVAL;
2580 	}
2581 
2582 	switch (pci_priv->device_id) {
2583 	case KIWI_DEVICE_ID:
2584 	case MANGO_DEVICE_ID:
2585 	case PEACH_DEVICE_ID:
2586 		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_LOW, &low);
2587 		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_HIGH, &high);
2588 		break;
2589 	default:
2590 		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL3, &low);
2591 		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL4, &high);
2592 		break;
2593 	}
2594 
2595 	device_ticks = (u64)high << 32 | low;
2596 	do_div(device_ticks, plat_priv->device_freq_hz / 100000);
2597 	*time_us = device_ticks * 10;
2598 
2599 	return 0;
2600 }
2601 
2602 static void cnss_pci_enable_time_sync_counter(struct cnss_pci_data *pci_priv)
2603 {
2604 	switch (pci_priv->device_id) {
2605 	case KIWI_DEVICE_ID:
2606 	case MANGO_DEVICE_ID:
2607 	case PEACH_DEVICE_ID:
2608 		return;
2609 	default:
2610 		break;
2611 	}
2612 
2613 	cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
2614 			   TIME_SYNC_ENABLE);
2615 }
2616 
2617 static void cnss_pci_clear_time_sync_counter(struct cnss_pci_data *pci_priv)
2618 {
2619 	switch (pci_priv->device_id) {
2620 	case KIWI_DEVICE_ID:
2621 	case MANGO_DEVICE_ID:
2622 	case PEACH_DEVICE_ID:
2623 		return;
2624 	default:
2625 		break;
2626 	}
2627 
2628 	cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
2629 			   TIME_SYNC_CLEAR);
2630 }
2631 
2632 
2633 static void cnss_pci_time_sync_reg_update(struct cnss_pci_data *pci_priv,
2634 					  u32 low, u32 high)
2635 {
2636 	u32 time_reg_low;
2637 	u32 time_reg_high;
2638 
2639 	switch (pci_priv->device_id) {
2640 	case KIWI_DEVICE_ID:
2641 	case MANGO_DEVICE_ID:
2642 	case PEACH_DEVICE_ID:
2643 		/* Use the next two shadow registers after host's usage */
2644 		time_reg_low = PCIE_SHADOW_REG_VALUE_0 +
2645 				(pci_priv->plat_priv->num_shadow_regs_v3 *
2646 				 SHADOW_REG_LEN_BYTES);
2647 		time_reg_high = time_reg_low + SHADOW_REG_LEN_BYTES;
2648 		break;
2649 	default:
2650 		time_reg_low = PCIE_SHADOW_REG_VALUE_34;
2651 		time_reg_high = PCIE_SHADOW_REG_VALUE_35;
2652 		break;
2653 	}
2654 
2655 	cnss_pci_reg_write(pci_priv, time_reg_low, low);
2656 	cnss_pci_reg_write(pci_priv, time_reg_high, high);
2657 
2658 	cnss_pci_reg_read(pci_priv, time_reg_low, &low);
2659 	cnss_pci_reg_read(pci_priv, time_reg_high, &high);
2660 
2661 	cnss_pr_dbg("Updated time sync regs [0x%x] = 0x%x, [0x%x] = 0x%x\n",
2662 		    time_reg_low, low, time_reg_high, high);
2663 }
2664 
2665 static int cnss_pci_update_timestamp(struct cnss_pci_data *pci_priv)
2666 {
2667 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2668 	struct device *dev = &pci_priv->pci_dev->dev;
2669 	unsigned long flags = 0;
2670 	u64 host_time_us, device_time_us, offset;
2671 	u32 low, high;
2672 	int ret;
2673 
2674 	ret = cnss_pci_prevent_l1(dev);
2675 	if (ret)
2676 		goto out;
2677 
2678 	ret = cnss_pci_force_wake_get(pci_priv);
2679 	if (ret)
2680 		goto allow_l1;
2681 
2682 	spin_lock_irqsave(&time_sync_lock, flags);
2683 	cnss_pci_clear_time_sync_counter(pci_priv);
2684 	cnss_pci_enable_time_sync_counter(pci_priv);
2685 	host_time_us = cnss_get_host_timestamp(plat_priv);
2686 	ret = cnss_pci_get_device_timestamp(pci_priv, &device_time_us);
2687 	cnss_pci_clear_time_sync_counter(pci_priv);
2688 	spin_unlock_irqrestore(&time_sync_lock, flags);
2689 	if (ret)
2690 		goto force_wake_put;
2691 
2692 	if (host_time_us < device_time_us) {
2693 		cnss_pr_err("Host time (%llu us) is smaller than device time (%llu us), stop\n",
2694 			    host_time_us, device_time_us);
2695 		ret = -EINVAL;
2696 		goto force_wake_put;
2697 	}
2698 
2699 	offset = host_time_us - device_time_us;
2700 	cnss_pr_dbg("Host time = %llu us, device time = %llu us, offset = %llu us\n",
2701 		    host_time_us, device_time_us, offset);
2702 
2703 	low = offset & 0xFFFFFFFF;
2704 	high = offset >> 32;
2705 
2706 	cnss_pci_time_sync_reg_update(pci_priv, low, high);
2707 
2708 force_wake_put:
2709 	cnss_pci_force_wake_put(pci_priv);
2710 allow_l1:
2711 	cnss_pci_allow_l1(dev);
2712 out:
2713 	return ret;
2714 }
2715 
2716 static void cnss_pci_time_sync_work_hdlr(struct work_struct *work)
2717 {
2718 	struct cnss_pci_data *pci_priv =
2719 		container_of(work, struct cnss_pci_data, time_sync_work.work);
2720 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2721 	unsigned int time_sync_period_ms =
2722 		plat_priv->ctrl_params.time_sync_period;
2723 
2724 	if (test_bit(DISABLE_TIME_SYNC, &plat_priv->ctrl_params.quirks)) {
2725 		cnss_pr_dbg("Time sync is disabled\n");
2726 		return;
2727 	}
2728 
2729 	if (!time_sync_period_ms) {
2730 		cnss_pr_dbg("Skip time sync as time period is 0\n");
2731 		return;
2732 	}
2733 
2734 	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
2735 		return;
2736 
2737 	if (cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS) < 0)
2738 		goto runtime_pm_put;
2739 
2740 	mutex_lock(&pci_priv->bus_lock);
2741 	cnss_pci_update_timestamp(pci_priv);
2742 	mutex_unlock(&pci_priv->bus_lock);
2743 	schedule_delayed_work(&pci_priv->time_sync_work,
2744 			      msecs_to_jiffies(time_sync_period_ms));
2745 
2746 runtime_pm_put:
2747 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
2748 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
2749 }
2750 
2751 static int cnss_pci_start_time_sync_update(struct cnss_pci_data *pci_priv)
2752 {
2753 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2754 
2755 	switch (pci_priv->device_id) {
2756 	case QCA6390_DEVICE_ID:
2757 	case QCA6490_DEVICE_ID:
2758 	case KIWI_DEVICE_ID:
2759 	case MANGO_DEVICE_ID:
2760 	case PEACH_DEVICE_ID:
2761 		break;
2762 	default:
2763 		return -EOPNOTSUPP;
2764 	}
2765 
2766 	if (!plat_priv->device_freq_hz) {
2767 		cnss_pr_dbg("Device time clock frequency is not valid, skip time sync\n");
2768 		return -EINVAL;
2769 	}
2770 
2771 	cnss_pci_time_sync_work_hdlr(&pci_priv->time_sync_work.work);
2772 
2773 	return 0;
2774 }
2775 
2776 static void cnss_pci_stop_time_sync_update(struct cnss_pci_data *pci_priv)
2777 {
2778 	switch (pci_priv->device_id) {
2779 	case QCA6390_DEVICE_ID:
2780 	case QCA6490_DEVICE_ID:
2781 	case KIWI_DEVICE_ID:
2782 	case MANGO_DEVICE_ID:
2783 	case PEACH_DEVICE_ID:
2784 		break;
2785 	default:
2786 		return;
2787 	}
2788 
2789 	cancel_delayed_work_sync(&pci_priv->time_sync_work);
2790 }
2791 
2792 int cnss_pci_set_therm_cdev_state(struct cnss_pci_data *pci_priv,
2793 				  unsigned long thermal_state,
2794 				  int tcdev_id)
2795 {
2796 	if (!pci_priv) {
2797 		cnss_pr_err("pci_priv is NULL!\n");
2798 		return -ENODEV;
2799 	}
2800 
2801 	if (!pci_priv->driver_ops || !pci_priv->driver_ops->set_therm_cdev_state) {
2802 		cnss_pr_err("driver_ops or set_therm_cdev_state is NULL\n");
2803 		return -EINVAL;
2804 	}
2805 
2806 	return pci_priv->driver_ops->set_therm_cdev_state(pci_priv->pci_dev,
2807 							 thermal_state,
2808 							 tcdev_id);
2809 }
2810 
2811 int cnss_pci_update_time_sync_period(struct cnss_pci_data *pci_priv,
2812 				     unsigned int time_sync_period)
2813 {
2814 	struct cnss_plat_data *plat_priv;
2815 
2816 	if (!pci_priv)
2817 		return -ENODEV;
2818 
2819 	plat_priv = pci_priv->plat_priv;
2820 
2821 	cnss_pci_stop_time_sync_update(pci_priv);
2822 	plat_priv->ctrl_params.time_sync_period = time_sync_period;
2823 	cnss_pci_start_time_sync_update(pci_priv);
2824 	cnss_pr_dbg("WLAN time sync period %u ms\n",
2825 		    plat_priv->ctrl_params.time_sync_period);
2826 
2827 	return 0;
2828 }
2829 
2830 int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
2831 {
2832 	int ret = 0;
2833 	struct cnss_plat_data *plat_priv;
2834 
2835 	if (!pci_priv)
2836 		return -ENODEV;
2837 
2838 	plat_priv = pci_priv->plat_priv;
2839 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
2840 		cnss_pr_err("Reboot is in progress, skip driver probe\n");
2841 		return -EINVAL;
2842 	}
2843 
2844 	if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
2845 		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2846 		cnss_pr_dbg("Skip driver probe\n");
2847 		goto out;
2848 	}
2849 
2850 	if (!pci_priv->driver_ops) {
2851 		cnss_pr_err("driver_ops is NULL\n");
2852 		ret = -EINVAL;
2853 		goto out;
2854 	}
2855 
2856 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
2857 	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
2858 		ret = pci_priv->driver_ops->reinit(pci_priv->pci_dev,
2859 						   pci_priv->pci_device_id);
2860 		if (ret) {
2861 			cnss_pr_err("Failed to reinit host driver, err = %d\n",
2862 				    ret);
2863 			goto out;
2864 		}
2865 		complete(&plat_priv->recovery_complete);
2866 	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
2867 		ret = pci_priv->driver_ops->probe(pci_priv->pci_dev,
2868 						  pci_priv->pci_device_id);
2869 		if (ret) {
2870 			cnss_pr_err("Failed to probe host driver, err = %d\n",
2871 				    ret);
2872 			complete_all(&plat_priv->power_up_complete);
2873 			goto out;
2874 		}
2875 		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
2876 		set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
2877 		cnss_pci_free_blob_mem(pci_priv);
2878 		complete_all(&plat_priv->power_up_complete);
2879 	} else if (test_bit(CNSS_DRIVER_IDLE_RESTART,
2880 			    &plat_priv->driver_state)) {
2881 		ret = pci_priv->driver_ops->idle_restart(pci_priv->pci_dev,
2882 			pci_priv->pci_device_id);
2883 		if (ret) {
2884 			cnss_pr_err("Failed to idle restart host driver, err = %d\n",
2885 				    ret);
2886 			plat_priv->power_up_error = ret;
2887 			complete_all(&plat_priv->power_up_complete);
2888 			goto out;
2889 		}
2890 		clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
2891 		complete_all(&plat_priv->power_up_complete);
2892 	} else {
2893 		complete(&plat_priv->power_up_complete);
2894 	}
2895 
2896 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
2897 		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2898 		__pm_relax(plat_priv->recovery_ws);
2899 	}
2900 
2901 	cnss_pci_start_time_sync_update(pci_priv);
2902 
2903 	return 0;
2904 
2905 out:
2906 	return ret;
2907 }
2908 
2909 int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
2910 {
2911 	struct cnss_plat_data *plat_priv;
2912 	int ret;
2913 
2914 	if (!pci_priv)
2915 		return -ENODEV;
2916 
2917 	plat_priv = pci_priv->plat_priv;
2918 
2919 	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) ||
2920 	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
2921 	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
2922 		cnss_pr_dbg("Skip driver remove\n");
2923 		return 0;
2924 	}
2925 
2926 	if (!pci_priv->driver_ops) {
2927 		cnss_pr_err("driver_ops is NULL\n");
2928 		return -EINVAL;
2929 	}
2930 
2931 	cnss_pci_stop_time_sync_update(pci_priv);
2932 
2933 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
2934 	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
2935 		complete(&plat_priv->rddm_complete);
2936 		pci_priv->driver_ops->shutdown(pci_priv->pci_dev);
2937 	} else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
2938 		pci_priv->driver_ops->remove(pci_priv->pci_dev);
2939 		clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
2940 	} else if (test_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
2941 			    &plat_priv->driver_state)) {
2942 		ret = pci_priv->driver_ops->idle_shutdown(pci_priv->pci_dev);
2943 		if (ret == -EAGAIN) {
2944 			clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
2945 				  &plat_priv->driver_state);
2946 			return ret;
2947 		}
2948 	}
2949 
2950 	plat_priv->get_info_cb_ctx = NULL;
2951 	plat_priv->get_info_cb = NULL;
2952 
2953 	return 0;
2954 }
2955 
2956 int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv,
2957 				      int modem_current_status)
2958 {
2959 	struct cnss_wlan_driver *driver_ops;
2960 
2961 	if (!pci_priv)
2962 		return -ENODEV;
2963 
2964 	driver_ops = pci_priv->driver_ops;
2965 	if (!driver_ops || !driver_ops->modem_status)
2966 		return -EINVAL;
2967 
2968 	driver_ops->modem_status(pci_priv->pci_dev, modem_current_status);
2969 
2970 	return 0;
2971 }
2972 
2973 int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
2974 			   enum cnss_driver_status status)
2975 {
2976 	struct cnss_wlan_driver *driver_ops;
2977 
2978 	if (!pci_priv)
2979 		return -ENODEV;
2980 
2981 	driver_ops = pci_priv->driver_ops;
2982 	if (!driver_ops || !driver_ops->update_status)
2983 		return -EINVAL;
2984 
2985 	cnss_pr_dbg("Update driver status: %d\n", status);
2986 
2987 	driver_ops->update_status(pci_priv->pci_dev, status);
2988 
2989 	return 0;
2990 }
2991 
2992 static void cnss_pci_misc_reg_dump(struct cnss_pci_data *pci_priv,
2993 				   struct cnss_misc_reg *misc_reg,
2994 				   u32 misc_reg_size,
2995 				   char *reg_name)
2996 {
2997 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2998 	bool do_force_wake_put = true;
2999 	int i;
3000 
3001 	if (!misc_reg)
3002 		return;
3003 
3004 	if (in_interrupt() || irqs_disabled())
3005 		return;
3006 
3007 	if (cnss_pci_check_link_status(pci_priv))
3008 		return;
3009 
3010 	if (cnss_pci_force_wake_get(pci_priv)) {
3011 		/* Continue to dump when device has entered RDDM already */
3012 		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
3013 			return;
3014 		do_force_wake_put = false;
3015 	}
3016 
3017 	cnss_pr_dbg("Start to dump %s registers\n", reg_name);
3018 
3019 	for (i = 0; i < misc_reg_size; i++) {
3020 		if (!test_bit(pci_priv->misc_reg_dev_mask,
3021 			      &misc_reg[i].dev_mask))
3022 			continue;
3023 
3024 		if (misc_reg[i].wr) {
3025 			if (misc_reg[i].offset ==
3026 			    QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG &&
3027 			    i >= 1)
3028 				misc_reg[i].val =
3029 				QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG_MSK |
3030 				misc_reg[i - 1].val;
3031 			if (cnss_pci_reg_write(pci_priv,
3032 					       misc_reg[i].offset,
3033 					       misc_reg[i].val))
3034 				goto force_wake_put;
3035 			cnss_pr_vdbg("Write 0x%X to 0x%X\n",
3036 				     misc_reg[i].val,
3037 				     misc_reg[i].offset);
3038 
3039 		} else {
3040 			if (cnss_pci_reg_read(pci_priv,
3041 					      misc_reg[i].offset,
3042 					      &misc_reg[i].val))
3043 				goto force_wake_put;
3044 		}
3045 	}
3046 
3047 force_wake_put:
3048 	if (do_force_wake_put)
3049 		cnss_pci_force_wake_put(pci_priv);
3050 }
3051 
3052 static void cnss_pci_dump_misc_reg(struct cnss_pci_data *pci_priv)
3053 {
3054 	if (in_interrupt() || irqs_disabled())
3055 		return;
3056 
3057 	if (cnss_pci_check_link_status(pci_priv))
3058 		return;
3059 
3060 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wcss_reg,
3061 			       WCSS_REG_SIZE, "wcss");
3062 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->pcie_reg,
3063 			       PCIE_REG_SIZE, "pcie");
3064 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wlaon_reg,
3065 			       WLAON_REG_SIZE, "wlaon");
3066 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->syspm_reg,
3067 			       SYSPM_REG_SIZE, "syspm");
3068 }
3069 
3070 static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv)
3071 {
3072 	int i, j = 0, array_size = SHADOW_REG_COUNT + SHADOW_REG_INTER_COUNT;
3073 	u32 reg_offset;
3074 	bool do_force_wake_put = true;
3075 
3076 	if (in_interrupt() || irqs_disabled())
3077 		return;
3078 
3079 	if (cnss_pci_check_link_status(pci_priv))
3080 		return;
3081 
3082 	if (!pci_priv->debug_reg) {
3083 		pci_priv->debug_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
3084 						   sizeof(*pci_priv->debug_reg)
3085 						   * array_size, GFP_KERNEL);
3086 		if (!pci_priv->debug_reg)
3087 			return;
3088 	}
3089 
3090 	if (cnss_pci_force_wake_get(pci_priv))
3091 		do_force_wake_put = false;
3092 
3093 	cnss_pr_dbg("Start to dump shadow registers\n");
3094 
3095 	for (i = 0; i < SHADOW_REG_COUNT; i++, j++) {
3096 		reg_offset = PCIE_SHADOW_REG_VALUE_0 + i * 4;
3097 		pci_priv->debug_reg[j].offset = reg_offset;
3098 		if (cnss_pci_reg_read(pci_priv, reg_offset,
3099 				      &pci_priv->debug_reg[j].val))
3100 			goto force_wake_put;
3101 	}
3102 
3103 	for (i = 0; i < SHADOW_REG_INTER_COUNT; i++, j++) {
3104 		reg_offset = PCIE_SHADOW_REG_INTER_0 + i * 4;
3105 		pci_priv->debug_reg[j].offset = reg_offset;
3106 		if (cnss_pci_reg_read(pci_priv, reg_offset,
3107 				      &pci_priv->debug_reg[j].val))
3108 			goto force_wake_put;
3109 	}
3110 
3111 force_wake_put:
3112 	if (do_force_wake_put)
3113 		cnss_pci_force_wake_put(pci_priv);
3114 }
3115 
3116 static int cnss_qca6174_powerup(struct cnss_pci_data *pci_priv)
3117 {
3118 	int ret = 0;
3119 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3120 
3121 	ret = cnss_power_on_device(plat_priv, false);
3122 	if (ret) {
3123 		cnss_pr_err("Failed to power on device, err = %d\n", ret);
3124 		goto out;
3125 	}
3126 
3127 	ret = cnss_resume_pci_link(pci_priv);
3128 	if (ret) {
3129 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
3130 		goto power_off;
3131 	}
3132 
3133 	ret = cnss_pci_call_driver_probe(pci_priv);
3134 	if (ret)
3135 		goto suspend_link;
3136 
3137 	return 0;
3138 suspend_link:
3139 	cnss_suspend_pci_link(pci_priv);
3140 power_off:
3141 	cnss_power_off_device(plat_priv);
3142 out:
3143 	return ret;
3144 }
3145 
3146 static int cnss_qca6174_shutdown(struct cnss_pci_data *pci_priv)
3147 {
3148 	int ret = 0;
3149 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3150 
3151 	cnss_pci_pm_runtime_resume(pci_priv);
3152 
3153 	ret = cnss_pci_call_driver_remove(pci_priv);
3154 	if (ret == -EAGAIN)
3155 		goto out;
3156 
3157 	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
3158 				   CNSS_BUS_WIDTH_NONE);
3159 	cnss_pci_set_monitor_wake_intr(pci_priv, false);
3160 	cnss_pci_set_auto_suspended(pci_priv, 0);
3161 
3162 	ret = cnss_suspend_pci_link(pci_priv);
3163 	if (ret)
3164 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
3165 
3166 	cnss_power_off_device(plat_priv);
3167 
3168 	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
3169 	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);
3170 
3171 out:
3172 	return ret;
3173 }
3174 
3175 static void cnss_qca6174_crash_shutdown(struct cnss_pci_data *pci_priv)
3176 {
3177 	if (pci_priv->driver_ops && pci_priv->driver_ops->crash_shutdown)
3178 		pci_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
3179 }
3180 
3181 static int cnss_qca6174_ramdump(struct cnss_pci_data *pci_priv)
3182 {
3183 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3184 	struct cnss_ramdump_info *ramdump_info;
3185 
3186 	ramdump_info = &plat_priv->ramdump_info;
3187 	if (!ramdump_info->ramdump_size)
3188 		return -EINVAL;
3189 
3190 	return cnss_do_ramdump(plat_priv);
3191 }
3192 
3193 static void cnss_get_driver_mode_update_fw_name(struct cnss_plat_data *plat_priv)
3194 {
3195 	struct cnss_pci_data *pci_priv;
3196 	struct cnss_wlan_driver *driver_ops;
3197 
3198 	pci_priv = plat_priv->bus_priv;
3199 	driver_ops = pci_priv->driver_ops;
3200 
3201 	if (driver_ops && driver_ops->get_driver_mode) {
3202 		plat_priv->driver_mode = driver_ops->get_driver_mode();
3203 		cnss_pci_update_fw_name(pci_priv);
3204 		cnss_pr_dbg("New driver mode is %d", plat_priv->driver_mode);
3205 	}
3206 }
3207 
3208 static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
3209 {
3210 	int ret = 0;
3211 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3212 	unsigned int timeout;
3213 	int retry = 0, bt_en_gpio = plat_priv->pinctrl_info.bt_en_gpio;
3214 	int sw_ctrl_gpio = plat_priv->pinctrl_info.sw_ctrl_gpio;
3215 
3216 	if (plat_priv->ramdump_info_v2.dump_data_valid) {
3217 		cnss_pci_clear_dump_info(pci_priv);
3218 		cnss_pci_power_off_mhi(pci_priv);
3219 		cnss_suspend_pci_link(pci_priv);
3220 		cnss_pci_deinit_mhi(pci_priv);
3221 		cnss_power_off_device(plat_priv);
3222 	}
3223 
3224 	/* Clear QMI send usage count during every power up */
3225 	pci_priv->qmi_send_usage_count = 0;
3226 
3227 	plat_priv->power_up_error = 0;
3228 
3229 	cnss_get_driver_mode_update_fw_name(plat_priv);
3230 retry:
3231 	ret = cnss_power_on_device(plat_priv, false);
3232 	if (ret) {
3233 		cnss_pr_err("Failed to power on device, err = %d\n", ret);
3234 		goto out;
3235 	}
3236 
3237 	ret = cnss_resume_pci_link(pci_priv);
3238 	if (ret) {
3239 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
3240 		cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
3241 			    cnss_get_input_gpio_value(plat_priv, sw_ctrl_gpio));
3242 		if (test_bit(IGNORE_PCI_LINK_FAILURE,
3243 			     &plat_priv->ctrl_params.quirks)) {
3244 			cnss_pr_dbg("Ignore PCI link resume failure\n");
3245 			ret = 0;
3246 			goto out;
3247 		}
3248 		if (ret == -EAGAIN && retry++ < POWER_ON_RETRY_MAX_TIMES) {
3249 			cnss_power_off_device(plat_priv);
3250 			/* Force toggle BT_EN GPIO low */
3251 			if (retry == POWER_ON_RETRY_MAX_TIMES) {
3252 				cnss_pr_dbg("Retry #%d. Set BT_EN GPIO(%u) low\n",
3253 					    retry, bt_en_gpio);
3254 				if (bt_en_gpio >= 0)
3255 					gpio_direction_output(bt_en_gpio, 0);
3256 				cnss_pr_dbg("BT_EN GPIO val: %d\n",
3257 					    gpio_get_value(bt_en_gpio));
3258 			}
3259 			cnss_pr_dbg("Retry to resume PCI link #%d\n", retry);
3260 			cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
3261 				    cnss_get_input_gpio_value(plat_priv,
3262 							      sw_ctrl_gpio));
3263 			msleep(POWER_ON_RETRY_DELAY_MS * retry);
3264 			goto retry;
3265 		}
3266 		/* Assert when it reaches maximum retries */
3267 		CNSS_ASSERT(0);
3268 		goto power_off;
3269 	}
3270 
3271 	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false, false);
3272 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI);
3273 
3274 	ret = cnss_pci_start_mhi(pci_priv);
3275 	if (ret) {
3276 		cnss_fatal_err("Failed to start MHI, err = %d\n", ret);
3277 		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
3278 		    !pci_priv->pci_link_down_ind && timeout) {
3279 			/* Start recovery directly for MHI start failures */
3280 			cnss_schedule_recovery(&pci_priv->pci_dev->dev,
3281 					       CNSS_REASON_DEFAULT);
3282 		}
3283 		return 0;
3284 	}
3285 
3286 	if (test_bit(USE_CORE_ONLY_FW, &plat_priv->ctrl_params.quirks)) {
3287 		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
3288 		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
3289 		return 0;
3290 	}
3291 
3292 	cnss_set_pin_connect_status(plat_priv);
3293 
3294 	if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks)) {
3295 		ret = cnss_pci_call_driver_probe(pci_priv);
3296 		if (ret)
3297 			goto stop_mhi;
3298 	} else if (timeout) {
3299 		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state))
3300 			timeout += WLAN_COLD_BOOT_CAL_TIMEOUT;
3301 		else
3302 			timeout += WLAN_MISSION_MODE_TIMEOUT;
3303 		mod_timer(&plat_priv->fw_boot_timer,
3304 			  jiffies + msecs_to_jiffies(timeout));
3305 	}
3306 
3307 	return 0;
3308 
3309 stop_mhi:
3310 	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, true);
3311 	cnss_pci_power_off_mhi(pci_priv);
3312 	cnss_suspend_pci_link(pci_priv);
3313 	cnss_pci_deinit_mhi(pci_priv);
3314 power_off:
3315 	cnss_power_off_device(plat_priv);
3316 out:
3317 	return ret;
3318 }
3319 
3320 static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv)
3321 {
3322 	int ret = 0;
3323 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3324 	int do_force_wake = true;
3325 
3326 	cnss_pci_pm_runtime_resume(pci_priv);
3327 
3328 	ret = cnss_pci_call_driver_remove(pci_priv);
3329 	if (ret == -EAGAIN)
3330 		goto out;
3331 
3332 	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
3333 				   CNSS_BUS_WIDTH_NONE);
3334 	cnss_pci_set_monitor_wake_intr(pci_priv, false);
3335 	cnss_pci_set_auto_suspended(pci_priv, 0);
3336 
3337 	if ((test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
3338 	     test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
3339 	     test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) ||
3340 	     test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state) ||
3341 	     test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) &&
3342 	    test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
3343 		del_timer(&pci_priv->dev_rddm_timer);
3344 		cnss_pci_collect_dump_info(pci_priv, false);
3345 
3346 		if (!plat_priv->recovery_enabled)
3347 			CNSS_ASSERT(0);
3348 	}
3349 
3350 	if (!cnss_is_device_powered_on(plat_priv)) {
3351 		cnss_pr_dbg("Device is already powered off, ignore\n");
3352 		goto skip_power_off;
3353 	}
3354 
3355 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
3356 		do_force_wake = false;
3357 	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, do_force_wake);
3358 
3359 	/* FBC image will be freed after powering off MHI, so skip
3360 	 * if RAM dump data is still valid.
3361 	 */
3362 	if (plat_priv->ramdump_info_v2.dump_data_valid)
3363 		goto skip_power_off;
3364 
3365 	cnss_pci_power_off_mhi(pci_priv);
3366 	ret = cnss_suspend_pci_link(pci_priv);
3367 	if (ret)
3368 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
3369 	cnss_pci_deinit_mhi(pci_priv);
3370 	cnss_power_off_device(plat_priv);
3371 
3372 skip_power_off:
3373 	pci_priv->remap_window = 0;
3374 
3375 	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
3376 	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
3377 	if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
3378 	    test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
3379 		clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
3380 		pci_priv->pci_link_down_ind = false;
3381 	}
3382 	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
3383 	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);
3384 	memset(&print_optimize, 0, sizeof(print_optimize));
3385 
3386 out:
3387 	return ret;
3388 }
3389 
3390 static void cnss_qca6290_crash_shutdown(struct cnss_pci_data *pci_priv)
3391 {
3392 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3393 
3394 	set_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
3395 	cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
3396 		    plat_priv->driver_state);
3397 
3398 	cnss_pci_collect_dump_info(pci_priv, true);
3399 	clear_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
3400 }
3401 
3402 static int cnss_qca6290_ramdump(struct cnss_pci_data *pci_priv)
3403 {
3404 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3405 	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
3406 	struct cnss_dump_data *dump_data = &info_v2->dump_data;
3407 	struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
3408 	int ret = 0;
3409 
3410 	if (!info_v2->dump_data_valid || !dump_seg ||
3411 	    dump_data->nentries == 0)
3412 		return 0;
3413 
3414 	ret = cnss_do_elf_ramdump(plat_priv);
3415 
3416 	cnss_pci_clear_dump_info(pci_priv);
3417 	cnss_pci_power_off_mhi(pci_priv);
3418 	cnss_suspend_pci_link(pci_priv);
3419 	cnss_pci_deinit_mhi(pci_priv);
3420 	cnss_power_off_device(plat_priv);
3421 
3422 	return ret;
3423 }
3424 
3425 int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv)
3426 {
3427 	int ret = 0;
3428 
3429 	if (!pci_priv) {
3430 		cnss_pr_err("pci_priv is NULL\n");
3431 		return -ENODEV;
3432 	}
3433 
3434 	switch (pci_priv->device_id) {
3435 	case QCA6174_DEVICE_ID:
3436 		ret = cnss_qca6174_powerup(pci_priv);
3437 		break;
3438 	case QCA6290_DEVICE_ID:
3439 	case QCA6390_DEVICE_ID:
3440 	case QCN7605_DEVICE_ID:
3441 	case QCA6490_DEVICE_ID:
3442 	case KIWI_DEVICE_ID:
3443 	case MANGO_DEVICE_ID:
3444 	case PEACH_DEVICE_ID:
3445 		ret = cnss_qca6290_powerup(pci_priv);
3446 		break;
3447 	default:
3448 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3449 			    pci_priv->device_id);
3450 		ret = -ENODEV;
3451 	}
3452 
3453 	return ret;
3454 }
3455 
3456 int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv)
3457 {
3458 	int ret = 0;
3459 
3460 	if (!pci_priv) {
3461 		cnss_pr_err("pci_priv is NULL\n");
3462 		return -ENODEV;
3463 	}
3464 
3465 	switch (pci_priv->device_id) {
3466 	case QCA6174_DEVICE_ID:
3467 		ret = cnss_qca6174_shutdown(pci_priv);
3468 		break;
3469 	case QCA6290_DEVICE_ID:
3470 	case QCA6390_DEVICE_ID:
3471 	case QCN7605_DEVICE_ID:
3472 	case QCA6490_DEVICE_ID:
3473 	case KIWI_DEVICE_ID:
3474 	case MANGO_DEVICE_ID:
3475 	case PEACH_DEVICE_ID:
3476 		ret = cnss_qca6290_shutdown(pci_priv);
3477 		break;
3478 	default:
3479 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3480 			    pci_priv->device_id);
3481 		ret = -ENODEV;
3482 	}
3483 
3484 	return ret;
3485 }
3486 
3487 int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv)
3488 {
3489 	int ret = 0;
3490 
3491 	if (!pci_priv) {
3492 		cnss_pr_err("pci_priv is NULL\n");
3493 		return -ENODEV;
3494 	}
3495 
3496 	switch (pci_priv->device_id) {
3497 	case QCA6174_DEVICE_ID:
3498 		cnss_qca6174_crash_shutdown(pci_priv);
3499 		break;
3500 	case QCA6290_DEVICE_ID:
3501 	case QCA6390_DEVICE_ID:
3502 	case QCN7605_DEVICE_ID:
3503 	case QCA6490_DEVICE_ID:
3504 	case KIWI_DEVICE_ID:
3505 	case MANGO_DEVICE_ID:
3506 	case PEACH_DEVICE_ID:
3507 		cnss_qca6290_crash_shutdown(pci_priv);
3508 		break;
3509 	default:
3510 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3511 			    pci_priv->device_id);
3512 		ret = -ENODEV;
3513 	}
3514 
3515 	return ret;
3516 }
3517 
3518 int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv)
3519 {
3520 	int ret = 0;
3521 
3522 	if (!pci_priv) {
3523 		cnss_pr_err("pci_priv is NULL\n");
3524 		return -ENODEV;
3525 	}
3526 
3527 	switch (pci_priv->device_id) {
3528 	case QCA6174_DEVICE_ID:
3529 		ret = cnss_qca6174_ramdump(pci_priv);
3530 		break;
3531 	case QCA6290_DEVICE_ID:
3532 	case QCA6390_DEVICE_ID:
3533 	case QCN7605_DEVICE_ID:
3534 	case QCA6490_DEVICE_ID:
3535 	case KIWI_DEVICE_ID:
3536 	case MANGO_DEVICE_ID:
3537 	case PEACH_DEVICE_ID:
3538 		ret = cnss_qca6290_ramdump(pci_priv);
3539 		break;
3540 	default:
3541 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3542 			    pci_priv->device_id);
3543 		ret = -ENODEV;
3544 	}
3545 
3546 	return ret;
3547 }
3548 
3549 int cnss_pci_is_drv_connected(struct device *dev)
3550 {
3551 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
3552 
3553 	if (!pci_priv)
3554 		return -ENODEV;
3555 
3556 	return pci_priv->drv_connected_last;
3557 }
3558 EXPORT_SYMBOL(cnss_pci_is_drv_connected);
3559 
3560 static void cnss_wlan_reg_driver_work(struct work_struct *work)
3561 {
3562 	struct cnss_plat_data *plat_priv =
3563 	container_of(work, struct cnss_plat_data, wlan_reg_driver_work.work);
3564 	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
3565 	struct cnss_cal_info *cal_info;
3566 	unsigned int timeout;
3567 
3568 	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state))
3569 		return;
3570 
3571 	if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) {
3572 		goto reg_driver;
3573 	} else {
3574 		if (plat_priv->charger_mode) {
3575 			cnss_pr_err("Ignore calibration timeout in charger mode\n");
3576 			return;
3577 		}
3578 		if (!test_bit(CNSS_IN_COLD_BOOT_CAL,
3579 			      &plat_priv->driver_state)) {
3580 			timeout = cnss_get_timeout(plat_priv,
3581 						   CNSS_TIMEOUT_CALIBRATION);
3582 			cnss_pr_dbg("File system not ready to start calibration. Wait for %ds..\n",
3583 				    timeout / 1000);
3584 			schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
3585 					      msecs_to_jiffies(timeout));
3586 			return;
3587 		}
3588 
3589 		del_timer(&plat_priv->fw_boot_timer);
3590 		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) &&
3591 		    !test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3592 			cnss_pr_err("Timeout waiting for calibration to complete\n");
3593 			CNSS_ASSERT(0);
3594 		}
3595 		cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
3596 		if (!cal_info)
3597 			return;
3598 		cal_info->cal_status = CNSS_CAL_TIMEOUT;
3599 		cnss_driver_event_post(plat_priv,
3600 				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
3601 				       0, cal_info);
3602 	}
3603 reg_driver:
3604 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3605 		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
3606 		return;
3607 	}
3608 	reinit_completion(&plat_priv->power_up_complete);
3609 	cnss_driver_event_post(plat_priv,
3610 			       CNSS_DRIVER_EVENT_REGISTER_DRIVER,
3611 			       CNSS_EVENT_SYNC_UNKILLABLE,
3612 			       pci_priv->driver_ops);
3613 }
3614 
3615 int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
3616 {
3617 	int ret = 0;
3618 	struct cnss_plat_data *plat_priv;
3619 	struct cnss_pci_data *pci_priv;
3620 	const struct pci_device_id *id_table = driver_ops->id_table;
3621 	unsigned int timeout;
3622 
3623 	if (!cnss_check_driver_loading_allowed()) {
3624 		cnss_pr_info("No cnss2 dtsi entry present");
3625 		return -ENODEV;
3626 	}
3627 
3628 	plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops);
3629 
3630 	if (!plat_priv) {
3631 		cnss_pr_buf("plat_priv is not ready for register driver\n");
3632 		return -EAGAIN;
3633 	}
3634 
3635 	pci_priv = plat_priv->bus_priv;
3636 	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state)) {
3637 		while (id_table && id_table->device) {
3638 			if (plat_priv->device_id == id_table->device) {
3639 				if (plat_priv->device_id == KIWI_DEVICE_ID &&
3640 				    driver_ops->chip_version != 2) {
3641 					cnss_pr_err("WLAN HW disabled. kiwi_v2 only supported\n");
3642 					return -ENODEV;
3643 				}
3644 				cnss_pr_info("WLAN register driver deferred for device ID: 0x%x due to HW disable\n",
3645 					     id_table->device);
3646 				plat_priv->driver_ops = driver_ops;
3647 				return 0;
3648 			}
3649 			id_table++;
3650 		}
3651 		return -ENODEV;
3652 	}
3653 
3654 	if (!test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state)) {
3655 		cnss_pr_info("pci probe not yet done for register driver\n");
3656 		return -EAGAIN;
3657 	}
3658 
3659 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state)) {
3660 		cnss_pr_err("Driver has already registered\n");
3661 		return -EEXIST;
3662 	}
3663 
3664 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3665 		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
3666 		return -EINVAL;
3667 	}
3668 
3669 	if (!id_table || !pci_dev_present(id_table)) {
3670 		/* id_table pointer will move from pci_dev_present(),
3671 		 * so check again using local pointer.
3672 		 */
3673 		id_table = driver_ops->id_table;
3674 		while (id_table && id_table->vendor) {
3675 			cnss_pr_info("Host driver is built for PCIe device ID 0x%x\n",
3676 				     id_table->device);
3677 			id_table++;
3678 		}
3679 		cnss_pr_err("Enumerated PCIe device id is 0x%x, reject unsupported driver\n",
3680 			    pci_priv->device_id);
3681 		return -ENODEV;
3682 	}
3683 
3684 	if (driver_ops->chip_version != CNSS_CHIP_VER_ANY &&
3685 	    driver_ops->chip_version != plat_priv->device_version.major_version) {
3686 		cnss_pr_err("Driver built for chip ver 0x%x, enumerated ver 0x%x, reject unsupported driver\n",
3687 			    driver_ops->chip_version,
3688 			    plat_priv->device_version.major_version);
3689 		return -ENODEV;
3690 	}
3691 
3692 	cnss_get_driver_mode_update_fw_name(plat_priv);
3693 	set_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state);
3694 
3695 	if (!plat_priv->cbc_enabled ||
3696 	    test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state))
3697 		goto register_driver;
3698 
3699 	pci_priv->driver_ops = driver_ops;
3700 	/* If Cold Boot Calibration is enabled, it is the 1st step in init
3701 	 * sequence.CBC is done on file system_ready trigger. Qcacld will be
3702 	 * loaded from vendor_modprobe.sh at early boot and must be deferred
3703 	 * until CBC is complete
3704 	 */
3705 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_CALIBRATION);
3706 	INIT_DELAYED_WORK(&plat_priv->wlan_reg_driver_work,
3707 			  cnss_wlan_reg_driver_work);
3708 	schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
3709 			      msecs_to_jiffies(timeout));
3710 	cnss_pr_info("WLAN register driver deferred for Calibration\n");
3711 	return 0;
3712 register_driver:
3713 	reinit_completion(&plat_priv->power_up_complete);
3714 	ret = cnss_driver_event_post(plat_priv,
3715 				     CNSS_DRIVER_EVENT_REGISTER_DRIVER,
3716 				     CNSS_EVENT_SYNC_UNKILLABLE,
3717 				     driver_ops);
3718 
3719 	return ret;
3720 }
3721 EXPORT_SYMBOL(cnss_wlan_register_driver);
3722 
3723 void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
3724 {
3725 	struct cnss_plat_data *plat_priv;
3726 	int ret = 0;
3727 	unsigned int timeout;
3728 
3729 	plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops);
3730 	if (!plat_priv) {
3731 		cnss_pr_err("plat_priv is NULL\n");
3732 		return;
3733 	}
3734 
3735 	mutex_lock(&plat_priv->driver_ops_lock);
3736 
3737 	if (plat_priv->device_id == QCA6174_DEVICE_ID)
3738 		goto skip_wait_power_up;
3739 
3740 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_WLAN_WATCHDOG);
3741 	ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
3742 					  msecs_to_jiffies(timeout));
3743 	if (!ret) {
3744 		cnss_pr_err("Timeout (%ums) waiting for driver power up to complete\n",
3745 			    timeout);
3746 		CNSS_ASSERT(0);
3747 	}
3748 
3749 skip_wait_power_up:
3750 	if (!test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
3751 	    !test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
3752 		goto skip_wait_recovery;
3753 
3754 	reinit_completion(&plat_priv->recovery_complete);
3755 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY);
3756 	ret = wait_for_completion_timeout(&plat_priv->recovery_complete,
3757 					  msecs_to_jiffies(timeout));
3758 	if (!ret) {
3759 		cnss_pr_err("Timeout (%ums) waiting for recovery to complete\n",
3760 			    timeout);
3761 		CNSS_ASSERT(0);
3762 	}
3763 
3764 skip_wait_recovery:
3765 	cnss_driver_event_post(plat_priv,
3766 			       CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
3767 			       CNSS_EVENT_SYNC_UNKILLABLE, NULL);
3768 
3769 	mutex_unlock(&plat_priv->driver_ops_lock);
3770 }
3771 EXPORT_SYMBOL(cnss_wlan_unregister_driver);
3772 
3773 int cnss_pci_register_driver_hdlr(struct cnss_pci_data *pci_priv,
3774 				  void *data)
3775 {
3776 	int ret = 0;
3777 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3778 
3779 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3780 		cnss_pr_dbg("Reboot or shutdown is in progress, ignore register driver\n");
3781 		return -EINVAL;
3782 	}
3783 
3784 	set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
3785 	pci_priv->driver_ops = data;
3786 
3787 	ret = cnss_pci_dev_powerup(pci_priv);
3788 	if (ret) {
3789 		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
3790 		pci_priv->driver_ops = NULL;
3791 	} else {
3792 		set_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);
3793 	}
3794 
3795 	return ret;
3796 }
3797 
3798 int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
3799 {
3800 	struct cnss_plat_data *plat_priv;
3801 
3802 	if (!pci_priv)
3803 		return -EINVAL;
3804 
3805 	plat_priv = pci_priv->plat_priv;
3806 	set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
3807 	cnss_pci_dev_shutdown(pci_priv);
3808 	pci_priv->driver_ops = NULL;
3809 	clear_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);
3810 
3811 	return 0;
3812 }
3813 
3814 static int cnss_pci_suspend_driver(struct cnss_pci_data *pci_priv)
3815 {
3816 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3817 	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
3818 	int ret = 0;
3819 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3820 
3821 	pm_message_t state = { .event = PM_EVENT_SUSPEND };
3822 
3823 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
3824 	    driver_ops && driver_ops->suspend) {
3825 		ret = driver_ops->suspend(pci_dev, state);
3826 		if (ret) {
3827 			cnss_pr_err("Failed to suspend host driver, err = %d\n",
3828 				    ret);
3829 			ret = -EAGAIN;
3830 		}
3831 	}
3832 
3833 	return ret;
3834 }
3835 
3836 static int cnss_pci_resume_driver(struct cnss_pci_data *pci_priv)
3837 {
3838 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3839 	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
3840 	int ret = 0;
3841 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3842 
3843 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
3844 	    driver_ops && driver_ops->resume) {
3845 		ret = driver_ops->resume(pci_dev);
3846 		if (ret)
3847 			cnss_pr_err("Failed to resume host driver, err = %d\n",
3848 				    ret);
3849 	}
3850 
3851 	return ret;
3852 }
3853 
3854 int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv)
3855 {
3856 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3857 	int ret = 0;
3858 
3859 	if (pci_priv->pci_link_state == PCI_LINK_DOWN)
3860 		goto out;
3861 
3862 	if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) {
3863 		ret = -EAGAIN;
3864 		goto out;
3865 	}
3866 
3867 	if (pci_priv->drv_connected_last)
3868 		goto skip_disable_pci;
3869 
3870 	pci_clear_master(pci_dev);
3871 	cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
3872 	pci_disable_device(pci_dev);
3873 
3874 	ret = pci_set_power_state(pci_dev, PCI_D3hot);
3875 	if (ret)
3876 		cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
3877 
3878 skip_disable_pci:
3879 	if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
3880 		ret = -EAGAIN;
3881 		goto resume_mhi;
3882 	}
3883 	pci_priv->pci_link_state = PCI_LINK_DOWN;
3884 
3885 	return 0;
3886 
3887 resume_mhi:
3888 	if (!pci_is_enabled(pci_dev))
3889 		if (pci_enable_device(pci_dev))
3890 			cnss_pr_err("Failed to enable PCI device\n");
3891 	if (pci_priv->saved_state)
3892 		cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
3893 	pci_set_master(pci_dev);
3894 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
3895 out:
3896 	return ret;
3897 }
3898 
3899 int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv)
3900 {
3901 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3902 	int ret = 0;
3903 
3904 	if (pci_priv->pci_link_state == PCI_LINK_UP)
3905 		goto out;
3906 
3907 	if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
3908 		cnss_fatal_err("Failed to resume PCI link from suspend\n");
3909 		cnss_pci_link_down(&pci_dev->dev);
3910 		ret = -EAGAIN;
3911 		goto out;
3912 	}
3913 
3914 	pci_priv->pci_link_state = PCI_LINK_UP;
3915 
3916 	if (pci_priv->drv_connected_last)
3917 		goto skip_enable_pci;
3918 
3919 	ret = pci_enable_device(pci_dev);
3920 	if (ret) {
3921 		cnss_pr_err("Failed to enable PCI device, err = %d\n",
3922 			    ret);
3923 		goto out;
3924 	}
3925 
3926 	if (pci_priv->saved_state)
3927 		cnss_set_pci_config_space(pci_priv,
3928 					  RESTORE_PCI_CONFIG_SPACE);
3929 	pci_set_master(pci_dev);
3930 
3931 skip_enable_pci:
3932 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
3933 out:
3934 	return ret;
3935 }
3936 
3937 static int cnss_pci_suspend(struct device *dev)
3938 {
3939 	int ret = 0;
3940 	struct pci_dev *pci_dev = to_pci_dev(dev);
3941 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
3942 	struct cnss_plat_data *plat_priv;
3943 
3944 	if (!pci_priv)
3945 		goto out;
3946 
3947 	plat_priv = pci_priv->plat_priv;
3948 	if (!plat_priv)
3949 		goto out;
3950 
3951 	if (!cnss_is_device_powered_on(plat_priv))
3952 		goto out;
3953 
3954 	/* No mhi state bit set if only finish pcie enumeration,
3955 	 * so test_bit is not applicable to check if it is INIT state.
3956 	 */
3957 	if (pci_priv->mhi_state == CNSS_MHI_INIT) {
3958 		bool suspend = cnss_should_suspend_pwroff(pci_dev);
3959 
3960 		/* Do PCI link suspend and power off in the LPM case
3961 		 * if chipset didn't do that after pcie enumeration.
3962 		 */
3963 		if (!suspend) {
3964 			ret = cnss_suspend_pci_link(pci_priv);
3965 			if (ret)
3966 				cnss_pr_err("Failed to suspend PCI link, err = %d\n",
3967 					    ret);
3968 			cnss_power_off_device(plat_priv);
3969 			goto out;
3970 		}
3971 	}
3972 
3973 	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
3974 	    pci_priv->drv_supported) {
3975 		pci_priv->drv_connected_last =
3976 			cnss_pci_get_drv_connected(pci_priv);
3977 		if (!pci_priv->drv_connected_last) {
3978 			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
3979 			ret = -EAGAIN;
3980 			goto out;
3981 		}
3982 	}
3983 
3984 	set_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
3985 
3986 	ret = cnss_pci_suspend_driver(pci_priv);
3987 	if (ret)
3988 		goto clear_flag;
3989 
3990 	if (!pci_priv->disable_pc) {
3991 		mutex_lock(&pci_priv->bus_lock);
3992 		ret = cnss_pci_suspend_bus(pci_priv);
3993 		mutex_unlock(&pci_priv->bus_lock);
3994 		if (ret)
3995 			goto resume_driver;
3996 	}
3997 
3998 	cnss_pci_set_monitor_wake_intr(pci_priv, false);
3999 
4000 	return 0;
4001 
4002 resume_driver:
4003 	cnss_pci_resume_driver(pci_priv);
4004 clear_flag:
4005 	pci_priv->drv_connected_last = 0;
4006 	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
4007 out:
4008 	return ret;
4009 }
4010 
4011 static int cnss_pci_resume(struct device *dev)
4012 {
4013 	int ret = 0;
4014 	struct pci_dev *pci_dev = to_pci_dev(dev);
4015 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4016 	struct cnss_plat_data *plat_priv;
4017 
4018 	if (!pci_priv)
4019 		goto out;
4020 
4021 	plat_priv = pci_priv->plat_priv;
4022 	if (!plat_priv)
4023 		goto out;
4024 
4025 	if (pci_priv->pci_link_down_ind)
4026 		goto out;
4027 
4028 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4029 		goto out;
4030 
4031 	if (!pci_priv->disable_pc) {
4032 		mutex_lock(&pci_priv->bus_lock);
4033 		ret = cnss_pci_resume_bus(pci_priv);
4034 		mutex_unlock(&pci_priv->bus_lock);
4035 		if (ret)
4036 			goto out;
4037 	}
4038 
4039 	ret = cnss_pci_resume_driver(pci_priv);
4040 
4041 	pci_priv->drv_connected_last = 0;
4042 	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
4043 
4044 out:
4045 	return ret;
4046 }
4047 
4048 static int cnss_pci_suspend_noirq(struct device *dev)
4049 {
4050 	int ret = 0;
4051 	struct pci_dev *pci_dev = to_pci_dev(dev);
4052 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4053 	struct cnss_wlan_driver *driver_ops;
4054 	struct cnss_plat_data *plat_priv;
4055 
4056 	if (!pci_priv)
4057 		goto out;
4058 
4059 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4060 		goto out;
4061 
4062 	driver_ops = pci_priv->driver_ops;
4063 	plat_priv = pci_priv->plat_priv;
4064 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
4065 	    driver_ops && driver_ops->suspend_noirq)
4066 		ret = driver_ops->suspend_noirq(pci_dev);
4067 
4068 	if (pci_priv->disable_pc && !pci_dev->state_saved &&
4069 	    !pci_priv->plat_priv->use_pm_domain)
4070 		pci_save_state(pci_dev);
4071 
4072 out:
4073 	return ret;
4074 }
4075 
4076 static int cnss_pci_resume_noirq(struct device *dev)
4077 {
4078 	int ret = 0;
4079 	struct pci_dev *pci_dev = to_pci_dev(dev);
4080 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4081 	struct cnss_wlan_driver *driver_ops;
4082 	struct cnss_plat_data *plat_priv;
4083 
4084 	if (!pci_priv)
4085 		goto out;
4086 
4087 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4088 		goto out;
4089 
4090 	plat_priv = pci_priv->plat_priv;
4091 	driver_ops = pci_priv->driver_ops;
4092 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
4093 	    driver_ops && driver_ops->resume_noirq &&
4094 	    !pci_priv->pci_link_down_ind)
4095 		ret = driver_ops->resume_noirq(pci_dev);
4096 
4097 out:
4098 	return ret;
4099 }
4100 
4101 static int cnss_pci_runtime_suspend(struct device *dev)
4102 {
4103 	int ret = 0;
4104 	struct pci_dev *pci_dev = to_pci_dev(dev);
4105 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4106 	struct cnss_plat_data *plat_priv;
4107 	struct cnss_wlan_driver *driver_ops;
4108 
4109 	if (!pci_priv)
4110 		return -EAGAIN;
4111 
4112 	plat_priv = pci_priv->plat_priv;
4113 	if (!plat_priv)
4114 		return -EAGAIN;
4115 
4116 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4117 		return -EAGAIN;
4118 
4119 	if (pci_priv->pci_link_down_ind) {
4120 		cnss_pr_dbg("PCI link down recovery is in progress!\n");
4121 		return -EAGAIN;
4122 	}
4123 
4124 	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
4125 	    pci_priv->drv_supported) {
4126 		pci_priv->drv_connected_last =
4127 			cnss_pci_get_drv_connected(pci_priv);
4128 		if (!pci_priv->drv_connected_last) {
4129 			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
4130 			return -EAGAIN;
4131 		}
4132 	}
4133 
4134 	cnss_pr_vdbg("Runtime suspend start\n");
4135 
4136 	driver_ops = pci_priv->driver_ops;
4137 	if (driver_ops && driver_ops->runtime_ops &&
4138 	    driver_ops->runtime_ops->runtime_suspend)
4139 		ret = driver_ops->runtime_ops->runtime_suspend(pci_dev);
4140 	else
4141 		ret = cnss_auto_suspend(dev);
4142 
4143 	if (ret)
4144 		pci_priv->drv_connected_last = 0;
4145 
4146 	cnss_pr_vdbg("Runtime suspend status: %d\n", ret);
4147 
4148 	return ret;
4149 }
4150 
4151 static int cnss_pci_runtime_resume(struct device *dev)
4152 {
4153 	int ret = 0;
4154 	struct pci_dev *pci_dev = to_pci_dev(dev);
4155 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4156 	struct cnss_wlan_driver *driver_ops;
4157 
4158 	if (!pci_priv)
4159 		return -EAGAIN;
4160 
4161 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4162 		return -EAGAIN;
4163 
4164 	if (pci_priv->pci_link_down_ind) {
4165 		cnss_pr_dbg("PCI link down recovery is in progress!\n");
4166 		return -EAGAIN;
4167 	}
4168 
4169 	cnss_pr_vdbg("Runtime resume start\n");
4170 
4171 	driver_ops = pci_priv->driver_ops;
4172 	if (driver_ops && driver_ops->runtime_ops &&
4173 	    driver_ops->runtime_ops->runtime_resume)
4174 		ret = driver_ops->runtime_ops->runtime_resume(pci_dev);
4175 	else
4176 		ret = cnss_auto_resume(dev);
4177 
4178 	if (!ret)
4179 		pci_priv->drv_connected_last = 0;
4180 
4181 	cnss_pr_vdbg("Runtime resume status: %d\n", ret);
4182 
4183 	return ret;
4184 }
4185 
4186 static int cnss_pci_runtime_idle(struct device *dev)
4187 {
4188 	cnss_pr_vdbg("Runtime idle\n");
4189 
4190 	pm_request_autosuspend(dev);
4191 
4192 	return -EBUSY;
4193 }
4194 
4195 int cnss_wlan_pm_control(struct device *dev, bool vote)
4196 {
4197 	struct pci_dev *pci_dev = to_pci_dev(dev);
4198 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4199 	int ret = 0;
4200 
4201 	if (!pci_priv)
4202 		return -ENODEV;
4203 
4204 	ret = cnss_pci_disable_pc(pci_priv, vote);
4205 	if (ret)
4206 		return ret;
4207 
4208 	pci_priv->disable_pc = vote;
4209 	cnss_pr_dbg("%s PCIe power collapse\n", vote ? "disable" : "enable");
4210 
4211 	return 0;
4212 }
4213 EXPORT_SYMBOL(cnss_wlan_pm_control);
4214 
4215 static void cnss_pci_pm_runtime_get_record(struct cnss_pci_data *pci_priv,
4216 					   enum cnss_rtpm_id id)
4217 {
4218 	if (id >= RTPM_ID_MAX)
4219 		return;
4220 
4221 	atomic_inc(&pci_priv->pm_stats.runtime_get);
4222 	atomic_inc(&pci_priv->pm_stats.runtime_get_id[id]);
4223 	pci_priv->pm_stats.runtime_get_timestamp_id[id] =
4224 		cnss_get_host_timestamp(pci_priv->plat_priv);
4225 }
4226 
4227 static void cnss_pci_pm_runtime_put_record(struct cnss_pci_data *pci_priv,
4228 					   enum cnss_rtpm_id id)
4229 {
4230 	if (id >= RTPM_ID_MAX)
4231 		return;
4232 
4233 	atomic_inc(&pci_priv->pm_stats.runtime_put);
4234 	atomic_inc(&pci_priv->pm_stats.runtime_put_id[id]);
4235 	pci_priv->pm_stats.runtime_put_timestamp_id[id] =
4236 		cnss_get_host_timestamp(pci_priv->plat_priv);
4237 }
4238 
4239 void cnss_pci_pm_runtime_show_usage_count(struct cnss_pci_data *pci_priv)
4240 {
4241 	struct device *dev;
4242 
4243 	if (!pci_priv)
4244 		return;
4245 
4246 	dev = &pci_priv->pci_dev->dev;
4247 
4248 	cnss_pr_dbg("Runtime PM usage count: %d\n",
4249 		    atomic_read(&dev->power.usage_count));
4250 }
4251 
4252 int cnss_pci_pm_request_resume(struct cnss_pci_data *pci_priv)
4253 {
4254 	struct device *dev;
4255 	enum rpm_status status;
4256 
4257 	if (!pci_priv)
4258 		return -ENODEV;
4259 
4260 	dev = &pci_priv->pci_dev->dev;
4261 
4262 	status = dev->power.runtime_status;
4263 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4264 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4265 			     (void *)_RET_IP_);
4266 
4267 	return pm_request_resume(dev);
4268 }
4269 
4270 int cnss_pci_pm_runtime_resume(struct cnss_pci_data *pci_priv)
4271 {
4272 	struct device *dev;
4273 	enum rpm_status status;
4274 
4275 	if (!pci_priv)
4276 		return -ENODEV;
4277 
4278 	dev = &pci_priv->pci_dev->dev;
4279 
4280 	status = dev->power.runtime_status;
4281 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4282 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4283 			     (void *)_RET_IP_);
4284 
4285 	return pm_runtime_resume(dev);
4286 }
4287 
4288 int cnss_pci_pm_runtime_get(struct cnss_pci_data *pci_priv,
4289 			    enum cnss_rtpm_id id)
4290 {
4291 	struct device *dev;
4292 	enum rpm_status status;
4293 
4294 	if (!pci_priv)
4295 		return -ENODEV;
4296 
4297 	dev = &pci_priv->pci_dev->dev;
4298 
4299 	status = dev->power.runtime_status;
4300 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4301 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4302 			     (void *)_RET_IP_);
4303 
4304 	cnss_pci_pm_runtime_get_record(pci_priv, id);
4305 
4306 	return pm_runtime_get(dev);
4307 }
4308 
4309 int cnss_pci_pm_runtime_get_sync(struct cnss_pci_data *pci_priv,
4310 				 enum cnss_rtpm_id id)
4311 {
4312 	struct device *dev;
4313 	enum rpm_status status;
4314 
4315 	if (!pci_priv)
4316 		return -ENODEV;
4317 
4318 	dev = &pci_priv->pci_dev->dev;
4319 
4320 	status = dev->power.runtime_status;
4321 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4322 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4323 			     (void *)_RET_IP_);
4324 
4325 	cnss_pci_pm_runtime_get_record(pci_priv, id);
4326 
4327 	return pm_runtime_get_sync(dev);
4328 }
4329 
4330 void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv,
4331 				      enum cnss_rtpm_id id)
4332 {
4333 	if (!pci_priv)
4334 		return;
4335 
4336 	cnss_pci_pm_runtime_get_record(pci_priv, id);
4337 	pm_runtime_get_noresume(&pci_priv->pci_dev->dev);
4338 }
4339 
4340 int cnss_pci_pm_runtime_put_autosuspend(struct cnss_pci_data *pci_priv,
4341 					enum cnss_rtpm_id id)
4342 {
4343 	struct device *dev;
4344 
4345 	if (!pci_priv)
4346 		return -ENODEV;
4347 
4348 	dev = &pci_priv->pci_dev->dev;
4349 
4350 	if (atomic_read(&dev->power.usage_count) == 0) {
4351 		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
4352 		return -EINVAL;
4353 	}
4354 
4355 	cnss_pci_pm_runtime_put_record(pci_priv, id);
4356 
4357 	return pm_runtime_put_autosuspend(&pci_priv->pci_dev->dev);
4358 }
4359 
4360 void cnss_pci_pm_runtime_put_noidle(struct cnss_pci_data *pci_priv,
4361 				    enum cnss_rtpm_id id)
4362 {
4363 	struct device *dev;
4364 
4365 	if (!pci_priv)
4366 		return;
4367 
4368 	dev = &pci_priv->pci_dev->dev;
4369 
4370 	if (atomic_read(&dev->power.usage_count) == 0) {
4371 		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
4372 		return;
4373 	}
4374 
4375 	cnss_pci_pm_runtime_put_record(pci_priv, id);
4376 	pm_runtime_put_noidle(&pci_priv->pci_dev->dev);
4377 }
4378 
4379 void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv)
4380 {
4381 	if (!pci_priv)
4382 		return;
4383 
4384 	pm_runtime_mark_last_busy(&pci_priv->pci_dev->dev);
4385 }
4386 
4387 int cnss_auto_suspend(struct device *dev)
4388 {
4389 	int ret = 0;
4390 	struct pci_dev *pci_dev = to_pci_dev(dev);
4391 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4392 	struct cnss_plat_data *plat_priv;
4393 
4394 	if (!pci_priv)
4395 		return -ENODEV;
4396 
4397 	plat_priv = pci_priv->plat_priv;
4398 	if (!plat_priv)
4399 		return -ENODEV;
4400 
4401 	mutex_lock(&pci_priv->bus_lock);
4402 	if (!pci_priv->qmi_send_usage_count) {
4403 		ret = cnss_pci_suspend_bus(pci_priv);
4404 		if (ret) {
4405 			mutex_unlock(&pci_priv->bus_lock);
4406 			return ret;
4407 		}
4408 	}
4409 
4410 	cnss_pci_set_auto_suspended(pci_priv, 1);
4411 	mutex_unlock(&pci_priv->bus_lock);
4412 
4413 	cnss_pci_set_monitor_wake_intr(pci_priv, true);
4414 
4415 	/* For suspend temporarily set bandwidth vote to NONE and dont save in
4416 	 * current_bw_vote as in resume path we should vote for last used
4417 	 * bandwidth vote. Also ignore error if bw voting is not setup.
4418 	 */
4419 	cnss_setup_bus_bandwidth(plat_priv, CNSS_BUS_WIDTH_NONE, false);
4420 	return 0;
4421 }
4422 EXPORT_SYMBOL(cnss_auto_suspend);
4423 
4424 int cnss_auto_resume(struct device *dev)
4425 {
4426 	int ret = 0;
4427 	struct pci_dev *pci_dev = to_pci_dev(dev);
4428 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4429 	struct cnss_plat_data *plat_priv;
4430 
4431 	if (!pci_priv)
4432 		return -ENODEV;
4433 
4434 	plat_priv = pci_priv->plat_priv;
4435 	if (!plat_priv)
4436 		return -ENODEV;
4437 
4438 	mutex_lock(&pci_priv->bus_lock);
4439 	ret = cnss_pci_resume_bus(pci_priv);
4440 	if (ret) {
4441 		mutex_unlock(&pci_priv->bus_lock);
4442 		return ret;
4443 	}
4444 
4445 	cnss_pci_set_auto_suspended(pci_priv, 0);
4446 	mutex_unlock(&pci_priv->bus_lock);
4447 
4448 	cnss_request_bus_bandwidth(dev, plat_priv->icc.current_bw_vote);
4449 
4450 	return 0;
4451 }
4452 EXPORT_SYMBOL(cnss_auto_resume);
4453 
4454 int cnss_pci_force_wake_request_sync(struct device *dev, int timeout_us)
4455 {
4456 	struct pci_dev *pci_dev = to_pci_dev(dev);
4457 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4458 	struct cnss_plat_data *plat_priv;
4459 	struct mhi_controller *mhi_ctrl;
4460 
4461 	if (!pci_priv)
4462 		return -ENODEV;
4463 
4464 	switch (pci_priv->device_id) {
4465 	case QCA6390_DEVICE_ID:
4466 	case QCA6490_DEVICE_ID:
4467 	case KIWI_DEVICE_ID:
4468 	case MANGO_DEVICE_ID:
4469 	case PEACH_DEVICE_ID:
4470 		break;
4471 	default:
4472 		return 0;
4473 	}
4474 
4475 	mhi_ctrl = pci_priv->mhi_ctrl;
4476 	if (!mhi_ctrl)
4477 		return -EINVAL;
4478 
4479 	plat_priv = pci_priv->plat_priv;
4480 	if (!plat_priv)
4481 		return -ENODEV;
4482 
4483 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
4484 		return -EAGAIN;
4485 
4486 	if (timeout_us) {
4487 		/* Busy wait for timeout_us */
4488 		return cnss_mhi_device_get_sync_atomic(pci_priv,
4489 						       timeout_us, false);
4490 	} else {
4491 		/* Sleep wait for mhi_ctrl->timeout_ms */
4492 		return mhi_device_get_sync(mhi_ctrl->mhi_dev);
4493 	}
4494 }
4495 EXPORT_SYMBOL(cnss_pci_force_wake_request_sync);
4496 
4497 int cnss_pci_force_wake_request(struct device *dev)
4498 {
4499 	struct pci_dev *pci_dev = to_pci_dev(dev);
4500 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4501 	struct cnss_plat_data *plat_priv;
4502 	struct mhi_controller *mhi_ctrl;
4503 
4504 	if (!pci_priv)
4505 		return -ENODEV;
4506 
4507 	switch (pci_priv->device_id) {
4508 	case QCA6390_DEVICE_ID:
4509 	case QCA6490_DEVICE_ID:
4510 	case KIWI_DEVICE_ID:
4511 	case MANGO_DEVICE_ID:
4512 	case PEACH_DEVICE_ID:
4513 		break;
4514 	default:
4515 		return 0;
4516 	}
4517 
4518 	mhi_ctrl = pci_priv->mhi_ctrl;
4519 	if (!mhi_ctrl)
4520 		return -EINVAL;
4521 
4522 	plat_priv = pci_priv->plat_priv;
4523 	if (!plat_priv)
4524 		return -ENODEV;
4525 
4526 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
4527 		return -EAGAIN;
4528 
4529 	mhi_device_get(mhi_ctrl->mhi_dev);
4530 
4531 	return 0;
4532 }
4533 EXPORT_SYMBOL(cnss_pci_force_wake_request);
4534 
4535 int cnss_pci_is_device_awake(struct device *dev)
4536 {
4537 	struct pci_dev *pci_dev = to_pci_dev(dev);
4538 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4539 	struct mhi_controller *mhi_ctrl;
4540 
4541 	if (!pci_priv)
4542 		return -ENODEV;
4543 
4544 	switch (pci_priv->device_id) {
4545 	case QCA6390_DEVICE_ID:
4546 	case QCA6490_DEVICE_ID:
4547 	case KIWI_DEVICE_ID:
4548 	case MANGO_DEVICE_ID:
4549 	case PEACH_DEVICE_ID:
4550 		break;
4551 	default:
4552 		return 0;
4553 	}
4554 
4555 	mhi_ctrl = pci_priv->mhi_ctrl;
4556 	if (!mhi_ctrl)
4557 		return -EINVAL;
4558 
4559 	return (mhi_ctrl->dev_state == MHI_STATE_M0);
4560 }
4561 EXPORT_SYMBOL(cnss_pci_is_device_awake);
4562 
4563 int cnss_pci_force_wake_release(struct device *dev)
4564 {
4565 	struct pci_dev *pci_dev = to_pci_dev(dev);
4566 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4567 	struct cnss_plat_data *plat_priv;
4568 	struct mhi_controller *mhi_ctrl;
4569 
4570 	if (!pci_priv)
4571 		return -ENODEV;
4572 
4573 	switch (pci_priv->device_id) {
4574 	case QCA6390_DEVICE_ID:
4575 	case QCA6490_DEVICE_ID:
4576 	case KIWI_DEVICE_ID:
4577 	case MANGO_DEVICE_ID:
4578 	case PEACH_DEVICE_ID:
4579 		break;
4580 	default:
4581 		return 0;
4582 	}
4583 
4584 	mhi_ctrl = pci_priv->mhi_ctrl;
4585 	if (!mhi_ctrl)
4586 		return -EINVAL;
4587 
4588 	plat_priv = pci_priv->plat_priv;
4589 	if (!plat_priv)
4590 		return -ENODEV;
4591 
4592 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
4593 		return -EAGAIN;
4594 
4595 	mhi_device_put(mhi_ctrl->mhi_dev);
4596 
4597 	return 0;
4598 }
4599 EXPORT_SYMBOL(cnss_pci_force_wake_release);
4600 
4601 int cnss_pci_qmi_send_get(struct cnss_pci_data *pci_priv)
4602 {
4603 	int ret = 0;
4604 
4605 	if (!pci_priv)
4606 		return -ENODEV;
4607 
4608 	mutex_lock(&pci_priv->bus_lock);
4609 	if (cnss_pci_get_auto_suspended(pci_priv) &&
4610 	    !pci_priv->qmi_send_usage_count)
4611 		ret = cnss_pci_resume_bus(pci_priv);
4612 	pci_priv->qmi_send_usage_count++;
4613 	cnss_pr_buf("Increased QMI send usage count to %d\n",
4614 		    pci_priv->qmi_send_usage_count);
4615 	mutex_unlock(&pci_priv->bus_lock);
4616 
4617 	return ret;
4618 }
4619 
4620 int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv)
4621 {
4622 	int ret = 0;
4623 
4624 	if (!pci_priv)
4625 		return -ENODEV;
4626 
4627 	mutex_lock(&pci_priv->bus_lock);
4628 	if (pci_priv->qmi_send_usage_count)
4629 		pci_priv->qmi_send_usage_count--;
4630 	cnss_pr_buf("Decreased QMI send usage count to %d\n",
4631 		    pci_priv->qmi_send_usage_count);
4632 	if (cnss_pci_get_auto_suspended(pci_priv) &&
4633 	    !pci_priv->qmi_send_usage_count &&
4634 	    !cnss_pcie_is_device_down(pci_priv))
4635 		ret = cnss_pci_suspend_bus(pci_priv);
4636 	mutex_unlock(&pci_priv->bus_lock);
4637 
4638 	return ret;
4639 }
4640 
4641 int cnss_send_buffer_to_afcmem(struct device *dev, const uint8_t *afcdb,
4642 			       uint32_t len, uint8_t slotid)
4643 {
4644 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4645 	struct cnss_fw_mem *fw_mem;
4646 	void *mem = NULL;
4647 	int i, ret;
4648 	u32 *status;
4649 
4650 	if (!plat_priv)
4651 		return -EINVAL;
4652 
4653 	fw_mem = plat_priv->fw_mem;
4654 	if (slotid >= AFC_MAX_SLOT) {
4655 		cnss_pr_err("Invalid slot id %d\n", slotid);
4656 		ret = -EINVAL;
4657 		goto err;
4658 	}
4659 	if (len > AFC_SLOT_SIZE) {
4660 		cnss_pr_err("len %d greater than slot size", len);
4661 		ret = -EINVAL;
4662 		goto err;
4663 	}
4664 
4665 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4666 		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
4667 			mem = fw_mem[i].va;
4668 			status = mem + (slotid * AFC_SLOT_SIZE);
4669 			break;
4670 		}
4671 	}
4672 
4673 	if (!mem) {
4674 		cnss_pr_err("AFC mem is not available\n");
4675 		ret = -ENOMEM;
4676 		goto err;
4677 	}
4678 
4679 	memcpy(mem + (slotid * AFC_SLOT_SIZE), afcdb, len);
4680 	if (len < AFC_SLOT_SIZE)
4681 		memset(mem + (slotid * AFC_SLOT_SIZE) + len,
4682 		       0, AFC_SLOT_SIZE - len);
4683 	status[AFC_AUTH_STATUS_OFFSET] = cpu_to_le32(AFC_AUTH_SUCCESS);
4684 
4685 	return 0;
4686 err:
4687 	return ret;
4688 }
4689 EXPORT_SYMBOL(cnss_send_buffer_to_afcmem);
4690 
4691 int cnss_reset_afcmem(struct device *dev, uint8_t slotid)
4692 {
4693 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4694 	struct cnss_fw_mem *fw_mem;
4695 	void *mem = NULL;
4696 	int i, ret;
4697 
4698 	if (!plat_priv)
4699 		return -EINVAL;
4700 
4701 	fw_mem = plat_priv->fw_mem;
4702 	if (slotid >= AFC_MAX_SLOT) {
4703 		cnss_pr_err("Invalid slot id %d\n", slotid);
4704 		ret = -EINVAL;
4705 		goto err;
4706 	}
4707 
4708 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4709 		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
4710 			mem = fw_mem[i].va;
4711 			break;
4712 		}
4713 	}
4714 
4715 	if (!mem) {
4716 		cnss_pr_err("AFC mem is not available\n");
4717 		ret = -ENOMEM;
4718 		goto err;
4719 	}
4720 
4721 	memset(mem + (slotid * AFC_SLOT_SIZE), 0, AFC_SLOT_SIZE);
4722 	return 0;
4723 
4724 err:
4725 	return ret;
4726 }
4727 EXPORT_SYMBOL(cnss_reset_afcmem);
4728 
4729 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
4730 {
4731 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4732 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
4733 	struct device *dev = &pci_priv->pci_dev->dev;
4734 	int i;
4735 
4736 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4737 		if (!fw_mem[i].va && fw_mem[i].size) {
4738 retry:
4739 			fw_mem[i].va =
4740 				dma_alloc_attrs(dev, fw_mem[i].size,
4741 						&fw_mem[i].pa, GFP_KERNEL,
4742 						fw_mem[i].attrs);
4743 
4744 			if (!fw_mem[i].va) {
4745 				if ((fw_mem[i].attrs &
4746 				    DMA_ATTR_FORCE_CONTIGUOUS)) {
4747 					fw_mem[i].attrs &=
4748 						~DMA_ATTR_FORCE_CONTIGUOUS;
4749 
4750 					cnss_pr_dbg("Fallback to non-contiguous memory for FW, Mem type: %u\n",
4751 						    fw_mem[i].type);
4752 					goto retry;
4753 				}
4754 				cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n",
4755 					    fw_mem[i].size, fw_mem[i].type);
4756 				CNSS_ASSERT(0);
4757 				return -ENOMEM;
4758 			}
4759 		}
4760 	}
4761 
4762 	return 0;
4763 }
4764 
4765 static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
4766 {
4767 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4768 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
4769 	struct device *dev = &pci_priv->pci_dev->dev;
4770 	int i;
4771 
4772 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4773 		if (fw_mem[i].va && fw_mem[i].size) {
4774 			cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
4775 				    fw_mem[i].va, &fw_mem[i].pa,
4776 				    fw_mem[i].size, fw_mem[i].type);
4777 			dma_free_attrs(dev, fw_mem[i].size,
4778 				       fw_mem[i].va, fw_mem[i].pa,
4779 				       fw_mem[i].attrs);
4780 			fw_mem[i].va = NULL;
4781 			fw_mem[i].pa = 0;
4782 			fw_mem[i].size = 0;
4783 			fw_mem[i].type = 0;
4784 		}
4785 	}
4786 
4787 	plat_priv->fw_mem_seg_len = 0;
4788 }
4789 
4790 int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv)
4791 {
4792 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4793 	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
4794 	int i, j;
4795 
4796 	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
4797 		if (!qdss_mem[i].va && qdss_mem[i].size) {
4798 			qdss_mem[i].va =
4799 				dma_alloc_coherent(&pci_priv->pci_dev->dev,
4800 						   qdss_mem[i].size,
4801 						   &qdss_mem[i].pa,
4802 						   GFP_KERNEL);
4803 			if (!qdss_mem[i].va) {
4804 				cnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chuck-ID: %d\n",
4805 					    qdss_mem[i].size,
4806 					    qdss_mem[i].type, i);
4807 				break;
4808 			}
4809 		}
4810 	}
4811 
4812 	/* Best-effort allocation for QDSS trace */
4813 	if (i < plat_priv->qdss_mem_seg_len) {
4814 		for (j = i; j < plat_priv->qdss_mem_seg_len; j++) {
4815 			qdss_mem[j].type = 0;
4816 			qdss_mem[j].size = 0;
4817 		}
4818 		plat_priv->qdss_mem_seg_len = i;
4819 	}
4820 
4821 	return 0;
4822 }
4823 
4824 void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv)
4825 {
4826 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4827 	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
4828 	int i;
4829 
4830 	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
4831 		if (qdss_mem[i].va && qdss_mem[i].size) {
4832 			cnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n",
4833 				    &qdss_mem[i].pa, qdss_mem[i].size,
4834 				    qdss_mem[i].type);
4835 			dma_free_coherent(&pci_priv->pci_dev->dev,
4836 					  qdss_mem[i].size, qdss_mem[i].va,
4837 					  qdss_mem[i].pa);
4838 			qdss_mem[i].va = NULL;
4839 			qdss_mem[i].pa = 0;
4840 			qdss_mem[i].size = 0;
4841 			qdss_mem[i].type = 0;
4842 		}
4843 	}
4844 	plat_priv->qdss_mem_seg_len = 0;
4845 }
4846 
4847 int cnss_pci_load_tme_patch(struct cnss_pci_data *pci_priv)
4848 {
4849 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4850 	struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem;
4851 	char filename[MAX_FIRMWARE_NAME_LEN];
4852 	char *tme_patch_filename = NULL;
4853 	const struct firmware *fw_entry;
4854 	int ret = 0;
4855 
4856 	switch (pci_priv->device_id) {
4857 	case PEACH_DEVICE_ID:
4858 		tme_patch_filename = TME_PATCH_FILE_NAME;
4859 		break;
4860 	case QCA6174_DEVICE_ID:
4861 	case QCA6290_DEVICE_ID:
4862 	case QCA6390_DEVICE_ID:
4863 	case QCA6490_DEVICE_ID:
4864 	case KIWI_DEVICE_ID:
4865 	case MANGO_DEVICE_ID:
4866 	default:
4867 		cnss_pr_dbg("TME-L not supported for device ID: (0x%x)\n",
4868 			    pci_priv->device_id);
4869 		return 0;
4870 	}
4871 
4872 	if (!tme_lite_mem->va && !tme_lite_mem->size) {
4873 		cnss_pci_add_fw_prefix_name(pci_priv, filename,
4874 					    tme_patch_filename);
4875 
4876 		ret = firmware_request_nowarn(&fw_entry, filename,
4877 					      &pci_priv->pci_dev->dev);
4878 		if (ret) {
4879 			cnss_pr_err("Failed to load TME-L patch: %s, ret: %d\n",
4880 				    filename, ret);
4881 			return ret;
4882 		}
4883 
4884 		tme_lite_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
4885 						fw_entry->size, &tme_lite_mem->pa,
4886 						GFP_KERNEL);
4887 		if (!tme_lite_mem->va) {
4888 			cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
4889 				    fw_entry->size);
4890 			release_firmware(fw_entry);
4891 			return -ENOMEM;
4892 		}
4893 
4894 		memcpy(tme_lite_mem->va, fw_entry->data, fw_entry->size);
4895 		tme_lite_mem->size = fw_entry->size;
4896 		release_firmware(fw_entry);
4897 	}
4898 
4899 	return 0;
4900 }
4901 
4902 static void cnss_pci_free_tme_lite_mem(struct cnss_pci_data *pci_priv)
4903 {
4904 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4905 	struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem;
4906 
4907 	if (tme_lite_mem->va && tme_lite_mem->size) {
4908 		cnss_pr_dbg("Freeing memory for TME patch, va: 0x%pK, pa: %pa, size: 0x%zx\n",
4909 			    tme_lite_mem->va, &tme_lite_mem->pa, tme_lite_mem->size);
4910 		dma_free_coherent(&pci_priv->pci_dev->dev, tme_lite_mem->size,
4911 				  tme_lite_mem->va, tme_lite_mem->pa);
4912 	}
4913 
4914 	tme_lite_mem->va = NULL;
4915 	tme_lite_mem->pa = 0;
4916 	tme_lite_mem->size = 0;
4917 }
4918 
4919 int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
4920 {
4921 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4922 	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
4923 	char filename[MAX_FIRMWARE_NAME_LEN];
4924 	char *phy_filename = DEFAULT_PHY_UCODE_FILE_NAME;
4925 	const struct firmware *fw_entry;
4926 	int ret = 0;
4927 
4928 	/* Use forward compatibility here since for any recent device
4929 	 * it should use DEFAULT_PHY_UCODE_FILE_NAME.
4930 	 */
4931 	switch (pci_priv->device_id) {
4932 	case QCA6174_DEVICE_ID:
4933 		cnss_pr_err("Invalid device ID (0x%x) to load phy image\n",
4934 			    pci_priv->device_id);
4935 		return -EINVAL;
4936 	case QCA6290_DEVICE_ID:
4937 	case QCA6390_DEVICE_ID:
4938 	case QCA6490_DEVICE_ID:
4939 		phy_filename = DEFAULT_PHY_M3_FILE_NAME;
4940 		break;
4941 	case KIWI_DEVICE_ID:
4942 	case MANGO_DEVICE_ID:
4943 	case PEACH_DEVICE_ID:
4944 		switch (plat_priv->device_version.major_version) {
4945 		case FW_V2_NUMBER:
4946 			phy_filename = PHY_UCODE_V2_FILE_NAME;
4947 			break;
4948 		default:
4949 			break;
4950 		}
4951 		break;
4952 	default:
4953 		break;
4954 	}
4955 
4956 	if (!m3_mem->va && !m3_mem->size) {
4957 		cnss_pci_add_fw_prefix_name(pci_priv, filename,
4958 					    phy_filename);
4959 
4960 		ret = firmware_request_nowarn(&fw_entry, filename,
4961 					      &pci_priv->pci_dev->dev);
4962 		if (ret) {
4963 			cnss_pr_err("Failed to load M3 image: %s\n", filename);
4964 			return ret;
4965 		}
4966 
4967 		m3_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
4968 						fw_entry->size, &m3_mem->pa,
4969 						GFP_KERNEL);
4970 		if (!m3_mem->va) {
4971 			cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
4972 				    fw_entry->size);
4973 			release_firmware(fw_entry);
4974 			return -ENOMEM;
4975 		}
4976 
4977 		memcpy(m3_mem->va, fw_entry->data, fw_entry->size);
4978 		m3_mem->size = fw_entry->size;
4979 		release_firmware(fw_entry);
4980 	}
4981 
4982 	return 0;
4983 }
4984 
4985 static void cnss_pci_free_m3_mem(struct cnss_pci_data *pci_priv)
4986 {
4987 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4988 	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
4989 
4990 	if (m3_mem->va && m3_mem->size) {
4991 		cnss_pr_dbg("Freeing memory for M3, va: 0x%pK, pa: %pa, size: 0x%zx\n",
4992 			    m3_mem->va, &m3_mem->pa, m3_mem->size);
4993 		dma_free_coherent(&pci_priv->pci_dev->dev, m3_mem->size,
4994 				  m3_mem->va, m3_mem->pa);
4995 	}
4996 
4997 	m3_mem->va = NULL;
4998 	m3_mem->pa = 0;
4999 	m3_mem->size = 0;
5000 }
5001 
5002 #ifdef CONFIG_FREE_M3_BLOB_MEM
5003 void cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv)
5004 {
5005 	cnss_pci_free_m3_mem(pci_priv);
5006 }
5007 #else
5008 void cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv)
5009 {
5010 }
5011 #endif
5012 
5013 int cnss_pci_load_aux(struct cnss_pci_data *pci_priv)
5014 {
5015 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5016 	struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem;
5017 	char filename[MAX_FIRMWARE_NAME_LEN];
5018 	char *aux_filename = DEFAULT_AUX_FILE_NAME;
5019 	const struct firmware *fw_entry;
5020 	int ret = 0;
5021 
5022 	if (!aux_mem->va && !aux_mem->size) {
5023 		cnss_pci_add_fw_prefix_name(pci_priv, filename,
5024 					    aux_filename);
5025 
5026 		ret = firmware_request_nowarn(&fw_entry, filename,
5027 					      &pci_priv->pci_dev->dev);
5028 		if (ret) {
5029 			cnss_pr_err("Failed to load AUX image: %s\n", filename);
5030 			return ret;
5031 		}
5032 
5033 		aux_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
5034 						fw_entry->size, &aux_mem->pa,
5035 						GFP_KERNEL);
5036 		if (!aux_mem->va) {
5037 			cnss_pr_err("Failed to allocate memory for AUX, size: 0x%zx\n",
5038 				    fw_entry->size);
5039 			release_firmware(fw_entry);
5040 			return -ENOMEM;
5041 		}
5042 
5043 		memcpy(aux_mem->va, fw_entry->data, fw_entry->size);
5044 		aux_mem->size = fw_entry->size;
5045 		release_firmware(fw_entry);
5046 	}
5047 
5048 	return 0;
5049 }
5050 
5051 static void cnss_pci_free_aux_mem(struct cnss_pci_data *pci_priv)
5052 {
5053 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5054 	struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem;
5055 
5056 	if (aux_mem->va && aux_mem->size) {
5057 		cnss_pr_dbg("Freeing memory for AUX, va: 0x%pK, pa: %pa, size: 0x%zx\n",
5058 			    aux_mem->va, &aux_mem->pa, aux_mem->size);
5059 		dma_free_coherent(&pci_priv->pci_dev->dev, aux_mem->size,
5060 				  aux_mem->va, aux_mem->pa);
5061 	}
5062 
5063 	aux_mem->va = NULL;
5064 	aux_mem->pa = 0;
5065 	aux_mem->size = 0;
5066 }
5067 
5068 void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv)
5069 {
5070 	struct cnss_plat_data *plat_priv;
5071 
5072 	if (!pci_priv)
5073 		return;
5074 
5075 	cnss_fatal_err("Timeout waiting for FW ready indication\n");
5076 
5077 	plat_priv = pci_priv->plat_priv;
5078 	if (!plat_priv)
5079 		return;
5080 
5081 	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
5082 		cnss_pr_dbg("Ignore FW ready timeout for calibration mode\n");
5083 		return;
5084 	}
5085 
5086 	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
5087 			       CNSS_REASON_TIMEOUT);
5088 }
5089 
5090 static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
5091 {
5092 	pci_priv->iommu_domain = NULL;
5093 }
5094 
5095 int cnss_pci_get_iova(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
5096 {
5097 	if (!pci_priv)
5098 		return -ENODEV;
5099 
5100 	if (!pci_priv->smmu_iova_len)
5101 		return -EINVAL;
5102 
5103 	*addr = pci_priv->smmu_iova_start;
5104 	*size = pci_priv->smmu_iova_len;
5105 
5106 	return 0;
5107 }
5108 
5109 int cnss_pci_get_iova_ipa(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
5110 {
5111 	if (!pci_priv)
5112 		return -ENODEV;
5113 
5114 	if (!pci_priv->smmu_iova_ipa_len)
5115 		return -EINVAL;
5116 
5117 	*addr = pci_priv->smmu_iova_ipa_start;
5118 	*size = pci_priv->smmu_iova_ipa_len;
5119 
5120 	return 0;
5121 }
5122 
5123 bool cnss_pci_is_smmu_s1_enabled(struct cnss_pci_data *pci_priv)
5124 {
5125 	if (pci_priv)
5126 		return pci_priv->smmu_s1_enable;
5127 
5128 	return false;
5129 }
5130 struct iommu_domain *cnss_smmu_get_domain(struct device *dev)
5131 {
5132 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5133 
5134 	if (!pci_priv)
5135 		return NULL;
5136 
5137 	return pci_priv->iommu_domain;
5138 }
5139 EXPORT_SYMBOL(cnss_smmu_get_domain);
5140 
5141 int cnss_smmu_map(struct device *dev,
5142 		  phys_addr_t paddr, uint32_t *iova_addr, size_t size)
5143 {
5144 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5145 	struct cnss_plat_data *plat_priv;
5146 	unsigned long iova;
5147 	size_t len;
5148 	int ret = 0;
5149 	int flag = IOMMU_READ | IOMMU_WRITE;
5150 	struct pci_dev *root_port;
5151 	struct device_node *root_of_node;
5152 	bool dma_coherent = false;
5153 
5154 	if (!pci_priv)
5155 		return -ENODEV;
5156 
5157 	if (!iova_addr) {
5158 		cnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n",
5159 			    &paddr, size);
5160 		return -EINVAL;
5161 	}
5162 
5163 	plat_priv = pci_priv->plat_priv;
5164 
5165 	len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
5166 	iova = roundup(pci_priv->smmu_iova_ipa_current, PAGE_SIZE);
5167 
5168 	if (pci_priv->iommu_geometry &&
5169 	    iova >= pci_priv->smmu_iova_ipa_start +
5170 		    pci_priv->smmu_iova_ipa_len) {
5171 		cnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
5172 			    iova,
5173 			    &pci_priv->smmu_iova_ipa_start,
5174 			    pci_priv->smmu_iova_ipa_len);
5175 		return -ENOMEM;
5176 	}
5177 
5178 	if (!test_bit(DISABLE_IO_COHERENCY,
5179 		      &plat_priv->ctrl_params.quirks)) {
5180 		root_port = pcie_find_root_port(pci_priv->pci_dev);
5181 		if (!root_port) {
5182 			cnss_pr_err("Root port is null, so dma_coherent is disabled\n");
5183 		} else {
5184 			root_of_node = root_port->dev.of_node;
5185 			if (root_of_node && root_of_node->parent) {
5186 				dma_coherent =
5187 				    of_property_read_bool(root_of_node->parent,
5188 							  "dma-coherent");
5189 			cnss_pr_dbg("dma-coherent is %s\n",
5190 				    dma_coherent ? "enabled" : "disabled");
5191 			if (dma_coherent)
5192 				flag |= IOMMU_CACHE;
5193 			}
5194 		}
5195 	}
5196 
5197 	cnss_pr_dbg("IOMMU map: iova %lx, len %zu\n", iova, len);
5198 
5199 	ret = cnss_iommu_map(pci_priv->iommu_domain, iova,
5200 			     rounddown(paddr, PAGE_SIZE), len, flag);
5201 	if (ret) {
5202 		cnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
5203 		return ret;
5204 	}
5205 
5206 	pci_priv->smmu_iova_ipa_current = iova + len;
5207 	*iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE));
5208 	cnss_pr_dbg("IOMMU map: iova_addr %lx\n", *iova_addr);
5209 
5210 	return 0;
5211 }
5212 EXPORT_SYMBOL(cnss_smmu_map);
5213 
5214 int cnss_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size)
5215 {
5216 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5217 	unsigned long iova;
5218 	size_t unmapped;
5219 	size_t len;
5220 
5221 	if (!pci_priv)
5222 		return -ENODEV;
5223 
5224 	iova = rounddown(iova_addr, PAGE_SIZE);
5225 	len = roundup(size + iova_addr - iova, PAGE_SIZE);
5226 
5227 	if (iova >= pci_priv->smmu_iova_ipa_start +
5228 		    pci_priv->smmu_iova_ipa_len) {
5229 		cnss_pr_err("Out of IOVA space to unmap, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
5230 			    iova,
5231 			    &pci_priv->smmu_iova_ipa_start,
5232 			    pci_priv->smmu_iova_ipa_len);
5233 		return -ENOMEM;
5234 	}
5235 
5236 	cnss_pr_dbg("IOMMU unmap: iova %lx, len %zu\n", iova, len);
5237 
5238 	unmapped = iommu_unmap(pci_priv->iommu_domain, iova, len);
5239 	if (unmapped != len) {
5240 		cnss_pr_err("IOMMU unmap failed, unmapped = %zu, requested = %zu\n",
5241 			    unmapped, len);
5242 		return -EINVAL;
5243 	}
5244 
5245 	pci_priv->smmu_iova_ipa_current = iova;
5246 	return 0;
5247 }
5248 EXPORT_SYMBOL(cnss_smmu_unmap);
5249 
5250 int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
5251 {
5252 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5253 	struct cnss_plat_data *plat_priv;
5254 
5255 	if (!pci_priv)
5256 		return -ENODEV;
5257 
5258 	plat_priv = pci_priv->plat_priv;
5259 	if (!plat_priv)
5260 		return -ENODEV;
5261 
5262 	info->va = pci_priv->bar;
5263 	info->pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
5264 	info->chip_id = plat_priv->chip_info.chip_id;
5265 	info->chip_family = plat_priv->chip_info.chip_family;
5266 	info->board_id = plat_priv->board_info.board_id;
5267 	info->soc_id = plat_priv->soc_info.soc_id;
5268 	info->fw_version = plat_priv->fw_version_info.fw_version;
5269 	strlcpy(info->fw_build_timestamp,
5270 		plat_priv->fw_version_info.fw_build_timestamp,
5271 		sizeof(info->fw_build_timestamp));
5272 	memcpy(&info->device_version, &plat_priv->device_version,
5273 	       sizeof(info->device_version));
5274 	memcpy(&info->dev_mem_info, &plat_priv->dev_mem_info,
5275 	       sizeof(info->dev_mem_info));
5276 	memcpy(&info->fw_build_id, &plat_priv->fw_build_id,
5277 	       sizeof(info->fw_build_id));
5278 
5279 	return 0;
5280 }
5281 EXPORT_SYMBOL(cnss_get_soc_info);
5282 
5283 int cnss_pci_get_user_msi_assignment(struct cnss_pci_data *pci_priv,
5284 				     char *user_name,
5285 				     int *num_vectors,
5286 				     u32 *user_base_data,
5287 				     u32 *base_vector)
5288 {
5289 	return cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
5290 					    user_name,
5291 					    num_vectors,
5292 					    user_base_data,
5293 					    base_vector);
5294 }
5295 
5296 static int cnss_pci_irq_set_affinity_hint(struct cnss_pci_data *pci_priv,
5297 					  unsigned int vec,
5298 					  const struct cpumask *cpumask)
5299 {
5300 	int ret;
5301 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5302 
5303 	ret = irq_set_affinity_hint(pci_irq_vector(pci_dev, vec),
5304 				    cpumask);
5305 
5306 	return ret;
5307 }
5308 
5309 static int cnss_pci_enable_msi(struct cnss_pci_data *pci_priv)
5310 {
5311 	int ret = 0;
5312 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5313 	int num_vectors;
5314 	struct cnss_msi_config *msi_config;
5315 
5316 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
5317 		return 0;
5318 
5319 	if (cnss_pci_is_force_one_msi(pci_priv)) {
5320 		ret = cnss_pci_get_one_msi_assignment(pci_priv);
5321 		cnss_pr_dbg("force one msi\n");
5322 	} else {
5323 		ret = cnss_pci_get_msi_assignment(pci_priv);
5324 	}
5325 	if (ret) {
5326 		cnss_pr_err("Failed to get MSI assignment, err = %d\n", ret);
5327 		goto out;
5328 	}
5329 
5330 	msi_config = pci_priv->msi_config;
5331 	if (!msi_config) {
5332 		cnss_pr_err("msi_config is NULL!\n");
5333 		ret = -EINVAL;
5334 		goto out;
5335 	}
5336 
5337 	num_vectors = pci_alloc_irq_vectors(pci_dev,
5338 					    msi_config->total_vectors,
5339 					    msi_config->total_vectors,
5340 					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
5341 	if ((num_vectors != msi_config->total_vectors) &&
5342 	    !cnss_pci_fallback_one_msi(pci_priv, &num_vectors)) {
5343 		cnss_pr_err("Failed to get enough MSI vectors (%d), available vectors = %d",
5344 			    msi_config->total_vectors, num_vectors);
5345 		if (num_vectors >= 0)
5346 			ret = -EINVAL;
5347 		goto reset_msi_config;
5348 	}
5349 
5350 	/* With VT-d disabled on x86 platform, only one pci irq vector is
5351 	 * allocated. Once suspend the irq may be migrated to CPU0 if it was
5352 	 * affine to other CPU with one new msi vector re-allocated.
5353 	 * The observation cause the issue about no irq handler for vector
5354 	 * once resume.
5355 	 * The fix is to set irq vector affinity to CPU0 before calling
5356 	 * request_irq to avoid the irq migration.
5357 	 */
5358 	if (cnss_pci_is_one_msi(pci_priv)) {
5359 		ret = cnss_pci_irq_set_affinity_hint(pci_priv,
5360 						     0,
5361 						     cpumask_of(0));
5362 		if (ret) {
5363 			cnss_pr_err("Failed to affinize irq vector to CPU0\n");
5364 			goto free_msi_vector;
5365 		}
5366 	}
5367 
5368 	if (cnss_pci_config_msi_addr(pci_priv)) {
5369 		ret = -EINVAL;
5370 		goto free_msi_vector;
5371 	}
5372 
5373 	if (cnss_pci_config_msi_data(pci_priv)) {
5374 		ret = -EINVAL;
5375 		goto free_msi_vector;
5376 	}
5377 
5378 	return 0;
5379 
5380 free_msi_vector:
5381 	if (cnss_pci_is_one_msi(pci_priv))
5382 		cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);
5383 	pci_free_irq_vectors(pci_priv->pci_dev);
5384 reset_msi_config:
5385 	pci_priv->msi_config = NULL;
5386 out:
5387 	return ret;
5388 }
5389 
5390 static void cnss_pci_disable_msi(struct cnss_pci_data *pci_priv)
5391 {
5392 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
5393 		return;
5394 
5395 	if (cnss_pci_is_one_msi(pci_priv))
5396 		cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);
5397 
5398 	pci_free_irq_vectors(pci_priv->pci_dev);
5399 }
5400 
5401 int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
5402 				 int *num_vectors, u32 *user_base_data,
5403 				 u32 *base_vector)
5404 {
5405 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5406 	struct cnss_msi_config *msi_config;
5407 	int idx;
5408 
5409 	if (!pci_priv)
5410 		return -ENODEV;
5411 
5412 	msi_config = pci_priv->msi_config;
5413 	if (!msi_config) {
5414 		cnss_pr_err("MSI is not supported.\n");
5415 		return -EINVAL;
5416 	}
5417 
5418 	for (idx = 0; idx < msi_config->total_users; idx++) {
5419 		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
5420 			*num_vectors = msi_config->users[idx].num_vectors;
5421 			*user_base_data = msi_config->users[idx].base_vector
5422 				+ pci_priv->msi_ep_base_data;
5423 			*base_vector = msi_config->users[idx].base_vector;
5424 			/*Add only single print for each user*/
5425 			if (print_optimize.msi_log_chk[idx]++)
5426 				goto skip_print;
5427 
5428 			cnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
5429 				    user_name, *num_vectors, *user_base_data,
5430 				    *base_vector);
5431 skip_print:
5432 			return 0;
5433 		}
5434 	}
5435 
5436 	cnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);
5437 
5438 	return -EINVAL;
5439 }
5440 EXPORT_SYMBOL(cnss_get_user_msi_assignment);
5441 
5442 int cnss_get_msi_irq(struct device *dev, unsigned int vector)
5443 {
5444 	struct pci_dev *pci_dev = to_pci_dev(dev);
5445 	int irq_num;
5446 
5447 	irq_num = pci_irq_vector(pci_dev, vector);
5448 	cnss_pr_dbg("Get IRQ number %d for vector index %d\n", irq_num, vector);
5449 
5450 	return irq_num;
5451 }
5452 EXPORT_SYMBOL(cnss_get_msi_irq);
5453 
5454 bool cnss_is_one_msi(struct device *dev)
5455 {
5456 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5457 
5458 	if (!pci_priv)
5459 		return false;
5460 
5461 	return cnss_pci_is_one_msi(pci_priv);
5462 }
5463 EXPORT_SYMBOL(cnss_is_one_msi);
5464 
5465 void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
5466 			  u32 *msi_addr_high)
5467 {
5468 	struct pci_dev *pci_dev = to_pci_dev(dev);
5469 	struct cnss_pci_data *pci_priv;
5470 	u16 control;
5471 
5472 	if (!pci_dev)
5473 		return;
5474 
5475 	pci_priv = cnss_get_pci_priv(pci_dev);
5476 	if (!pci_priv)
5477 		return;
5478 
5479 	if (pci_dev->msix_enabled) {
5480 		*msi_addr_low = pci_priv->msix_addr;
5481 		*msi_addr_high = 0;
5482 		if (!print_optimize.msi_addr_chk++)
5483 			cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
5484 				    *msi_addr_low, *msi_addr_high);
5485 		return;
5486 	}
5487 
5488 	pci_read_config_word(pci_dev, pci_dev->msi_cap + PCI_MSI_FLAGS,
5489 			     &control);
5490 	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
5491 			      msi_addr_low);
5492 	/* Return MSI high address only when device supports 64-bit MSI */
5493 	if (control & PCI_MSI_FLAGS_64BIT)
5494 		pci_read_config_dword(pci_dev,
5495 				      pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
5496 				      msi_addr_high);
5497 	else
5498 		*msi_addr_high = 0;
5499 	 /*Add only single print as the address is constant*/
5500 	 if (!print_optimize.msi_addr_chk++)
5501 		cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
5502 			    *msi_addr_low, *msi_addr_high);
5503 }
5504 EXPORT_SYMBOL(cnss_get_msi_address);
5505 
5506 u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv)
5507 {
5508 	int ret, num_vectors;
5509 	u32 user_base_data, base_vector;
5510 
5511 	if (!pci_priv)
5512 		return -ENODEV;
5513 
5514 	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
5515 					   WAKE_MSI_NAME, &num_vectors,
5516 					   &user_base_data, &base_vector);
5517 	if (ret) {
5518 		cnss_pr_err("WAKE MSI is not valid\n");
5519 		return 0;
5520 	}
5521 
5522 	return user_base_data;
5523 }
5524 
5525 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
5526 static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
5527 {
5528 	return dma_set_mask(&pci_dev->dev, mask);
5529 }
5530 
5531 static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
5532 	u64 mask)
5533 {
5534 	return dma_set_coherent_mask(&pci_dev->dev, mask);
5535 }
5536 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
5537 static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
5538 {
5539 	return pci_set_dma_mask(pci_dev, mask);
5540 }
5541 
5542 static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
5543 	u64 mask)
5544 {
5545 	return pci_set_consistent_dma_mask(pci_dev, mask);
5546 }
5547 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
5548 
5549 static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
5550 {
5551 	int ret = 0;
5552 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5553 	u16 device_id;
5554 
5555 	pci_read_config_word(pci_dev, PCI_DEVICE_ID, &device_id);
5556 	if (device_id != pci_priv->pci_device_id->device)  {
5557 		cnss_pr_err("PCI device ID mismatch, config ID: 0x%x, probe ID: 0x%x\n",
5558 			    device_id, pci_priv->pci_device_id->device);
5559 		ret = -EIO;
5560 		goto out;
5561 	}
5562 
5563 	ret = pci_assign_resource(pci_dev, PCI_BAR_NUM);
5564 	if (ret) {
5565 		pr_err("Failed to assign PCI resource, err = %d\n", ret);
5566 		goto out;
5567 	}
5568 
5569 	ret = pci_enable_device(pci_dev);
5570 	if (ret) {
5571 		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
5572 		goto out;
5573 	}
5574 
5575 	ret = pci_request_region(pci_dev, PCI_BAR_NUM, "cnss");
5576 	if (ret) {
5577 		cnss_pr_err("Failed to request PCI region, err = %d\n", ret);
5578 		goto disable_device;
5579 	}
5580 
5581 	switch (device_id) {
5582 	case QCA6174_DEVICE_ID:
5583 	case QCN7605_DEVICE_ID:
5584 		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
5585 		break;
5586 	case QCA6390_DEVICE_ID:
5587 	case QCA6490_DEVICE_ID:
5588 	case KIWI_DEVICE_ID:
5589 	case MANGO_DEVICE_ID:
5590 	case PEACH_DEVICE_ID:
5591 		pci_priv->dma_bit_mask = PCI_DMA_MASK_36_BIT;
5592 		break;
5593 	default:
5594 		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
5595 		break;
5596 	}
5597 
5598 	cnss_pr_dbg("Set PCI DMA MASK (0x%llx)\n", pci_priv->dma_bit_mask);
5599 
5600 	ret = cnss_pci_set_dma_mask(pci_dev, pci_priv->dma_bit_mask);
5601 	if (ret) {
5602 		cnss_pr_err("Failed to set PCI DMA mask, err = %d\n", ret);
5603 		goto release_region;
5604 	}
5605 
5606 	ret = cnss_pci_set_coherent_dma_mask(pci_dev, pci_priv->dma_bit_mask);
5607 	if (ret) {
5608 		cnss_pr_err("Failed to set PCI coherent DMA mask, err = %d\n",
5609 			    ret);
5610 		goto release_region;
5611 	}
5612 
5613 	pci_priv->bar = pci_iomap(pci_dev, PCI_BAR_NUM, 0);
5614 	if (!pci_priv->bar) {
5615 		cnss_pr_err("Failed to do PCI IO map!\n");
5616 		ret = -EIO;
5617 		goto release_region;
5618 	}
5619 
5620 	/* Save default config space without BME enabled */
5621 	pci_save_state(pci_dev);
5622 	pci_priv->default_state = pci_store_saved_state(pci_dev);
5623 
5624 	pci_set_master(pci_dev);
5625 
5626 	return 0;
5627 
5628 release_region:
5629 	pci_release_region(pci_dev, PCI_BAR_NUM);
5630 disable_device:
5631 	pci_disable_device(pci_dev);
5632 out:
5633 	return ret;
5634 }
5635 
5636 static void cnss_pci_disable_bus(struct cnss_pci_data *pci_priv)
5637 {
5638 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5639 
5640 	pci_clear_master(pci_dev);
5641 	pci_load_and_free_saved_state(pci_dev, &pci_priv->saved_state);
5642 	pci_load_and_free_saved_state(pci_dev, &pci_priv->default_state);
5643 
5644 	if (pci_priv->bar) {
5645 		pci_iounmap(pci_dev, pci_priv->bar);
5646 		pci_priv->bar = NULL;
5647 	}
5648 
5649 	pci_release_region(pci_dev, PCI_BAR_NUM);
5650 	if (pci_is_enabled(pci_dev))
5651 		pci_disable_device(pci_dev);
5652 }
5653 
5654 static void cnss_pci_dump_qdss_reg(struct cnss_pci_data *pci_priv)
5655 {
5656 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5657 	int i, array_size = ARRAY_SIZE(qdss_csr) - 1;
5658 	gfp_t gfp = GFP_KERNEL;
5659 	u32 reg_offset;
5660 
5661 	if (in_interrupt() || irqs_disabled())
5662 		gfp = GFP_ATOMIC;
5663 
5664 	if (!plat_priv->qdss_reg) {
5665 		plat_priv->qdss_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
5666 						   sizeof(*plat_priv->qdss_reg)
5667 						   * array_size, gfp);
5668 		if (!plat_priv->qdss_reg)
5669 			return;
5670 	}
5671 
5672 	cnss_pr_dbg("Start to dump qdss registers\n");
5673 
5674 	for (i = 0; qdss_csr[i].name; i++) {
5675 		reg_offset = QDSS_APB_DEC_CSR_BASE + qdss_csr[i].offset;
5676 		if (cnss_pci_reg_read(pci_priv, reg_offset,
5677 				      &plat_priv->qdss_reg[i]))
5678 			return;
5679 		cnss_pr_dbg("%s[0x%x] = 0x%x\n", qdss_csr[i].name, reg_offset,
5680 			    plat_priv->qdss_reg[i]);
5681 	}
5682 }
5683 
5684 static void cnss_pci_dump_ce_reg(struct cnss_pci_data *pci_priv,
5685 				 enum cnss_ce_index ce)
5686 {
5687 	int i;
5688 	u32 ce_base = ce * CE_REG_INTERVAL;
5689 	u32 reg_offset, src_ring_base, dst_ring_base, cmn_base, val;
5690 
5691 	switch (pci_priv->device_id) {
5692 	case QCA6390_DEVICE_ID:
5693 		src_ring_base = QCA6390_CE_SRC_RING_REG_BASE;
5694 		dst_ring_base = QCA6390_CE_DST_RING_REG_BASE;
5695 		cmn_base = QCA6390_CE_COMMON_REG_BASE;
5696 		break;
5697 	case QCA6490_DEVICE_ID:
5698 		src_ring_base = QCA6490_CE_SRC_RING_REG_BASE;
5699 		dst_ring_base = QCA6490_CE_DST_RING_REG_BASE;
5700 		cmn_base = QCA6490_CE_COMMON_REG_BASE;
5701 		break;
5702 	default:
5703 		return;
5704 	}
5705 
5706 	switch (ce) {
5707 	case CNSS_CE_09:
5708 	case CNSS_CE_10:
5709 		for (i = 0; ce_src[i].name; i++) {
5710 			reg_offset = src_ring_base + ce_base + ce_src[i].offset;
5711 			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
5712 				return;
5713 			cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
5714 				    ce, ce_src[i].name, reg_offset, val);
5715 		}
5716 
5717 		for (i = 0; ce_dst[i].name; i++) {
5718 			reg_offset = dst_ring_base + ce_base + ce_dst[i].offset;
5719 			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
5720 				return;
5721 			cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
5722 				    ce, ce_dst[i].name, reg_offset, val);
5723 		}
5724 		break;
5725 	case CNSS_CE_COMMON:
5726 		for (i = 0; ce_cmn[i].name; i++) {
5727 			reg_offset = cmn_base  + ce_cmn[i].offset;
5728 			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
5729 				return;
5730 			cnss_pr_dbg("CE_COMMON_%s[0x%x] = 0x%x\n",
5731 				    ce_cmn[i].name, reg_offset, val);
5732 		}
5733 		break;
5734 	default:
5735 		cnss_pr_err("Unsupported CE[%d] registers dump\n", ce);
5736 	}
5737 }
5738 
5739 static void cnss_pci_dump_debug_reg(struct cnss_pci_data *pci_priv)
5740 {
5741 	if (cnss_pci_check_link_status(pci_priv))
5742 		return;
5743 
5744 	cnss_pr_dbg("Start to dump debug registers\n");
5745 
5746 	cnss_mhi_debug_reg_dump(pci_priv);
5747 	cnss_pci_bhi_debug_reg_dump(pci_priv);
5748 	cnss_pci_soc_scratch_reg_dump(pci_priv);
5749 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_COMMON);
5750 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_09);
5751 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_10);
5752 }
5753 
5754 static int cnss_pci_assert_host_sol(struct cnss_pci_data *pci_priv)
5755 {
5756 	if (cnss_get_host_sol_value(pci_priv->plat_priv))
5757 		return -EINVAL;
5758 
5759 	cnss_pr_dbg("Assert host SOL GPIO to retry RDDM, expecting link down\n");
5760 	cnss_set_host_sol_value(pci_priv->plat_priv, 1);
5761 
5762 	return 0;
5763 }
5764 
5765 static void cnss_pci_mhi_reg_dump(struct cnss_pci_data *pci_priv)
5766 {
5767 	if (!cnss_pci_check_link_status(pci_priv))
5768 		cnss_mhi_debug_reg_dump(pci_priv);
5769 
5770 	cnss_pci_bhi_debug_reg_dump(pci_priv);
5771 	cnss_pci_soc_scratch_reg_dump(pci_priv);
5772 	cnss_pci_dump_misc_reg(pci_priv);
5773 	cnss_pci_dump_shadow_reg(pci_priv);
5774 }
5775 
5776 int cnss_pci_recover_link_down(struct cnss_pci_data *pci_priv)
5777 {
5778 	int ret;
5779 	int retry = 0;
5780 	enum mhi_ee_type mhi_ee;
5781 
5782 	switch (pci_priv->device_id) {
5783 	case QCA6390_DEVICE_ID:
5784 	case QCA6490_DEVICE_ID:
5785 	case KIWI_DEVICE_ID:
5786 	case MANGO_DEVICE_ID:
5787 	case PEACH_DEVICE_ID:
5788 		break;
5789 	default:
5790 		return -EOPNOTSUPP;
5791 	}
5792 
5793 	/* Always wait here to avoid missing WAKE assert for RDDM
5794 	 * before link recovery
5795 	 */
5796 	ret = wait_for_completion_timeout(&pci_priv->wake_event_complete,
5797 					  msecs_to_jiffies(WAKE_EVENT_TIMEOUT));
5798 	if (!ret)
5799 		cnss_pr_err("Timeout waiting for wake event after link down\n");
5800 
5801 	ret = cnss_suspend_pci_link(pci_priv);
5802 	if (ret)
5803 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
5804 
5805 	ret = cnss_resume_pci_link(pci_priv);
5806 	if (ret) {
5807 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
5808 		del_timer(&pci_priv->dev_rddm_timer);
5809 		return ret;
5810 	}
5811 
5812 retry:
5813 	/*
5814 	 * After PCIe link resumes, 20 to 400 ms delay is observerved
5815 	 * before device moves to RDDM.
5816 	 */
5817 	msleep(RDDM_LINK_RECOVERY_RETRY_DELAY_MS);
5818 	mhi_ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
5819 	if (mhi_ee == MHI_EE_RDDM) {
5820 		del_timer(&pci_priv->dev_rddm_timer);
5821 		cnss_pr_info("Device in RDDM after link recovery, try to collect dump\n");
5822 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
5823 				       CNSS_REASON_RDDM);
5824 		return 0;
5825 	} else if (retry++ < RDDM_LINK_RECOVERY_RETRY) {
5826 		cnss_pr_dbg("Wait for RDDM after link recovery, retry #%d, Device EE: %d\n",
5827 			    retry, mhi_ee);
5828 		goto retry;
5829 	}
5830 
5831 	if (!cnss_pci_assert_host_sol(pci_priv))
5832 		return 0;
5833 	cnss_mhi_debug_reg_dump(pci_priv);
5834 	cnss_pci_bhi_debug_reg_dump(pci_priv);
5835 	cnss_pci_soc_scratch_reg_dump(pci_priv);
5836 	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
5837 			       CNSS_REASON_TIMEOUT);
5838 	return 0;
5839 }
5840 
5841 int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
5842 {
5843 	int ret;
5844 	struct cnss_plat_data *plat_priv;
5845 
5846 	if (!pci_priv)
5847 		return -ENODEV;
5848 
5849 	plat_priv = pci_priv->plat_priv;
5850 	if (!plat_priv)
5851 		return -ENODEV;
5852 
5853 	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
5854 	    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state))
5855 		return -EINVAL;
5856 	/*
5857 	 * Call pm_runtime_get_sync insteat of auto_resume to get
5858 	 * reference and make sure runtime_suspend wont get called.
5859 	 */
5860 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
5861 	if (ret < 0)
5862 		goto runtime_pm_put;
5863 	/*
5864 	 * In some scenarios, cnss_pci_pm_runtime_get_sync
5865 	 * might not resume PCI bus. For those cases do auto resume.
5866 	 */
5867 	cnss_auto_resume(&pci_priv->pci_dev->dev);
5868 
5869 	if (!pci_priv->is_smmu_fault)
5870 		cnss_pci_mhi_reg_dump(pci_priv);
5871 
5872 	/* If link is still down here, directly trigger link down recovery */
5873 	ret = cnss_pci_check_link_status(pci_priv);
5874 	if (ret) {
5875 		cnss_pci_link_down(&pci_priv->pci_dev->dev);
5876 		cnss_pci_pm_runtime_mark_last_busy(pci_priv);
5877 		cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
5878 		return 0;
5879 	}
5880 
5881 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
5882 	if (ret) {
5883 		if (pci_priv->is_smmu_fault) {
5884 			cnss_pci_mhi_reg_dump(pci_priv);
5885 			pci_priv->is_smmu_fault = false;
5886 		}
5887 		if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
5888 		    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state)) {
5889 			cnss_pr_dbg("MHI is not powered on, ignore RDDM failure\n");
5890 			cnss_pci_pm_runtime_mark_last_busy(pci_priv);
5891 			cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
5892 			return 0;
5893 		}
5894 		cnss_fatal_err("Failed to trigger RDDM, err = %d\n", ret);
5895 		if (!cnss_pci_assert_host_sol(pci_priv)) {
5896 			cnss_pci_pm_runtime_mark_last_busy(pci_priv);
5897 			cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
5898 			return 0;
5899 		}
5900 		cnss_pci_dump_debug_reg(pci_priv);
5901 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
5902 				       CNSS_REASON_DEFAULT);
5903 		ret = 0;
5904 		goto runtime_pm_put;
5905 	}
5906 
5907 	if (pci_priv->is_smmu_fault) {
5908 		cnss_pci_mhi_reg_dump(pci_priv);
5909 		pci_priv->is_smmu_fault = false;
5910 	}
5911 
5912 	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
5913 		mod_timer(&pci_priv->dev_rddm_timer,
5914 			  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
5915 	}
5916 
5917 runtime_pm_put:
5918 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
5919 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
5920 	return ret;
5921 }
5922 
5923 static void cnss_pci_add_dump_seg(struct cnss_pci_data *pci_priv,
5924 				  struct cnss_dump_seg *dump_seg,
5925 				  enum cnss_fw_dump_type type, int seg_no,
5926 				  void *va, dma_addr_t dma, size_t size)
5927 {
5928 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5929 	struct device *dev = &pci_priv->pci_dev->dev;
5930 	phys_addr_t pa;
5931 
5932 	dump_seg->address = dma;
5933 	dump_seg->v_address = va;
5934 	dump_seg->size = size;
5935 	dump_seg->type = type;
5936 
5937 	cnss_pr_dbg("Seg: %x, va: %pK, dma: %pa, size: 0x%zx\n",
5938 		    seg_no, va, &dma, size);
5939 
5940 	if (cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS))
5941 		return;
5942 
5943 	cnss_minidump_add_region(plat_priv, type, seg_no, va, pa, size);
5944 }
5945 
5946 static void cnss_pci_remove_dump_seg(struct cnss_pci_data *pci_priv,
5947 				     struct cnss_dump_seg *dump_seg,
5948 				     enum cnss_fw_dump_type type, int seg_no,
5949 				     void *va, dma_addr_t dma, size_t size)
5950 {
5951 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5952 	struct device *dev = &pci_priv->pci_dev->dev;
5953 	phys_addr_t pa;
5954 
5955 	cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS);
5956 	cnss_minidump_remove_region(plat_priv, type, seg_no, va, pa, size);
5957 }
5958 
5959 int cnss_pci_call_driver_uevent(struct cnss_pci_data *pci_priv,
5960 				enum cnss_driver_status status, void *data)
5961 {
5962 	struct cnss_uevent_data uevent_data;
5963 	struct cnss_wlan_driver *driver_ops;
5964 
5965 	driver_ops = pci_priv->driver_ops;
5966 	if (!driver_ops || !driver_ops->update_event) {
5967 		cnss_pr_dbg("Hang event driver ops is NULL\n");
5968 		return -EINVAL;
5969 	}
5970 
5971 	cnss_pr_dbg("Calling driver uevent: %d\n", status);
5972 
5973 	uevent_data.status = status;
5974 	uevent_data.data = data;
5975 
5976 	return driver_ops->update_event(pci_priv->pci_dev, &uevent_data);
5977 }
5978 
5979 static void cnss_pci_send_hang_event(struct cnss_pci_data *pci_priv)
5980 {
5981 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5982 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
5983 	struct cnss_hang_event hang_event;
5984 	void *hang_data_va = NULL;
5985 	u64 offset = 0;
5986 	u16 length = 0;
5987 	int i = 0;
5988 
5989 	if (!fw_mem || !plat_priv->fw_mem_seg_len)
5990 		return;
5991 
5992 	memset(&hang_event, 0, sizeof(hang_event));
5993 	switch (pci_priv->device_id) {
5994 	case QCA6390_DEVICE_ID:
5995 		offset = HST_HANG_DATA_OFFSET;
5996 		length = HANG_DATA_LENGTH;
5997 		break;
5998 	case QCA6490_DEVICE_ID:
5999 		/* Fallback to hard-coded values if hang event params not
6000 		 * present in QMI. Once all the firmware branches have the
6001 		 * fix to send params over QMI, this can be removed.
6002 		 */
6003 		if (plat_priv->hang_event_data_len) {
6004 			offset = plat_priv->hang_data_addr_offset;
6005 			length = plat_priv->hang_event_data_len;
6006 		} else {
6007 			offset = HSP_HANG_DATA_OFFSET;
6008 			length = HANG_DATA_LENGTH;
6009 		}
6010 		break;
6011 	case KIWI_DEVICE_ID:
6012 	case MANGO_DEVICE_ID:
6013 	case PEACH_DEVICE_ID:
6014 		offset = plat_priv->hang_data_addr_offset;
6015 		length = plat_priv->hang_event_data_len;
6016 		break;
6017 	case QCN7605_DEVICE_ID:
6018 		offset = GNO_HANG_DATA_OFFSET;
6019 		length = HANG_DATA_LENGTH;
6020 		break;
6021 	default:
6022 		cnss_pr_err("Skip Hang Event Data as unsupported Device ID received: %d\n",
6023 			    pci_priv->device_id);
6024 		return;
6025 	}
6026 
6027 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
6028 		if (fw_mem[i].type == QMI_WLFW_MEM_TYPE_DDR_V01 &&
6029 		    fw_mem[i].va) {
6030 			/* The offset must be < (fw_mem size- hangdata length) */
6031 			if (!(offset <= fw_mem[i].size - length))
6032 				goto exit;
6033 
6034 			hang_data_va = fw_mem[i].va + offset;
6035 			hang_event.hang_event_data = kmemdup(hang_data_va,
6036 							     length,
6037 							     GFP_ATOMIC);
6038 			if (!hang_event.hang_event_data) {
6039 				cnss_pr_dbg("Hang data memory alloc failed\n");
6040 				return;
6041 			}
6042 			hang_event.hang_event_data_len = length;
6043 			break;
6044 		}
6045 	}
6046 
6047 	cnss_pci_call_driver_uevent(pci_priv, CNSS_HANG_EVENT, &hang_event);
6048 
6049 	kfree(hang_event.hang_event_data);
6050 	hang_event.hang_event_data = NULL;
6051 	return;
6052 exit:
6053 	cnss_pr_dbg("Invalid hang event params, offset:0x%x, length:0x%x\n",
6054 		    plat_priv->hang_data_addr_offset,
6055 		    plat_priv->hang_event_data_len);
6056 }
6057 
6058 #ifdef CONFIG_CNSS2_SSR_DRIVER_DUMP
6059 void cnss_pci_collect_host_dump_info(struct cnss_pci_data *pci_priv)
6060 {
6061 	struct cnss_ssr_driver_dump_entry *ssr_entry;
6062 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6063 	size_t num_entries_loaded = 0;
6064 	int x;
6065 	int ret = -1;
6066 
6067 	ssr_entry = kmalloc(sizeof(*ssr_entry) * CNSS_HOST_DUMP_TYPE_MAX, GFP_KERNEL);
6068 	if (!ssr_entry) {
6069 		cnss_pr_err("ssr_entry malloc failed");
6070 		return;
6071 	}
6072 
6073 	if (pci_priv->driver_ops &&
6074 	    pci_priv->driver_ops->collect_driver_dump) {
6075 		ret = pci_priv->driver_ops->collect_driver_dump(pci_priv->pci_dev,
6076 								ssr_entry,
6077 								&num_entries_loaded);
6078 	}
6079 
6080 	if (!ret) {
6081 		for (x = 0; x < num_entries_loaded; x++) {
6082 			cnss_pr_info("Idx:%d, ptr: %p, name: %s, size: %d\n",
6083 				     x, ssr_entry[x].buffer_pointer,
6084 				     ssr_entry[x].region_name,
6085 				     ssr_entry[x].buffer_size);
6086 		}
6087 
6088 		cnss_do_host_ramdump(plat_priv, ssr_entry, num_entries_loaded);
6089 	} else {
6090 		cnss_pr_info("Host SSR elf dump collection feature disabled\n");
6091 	}
6092 
6093 	kfree(ssr_entry);
6094 }
6095 #endif
6096 
6097 void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
6098 {
6099 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6100 	struct cnss_dump_data *dump_data =
6101 		&plat_priv->ramdump_info_v2.dump_data;
6102 	struct cnss_dump_seg *dump_seg =
6103 		plat_priv->ramdump_info_v2.dump_data_vaddr;
6104 	struct image_info *fw_image, *rddm_image;
6105 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
6106 	int ret, i, j;
6107 
6108 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
6109 	    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
6110 		cnss_pci_send_hang_event(pci_priv);
6111 
6112 	if (test_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state)) {
6113 		cnss_pr_dbg("RAM dump is already collected, skip\n");
6114 		return;
6115 	}
6116 
6117 	if (!cnss_is_device_powered_on(plat_priv)) {
6118 		cnss_pr_dbg("Device is already powered off, skip\n");
6119 		return;
6120 	}
6121 
6122 	if (!in_panic) {
6123 		mutex_lock(&pci_priv->bus_lock);
6124 		ret = cnss_pci_check_link_status(pci_priv);
6125 		if (ret) {
6126 			if (ret != -EACCES) {
6127 				mutex_unlock(&pci_priv->bus_lock);
6128 				return;
6129 			}
6130 			if (cnss_pci_resume_bus(pci_priv)) {
6131 				mutex_unlock(&pci_priv->bus_lock);
6132 				return;
6133 			}
6134 		}
6135 		mutex_unlock(&pci_priv->bus_lock);
6136 	} else {
6137 		if (cnss_pci_check_link_status(pci_priv))
6138 			return;
6139 		/* Inside panic handler, reduce timeout for RDDM to avoid
6140 		 * unnecessary hypervisor watchdog bite.
6141 		 */
6142 		pci_priv->mhi_ctrl->timeout_ms /= 2;
6143 	}
6144 
6145 	cnss_mhi_debug_reg_dump(pci_priv);
6146 	cnss_pci_bhi_debug_reg_dump(pci_priv);
6147 	cnss_pci_soc_scratch_reg_dump(pci_priv);
6148 	cnss_pci_dump_misc_reg(pci_priv);
6149 	cnss_rddm_trigger_debug(pci_priv);
6150 	ret = mhi_download_rddm_image(pci_priv->mhi_ctrl, in_panic);
6151 	if (ret) {
6152 		cnss_fatal_err("Failed to download RDDM image, err = %d\n",
6153 			       ret);
6154 		if (!cnss_pci_assert_host_sol(pci_priv))
6155 			return;
6156 		cnss_rddm_trigger_check(pci_priv);
6157 		cnss_pci_dump_debug_reg(pci_priv);
6158 		return;
6159 	}
6160 	cnss_rddm_trigger_check(pci_priv);
6161 	fw_image = pci_priv->mhi_ctrl->fbc_image;
6162 	rddm_image = pci_priv->mhi_ctrl->rddm_image;
6163 	dump_data->nentries = 0;
6164 
6165 	if (plat_priv->qdss_mem_seg_len)
6166 		cnss_pci_dump_qdss_reg(pci_priv);
6167 	cnss_mhi_dump_sfr(pci_priv);
6168 
6169 	if (!dump_seg) {
6170 		cnss_pr_warn("FW image dump collection not setup");
6171 		goto skip_dump;
6172 	}
6173 
6174 	cnss_pr_dbg("Collect FW image dump segment, nentries %d\n",
6175 		    fw_image->entries);
6176 
6177 	for (i = 0; i < fw_image->entries; i++) {
6178 		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
6179 				      fw_image->mhi_buf[i].buf,
6180 				      fw_image->mhi_buf[i].dma_addr,
6181 				      fw_image->mhi_buf[i].len);
6182 		dump_seg++;
6183 	}
6184 
6185 	dump_data->nentries += fw_image->entries;
6186 
6187 	cnss_pr_dbg("Collect RDDM image dump segment, nentries %d\n",
6188 		    rddm_image->entries);
6189 
6190 	for (i = 0; i < rddm_image->entries; i++) {
6191 		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
6192 				      rddm_image->mhi_buf[i].buf,
6193 				      rddm_image->mhi_buf[i].dma_addr,
6194 				      rddm_image->mhi_buf[i].len);
6195 		dump_seg++;
6196 	}
6197 
6198 	dump_data->nentries += rddm_image->entries;
6199 
6200 	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
6201 		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
6202 			if (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
6203 				cnss_pr_dbg("Collect remote heap dump segment\n");
6204 				cnss_pci_add_dump_seg(pci_priv, dump_seg,
6205 						      CNSS_FW_REMOTE_HEAP, j,
6206 						      fw_mem[i].va,
6207 						      fw_mem[i].pa,
6208 						      fw_mem[i].size);
6209 				dump_seg++;
6210 				dump_data->nentries++;
6211 				j++;
6212 			} else {
6213 				cnss_pr_dbg("Skip remote heap dumps as it is non-contiguous\n");
6214 			}
6215 		}
6216 	}
6217 
6218 	if (dump_data->nentries > 0)
6219 		plat_priv->ramdump_info_v2.dump_data_valid = true;
6220 
6221 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_DONE);
6222 
6223 skip_dump:
6224 	complete(&plat_priv->rddm_complete);
6225 }
6226 
6227 void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
6228 {
6229 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6230 	struct cnss_dump_seg *dump_seg =
6231 		plat_priv->ramdump_info_v2.dump_data_vaddr;
6232 	struct image_info *fw_image, *rddm_image;
6233 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
6234 	int i, j;
6235 
6236 	if (!dump_seg)
6237 		return;
6238 
6239 	fw_image = pci_priv->mhi_ctrl->fbc_image;
6240 	rddm_image = pci_priv->mhi_ctrl->rddm_image;
6241 
6242 	for (i = 0; i < fw_image->entries; i++) {
6243 		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
6244 					 fw_image->mhi_buf[i].buf,
6245 					 fw_image->mhi_buf[i].dma_addr,
6246 					 fw_image->mhi_buf[i].len);
6247 		dump_seg++;
6248 	}
6249 
6250 	for (i = 0; i < rddm_image->entries; i++) {
6251 		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
6252 					 rddm_image->mhi_buf[i].buf,
6253 					 rddm_image->mhi_buf[i].dma_addr,
6254 					 rddm_image->mhi_buf[i].len);
6255 		dump_seg++;
6256 	}
6257 
6258 	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
6259 		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR &&
6260 		    (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
6261 			cnss_pci_remove_dump_seg(pci_priv, dump_seg,
6262 						 CNSS_FW_REMOTE_HEAP, j,
6263 						 fw_mem[i].va, fw_mem[i].pa,
6264 						 fw_mem[i].size);
6265 			dump_seg++;
6266 			j++;
6267 		}
6268 	}
6269 
6270 	plat_priv->ramdump_info_v2.dump_data.nentries = 0;
6271 	plat_priv->ramdump_info_v2.dump_data_valid = false;
6272 }
6273 
6274 void cnss_pci_device_crashed(struct cnss_pci_data *pci_priv)
6275 {
6276 	struct cnss_plat_data *plat_priv;
6277 
6278 	if (!pci_priv) {
6279 		cnss_pr_err("pci_priv is NULL\n");
6280 		return;
6281 	}
6282 
6283 	plat_priv = pci_priv->plat_priv;
6284 	if (!plat_priv) {
6285 		cnss_pr_err("plat_priv is NULL\n");
6286 		return;
6287 	}
6288 
6289 	if (plat_priv->recovery_enabled)
6290 		cnss_pci_collect_host_dump_info(pci_priv);
6291 
6292 	/* Call recovery handler in the DRIVER_RECOVERY event context
6293 	 * instead of scheduling work. In that way complete recovery
6294 	 * will be done as part of DRIVER_RECOVERY event and get
6295 	 * serialized with other events.
6296 	 */
6297 	cnss_recovery_handler(plat_priv);
6298 }
6299 
6300 static int cnss_mhi_pm_runtime_get(struct mhi_controller *mhi_ctrl)
6301 {
6302 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6303 
6304 	return cnss_pci_pm_runtime_get(pci_priv, RTPM_ID_MHI);
6305 }
6306 
6307 static void cnss_mhi_pm_runtime_put_noidle(struct mhi_controller *mhi_ctrl)
6308 {
6309 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6310 
6311 	cnss_pci_pm_runtime_put_noidle(pci_priv, RTPM_ID_MHI);
6312 }
6313 
6314 void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv,
6315 				 char *prefix_name, char *name)
6316 {
6317 	struct cnss_plat_data *plat_priv;
6318 
6319 	if (!pci_priv)
6320 		return;
6321 
6322 	plat_priv = pci_priv->plat_priv;
6323 
6324 	if (!plat_priv->use_fw_path_with_prefix) {
6325 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
6326 		return;
6327 	}
6328 
6329 	switch (pci_priv->device_id) {
6330 	case QCN7605_DEVICE_ID:
6331 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6332 			  QCN7605_PATH_PREFIX "%s", name);
6333 		break;
6334 	case QCA6390_DEVICE_ID:
6335 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6336 			  QCA6390_PATH_PREFIX "%s", name);
6337 		break;
6338 	case QCA6490_DEVICE_ID:
6339 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6340 			  QCA6490_PATH_PREFIX "%s", name);
6341 		break;
6342 	case KIWI_DEVICE_ID:
6343 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6344 			  KIWI_PATH_PREFIX "%s", name);
6345 		break;
6346 	case MANGO_DEVICE_ID:
6347 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6348 			  MANGO_PATH_PREFIX "%s", name);
6349 		break;
6350 	case PEACH_DEVICE_ID:
6351 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6352 			  PEACH_PATH_PREFIX "%s", name);
6353 		break;
6354 	default:
6355 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
6356 		break;
6357 	}
6358 
6359 	cnss_pr_dbg("FW name added with prefix: %s\n", prefix_name);
6360 }
6361 
6362 static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
6363 {
6364 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6365 
6366 	switch (pci_priv->device_id) {
6367 	case QCA6390_DEVICE_ID:
6368 		if (plat_priv->device_version.major_version < FW_V2_NUMBER) {
6369 			cnss_pr_dbg("Device ID:version (0x%lx:%d) is not supported\n",
6370 				    pci_priv->device_id,
6371 				    plat_priv->device_version.major_version);
6372 			return -EINVAL;
6373 		}
6374 		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
6375 					    FW_V2_FILE_NAME);
6376 		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
6377 			 FW_V2_FILE_NAME);
6378 		break;
6379 	case QCA6490_DEVICE_ID:
6380 		switch (plat_priv->device_version.major_version) {
6381 		case FW_V2_NUMBER:
6382 				cnss_pci_add_fw_prefix_name(pci_priv,
6383 							    plat_priv->firmware_name,
6384 							    FW_V2_FILE_NAME);
6385 				snprintf(plat_priv->fw_fallback_name,
6386 					 MAX_FIRMWARE_NAME_LEN,
6387 					 FW_V2_FILE_NAME);
6388 			break;
6389 		default:
6390 			cnss_pci_add_fw_prefix_name(pci_priv,
6391 						    plat_priv->firmware_name,
6392 						    DEFAULT_FW_FILE_NAME);
6393 			snprintf(plat_priv->fw_fallback_name,
6394 				 MAX_FIRMWARE_NAME_LEN,
6395 				 DEFAULT_FW_FILE_NAME);
6396 			break;
6397 		}
6398 		break;
6399 	case KIWI_DEVICE_ID:
6400 	case MANGO_DEVICE_ID:
6401 	case PEACH_DEVICE_ID:
6402 		switch (plat_priv->device_version.major_version) {
6403 		case FW_V2_NUMBER:
6404 			/*
6405 			 * kiwiv2 using seprate fw binary for MM and FTM mode,
6406 			 * platform driver loads corresponding binary according
6407 			 * to current mode indicated by wlan driver. Otherwise
6408 			 * use default binary.
6409 			 * Mission mode using same binary name as before,
6410 			 * if seprate binary is not there, fall back to default.
6411 			 */
6412 			if (plat_priv->driver_mode == CNSS_MISSION) {
6413 				cnss_pci_add_fw_prefix_name(pci_priv,
6414 							    plat_priv->firmware_name,
6415 							    FW_V2_FILE_NAME);
6416 				cnss_pci_add_fw_prefix_name(pci_priv,
6417 							    plat_priv->fw_fallback_name,
6418 							    FW_V2_FILE_NAME);
6419 			} else if (plat_priv->driver_mode == CNSS_FTM) {
6420 				cnss_pci_add_fw_prefix_name(pci_priv,
6421 							    plat_priv->firmware_name,
6422 							    FW_V2_FTM_FILE_NAME);
6423 				cnss_pci_add_fw_prefix_name(pci_priv,
6424 							    plat_priv->fw_fallback_name,
6425 							    FW_V2_FILE_NAME);
6426 			} else {
6427 				/*
6428 				 * Since during cold boot calibration phase,
6429 				 * wlan driver has not registered, so default
6430 				 * fw binary will be used.
6431 				 */
6432 				cnss_pci_add_fw_prefix_name(pci_priv,
6433 							    plat_priv->firmware_name,
6434 							    FW_V2_FILE_NAME);
6435 				snprintf(plat_priv->fw_fallback_name,
6436 					 MAX_FIRMWARE_NAME_LEN,
6437 					 FW_V2_FILE_NAME);
6438 			}
6439 			break;
6440 		default:
6441 			cnss_pci_add_fw_prefix_name(pci_priv,
6442 						    plat_priv->firmware_name,
6443 						    DEFAULT_FW_FILE_NAME);
6444 			snprintf(plat_priv->fw_fallback_name,
6445 				 MAX_FIRMWARE_NAME_LEN,
6446 				 DEFAULT_FW_FILE_NAME);
6447 			break;
6448 		}
6449 		break;
6450 	default:
6451 		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
6452 					    DEFAULT_FW_FILE_NAME);
6453 		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
6454 			 DEFAULT_FW_FILE_NAME);
6455 		break;
6456 	}
6457 
6458 	cnss_pr_dbg("FW name is %s, FW fallback name is %s\n",
6459 		    plat_priv->firmware_name, plat_priv->fw_fallback_name);
6460 
6461 	return 0;
6462 }
6463 
6464 static char *cnss_mhi_notify_status_to_str(enum mhi_callback status)
6465 {
6466 	switch (status) {
6467 	case MHI_CB_IDLE:
6468 		return "IDLE";
6469 	case MHI_CB_EE_RDDM:
6470 		return "RDDM";
6471 	case MHI_CB_SYS_ERROR:
6472 		return "SYS_ERROR";
6473 	case MHI_CB_FATAL_ERROR:
6474 		return "FATAL_ERROR";
6475 	case MHI_CB_EE_MISSION_MODE:
6476 		return "MISSION_MODE";
6477 #if IS_ENABLED(CONFIG_MHI_BUS_MISC) && \
6478 (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
6479 	case MHI_CB_FALLBACK_IMG:
6480 		return "FW_FALLBACK";
6481 #endif
6482 	default:
6483 		return "UNKNOWN";
6484 	}
6485 };
6486 
6487 static void cnss_dev_rddm_timeout_hdlr(struct timer_list *t)
6488 {
6489 	struct cnss_pci_data *pci_priv =
6490 		from_timer(pci_priv, t, dev_rddm_timer);
6491 	enum mhi_ee_type mhi_ee;
6492 
6493 	if (!pci_priv)
6494 		return;
6495 
6496 	cnss_fatal_err("Timeout waiting for RDDM notification\n");
6497 
6498 	if (!cnss_pci_assert_host_sol(pci_priv))
6499 		return;
6500 
6501 	mhi_ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
6502 	if (mhi_ee == MHI_EE_PBL)
6503 		cnss_pr_err("Device MHI EE is PBL, unable to collect dump\n");
6504 
6505 	if (mhi_ee == MHI_EE_RDDM) {
6506 		cnss_pr_info("Device MHI EE is RDDM, try to collect dump\n");
6507 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6508 				       CNSS_REASON_RDDM);
6509 	} else {
6510 		cnss_mhi_debug_reg_dump(pci_priv);
6511 		cnss_pci_bhi_debug_reg_dump(pci_priv);
6512 		cnss_pci_soc_scratch_reg_dump(pci_priv);
6513 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6514 				       CNSS_REASON_TIMEOUT);
6515 	}
6516 }
6517 
6518 static void cnss_boot_debug_timeout_hdlr(struct timer_list *t)
6519 {
6520 	struct cnss_pci_data *pci_priv =
6521 		from_timer(pci_priv, t, boot_debug_timer);
6522 
6523 	if (!pci_priv)
6524 		return;
6525 
6526 	if (cnss_pci_check_link_status(pci_priv))
6527 		return;
6528 
6529 	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
6530 		return;
6531 
6532 	if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
6533 		return;
6534 
6535 	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE))
6536 		return;
6537 
6538 	cnss_pr_dbg("Dump MHI/PBL/SBL debug data every %ds during MHI power on\n",
6539 		    BOOT_DEBUG_TIMEOUT_MS / 1000);
6540 	cnss_mhi_debug_reg_dump(pci_priv);
6541 	cnss_pci_bhi_debug_reg_dump(pci_priv);
6542 	cnss_pci_soc_scratch_reg_dump(pci_priv);
6543 	cnss_pci_dump_bl_sram_mem(pci_priv);
6544 
6545 	mod_timer(&pci_priv->boot_debug_timer,
6546 		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
6547 }
6548 
6549 static int cnss_pci_handle_mhi_sys_err(struct cnss_pci_data *pci_priv)
6550 {
6551 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6552 
6553 	cnss_ignore_qmi_failure(true);
6554 	set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
6555 	del_timer(&plat_priv->fw_boot_timer);
6556 	reinit_completion(&pci_priv->wake_event_complete);
6557 	mod_timer(&pci_priv->dev_rddm_timer,
6558 		  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
6559 	cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
6560 
6561 	return 0;
6562 }
6563 
6564 int cnss_pci_handle_dev_sol_irq(struct cnss_pci_data *pci_priv)
6565 {
6566 	return cnss_pci_handle_mhi_sys_err(pci_priv);
6567 }
6568 
6569 static void cnss_mhi_notify_status(struct mhi_controller *mhi_ctrl,
6570 				   enum mhi_callback reason)
6571 {
6572 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6573 	struct cnss_plat_data *plat_priv;
6574 	enum cnss_recovery_reason cnss_reason;
6575 
6576 	if (!pci_priv) {
6577 		cnss_pr_err("pci_priv is NULL");
6578 		return;
6579 	}
6580 
6581 	plat_priv = pci_priv->plat_priv;
6582 
6583 	if (reason != MHI_CB_IDLE)
6584 		cnss_pr_dbg("MHI status cb is called with reason %s(%d)\n",
6585 			    cnss_mhi_notify_status_to_str(reason), reason);
6586 
6587 	switch (reason) {
6588 	case MHI_CB_IDLE:
6589 	case MHI_CB_EE_MISSION_MODE:
6590 		return;
6591 	case MHI_CB_FATAL_ERROR:
6592 		cnss_ignore_qmi_failure(true);
6593 		set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
6594 		del_timer(&plat_priv->fw_boot_timer);
6595 		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
6596 		cnss_reason = CNSS_REASON_DEFAULT;
6597 		break;
6598 	case MHI_CB_SYS_ERROR:
6599 		cnss_pci_handle_mhi_sys_err(pci_priv);
6600 		return;
6601 	case MHI_CB_EE_RDDM:
6602 		cnss_ignore_qmi_failure(true);
6603 		set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
6604 		del_timer(&plat_priv->fw_boot_timer);
6605 		del_timer(&pci_priv->dev_rddm_timer);
6606 		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
6607 		cnss_reason = CNSS_REASON_RDDM;
6608 		break;
6609 #if IS_ENABLED(CONFIG_MHI_BUS_MISC) && \
6610 (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
6611 	case MHI_CB_FALLBACK_IMG:
6612 		/* for kiwi_v2 binary fallback is used, skip path fallback here */
6613 		if (!(pci_priv->device_id == KIWI_DEVICE_ID &&
6614 		      plat_priv->device_version.major_version == FW_V2_NUMBER)) {
6615 			plat_priv->use_fw_path_with_prefix = false;
6616 			cnss_pci_update_fw_name(pci_priv);
6617 		}
6618 		return;
6619 #endif
6620 
6621 	default:
6622 		cnss_pr_err("Unsupported MHI status cb reason: %d\n", reason);
6623 		return;
6624 	}
6625 
6626 	cnss_schedule_recovery(&pci_priv->pci_dev->dev, cnss_reason);
6627 }
6628 
6629 static int cnss_pci_get_mhi_msi(struct cnss_pci_data *pci_priv)
6630 {
6631 	int ret, num_vectors, i;
6632 	u32 user_base_data, base_vector;
6633 	int *irq;
6634 	unsigned int msi_data;
6635 	bool is_one_msi = false;
6636 
6637 	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
6638 					   MHI_MSI_NAME, &num_vectors,
6639 					   &user_base_data, &base_vector);
6640 	if (ret)
6641 		return ret;
6642 
6643 	if (cnss_pci_is_one_msi(pci_priv)) {
6644 		is_one_msi = true;
6645 		num_vectors = cnss_pci_get_one_msi_mhi_irq_array_size(pci_priv);
6646 	}
6647 	cnss_pr_dbg("Number of assigned MSI for MHI is %d, base vector is %d\n",
6648 		    num_vectors, base_vector);
6649 
6650 	irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
6651 	if (!irq)
6652 		return -ENOMEM;
6653 
6654 	for (i = 0; i < num_vectors; i++) {
6655 		msi_data = base_vector;
6656 		if (!is_one_msi)
6657 			msi_data += i;
6658 		irq[i] = cnss_get_msi_irq(&pci_priv->pci_dev->dev, msi_data);
6659 	}
6660 
6661 	pci_priv->mhi_ctrl->irq = irq;
6662 	pci_priv->mhi_ctrl->nr_irqs = num_vectors;
6663 
6664 	return 0;
6665 }
6666 
6667 static int cnss_mhi_bw_scale(struct mhi_controller *mhi_ctrl,
6668 			     struct mhi_link_info *link_info)
6669 {
6670 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6671 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6672 	int ret = 0;
6673 
6674 	cnss_pr_dbg("Setting link speed:0x%x, width:0x%x\n",
6675 		    link_info->target_link_speed,
6676 		    link_info->target_link_width);
6677 
6678 	/* It has to set target link speed here before setting link bandwidth
6679 	 * when device requests link speed change. This can avoid setting link
6680 	 * bandwidth getting rejected if requested link speed is higher than
6681 	 * current one.
6682 	 */
6683 	ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num,
6684 					  link_info->target_link_speed);
6685 	if (ret)
6686 		cnss_pr_err("Failed to set target link speed to 0x%x, err = %d\n",
6687 			    link_info->target_link_speed, ret);
6688 
6689 	ret = cnss_pci_set_link_bandwidth(pci_priv,
6690 					  link_info->target_link_speed,
6691 					  link_info->target_link_width);
6692 
6693 	if (ret) {
6694 		cnss_pr_err("Failed to set link bandwidth, err = %d\n", ret);
6695 		return ret;
6696 	}
6697 
6698 	pci_priv->def_link_speed = link_info->target_link_speed;
6699 	pci_priv->def_link_width = link_info->target_link_width;
6700 
6701 	return 0;
6702 }
6703 
6704 static int cnss_mhi_read_reg(struct mhi_controller *mhi_ctrl,
6705 			     void __iomem *addr, u32 *out)
6706 {
6707 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6708 
6709 	u32 tmp = readl_relaxed(addr);
6710 
6711 	/* Unexpected value, query the link status */
6712 	if (PCI_INVALID_READ(tmp) &&
6713 	    cnss_pci_check_link_status(pci_priv))
6714 		return -EIO;
6715 
6716 	*out = tmp;
6717 
6718 	return 0;
6719 }
6720 
6721 static void cnss_mhi_write_reg(struct mhi_controller *mhi_ctrl,
6722 			       void __iomem *addr, u32 val)
6723 {
6724 	writel_relaxed(val, addr);
6725 }
6726 
6727 static int cnss_get_mhi_soc_info(struct cnss_plat_data *plat_priv,
6728 				 struct mhi_controller *mhi_ctrl)
6729 {
6730 	int ret = 0;
6731 
6732 	ret = mhi_get_soc_info(mhi_ctrl);
6733 	if (ret)
6734 		goto exit;
6735 
6736 	plat_priv->device_version.family_number = mhi_ctrl->family_number;
6737 	plat_priv->device_version.device_number = mhi_ctrl->device_number;
6738 	plat_priv->device_version.major_version = mhi_ctrl->major_version;
6739 	plat_priv->device_version.minor_version = mhi_ctrl->minor_version;
6740 
6741 	cnss_pr_dbg("Get device version info, family number: 0x%x, device number: 0x%x, major version: 0x%x, minor version: 0x%x\n",
6742 		    plat_priv->device_version.family_number,
6743 		    plat_priv->device_version.device_number,
6744 		    plat_priv->device_version.major_version,
6745 		    plat_priv->device_version.minor_version);
6746 
6747 	/* Only keep lower 4 bits as real device major version */
6748 	plat_priv->device_version.major_version &= DEVICE_MAJOR_VERSION_MASK;
6749 
6750 exit:
6751 	return ret;
6752 }
6753 
6754 static bool cnss_is_tme_supported(struct cnss_pci_data *pci_priv)
6755 {
6756 	if (!pci_priv) {
6757 		cnss_pr_dbg("pci_priv is NULL");
6758 		return false;
6759 	}
6760 
6761 	switch (pci_priv->device_id) {
6762 	case PEACH_DEVICE_ID:
6763 		return true;
6764 	default:
6765 		return false;
6766 	}
6767 }
6768 
6769 static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
6770 {
6771 	int ret = 0;
6772 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6773 	struct pci_dev *pci_dev = pci_priv->pci_dev;
6774 	struct mhi_controller *mhi_ctrl;
6775 	phys_addr_t bar_start;
6776 	const struct mhi_controller_config *cnss_mhi_config =
6777 						&cnss_mhi_config_default;
6778 
6779 	ret = cnss_qmi_init(plat_priv);
6780 	if (ret)
6781 		return -EINVAL;
6782 
6783 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
6784 		return 0;
6785 
6786 	mhi_ctrl = mhi_alloc_controller();
6787 	if (!mhi_ctrl) {
6788 		cnss_pr_err("Invalid MHI controller context\n");
6789 		return -EINVAL;
6790 	}
6791 
6792 	pci_priv->mhi_ctrl = mhi_ctrl;
6793 	mhi_ctrl->cntrl_dev = &pci_dev->dev;
6794 
6795 	mhi_ctrl->fw_image = plat_priv->firmware_name;
6796 #if IS_ENABLED(CONFIG_MHI_BUS_MISC) && \
6797 (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
6798 	mhi_ctrl->fallback_fw_image = plat_priv->fw_fallback_name;
6799 #endif
6800 
6801 	mhi_ctrl->regs = pci_priv->bar;
6802 	mhi_ctrl->reg_len = pci_resource_len(pci_priv->pci_dev, PCI_BAR_NUM);
6803 	bar_start = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
6804 	cnss_pr_dbg("BAR starts at %pa, length is %x\n",
6805 		    &bar_start, mhi_ctrl->reg_len);
6806 
6807 	ret = cnss_pci_get_mhi_msi(pci_priv);
6808 	if (ret) {
6809 		cnss_pr_err("Failed to get MSI for MHI, err = %d\n", ret);
6810 		goto free_mhi_ctrl;
6811 	}
6812 
6813 	if (cnss_pci_is_one_msi(pci_priv))
6814 		mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
6815 
6816 	if (pci_priv->smmu_s1_enable) {
6817 		mhi_ctrl->iova_start = pci_priv->smmu_iova_start;
6818 		mhi_ctrl->iova_stop = pci_priv->smmu_iova_start +
6819 					pci_priv->smmu_iova_len;
6820 	} else {
6821 		mhi_ctrl->iova_start = 0;
6822 		mhi_ctrl->iova_stop = pci_priv->dma_bit_mask;
6823 	}
6824 
6825 	mhi_ctrl->status_cb = cnss_mhi_notify_status;
6826 	mhi_ctrl->runtime_get = cnss_mhi_pm_runtime_get;
6827 	mhi_ctrl->runtime_put = cnss_mhi_pm_runtime_put_noidle;
6828 	mhi_ctrl->read_reg = cnss_mhi_read_reg;
6829 	mhi_ctrl->write_reg = cnss_mhi_write_reg;
6830 
6831 	mhi_ctrl->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size;
6832 	if (!mhi_ctrl->rddm_size)
6833 		mhi_ctrl->rddm_size = RAMDUMP_SIZE_DEFAULT;
6834 
6835 	if (plat_priv->device_id == QCN7605_DEVICE_ID)
6836 		mhi_ctrl->sbl_size = SZ_256K;
6837 	else
6838 		mhi_ctrl->sbl_size = SZ_512K;
6839 
6840 	mhi_ctrl->seg_len = SZ_512K;
6841 	mhi_ctrl->fbc_download = true;
6842 
6843 	ret = cnss_get_mhi_soc_info(plat_priv, mhi_ctrl);
6844 	if (ret)
6845 		goto free_mhi_irq;
6846 
6847 	/* Satellite config only supported on KIWI V2 and later chipset */
6848 	if (plat_priv->device_id <= QCA6490_DEVICE_ID ||
6849 			(plat_priv->device_id == KIWI_DEVICE_ID &&
6850 			 plat_priv->device_version.major_version == 1)) {
6851 		if (plat_priv->device_id == QCN7605_DEVICE_ID)
6852 			cnss_mhi_config = &cnss_mhi_config_genoa;
6853 		else
6854 			cnss_mhi_config = &cnss_mhi_config_no_satellite;
6855 	}
6856 
6857 	mhi_ctrl->tme_supported_image = cnss_is_tme_supported(pci_priv);
6858 
6859 	ret = mhi_register_controller(mhi_ctrl, cnss_mhi_config);
6860 	if (ret) {
6861 		cnss_pr_err("Failed to register to MHI bus, err = %d\n", ret);
6862 		goto free_mhi_irq;
6863 	}
6864 
6865 	/* MHI satellite driver only needs to connect when DRV is supported */
6866 	if (cnss_pci_get_drv_supported(pci_priv))
6867 		cnss_mhi_controller_set_base(pci_priv, bar_start);
6868 
6869 	cnss_get_bwscal_info(plat_priv);
6870 	cnss_pr_dbg("no_bwscale: %d\n", plat_priv->no_bwscale);
6871 
6872 	/* BW scale CB needs to be set after registering MHI per requirement */
6873 	if (!plat_priv->no_bwscale)
6874 		cnss_mhi_controller_set_bw_scale_cb(pci_priv,
6875 						    cnss_mhi_bw_scale);
6876 
6877 	ret = cnss_pci_update_fw_name(pci_priv);
6878 	if (ret)
6879 		goto unreg_mhi;
6880 
6881 	return 0;
6882 
6883 unreg_mhi:
6884 	mhi_unregister_controller(mhi_ctrl);
6885 free_mhi_irq:
6886 	kfree(mhi_ctrl->irq);
6887 free_mhi_ctrl:
6888 	mhi_free_controller(mhi_ctrl);
6889 
6890 	return ret;
6891 }
6892 
6893 static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
6894 {
6895 	struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;
6896 
6897 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
6898 		return;
6899 
6900 	mhi_unregister_controller(mhi_ctrl);
6901 	kfree(mhi_ctrl->irq);
6902 	mhi_ctrl->irq = NULL;
6903 	mhi_free_controller(mhi_ctrl);
6904 	pci_priv->mhi_ctrl = NULL;
6905 }
6906 
6907 static void cnss_pci_config_regs(struct cnss_pci_data *pci_priv)
6908 {
6909 	switch (pci_priv->device_id) {
6910 	case QCA6390_DEVICE_ID:
6911 		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6390;
6912 		pci_priv->wcss_reg = wcss_reg_access_seq;
6913 		pci_priv->pcie_reg = pcie_reg_access_seq;
6914 		pci_priv->wlaon_reg = wlaon_reg_access_seq;
6915 		pci_priv->syspm_reg = syspm_reg_access_seq;
6916 
6917 		/* Configure WDOG register with specific value so that we can
6918 		 * know if HW is in the process of WDOG reset recovery or not
6919 		 * when reading the registers.
6920 		 */
6921 		cnss_pci_reg_write
6922 		(pci_priv,
6923 		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG,
6924 		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL);
6925 		break;
6926 	case QCA6490_DEVICE_ID:
6927 		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6490;
6928 		pci_priv->wlaon_reg = wlaon_reg_access_seq;
6929 		break;
6930 	default:
6931 		return;
6932 	}
6933 }
6934 
6935 #if !IS_ENABLED(CONFIG_ARCH_QCOM)
6936 static int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
6937 {
6938 	return 0;
6939 }
6940 
6941 static irqreturn_t cnss_pci_wake_handler(int irq, void *data)
6942 {
6943 	struct cnss_pci_data *pci_priv = data;
6944 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6945 	enum rpm_status status;
6946 	struct device *dev;
6947 
6948 	pci_priv->wake_counter++;
6949 	cnss_pr_dbg("WLAN PCI wake IRQ (%u) is asserted #%u\n",
6950 		    pci_priv->wake_irq, pci_priv->wake_counter);
6951 
6952 	/* Make sure abort current suspend */
6953 	cnss_pm_stay_awake(plat_priv);
6954 	cnss_pm_relax(plat_priv);
6955 	/* Above two pm* API calls will abort system suspend only when
6956 	 * plat_dev->dev->ws is initiated by device_init_wakeup() API, and
6957 	 * calling pm_system_wakeup() is just to guarantee system suspend
6958 	 * can be aborted if it is not initiated in any case.
6959 	 */
6960 	pm_system_wakeup();
6961 
6962 	dev = &pci_priv->pci_dev->dev;
6963 	status = dev->power.runtime_status;
6964 
6965 	if ((cnss_pci_get_monitor_wake_intr(pci_priv) &&
6966 	     cnss_pci_get_auto_suspended(pci_priv)) ||
6967 	    (status == RPM_SUSPENDING || status == RPM_SUSPENDED)) {
6968 		cnss_pci_set_monitor_wake_intr(pci_priv, false);
6969 		cnss_pci_pm_request_resume(pci_priv);
6970 	}
6971 
6972 	return IRQ_HANDLED;
6973 }
6974 
6975 /**
6976  * cnss_pci_wake_gpio_init() - Setup PCI wake GPIO for WLAN
6977  * @pci_priv: driver PCI bus context pointer
6978  *
6979  * This function initializes WLAN PCI wake GPIO and corresponding
6980  * interrupt. It should be used in non-MSM platforms whose PCIe
6981  * root complex driver doesn't handle the GPIO.
6982  *
6983  * Return: 0 for success or skip, negative value for error
6984  */
6985 static int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
6986 {
6987 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6988 	struct device *dev = &plat_priv->plat_dev->dev;
6989 	int ret = 0;
6990 
6991 	pci_priv->wake_gpio = of_get_named_gpio(dev->of_node,
6992 						"wlan-pci-wake-gpio", 0);
6993 	if (pci_priv->wake_gpio < 0)
6994 		goto out;
6995 
6996 	cnss_pr_dbg("Get PCI wake GPIO (%d) from device node\n",
6997 		    pci_priv->wake_gpio);
6998 
6999 	ret = gpio_request(pci_priv->wake_gpio, "wlan_pci_wake_gpio");
7000 	if (ret) {
7001 		cnss_pr_err("Failed to request PCI wake GPIO, err = %d\n",
7002 			    ret);
7003 		goto out;
7004 	}
7005 
7006 	gpio_direction_input(pci_priv->wake_gpio);
7007 	pci_priv->wake_irq = gpio_to_irq(pci_priv->wake_gpio);
7008 
7009 	ret = request_irq(pci_priv->wake_irq, cnss_pci_wake_handler,
7010 			  IRQF_TRIGGER_FALLING, "wlan_pci_wake_irq", pci_priv);
7011 	if (ret) {
7012 		cnss_pr_err("Failed to request PCI wake IRQ, err = %d\n", ret);
7013 		goto free_gpio;
7014 	}
7015 
7016 	ret = enable_irq_wake(pci_priv->wake_irq);
7017 	if (ret) {
7018 		cnss_pr_err("Failed to enable PCI wake IRQ, err = %d\n", ret);
7019 		goto free_irq;
7020 	}
7021 
7022 	return 0;
7023 
7024 free_irq:
7025 	free_irq(pci_priv->wake_irq, pci_priv);
7026 free_gpio:
7027 	gpio_free(pci_priv->wake_gpio);
7028 out:
7029 	return ret;
7030 }
7031 
7032 static void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
7033 {
7034 	if (pci_priv->wake_gpio < 0)
7035 		return;
7036 
7037 	disable_irq_wake(pci_priv->wake_irq);
7038 	free_irq(pci_priv->wake_irq, pci_priv);
7039 	gpio_free(pci_priv->wake_gpio);
7040 }
7041 #endif
7042 
7043 #ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
7044 static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
7045 {
7046 	int ret = 0;
7047 
7048 	/* in the dual wlan card case, if call pci_register_driver after
7049 	 * finishing the first pcie device enumeration, it will cause
7050 	 * the cnss_pci_probe called in advance with the second wlan card,
7051 	 * and the sequence like this:
7052 	 * enter msm_pcie_enumerate -> pci_bus_add_devices -> cnss_pci_probe
7053 	 * -> exit msm_pcie_enumerate.
7054 	 * But the correct sequence we expected is like this:
7055 	 * enter msm_pcie_enumerate -> pci_bus_add_devices  ->
7056 	 * exit msm_pcie_enumerate -> cnss_pci_probe.
7057 	 * And this unexpected sequence will make the second wlan card do
7058 	 * pcie link suspend while the pcie enumeration not finished.
7059 	 * So need to add below logical to avoid doing pcie link suspend
7060 	 * if the enumeration has not finish.
7061 	 */
7062 	plat_priv->enumerate_done = true;
7063 
7064 	/* Now enumeration is finished, try to suspend PCIe link */
7065 	if (plat_priv->bus_priv) {
7066 		struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
7067 		struct pci_dev *pci_dev = pci_priv->pci_dev;
7068 
7069 		switch (pci_dev->device) {
7070 		case QCA6390_DEVICE_ID:
7071 			cnss_pci_set_wlaon_pwr_ctrl(pci_priv,
7072 						    false,
7073 						    true,
7074 						    false);
7075 
7076 			cnss_pci_suspend_pwroff(pci_dev);
7077 			break;
7078 		default:
7079 			cnss_pr_err("Unknown PCI device found: 0x%x\n",
7080 				    pci_dev->device);
7081 			ret = -ENODEV;
7082 		}
7083 	}
7084 
7085 	return ret;
7086 }
7087 #else
7088 static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
7089 {
7090 	return 0;
7091 }
7092 #endif
7093 
7094 /* Setting to use this cnss_pm_domain ops will let PM framework override the
7095  * ops from dev->bus->pm which is pci_dev_pm_ops from pci-driver.c. This ops
7096  * has to take care everything device driver needed which is currently done
7097  * from pci_dev_pm_ops.
7098  */
7099 static struct dev_pm_domain cnss_pm_domain = {
7100 	.ops = {
7101 		SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
7102 		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
7103 					      cnss_pci_resume_noirq)
7104 		SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend,
7105 				   cnss_pci_runtime_resume,
7106 				   cnss_pci_runtime_idle)
7107 	}
7108 };
7109 
7110 static int cnss_pci_get_dev_cfg_node(struct cnss_plat_data *plat_priv)
7111 {
7112 	struct device_node *child;
7113 	u32 id, i;
7114 	int id_n, ret;
7115 
7116 	if (plat_priv->dt_type != CNSS_DTT_MULTIEXCHG)
7117 		return 0;
7118 
7119 	if (!plat_priv->device_id) {
7120 		cnss_pr_err("Invalid device id\n");
7121 		return -EINVAL;
7122 	}
7123 
7124 	for_each_available_child_of_node(plat_priv->plat_dev->dev.of_node,
7125 					 child) {
7126 		if (strcmp(child->name, "chip_cfg"))
7127 			continue;
7128 
7129 		id_n = of_property_count_u32_elems(child, "supported-ids");
7130 		if (id_n <= 0) {
7131 			cnss_pr_err("Device id is NOT set\n");
7132 			return -EINVAL;
7133 		}
7134 
7135 		for (i = 0; i < id_n; i++) {
7136 			ret = of_property_read_u32_index(child,
7137 							 "supported-ids",
7138 							 i, &id);
7139 			if (ret) {
7140 				cnss_pr_err("Failed to read supported ids\n");
7141 				return -EINVAL;
7142 			}
7143 
7144 			if (id == plat_priv->device_id) {
7145 				plat_priv->dev_node = child;
7146 				cnss_pr_dbg("got node[%s@%d] for device[0x%x]\n",
7147 					    child->name, i, id);
7148 				return 0;
7149 			}
7150 		}
7151 	}
7152 
7153 	return -EINVAL;
7154 }
7155 
7156 #ifdef CONFIG_CNSS2_CONDITIONAL_POWEROFF
7157 static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
7158 {
7159 	bool suspend_pwroff;
7160 
7161 	switch (pci_dev->device) {
7162 	case QCA6390_DEVICE_ID:
7163 	case QCA6490_DEVICE_ID:
7164 		suspend_pwroff = false;
7165 		break;
7166 	default:
7167 		suspend_pwroff = true;
7168 	}
7169 
7170 	return suspend_pwroff;
7171 }
7172 #else
7173 static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
7174 {
7175 	return true;
7176 }
7177 #endif
7178 
7179 static int cnss_pci_set_gen2_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
7180 {
7181 	int ret;
7182 
7183 	/* Always set initial target PCIe link speed to Gen2 for QCA6490 device
7184 	 * since there may be link issues if it boots up with Gen3 link speed.
7185 	 * Device is able to change it later at any time. It will be rejected
7186 	 * if requested speed is higher than the one specified in PCIe DT.
7187 	 */
7188 	ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
7189 					  PCI_EXP_LNKSTA_CLS_5_0GB);
7190 	if (ret && ret != -EPROBE_DEFER)
7191 		cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen2, err = %d\n",
7192 				rc_num, ret);
7193 
7194 	return ret;
7195 }
7196 
7197 #ifdef CONFIG_CNSS2_ENUM_WITH_LOW_SPEED
7198 static void
7199 cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
7200 {
7201 	int ret;
7202 
7203 	ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
7204 					  PCI_EXP_LNKSTA_CLS_2_5GB);
7205 	if (ret)
7206 		cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen1, err = %d\n",
7207 			     rc_num, ret);
7208 }
7209 
7210 static void
7211 cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
7212 {
7213 	int ret;
7214 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
7215 
7216 	/* if not Genoa, do not restore rc speed */
7217 	if (pci_priv->device_id == QCA6490_DEVICE_ID) {
7218 		cnss_pci_set_gen2_speed(plat_priv, plat_priv->rc_num);
7219 	} else if (pci_priv->device_id != QCN7605_DEVICE_ID) {
7220 		/* The request 0 will reset maximum GEN speed to default */
7221 		ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num, 0);
7222 		if (ret)
7223 			cnss_pr_err("Failed to reset max PCIe RC%x link speed to default, err = %d\n",
7224 				     plat_priv->rc_num, ret);
7225 	}
7226 }
7227 
7228 static void
7229 cnss_pci_link_retrain_trigger(struct cnss_pci_data *pci_priv)
7230 {
7231 	int ret;
7232 
7233 	/* suspend/resume will trigger retain to re-establish link speed */
7234 	ret = cnss_suspend_pci_link(pci_priv);
7235 	if (ret)
7236 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
7237 
7238 	ret = cnss_resume_pci_link(pci_priv);
7239 	if (ret)
7240 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
7241 
7242 	cnss_pci_get_link_status(pci_priv);
7243 }
7244 #else
7245 static void
7246 cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
7247 {
7248 }
7249 
7250 static void
7251 cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
7252 {
7253 }
7254 
7255 static void
7256 cnss_pci_link_retrain_trigger(struct cnss_pci_data *pci_priv)
7257 {
7258 }
7259 #endif
7260 
7261 static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev)
7262 {
7263 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
7264 	int rc_num = pci_dev->bus->domain_nr;
7265 	struct cnss_plat_data *plat_priv;
7266 	int ret = 0;
7267 	bool suspend_pwroff = cnss_should_suspend_pwroff(pci_dev);
7268 
7269 	plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);
7270 
7271 	if (suspend_pwroff) {
7272 		ret = cnss_suspend_pci_link(pci_priv);
7273 		if (ret)
7274 			cnss_pr_err("Failed to suspend PCI link, err = %d\n",
7275 				    ret);
7276 		cnss_power_off_device(plat_priv);
7277 	} else {
7278 		cnss_pr_dbg("bus suspend and dev power off disabled for device [0x%x]\n",
7279 			    pci_dev->device);
7280 		cnss_pci_link_retrain_trigger(pci_priv);
7281 	}
7282 }
7283 
7284 static int cnss_pci_probe(struct pci_dev *pci_dev,
7285 			  const struct pci_device_id *id)
7286 {
7287 	int ret = 0;
7288 	struct cnss_pci_data *pci_priv;
7289 	struct device *dev = &pci_dev->dev;
7290 	int rc_num = pci_dev->bus->domain_nr;
7291 	struct cnss_plat_data *plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);
7292 
7293 	cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x rc_num %d\n",
7294 		    id->vendor, pci_dev->device, rc_num);
7295 	if (!plat_priv) {
7296 		cnss_pr_err("Find match plat_priv with rc number failure\n");
7297 		ret = -ENODEV;
7298 		goto out;
7299 	}
7300 
7301 	pci_priv = devm_kzalloc(dev, sizeof(*pci_priv), GFP_KERNEL);
7302 	if (!pci_priv) {
7303 		ret = -ENOMEM;
7304 		goto out;
7305 	}
7306 
7307 	pci_priv->pci_link_state = PCI_LINK_UP;
7308 	pci_priv->plat_priv = plat_priv;
7309 	pci_priv->pci_dev = pci_dev;
7310 	pci_priv->pci_device_id = id;
7311 	pci_priv->device_id = pci_dev->device;
7312 	cnss_set_pci_priv(pci_dev, pci_priv);
7313 	plat_priv->device_id = pci_dev->device;
7314 	plat_priv->bus_priv = pci_priv;
7315 	mutex_init(&pci_priv->bus_lock);
7316 	if (plat_priv->use_pm_domain)
7317 		dev->pm_domain = &cnss_pm_domain;
7318 
7319 	cnss_pci_restore_rc_speed(pci_priv);
7320 
7321 	ret = cnss_pci_get_dev_cfg_node(plat_priv);
7322 	if (ret) {
7323 		cnss_pr_err("Failed to get device cfg node, err = %d\n", ret);
7324 		goto reset_ctx;
7325 	}
7326 
7327 	cnss_get_sleep_clk_supported(plat_priv);
7328 
7329 	ret = cnss_dev_specific_power_on(plat_priv);
7330 	if (ret < 0)
7331 		goto reset_ctx;
7332 
7333 	cnss_pci_of_reserved_mem_device_init(pci_priv);
7334 
7335 	ret = cnss_register_subsys(plat_priv);
7336 	if (ret)
7337 		goto reset_ctx;
7338 
7339 	ret = cnss_register_ramdump(plat_priv);
7340 	if (ret)
7341 		goto unregister_subsys;
7342 
7343 	ret = cnss_pci_init_smmu(pci_priv);
7344 	if (ret)
7345 		goto unregister_ramdump;
7346 
7347 	/* update drv support flag */
7348 	cnss_pci_update_drv_supported(pci_priv);
7349 
7350 	cnss_update_supported_link_info(pci_priv);
7351 
7352 	init_completion(&pci_priv->wake_event_complete);
7353 
7354 	ret = cnss_reg_pci_event(pci_priv);
7355 	if (ret) {
7356 		cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
7357 		goto deinit_smmu;
7358 	}
7359 
7360 	ret = cnss_pci_enable_bus(pci_priv);
7361 	if (ret)
7362 		goto dereg_pci_event;
7363 
7364 	ret = cnss_pci_enable_msi(pci_priv);
7365 	if (ret)
7366 		goto disable_bus;
7367 
7368 	ret = cnss_pci_register_mhi(pci_priv);
7369 	if (ret)
7370 		goto disable_msi;
7371 
7372 	switch (pci_dev->device) {
7373 	case QCA6174_DEVICE_ID:
7374 		pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET,
7375 				     &pci_priv->revision_id);
7376 		break;
7377 	case QCA6290_DEVICE_ID:
7378 	case QCA6390_DEVICE_ID:
7379 	case QCN7605_DEVICE_ID:
7380 	case QCA6490_DEVICE_ID:
7381 	case KIWI_DEVICE_ID:
7382 	case MANGO_DEVICE_ID:
7383 	case PEACH_DEVICE_ID:
7384 		if ((cnss_is_dual_wlan_enabled() &&
7385 		     plat_priv->enumerate_done) || !cnss_is_dual_wlan_enabled())
7386 			cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false,
7387 						    false);
7388 
7389 		timer_setup(&pci_priv->dev_rddm_timer,
7390 			    cnss_dev_rddm_timeout_hdlr, 0);
7391 		timer_setup(&pci_priv->boot_debug_timer,
7392 			    cnss_boot_debug_timeout_hdlr, 0);
7393 		INIT_DELAYED_WORK(&pci_priv->time_sync_work,
7394 				  cnss_pci_time_sync_work_hdlr);
7395 		cnss_pci_get_link_status(pci_priv);
7396 		cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, false);
7397 		cnss_pci_wake_gpio_init(pci_priv);
7398 		break;
7399 	default:
7400 		cnss_pr_err("Unknown PCI device found: 0x%x\n",
7401 			    pci_dev->device);
7402 		ret = -ENODEV;
7403 		goto unreg_mhi;
7404 	}
7405 
7406 	cnss_pci_config_regs(pci_priv);
7407 	if (EMULATION_HW)
7408 		goto out;
7409 	if (cnss_is_dual_wlan_enabled() && !plat_priv->enumerate_done)
7410 		goto probe_done;
7411 	cnss_pci_suspend_pwroff(pci_dev);
7412 
7413 probe_done:
7414 	set_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);
7415 
7416 	return 0;
7417 
7418 unreg_mhi:
7419 	cnss_pci_unregister_mhi(pci_priv);
7420 disable_msi:
7421 	cnss_pci_disable_msi(pci_priv);
7422 disable_bus:
7423 	cnss_pci_disable_bus(pci_priv);
7424 dereg_pci_event:
7425 	cnss_dereg_pci_event(pci_priv);
7426 deinit_smmu:
7427 	cnss_pci_deinit_smmu(pci_priv);
7428 unregister_ramdump:
7429 	cnss_unregister_ramdump(plat_priv);
7430 unregister_subsys:
7431 	cnss_unregister_subsys(plat_priv);
7432 reset_ctx:
7433 	plat_priv->bus_priv = NULL;
7434 out:
7435 	return ret;
7436 }
7437 
7438 static void cnss_pci_remove(struct pci_dev *pci_dev)
7439 {
7440 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
7441 	struct cnss_plat_data *plat_priv =
7442 		cnss_bus_dev_to_plat_priv(&pci_dev->dev);
7443 
7444 	clear_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);
7445 	cnss_pci_unregister_driver_hdlr(pci_priv);
7446 	cnss_pci_free_aux_mem(pci_priv);
7447 	cnss_pci_free_tme_lite_mem(pci_priv);
7448 	cnss_pci_free_m3_mem(pci_priv);
7449 	cnss_pci_free_fw_mem(pci_priv);
7450 	cnss_pci_free_qdss_mem(pci_priv);
7451 
7452 	switch (pci_dev->device) {
7453 	case QCA6290_DEVICE_ID:
7454 	case QCA6390_DEVICE_ID:
7455 	case QCN7605_DEVICE_ID:
7456 	case QCA6490_DEVICE_ID:
7457 	case KIWI_DEVICE_ID:
7458 	case MANGO_DEVICE_ID:
7459 	case PEACH_DEVICE_ID:
7460 		cnss_pci_wake_gpio_deinit(pci_priv);
7461 		del_timer(&pci_priv->boot_debug_timer);
7462 		del_timer(&pci_priv->dev_rddm_timer);
7463 		break;
7464 	default:
7465 		break;
7466 	}
7467 
7468 	cnss_pci_unregister_mhi(pci_priv);
7469 	cnss_pci_disable_msi(pci_priv);
7470 	cnss_pci_disable_bus(pci_priv);
7471 	cnss_dereg_pci_event(pci_priv);
7472 	cnss_pci_deinit_smmu(pci_priv);
7473 	if (plat_priv) {
7474 		cnss_unregister_ramdump(plat_priv);
7475 		cnss_unregister_subsys(plat_priv);
7476 		plat_priv->bus_priv = NULL;
7477 	} else {
7478 		cnss_pr_err("Plat_priv is null, Unable to unregister ramdump,subsys\n");
7479 	}
7480 }
7481 
7482 static const struct pci_device_id cnss_pci_id_table[] = {
7483 	{ QCA6174_VENDOR_ID, QCA6174_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7484 	{ QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7485 	{ QCA6390_VENDOR_ID, QCA6390_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7486 	{ QCN7605_VENDOR_ID, QCN7605_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7487 	{ QCA6490_VENDOR_ID, QCA6490_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7488 	{ KIWI_VENDOR_ID, KIWI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7489 	{ MANGO_VENDOR_ID, MANGO_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7490 	{ PEACH_VENDOR_ID, PEACH_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7491 	{ 0 }
7492 };
7493 MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);
7494 
7495 static const struct dev_pm_ops cnss_pm_ops = {
7496 	SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
7497 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
7498 				      cnss_pci_resume_noirq)
7499 	SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend, cnss_pci_runtime_resume,
7500 			   cnss_pci_runtime_idle)
7501 };
7502 
7503 static struct pci_driver cnss_pci_driver = {
7504 	.name     = "cnss_pci",
7505 	.id_table = cnss_pci_id_table,
7506 	.probe    = cnss_pci_probe,
7507 	.remove   = cnss_pci_remove,
7508 	.driver = {
7509 		.pm = &cnss_pm_ops,
7510 	},
7511 };
7512 
7513 static int cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
7514 {
7515 	int ret, retry = 0;
7516 
7517 	if (plat_priv->device_id == QCA6490_DEVICE_ID) {
7518 		cnss_pci_set_gen2_speed(plat_priv, rc_num);
7519 	} else {
7520 		cnss_pci_downgrade_rc_speed(plat_priv, rc_num);
7521 	}
7522 
7523 	cnss_pr_dbg("Trying to enumerate with PCIe RC%x\n", rc_num);
7524 retry:
7525 	ret = _cnss_pci_enumerate(plat_priv, rc_num);
7526 	if (ret) {
7527 		if (ret == -EPROBE_DEFER) {
7528 			cnss_pr_dbg("PCIe RC driver is not ready, defer probe\n");
7529 			goto out;
7530 		}
7531 		cnss_pr_err("Failed to enable PCIe RC%x, err = %d\n",
7532 			    rc_num, ret);
7533 		if (retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
7534 			cnss_pr_dbg("Retry PCI link training #%d\n", retry);
7535 			goto retry;
7536 		} else {
7537 			goto out;
7538 		}
7539 	}
7540 
7541 	plat_priv->rc_num = rc_num;
7542 
7543 out:
7544 	return ret;
7545 }
7546 
7547 int cnss_pci_init(struct cnss_plat_data *plat_priv)
7548 {
7549 	struct device *dev = &plat_priv->plat_dev->dev;
7550 	const __be32 *prop;
7551 	int ret = 0, prop_len = 0, rc_count, i;
7552 
7553 	prop = of_get_property(dev->of_node, "qcom,wlan-rc-num", &prop_len);
7554 	if (!prop || !prop_len) {
7555 		cnss_pr_err("Failed to get PCIe RC number from DT\n");
7556 		goto out;
7557 	}
7558 
7559 	rc_count = prop_len / sizeof(__be32);
7560 	for (i = 0; i < rc_count; i++) {
7561 		ret = cnss_pci_enumerate(plat_priv, be32_to_cpup(&prop[i]));
7562 		if (!ret)
7563 			break;
7564 		else if (ret == -EPROBE_DEFER || (ret && i == rc_count - 1))
7565 			goto out;
7566 	}
7567 
7568 	ret = cnss_try_suspend(plat_priv);
7569 	if (ret) {
7570 		cnss_pr_err("Failed to suspend, ret: %d\n", ret);
7571 		goto out;
7572 	}
7573 
7574 	if (!cnss_driver_registered) {
7575 		ret = pci_register_driver(&cnss_pci_driver);
7576 		if (ret) {
7577 			cnss_pr_err("Failed to register to PCI framework, err = %d\n",
7578 				    ret);
7579 			goto out;
7580 		}
7581 		if (!plat_priv->bus_priv) {
7582 			cnss_pr_err("Failed to probe PCI driver\n");
7583 			ret = -ENODEV;
7584 			goto unreg_pci;
7585 		}
7586 		cnss_driver_registered = true;
7587 	}
7588 
7589 	return 0;
7590 
7591 unreg_pci:
7592 	pci_unregister_driver(&cnss_pci_driver);
7593 out:
7594 	return ret;
7595 }
7596 
7597 void cnss_pci_deinit(struct cnss_plat_data *plat_priv)
7598 {
7599 	if (cnss_driver_registered) {
7600 		pci_unregister_driver(&cnss_pci_driver);
7601 		cnss_driver_registered = false;
7602 	}
7603 }
7604