xref: /wlan-dirver/platform/cnss2/pci.c (revision f0a7661f004dab1dd7b548f9cdea21fc45b07d09)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/completion.h>
8 #include <linux/io.h>
9 #include <linux/irq.h>
10 #include <linux/memblock.h>
11 #include <linux/module.h>
12 #include <linux/msi.h>
13 #include <linux/of.h>
14 #include <linux/of_gpio.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/suspend.h>
17 #include <linux/version.h>
18 #include <linux/sched.h>
19 
20 #include "main.h"
21 #include "bus.h"
22 #include "debug.h"
23 #include "pci.h"
24 #include "pci_platform.h"
25 #include "reg.h"
26 
27 #define PCI_LINK_UP			1
28 #define PCI_LINK_DOWN			0
29 
30 #define SAVE_PCI_CONFIG_SPACE		1
31 #define RESTORE_PCI_CONFIG_SPACE	0
32 
33 #define PCI_BAR_NUM			0
34 #define PCI_INVALID_READ(val)		((val) == U32_MAX)
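/* A PCIe read returning all ones (U32_MAX) typically means the endpoint did
 * not respond, e.g. because the link is down.
 */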
35 
36 #define PCI_DMA_MASK_32_BIT		DMA_BIT_MASK(32)
37 #define PCI_DMA_MASK_36_BIT		DMA_BIT_MASK(36)
38 #define PCI_DMA_MASK_64_BIT		DMA_BIT_MASK(64)
39 
40 #define MHI_NODE_NAME			"qcom,mhi"
41 #define MHI_MSI_NAME			"MHI"
42 
43 #define QCA6390_PATH_PREFIX		"qca6390/"
44 #define QCA6490_PATH_PREFIX		"qca6490/"
45 #define QCN7605_PATH_PREFIX		"qcn7605/"
46 #define KIWI_PATH_PREFIX		"kiwi/"
47 #define MANGO_PATH_PREFIX		"mango/"
48 #define PEACH_PATH_PREFIX		"peach/"
49 #define DEFAULT_PHY_M3_FILE_NAME	"m3.bin"
50 #define DEFAULT_AUX_FILE_NAME		"aux_ucode.elf"
51 #define DEFAULT_PHY_UCODE_FILE_NAME	"phy_ucode.elf"
52 #define TME_PATCH_FILE_NAME		"tmel_patch.elf"
53 #define PHY_UCODE_V2_FILE_NAME		"phy_ucode20.elf"
54 #define DEFAULT_FW_FILE_NAME		"amss.bin"
55 #define FW_V2_FILE_NAME			"amss20.bin"
56 #define FW_V2_FTM_FILE_NAME		"amss20_ftm.bin"
57 #define DEVICE_MAJOR_VERSION_MASK	0xF
58 
59 #define WAKE_MSI_NAME			"WAKE"
60 
61 #define DEV_RDDM_TIMEOUT		5000
62 #define WAKE_EVENT_TIMEOUT		5000
63 
64 #ifdef CONFIG_CNSS_EMULATION
65 #define EMULATION_HW			1
66 #else
67 #define EMULATION_HW			0
68 #endif
69 
70 #define RAMDUMP_SIZE_DEFAULT		0x420000
71 #define CNSS_256KB_SIZE			0x40000
72 #define DEVICE_RDDM_COOKIE		0xCAFECACE
73 
74 static bool cnss_driver_registered;
75 
76 static DEFINE_SPINLOCK(pci_link_down_lock);
77 static DEFINE_SPINLOCK(pci_reg_window_lock);
78 static DEFINE_SPINLOCK(time_sync_lock);
79 
80 #define MHI_TIMEOUT_OVERWRITE_MS	(plat_priv->ctrl_params.mhi_timeout)
81 #define MHI_M2_TIMEOUT_MS		(plat_priv->ctrl_params.mhi_m2_timeout)
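/* Note: these two macros expand to fields of a local plat_priv pointer, so
 * they are only usable in functions where plat_priv is in scope.
 */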
82 
83 #define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US	1000
84 #define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US	2000
85 
86 #define FORCE_WAKE_DELAY_MIN_US			4000
87 #define FORCE_WAKE_DELAY_MAX_US			6000
88 #define FORCE_WAKE_DELAY_TIMEOUT_US		60000
89 
90 #define REG_RETRY_MAX_TIMES		3
91 
92 #define MHI_SUSPEND_RETRY_MAX_TIMES		3
93 #define MHI_SUSPEND_RETRY_DELAY_US		5000
94 
95 #define BOOT_DEBUG_TIMEOUT_MS			7000
96 
97 #define HANG_DATA_LENGTH		384
98 #define HST_HANG_DATA_OFFSET		((3 * 1024 * 1024) - HANG_DATA_LENGTH)
99 #define HSP_HANG_DATA_OFFSET		((2 * 1024 * 1024) - HANG_DATA_LENGTH)
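/* The hang event data occupies the last HANG_DATA_LENGTH bytes of a 3 MB
 * (HST) or 2 MB (HSP) region, hence the offsets are computed backwards from
 * those region sizes.
 */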
100 
101 #define AFC_SLOT_SIZE                   0x1000
102 #define AFC_MAX_SLOT                    2
103 #define AFC_MEM_SIZE                    (AFC_SLOT_SIZE * AFC_MAX_SLOT)
104 #define AFC_AUTH_STATUS_OFFSET          1
105 #define AFC_AUTH_SUCCESS                1
106 #define AFC_AUTH_ERROR                  0
107 
108 static const struct mhi_channel_config cnss_mhi_channels[] = {
109 	{
110 		.num = 0,
111 		.name = "LOOPBACK",
112 		.num_elements = 32,
113 		.event_ring = 1,
114 		.dir = DMA_TO_DEVICE,
115 		.ee_mask = 0x4,
116 		.pollcfg = 0,
117 		.doorbell = MHI_DB_BRST_DISABLE,
118 		.lpm_notify = false,
119 		.offload_channel = false,
120 		.doorbell_mode_switch = false,
121 		.auto_queue = false,
122 	},
123 	{
124 		.num = 1,
125 		.name = "LOOPBACK",
126 		.num_elements = 32,
127 		.event_ring = 1,
128 		.dir = DMA_FROM_DEVICE,
129 		.ee_mask = 0x4,
130 		.pollcfg = 0,
131 		.doorbell = MHI_DB_BRST_DISABLE,
132 		.lpm_notify = false,
133 		.offload_channel = false,
134 		.doorbell_mode_switch = false,
135 		.auto_queue = false,
136 	},
137 	{
138 		.num = 4,
139 		.name = "DIAG",
140 		.num_elements = 64,
141 		.event_ring = 1,
142 		.dir = DMA_TO_DEVICE,
143 		.ee_mask = 0x4,
144 		.pollcfg = 0,
145 		.doorbell = MHI_DB_BRST_DISABLE,
146 		.lpm_notify = false,
147 		.offload_channel = false,
148 		.doorbell_mode_switch = false,
149 		.auto_queue = false,
150 	},
151 	{
152 		.num = 5,
153 		.name = "DIAG",
154 		.num_elements = 64,
155 		.event_ring = 1,
156 		.dir = DMA_FROM_DEVICE,
157 		.ee_mask = 0x4,
158 		.pollcfg = 0,
159 		.doorbell = MHI_DB_BRST_DISABLE,
160 		.lpm_notify = false,
161 		.offload_channel = false,
162 		.doorbell_mode_switch = false,
163 		.auto_queue = false,
164 	},
165 	{
166 		.num = 20,
167 		.name = "IPCR",
168 		.num_elements = 64,
169 		.event_ring = 1,
170 		.dir = DMA_TO_DEVICE,
171 		.ee_mask = 0x4,
172 		.pollcfg = 0,
173 		.doorbell = MHI_DB_BRST_DISABLE,
174 		.lpm_notify = false,
175 		.offload_channel = false,
176 		.doorbell_mode_switch = false,
177 		.auto_queue = false,
178 	},
179 	{
180 		.num = 21,
181 		.name = "IPCR",
182 		.num_elements = 64,
183 		.event_ring = 1,
184 		.dir = DMA_FROM_DEVICE,
185 		.ee_mask = 0x4,
186 		.pollcfg = 0,
187 		.doorbell = MHI_DB_BRST_DISABLE,
188 		.lpm_notify = false,
189 		.offload_channel = false,
190 		.doorbell_mode_switch = false,
191 		.auto_queue = true,
192 	},
193 /* All MHI satellite channel configs must be at the end of this array */
194 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
195 	{
196 		.num = 50,
197 		.name = "ADSP_0",
198 		.num_elements = 64,
199 		.event_ring = 3,
200 		.dir = DMA_BIDIRECTIONAL,
201 		.ee_mask = 0x4,
202 		.pollcfg = 0,
203 		.doorbell = MHI_DB_BRST_DISABLE,
204 		.lpm_notify = false,
205 		.offload_channel = true,
206 		.doorbell_mode_switch = false,
207 		.auto_queue = false,
208 	},
209 	{
210 		.num = 51,
211 		.name = "ADSP_1",
212 		.num_elements = 64,
213 		.event_ring = 3,
214 		.dir = DMA_BIDIRECTIONAL,
215 		.ee_mask = 0x4,
216 		.pollcfg = 0,
217 		.doorbell = MHI_DB_BRST_DISABLE,
218 		.lpm_notify = false,
219 		.offload_channel = true,
220 		.doorbell_mode_switch = false,
221 		.auto_queue = false,
222 	},
223 	{
224 		.num = 70,
225 		.name = "ADSP_2",
226 		.num_elements = 64,
227 		.event_ring = 3,
228 		.dir = DMA_BIDIRECTIONAL,
229 		.ee_mask = 0x4,
230 		.pollcfg = 0,
231 		.doorbell = MHI_DB_BRST_DISABLE,
232 		.lpm_notify = false,
233 		.offload_channel = true,
234 		.doorbell_mode_switch = false,
235 		.auto_queue = false,
236 	},
237 	{
238 		.num = 71,
239 		.name = "ADSP_3",
240 		.num_elements = 64,
241 		.event_ring = 3,
242 		.dir = DMA_BIDIRECTIONAL,
243 		.ee_mask = 0x4,
244 		.pollcfg = 0,
245 		.doorbell = MHI_DB_BRST_DISABLE,
246 		.lpm_notify = false,
247 		.offload_channel = true,
248 		.doorbell_mode_switch = false,
249 		.auto_queue = false,
250 	},
251 #endif
252 };
253 
254 static const struct mhi_channel_config cnss_mhi_channels_genoa[] = {
255 	{
256 		.num = 0,
257 		.name = "LOOPBACK",
258 		.num_elements = 32,
259 		.event_ring = 1,
260 		.dir = DMA_TO_DEVICE,
261 		.ee_mask = 0x4,
262 		.pollcfg = 0,
263 		.doorbell = MHI_DB_BRST_DISABLE,
264 		.lpm_notify = false,
265 		.offload_channel = false,
266 		.doorbell_mode_switch = false,
267 		.auto_queue = false,
268 	},
269 	{
270 		.num = 1,
271 		.name = "LOOPBACK",
272 		.num_elements = 32,
273 		.event_ring = 1,
274 		.dir = DMA_FROM_DEVICE,
275 		.ee_mask = 0x4,
276 		.pollcfg = 0,
277 		.doorbell = MHI_DB_BRST_DISABLE,
278 		.lpm_notify = false,
279 		.offload_channel = false,
280 		.doorbell_mode_switch = false,
281 		.auto_queue = false,
282 	},
283 	{
284 		.num = 4,
285 		.name = "DIAG",
286 		.num_elements = 64,
287 		.event_ring = 1,
288 		.dir = DMA_TO_DEVICE,
289 		.ee_mask = 0x4,
290 		.pollcfg = 0,
291 		.doorbell = MHI_DB_BRST_DISABLE,
292 		.lpm_notify = false,
293 		.offload_channel = false,
294 		.doorbell_mode_switch = false,
295 		.auto_queue = false,
296 	},
297 	{
298 		.num = 5,
299 		.name = "DIAG",
300 		.num_elements = 64,
301 		.event_ring = 1,
302 		.dir = DMA_FROM_DEVICE,
303 		.ee_mask = 0x4,
304 		.pollcfg = 0,
305 		.doorbell = MHI_DB_BRST_DISABLE,
306 		.lpm_notify = false,
307 		.offload_channel = false,
308 		.doorbell_mode_switch = false,
309 		.auto_queue = false,
310 	},
311 	{
312 		.num = 16,
313 		.name = "IPCR",
314 		.num_elements = 64,
315 		.event_ring = 1,
316 		.dir = DMA_TO_DEVICE,
317 		.ee_mask = 0x4,
318 		.pollcfg = 0,
319 		.doorbell = MHI_DB_BRST_DISABLE,
320 		.lpm_notify = false,
321 		.offload_channel = false,
322 		.doorbell_mode_switch = false,
323 		.auto_queue = false,
324 	},
325 	{
326 		.num = 17,
327 		.name = "IPCR",
328 		.num_elements = 64,
329 		.event_ring = 1,
330 		.dir = DMA_FROM_DEVICE,
331 		.ee_mask = 0x4,
332 		.pollcfg = 0,
333 		.doorbell = MHI_DB_BRST_DISABLE,
334 		.lpm_notify = false,
335 		.offload_channel = false,
336 		.doorbell_mode_switch = false,
337 		.auto_queue = true,
338 	},
339 };
340 
341 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0))
342 static struct mhi_event_config cnss_mhi_events[] = {
343 #else
344 static const struct mhi_event_config cnss_mhi_events[] = {
345 #endif
346 	{
347 		.num_elements = 32,
348 		.irq_moderation_ms = 0,
349 		.irq = 1,
350 		.mode = MHI_DB_BRST_DISABLE,
351 		.data_type = MHI_ER_CTRL,
352 		.priority = 0,
353 		.hardware_event = false,
354 		.client_managed = false,
355 		.offload_channel = false,
356 	},
357 	{
358 		.num_elements = 256,
359 		.irq_moderation_ms = 0,
360 		.irq = 2,
361 		.mode = MHI_DB_BRST_DISABLE,
362 		.priority = 1,
363 		.hardware_event = false,
364 		.client_managed = false,
365 		.offload_channel = false,
366 	},
367 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
368 	{
369 		.num_elements = 32,
370 		.irq_moderation_ms = 0,
371 		.irq = 1,
372 		.mode = MHI_DB_BRST_DISABLE,
373 		.data_type = MHI_ER_BW_SCALE,
374 		.priority = 2,
375 		.hardware_event = false,
376 		.client_managed = false,
377 		.offload_channel = false,
378 	},
379 #endif
380 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
381 	{
382 		.num_elements = 256,
383 		.irq_moderation_ms = 0,
384 		.irq = 2,
385 		.mode = MHI_DB_BRST_DISABLE,
386 		.data_type = MHI_ER_DATA,
387 		.priority = 1,
388 		.hardware_event = false,
389 		.client_managed = true,
390 		.offload_channel = true,
391 	},
392 #endif
393 };
394 
395 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
396 #define CNSS_MHI_SATELLITE_CH_CFG_COUNT 4
397 #define CNSS_MHI_SATELLITE_EVT_COUNT 1
398 #else
399 #define CNSS_MHI_SATELLITE_CH_CFG_COUNT 0
400 #define CNSS_MHI_SATELLITE_EVT_COUNT 0
401 #endif
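/* The satellite channel/event entries above sit at the tail of their arrays,
 * so the non-satellite configs below can simply shrink num_channels and
 * num_events by these counts.
 */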
402 
403 static const struct mhi_controller_config cnss_mhi_config_default = {
404 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
405 	.max_channels = 72,
406 #else
407 	.max_channels = 32,
408 #endif
409 	.timeout_ms = 10000,
410 	.use_bounce_buf = false,
411 	.buf_len = 0x8000,
412 	.num_channels = ARRAY_SIZE(cnss_mhi_channels),
413 	.ch_cfg = cnss_mhi_channels,
414 	.num_events = ARRAY_SIZE(cnss_mhi_events),
415 	.event_cfg = cnss_mhi_events,
416 	.m2_no_db = true,
417 };
418 
419 static const struct mhi_controller_config cnss_mhi_config_genoa = {
420 	.max_channels = 32,
421 	.timeout_ms = 10000,
422 	.use_bounce_buf = false,
423 	.buf_len = 0x8000,
424 	.num_channels = ARRAY_SIZE(cnss_mhi_channels_genoa),
425 	.ch_cfg = cnss_mhi_channels_genoa,
426 	.num_events = ARRAY_SIZE(cnss_mhi_events) -
427 		CNSS_MHI_SATELLITE_EVT_COUNT,
428 	.event_cfg = cnss_mhi_events,
429 	.m2_no_db = true,
430 	.bhie_offset = 0x0324,
431 };
432 
433 static const struct mhi_controller_config cnss_mhi_config_no_satellite = {
434 	.max_channels = 32,
435 	.timeout_ms = 10000,
436 	.use_bounce_buf = false,
437 	.buf_len = 0x8000,
438 	.num_channels = ARRAY_SIZE(cnss_mhi_channels) -
439 			CNSS_MHI_SATELLITE_CH_CFG_COUNT,
440 	.ch_cfg = cnss_mhi_channels,
441 	.num_events = ARRAY_SIZE(cnss_mhi_events) -
442 			CNSS_MHI_SATELLITE_EVT_COUNT,
443 	.event_cfg = cnss_mhi_events,
444 	.m2_no_db = true,
445 };
446 
447 static struct cnss_pci_reg ce_src[] = {
448 	{ "SRC_RING_BASE_LSB", CE_SRC_RING_BASE_LSB_OFFSET },
449 	{ "SRC_RING_BASE_MSB", CE_SRC_RING_BASE_MSB_OFFSET },
450 	{ "SRC_RING_ID", CE_SRC_RING_ID_OFFSET },
451 	{ "SRC_RING_MISC", CE_SRC_RING_MISC_OFFSET },
452 	{ "SRC_CTRL", CE_SRC_CTRL_OFFSET },
453 	{ "SRC_R0_CE_CH_SRC_IS", CE_SRC_R0_CE_CH_SRC_IS_OFFSET },
454 	{ "SRC_RING_HP", CE_SRC_RING_HP_OFFSET },
455 	{ "SRC_RING_TP", CE_SRC_RING_TP_OFFSET },
456 	{ NULL },
457 };
458 
459 static struct cnss_pci_reg ce_dst[] = {
460 	{ "DEST_RING_BASE_LSB", CE_DEST_RING_BASE_LSB_OFFSET },
461 	{ "DEST_RING_BASE_MSB", CE_DEST_RING_BASE_MSB_OFFSET },
462 	{ "DEST_RING_ID", CE_DEST_RING_ID_OFFSET },
463 	{ "DEST_RING_MISC", CE_DEST_RING_MISC_OFFSET },
464 	{ "DEST_CTRL", CE_DEST_CTRL_OFFSET },
465 	{ "CE_CH_DST_IS", CE_CH_DST_IS_OFFSET },
466 	{ "CE_CH_DEST_CTRL2", CE_CH_DEST_CTRL2_OFFSET },
467 	{ "DEST_RING_HP", CE_DEST_RING_HP_OFFSET },
468 	{ "DEST_RING_TP", CE_DEST_RING_TP_OFFSET },
469 	{ "STATUS_RING_BASE_LSB", CE_STATUS_RING_BASE_LSB_OFFSET },
470 	{ "STATUS_RING_BASE_MSB", CE_STATUS_RING_BASE_MSB_OFFSET },
471 	{ "STATUS_RING_ID", CE_STATUS_RING_ID_OFFSET },
472 	{ "STATUS_RING_MISC", CE_STATUS_RING_MISC_OFFSET },
473 	{ "STATUS_RING_HP", CE_STATUS_RING_HP_OFFSET },
474 	{ "STATUS_RING_TP", CE_STATUS_RING_TP_OFFSET },
475 	{ NULL },
476 };
477 
478 static struct cnss_pci_reg ce_cmn[] = {
479 	{ "GXI_ERR_INTS", CE_COMMON_GXI_ERR_INTS },
480 	{ "GXI_ERR_STATS", CE_COMMON_GXI_ERR_STATS },
481 	{ "GXI_WDOG_STATUS", CE_COMMON_GXI_WDOG_STATUS },
482 	{ "TARGET_IE_0", CE_COMMON_TARGET_IE_0 },
483 	{ "TARGET_IE_1", CE_COMMON_TARGET_IE_1 },
484 	{ NULL },
485 };
486 
487 static struct cnss_pci_reg qdss_csr[] = {
488 	{ "QDSSCSR_ETRIRQCTRL", QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET },
489 	{ "QDSSCSR_PRESERVEETF", QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET },
490 	{ "QDSSCSR_PRESERVEETR0", QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET },
491 	{ "QDSSCSR_PRESERVEETR1", QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET },
492 	{ NULL },
493 };
494 
495 static struct cnss_pci_reg pci_scratch[] = {
496 	{ "PCIE_SCRATCH_0", PCIE_SCRATCH_0_SOC_PCIE_REG },
497 	{ "PCIE_SCRATCH_1", PCIE_SCRATCH_1_SOC_PCIE_REG },
498 	{ "PCIE_SCRATCH_2", PCIE_SCRATCH_2_SOC_PCIE_REG },
499 	{ NULL },
500 };
501 
502 /* First field of each entry is the device bit mask (see enum
503  * cnss_pci_reg_mask); the second field selects write (1) or read (0).
504  */
505 static struct cnss_misc_reg wcss_reg_access_seq[] = {
506 	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
507 	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x802},
508 	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
509 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_PLL_MODE, 0},
510 	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x805},
511 	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
512 	{1, 0, QCA6390_WCSS_WFSS_PMM_WFSS_PMM_R0_PMM_CTRL, 0},
513 	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_CX_CSR, 0},
514 	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_RAW_STAT, 0},
515 	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_EN, 0},
516 	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_STS, 0},
517 	{1, 1, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_CTL, 0xD},
518 	{1, 0, QCA6390_WCSS_PMM_TOP_TESTBUS_STS, 0},
519 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
520 	{1, 1, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
521 	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x8},
522 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
523 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_STS, 0},
524 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_CTL, 0},
525 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_0, 0},
526 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_9, 0},
527 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS0, 0},
528 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS1, 0},
529 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS2, 0},
530 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS3, 0},
531 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS4, 0},
532 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS5, 0},
533 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS6, 0},
534 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE0, 0},
535 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE1, 0},
536 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE2, 0},
537 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE3, 0},
538 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE4, 0},
539 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE5, 0},
540 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE6, 0},
541 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING0, 0},
542 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING1, 0},
543 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING2, 0},
544 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING3, 0},
545 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING4, 0},
546 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING5, 0},
547 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING6, 0},
548 	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30040},
549 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
550 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
551 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
552 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
553 	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30105},
554 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
555 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
556 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
557 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
558 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
559 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
560 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
561 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
562 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
563 	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_NOC_CBCR, 0},
564 	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_AHB_CBCR, 0},
565 	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_GDSCR, 0},
566 	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN1_GDSCR, 0},
567 	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN2_GDSCR, 0},
568 	{1, 0, QCA6390_WCSS_PMM_TOP_PMM_INT_CLR, 0},
569 	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_STICKY_EN, 0},
570 };
571 
572 static struct cnss_misc_reg pcie_reg_access_seq[] = {
573 	{1, 0, QCA6390_PCIE_PCIE_WCSS_STATUS_FOR_DEBUG_LOW_PCIE_LOCAL_REG, 0},
574 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
575 	{1, 1, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0x18},
576 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
577 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
578 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_STATUS_SOC_PCIE_REG, 0},
579 	{1, 0, QCA6390_PCIE_SOC_COMMIT_REPLAY_SOC_PCIE_REG, 0},
580 	{1, 0, QCA6390_TLMM_GPIO_IN_OUT57, 0},
581 	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG57, 0},
582 	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS57, 0},
583 	{1, 0, QCA6390_TLMM_GPIO_IN_OUT59, 0},
584 	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG59, 0},
585 	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS59, 0},
586 	{1, 0, QCA6390_PCIE_PCIE_PARF_LTSSM, 0},
587 	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS, 0},
588 	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS_1, 0},
589 	{1, 0, QCA6390_PCIE_PCIE_PARF_INT_STATUS, 0},
590 	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_STATUS, 0},
591 	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_MASK, 0},
592 	{1, 0, QCA6390_PCIE_PCIE_PARF_BDF_TO_SID_CFG, 0},
593 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
594 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_3, 0},
595 	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_CLOCK_RESET_CTRL, 0},
596 	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_BASE_ADDR_LOWER, 0},
597 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS, 0},
598 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_CFG, 0},
599 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
600 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1SUB, 0},
601 	{1, 0, QCA6390_PCIE_PCIE_CORE_CONFIG, 0},
602 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
603 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L2, 0},
604 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1, 0},
605 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1, 0},
606 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
607 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_HIGH, 0},
608 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_LOW, 0},
609 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_HIGH, 0},
610 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_LOW, 0},
611 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_STATUS_REG2, 0},
612 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_STATUS_REG2, 0},
613 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN2_CFG_REG1, 0},
614 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN1_CFG_REG1, 0},
615 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_APS_STATUS_REG1, 0},
616 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_APS_STATUS_REG1, 0},
617 	{1, 0, QCA6390_PCIE_PCIE_BHI_EXECENV_REG, 0},
618 };
619 
620 static struct cnss_misc_reg wlaon_reg_access_seq[] = {
621 	{3, 0, WLAON_SOC_POWER_CTRL, 0},
622 	{3, 0, WLAON_SOC_PWR_WDG_BARK_THRSHD, 0},
623 	{3, 0, WLAON_SOC_PWR_WDG_BITE_THRSHD, 0},
624 	{3, 0, WLAON_SW_COLD_RESET, 0},
625 	{3, 0, WLAON_RFA_MEM_SLP_NRET_N_OVERRIDE, 0},
626 	{3, 0, WLAON_GDSC_DELAY_SETTING, 0},
627 	{3, 0, WLAON_GDSC_DELAY_SETTING2, 0},
628 	{3, 0, WLAON_WL_PWR_STATUS_REG, 0},
629 	{3, 0, WLAON_WL_AON_DBG_CFG_REG, 0},
630 	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP0_REG, 0},
631 	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP1_REG, 0},
632 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL0, 0},
633 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL1, 0},
634 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL2, 0},
635 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL3, 0},
636 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL4, 0},
637 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5, 0},
638 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5_1, 0},
639 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6, 0},
640 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6_1, 0},
641 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL7, 0},
642 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8, 0},
643 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8_1, 0},
644 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9, 0},
645 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9_1, 0},
646 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL10, 0},
647 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL11, 0},
648 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL12, 0},
649 	{2, 0, WLAON_WL_AON_APM_OVERRIDE_REG, 0},
650 	{2, 0, WLAON_WL_AON_CXPC_REG, 0},
651 	{2, 0, WLAON_WL_AON_APM_STATUS0, 0},
652 	{2, 0, WLAON_WL_AON_APM_STATUS1, 0},
653 	{2, 0, WLAON_WL_AON_APM_STATUS2, 0},
654 	{2, 0, WLAON_WL_AON_APM_STATUS3, 0},
655 	{2, 0, WLAON_WL_AON_APM_STATUS4, 0},
656 	{2, 0, WLAON_WL_AON_APM_STATUS5, 0},
657 	{2, 0, WLAON_WL_AON_APM_STATUS6, 0},
658 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL1, 0},
659 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL6, 0},
660 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL7, 0},
661 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL3, 0},
662 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL4, 0},
663 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL5, 0},
664 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL8, 0},
665 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL2, 0},
666 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL9, 0},
667 	{3, 0, WLAON_RTC_CLK_CAL_CTRL1, 0},
668 	{3, 0, WLAON_RTC_CLK_CAL_CTRL2, 0},
669 	{3, 0, WLAON_RTC_CLK_CAL_CTRL3, 0},
670 	{3, 0, WLAON_RTC_CLK_CAL_CTRL4, 0},
671 	{3, 0, WLAON_RTC_CLK_CAL_CTRL5, 0},
672 	{3, 0, WLAON_RTC_CLK_CAL_CTRL6, 0},
673 	{3, 0, WLAON_RTC_CLK_CAL_CTRL7, 0},
674 	{3, 0, WLAON_RTC_CLK_CAL_CTRL8, 0},
675 	{3, 0, WLAON_RTC_CLK_CAL_CTRL9, 0},
676 	{3, 0, WLAON_WCSSAON_CONFIG_REG, 0},
677 	{3, 0, WLAON_WLAN_OEM_DEBUG_REG, 0},
678 	{3, 0, WLAON_WLAN_RAM_DUMP_REG, 0},
679 	{3, 0, WLAON_QDSS_WCSS_REG, 0},
680 	{3, 0, WLAON_QDSS_WCSS_ACK, 0},
681 	{3, 0, WLAON_WL_CLK_CNTL_KDF_REG, 0},
682 	{3, 0, WLAON_WL_CLK_CNTL_PMU_HFRC_REG, 0},
683 	{3, 0, WLAON_QFPROM_PWR_CTRL_REG, 0},
684 	{3, 0, WLAON_DLY_CONFIG, 0},
685 	{3, 0, WLAON_WLAON_Q6_IRQ_REG, 0},
686 	{3, 0, WLAON_PCIE_INTF_SW_CFG_REG, 0},
687 	{3, 0, WLAON_PCIE_INTF_STICKY_SW_CFG_REG, 0},
688 	{3, 0, WLAON_PCIE_INTF_PHY_SW_CFG_REG, 0},
689 	{3, 0, WLAON_PCIE_INTF_PHY_NOCSR_SW_CFG_REG, 0},
690 	{3, 0, WLAON_Q6_COOKIE_BIT, 0},
691 	{3, 0, WLAON_WARM_SW_ENTRY, 0},
692 	{3, 0, WLAON_RESET_DBG_SW_ENTRY, 0},
693 	{3, 0, WLAON_WL_PMUNOC_CFG_REG, 0},
694 	{3, 0, WLAON_RESET_CAUSE_CFG_REG, 0},
695 	{3, 0, WLAON_SOC_WCSSAON_WAKEUP_IRQ_7_EN_REG, 0},
696 	{3, 0, WLAON_DEBUG, 0},
697 	{3, 0, WLAON_SOC_PARAMETERS, 0},
698 	{3, 0, WLAON_WLPM_SIGNAL, 0},
699 	{3, 0, WLAON_SOC_RESET_CAUSE_REG, 0},
700 	{3, 0, WLAON_WAKEUP_PCIE_SOC_REG, 0},
701 	{3, 0, WLAON_PBL_STACK_CANARY, 0},
702 	{3, 0, WLAON_MEM_TOT_NUM_GRP_REG, 0},
703 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP0_REG, 0},
704 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP1_REG, 0},
705 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP2_REG, 0},
706 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP3_REG, 0},
707 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP0_REG, 0},
708 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP1_REG, 0},
709 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP2_REG, 0},
710 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP3_REG, 0},
711 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP0_REG, 0},
712 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP1_REG, 0},
713 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP2_REG, 0},
714 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP3_REG, 0},
715 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP0_REG, 0},
716 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP1_REG, 0},
717 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP2_REG, 0},
718 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP3_REG, 0},
719 	{3, 0, WLAON_MEM_CNT_SEL_REG, 0},
720 	{3, 0, WLAON_MEM_NO_EXTBHS_REG, 0},
721 	{3, 0, WLAON_MEM_DEBUG_REG, 0},
722 	{3, 0, WLAON_MEM_DEBUG_BUS_REG, 0},
723 	{3, 0, WLAON_MEM_REDUN_CFG_REG, 0},
724 	{3, 0, WLAON_WL_AON_SPARE2, 0},
725 	{3, 0, WLAON_VSEL_CFG_FOR_WL_RET_DISABLE_REG, 0},
726 	{3, 0, WLAON_BTFM_WLAN_IPC_STATUS_REG, 0},
727 	{3, 0, WLAON_MPM_COUNTER_CHICKEN_BITS, 0},
728 	{3, 0, WLAON_WLPM_CHICKEN_BITS, 0},
729 	{3, 0, WLAON_PCIE_PHY_PWR_REG, 0},
730 	{3, 0, WLAON_WL_CLK_CNTL_PMU_LPO2M_REG, 0},
731 	{3, 0, WLAON_WL_SS_ROOT_CLK_SWITCH_REG, 0},
732 	{3, 0, WLAON_POWERCTRL_PMU_REG, 0},
733 	{3, 0, WLAON_POWERCTRL_MEM_REG, 0},
734 	{3, 0, WLAON_PCIE_PWR_CTRL_REG, 0},
735 	{3, 0, WLAON_SOC_PWR_PROFILE_REG, 0},
736 	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG, 0},
737 	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG, 0},
738 	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG, 0},
739 	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG, 0},
740 	{3, 0, WLAON_MEM_SVS_CFG_REG, 0},
741 	{3, 0, WLAON_CMN_AON_MISC_REG, 0},
742 	{3, 0, WLAON_INTR_STATUS, 0},
743 	{2, 0, WLAON_INTR_ENABLE, 0},
744 	{2, 0, WLAON_NOC_DBG_BUS_SEL_REG, 0},
745 	{2, 0, WLAON_NOC_DBG_BUS_REG, 0},
746 	{2, 0, WLAON_WL_CTRL_MISC_REG, 0},
747 	{2, 0, WLAON_DBG_STATUS0, 0},
748 	{2, 0, WLAON_DBG_STATUS1, 0},
749 	{2, 0, WLAON_TIMERSYNC_OFFSET_L, 0},
750 	{2, 0, WLAON_TIMERSYNC_OFFSET_H, 0},
751 	{2, 0, WLAON_PMU_LDO_SETTLE_REG, 0},
752 };
753 
754 static struct cnss_misc_reg syspm_reg_access_seq[] = {
755 	{1, 0, QCA6390_SYSPM_SYSPM_PWR_STATUS, 0},
756 	{1, 0, QCA6390_SYSPM_DBG_BTFM_AON_REG, 0},
757 	{1, 0, QCA6390_SYSPM_DBG_BUS_SEL_REG, 0},
758 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
759 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
760 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
761 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
762 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
763 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
764 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
765 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
766 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
767 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
768 };
769 
770 static struct cnss_print_optimize print_optimize;
771 
772 #define WCSS_REG_SIZE ARRAY_SIZE(wcss_reg_access_seq)
773 #define PCIE_REG_SIZE ARRAY_SIZE(pcie_reg_access_seq)
774 #define WLAON_REG_SIZE ARRAY_SIZE(wlaon_reg_access_seq)
775 #define SYSPM_REG_SIZE ARRAY_SIZE(syspm_reg_access_seq)
776 
777 static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv);
778 static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev);
779 static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev);
780 
781 
782 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
783 static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
784 {
785 	mhi_debug_reg_dump(pci_priv->mhi_ctrl);
786 }
787 
788 static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
789 {
790 	mhi_dump_sfr(pci_priv->mhi_ctrl);
791 }
792 
793 static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
794 				      u32 cookie)
795 {
796 	return mhi_scan_rddm_cookie(pci_priv->mhi_ctrl, cookie);
797 }
798 
799 static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
800 				    bool notify_clients)
801 {
802 	return mhi_pm_fast_suspend(pci_priv->mhi_ctrl, notify_clients);
803 }
804 
805 static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
806 				   bool notify_clients)
807 {
808 	return mhi_pm_fast_resume(pci_priv->mhi_ctrl, notify_clients);
809 }
810 
811 static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
812 				       u32 timeout)
813 {
814 	return mhi_set_m2_timeout_ms(pci_priv->mhi_ctrl, timeout);
815 }
816 
817 static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
818 					   int timeout_us, bool in_panic)
819 {
820 	return mhi_device_get_sync_atomic(pci_priv->mhi_ctrl->mhi_dev,
821 					  timeout_us, in_panic);
822 }
823 
824 #ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
825 static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv)
826 {
827 	return mhi_host_notify_db_disable_trace(pci_priv->mhi_ctrl);
828 }
829 #endif
830 
831 static void
832 cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
833 				    int (*cb)(struct mhi_controller *mhi_ctrl,
834 					      struct mhi_link_info *link_info))
835 {
836 	mhi_controller_set_bw_scale_cb(pci_priv->mhi_ctrl, cb);
837 }
838 
839 static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
840 {
841 	return mhi_force_reset(pci_priv->mhi_ctrl);
842 }
843 
844 void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
845 				  phys_addr_t base)
846 {
847 	return mhi_controller_set_base(pci_priv->mhi_ctrl, base);
848 }
849 #else
850 static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
851 {
852 }
853 
854 static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
855 {
856 }
857 
858 static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
859 				      u32 cookie)
860 {
861 	return false;
862 }
863 
864 static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
865 				    bool notify_clients)
866 {
867 	return -EOPNOTSUPP;
868 }
869 
870 static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
871 				   bool notify_clients)
872 {
873 	return -EOPNOTSUPP;
874 }
875 
876 static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
877 				       u32 timeout)
878 {
879 }
880 
881 static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
882 					   int timeout_us, bool in_panic)
883 {
884 	return -EOPNOTSUPP;
885 }
886 
887 #ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
888 static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv)
889 {
890 	return -EOPNOTSUPP;
891 }
892 #endif
893 
894 static void
895 cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
896 				    int (*cb)(struct mhi_controller *mhi_ctrl,
897 					      struct mhi_link_info *link_info))
898 {
899 }
900 
901 static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
902 {
903 	return -EOPNOTSUPP;
904 }
905 
906 void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
907 				  phys_addr_t base)
908 {
909 }
910 #endif /* CONFIG_MHI_BUS_MISC */
911 
912 #ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
913 #define CNSS_MHI_WAKE_TIMEOUT		500000
914 
915 static void cnss_record_smmu_fault_timestamp(struct cnss_pci_data *pci_priv,
916 					     enum cnss_smmu_fault_time id)
917 {
918 	if (id >= SMMU_CB_MAX)
919 		return;
920 
921 	pci_priv->smmu_fault_timestamp[id] = sched_clock();
922 }
923 
924 static void cnss_pci_smmu_fault_handler_irq(struct iommu_domain *domain,
925 					    void *handler_token)
926 {
927 	struct cnss_pci_data *pci_priv = handler_token;
928 	int ret = 0;
929 
930 	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_ENTRY);
931 	ret = cnss_mhi_device_get_sync_atomic(pci_priv,
932 					      CNSS_MHI_WAKE_TIMEOUT, true);
933 	if (ret < 0) {
934 		cnss_pr_err("Failed to bring mhi in M0 state, ret %d\n", ret);
935 		return;
936 	}
937 
938 	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_DOORBELL_RING);
939 	ret = cnss_mhi_host_notify_db_disable_trace(pci_priv);
940 	if (ret < 0)
941 		cnss_pr_err("Fail to notify wlan fw to stop trace collection, ret %d\n", ret);
942 
943 	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_EXIT);
944 }
945 
946 void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
947 {
948 	qcom_iommu_set_fault_handler_irq(pci_priv->iommu_domain,
949 					 cnss_pci_smmu_fault_handler_irq, pci_priv);
950 }
951 #else
952 void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
953 {
954 }
955 #endif
956 
957 int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv)
958 {
959 	u16 device_id;
960 
961 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
962 		cnss_pr_dbg("%ps: PCIe link is in suspend state\n",
963 			    (void *)_RET_IP_);
964 		return -EACCES;
965 	}
966 
967 	if (pci_priv->pci_link_down_ind) {
968 		cnss_pr_err("%ps: PCIe link is down\n", (void *)_RET_IP_);
969 		return -EIO;
970 	}
971 
972 	pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id);
973 	if (device_id != pci_priv->device_id) {
974 		cnss_fatal_err("%ps: PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
975 			       (void *)_RET_IP_, device_id,
976 			       pci_priv->device_id);
977 		return -EIO;
978 	}
979 
980 	return 0;
981 }
982 
983 static void cnss_pci_select_window(struct cnss_pci_data *pci_priv, u32 offset)
984 {
985 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
986 
987 	u32 window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
988 	u32 window_enable = WINDOW_ENABLE_BIT | window;
989 	u32 val;
990 
991 	if (plat_priv->device_id == PEACH_DEVICE_ID) {
992 		writel_relaxed(window_enable, pci_priv->bar +
993 			       PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
994 	} else {
995 		writel_relaxed(window_enable, pci_priv->bar +
996 			       QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
997 	}
998 
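	/* QCN7605 is expected to read back a different window-enable bit, so
	 * window_enable is adjusted after the write purely for the debug
	 * print and the read-back verification below.
	 */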
999 	if (plat_priv->device_id == QCN7605_DEVICE_ID)
1000 		window_enable = QCN7605_WINDOW_ENABLE_BIT | window;
1001 
1002 	if (window != pci_priv->remap_window) {
1003 		pci_priv->remap_window = window;
1004 		cnss_pr_dbg("Config PCIe remap window register to 0x%x\n",
1005 			    window_enable);
1006 	}
1007 
1008 	/* Read it back to make sure the write has taken effect */
1009 	if (plat_priv->device_id == PEACH_DEVICE_ID) {
1010 		val = readl_relaxed(pci_priv->bar +
1011 			PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
1012 	} else {
1013 		val = readl_relaxed(pci_priv->bar +
1014 			QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
1015 	}
1016 	if (val != window_enable) {
1017 		cnss_pr_err("Failed to config window register to 0x%x, current value: 0x%x\n",
1018 			    window_enable, val);
1019 		if (!cnss_pci_check_link_status(pci_priv) &&
1020 		    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
1021 			CNSS_ASSERT(0);
1022 	}
1023 }
1024 
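/* Register offsets at or beyond MAX_UNWINDOWED_ADDRESS are reached through
 * the remap window programmed above: cnss_pci_select_window() selects the
 * window holding the offset (offset >> WINDOW_SHIFT), after which the
 * register is accessed at WINDOW_START + (offset & WINDOW_RANGE_MASK).
 */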
1025 static int cnss_pci_reg_read(struct cnss_pci_data *pci_priv,
1026 			     u32 offset, u32 *val)
1027 {
1028 	int ret;
1029 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1030 
1031 	if (!in_interrupt() && !irqs_disabled()) {
1032 		ret = cnss_pci_check_link_status(pci_priv);
1033 		if (ret)
1034 			return ret;
1035 	}
1036 
1037 	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
1038 	    offset < MAX_UNWINDOWED_ADDRESS) {
1039 		*val = readl_relaxed(pci_priv->bar + offset);
1040 		return 0;
1041 	}
1042 
1043 	/* When in panic, the kernel panic handler is assumed to hold all other
1044 	 * threads and interrupts. Further, pci_reg_window_lock could already be
1045 	 * held before the panic, so only take the lock during normal operation.
1046 	 */
1047 	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
1048 		cnss_pci_select_window(pci_priv, offset);
1049 		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
1050 				     (offset & WINDOW_RANGE_MASK));
1051 	} else {
1052 		spin_lock_bh(&pci_reg_window_lock);
1053 		cnss_pci_select_window(pci_priv, offset);
1054 		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
1055 				     (offset & WINDOW_RANGE_MASK));
1056 		spin_unlock_bh(&pci_reg_window_lock);
1057 	}
1058 
1059 	return 0;
1060 }
1061 
1062 static int cnss_pci_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
1063 			      u32 val)
1064 {
1065 	int ret;
1066 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1067 
1068 	if (!in_interrupt() && !irqs_disabled()) {
1069 		ret = cnss_pci_check_link_status(pci_priv);
1070 		if (ret)
1071 			return ret;
1072 	}
1073 
1074 	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
1075 	    offset < MAX_UNWINDOWED_ADDRESS) {
1076 		writel_relaxed(val, pci_priv->bar + offset);
1077 		return 0;
1078 	}
1079 
1080 	/* Same constraint as PCI register read in panic */
1081 	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
1082 		cnss_pci_select_window(pci_priv, offset);
1083 		writel_relaxed(val, pci_priv->bar + WINDOW_START +
1084 			  (offset & WINDOW_RANGE_MASK));
1085 	} else {
1086 		spin_lock_bh(&pci_reg_window_lock);
1087 		cnss_pci_select_window(pci_priv, offset);
1088 		writel_relaxed(val, pci_priv->bar + WINDOW_START +
1089 			  (offset & WINDOW_RANGE_MASK));
1090 		spin_unlock_bh(&pci_reg_window_lock);
1091 	}
1092 
1093 	return 0;
1094 }
1095 
1096 static int cnss_pci_force_wake_get(struct cnss_pci_data *pci_priv)
1097 {
1098 	struct device *dev = &pci_priv->pci_dev->dev;
1099 	int ret;
1100 
1101 	ret = cnss_pci_force_wake_request_sync(dev,
1102 					       FORCE_WAKE_DELAY_TIMEOUT_US);
1103 	if (ret) {
1104 		if (ret != -EAGAIN)
1105 			cnss_pr_err("Failed to request force wake\n");
1106 		return ret;
1107 	}
1108 
1109 	/* If the device's M1 state-change event races here, it can be ignored,
1110 	 * as the device is expected to move immediately from M2 to M0
1111 	 * without entering a low power state.
1112 	 */
1113 	if (cnss_pci_is_device_awake(dev) != true)
1114 		cnss_pr_warn("MHI not in M0, while reg still accessible\n");
1115 
1116 	return 0;
1117 }
1118 
1119 static int cnss_pci_force_wake_put(struct cnss_pci_data *pci_priv)
1120 {
1121 	struct device *dev = &pci_priv->pci_dev->dev;
1122 	int ret;
1123 
1124 	ret = cnss_pci_force_wake_release(dev);
1125 	if (ret && ret != -EAGAIN)
1126 		cnss_pr_err("Failed to release force wake\n");
1127 
1128 	return ret;
1129 }
1130 
1131 #if IS_ENABLED(CONFIG_INTERCONNECT)
1132 /**
1133  * cnss_setup_bus_bandwidth() - Setup interconnect vote for given bandwidth
1134  * @plat_priv: Platform private data struct
1135  * @bw: index of the bandwidth level in the config table
1136  * @save: toggle flag to save bandwidth to current_bw_vote
1137  *
1138  * Set up bandwidth votes for all configured interconnect paths.
1139  *
1140  * Return: 0 for success
1141  */
1142 static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
1143 				    u32 bw, bool save)
1144 {
1145 	int ret = 0;
1146 	struct cnss_bus_bw_info *bus_bw_info;
1147 
1148 	if (!plat_priv->icc.path_count)
1149 		return -EOPNOTSUPP;
1150 
1151 	if (bw >= plat_priv->icc.bus_bw_cfg_count) {
1152 		cnss_pr_err("Invalid bus bandwidth type: %d\n", bw);
1153 		return -EINVAL;
1154 	}
1155 
1156 	cnss_pr_buf("Bandwidth vote to %d, save %d\n", bw, save);
1157 
1158 	list_for_each_entry(bus_bw_info, &plat_priv->icc.list_head, list) {
1159 		ret = icc_set_bw(bus_bw_info->icc_path,
1160 				 bus_bw_info->cfg_table[bw].avg_bw,
1161 				 bus_bw_info->cfg_table[bw].peak_bw);
1162 		if (ret) {
1163 			cnss_pr_err("Could not set BW Cfg: %d, err = %d ICC Path: %s Val: %d %d\n",
1164 				    bw, ret, bus_bw_info->icc_name,
1165 				    bus_bw_info->cfg_table[bw].avg_bw,
1166 				    bus_bw_info->cfg_table[bw].peak_bw);
1167 			break;
1168 		}
1169 	}
1170 	if (ret == 0 && save)
1171 		plat_priv->icc.current_bw_vote = bw;
1172 	return ret;
1173 }
1174 
1175 int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
1176 {
1177 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1178 
1179 	if (!plat_priv)
1180 		return -ENODEV;
1181 
1182 	if (bandwidth < 0)
1183 		return -EINVAL;
1184 
1185 	return cnss_setup_bus_bandwidth(plat_priv, (u32)bandwidth, true);
1186 }
1187 #else
1188 static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
1189 				    u32 bw, bool save)
1190 {
1191 	return 0;
1192 }
1193 
1194 int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
1195 {
1196 	return 0;
1197 }
1198 #endif
1199 EXPORT_SYMBOL(cnss_request_bus_bandwidth);
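/* A minimal sketch of a caller, assuming a WLAN host driver votes one of the
 * cnss_bus_width_type levels from cnss2.h (CNSS_BUS_WIDTH_MEDIUM here is
 * illustrative only):
 *
 *	ret = cnss_request_bus_bandwidth(&pci_dev->dev, CNSS_BUS_WIDTH_MEDIUM);
 *	if (ret)
 *		cnss_pr_err("Failed to vote bus bandwidth, err = %d\n", ret);
 */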
1200 
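/* In the two debug register helpers below, the force-wake get/put is kept
 * balanced: do_force_wake_put is cleared when the get fails, so a failed
 * request is never paired with a release.
 */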
1201 int cnss_pci_debug_reg_read(struct cnss_pci_data *pci_priv, u32 offset,
1202 			    u32 *val, bool raw_access)
1203 {
1204 	int ret = 0;
1205 	bool do_force_wake_put = true;
1206 
1207 	if (raw_access) {
1208 		ret = cnss_pci_reg_read(pci_priv, offset, val);
1209 		goto out;
1210 	}
1211 
1212 	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
1213 	if (ret)
1214 		goto out;
1215 
1216 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
1217 	if (ret < 0)
1218 		goto runtime_pm_put;
1219 
1220 	ret = cnss_pci_force_wake_get(pci_priv);
1221 	if (ret)
1222 		do_force_wake_put = false;
1223 
1224 	ret = cnss_pci_reg_read(pci_priv, offset, val);
1225 	if (ret) {
1226 		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
1227 			    offset, ret);
1228 		goto force_wake_put;
1229 	}
1230 
1231 force_wake_put:
1232 	if (do_force_wake_put)
1233 		cnss_pci_force_wake_put(pci_priv);
1234 runtime_pm_put:
1235 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
1236 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
1237 out:
1238 	return ret;
1239 }
1240 
1241 int cnss_pci_debug_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
1242 			     u32 val, bool raw_access)
1243 {
1244 	int ret = 0;
1245 	bool do_force_wake_put = true;
1246 
1247 	if (raw_access) {
1248 		ret = cnss_pci_reg_write(pci_priv, offset, val);
1249 		goto out;
1250 	}
1251 
1252 	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
1253 	if (ret)
1254 		goto out;
1255 
1256 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
1257 	if (ret < 0)
1258 		goto runtime_pm_put;
1259 
1260 	ret = cnss_pci_force_wake_get(pci_priv);
1261 	if (ret)
1262 		do_force_wake_put = false;
1263 
1264 	ret = cnss_pci_reg_write(pci_priv, offset, val);
1265 	if (ret) {
1266 		cnss_pr_err("Failed to write 0x%x to register offset 0x%x, err = %d\n",
1267 			    val, offset, ret);
1268 		goto force_wake_put;
1269 	}
1270 
1271 force_wake_put:
1272 	if (do_force_wake_put)
1273 		cnss_pci_force_wake_put(pci_priv);
1274 runtime_pm_put:
1275 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
1276 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
1277 out:
1278 	return ret;
1279 }
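/* In both helpers above, raw_access skips the device-down check, the
 * runtime-PM vote and the force-wake handshake, so it is only safe when the
 * caller already guarantees the device is accessible (e.g. panic or debug
 * paths).
 */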
1280 
1281 static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
1282 {
1283 	struct pci_dev *pci_dev = pci_priv->pci_dev;
1284 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1285 	bool link_down_or_recovery;
1286 
1287 	if (!plat_priv)
1288 		return -ENODEV;
1289 
1290 	link_down_or_recovery = pci_priv->pci_link_down_ind ||
1291 		(test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));
1292 
1293 	if (save) {
1294 		if (link_down_or_recovery) {
1295 			pci_priv->saved_state = NULL;
1296 		} else {
1297 			pci_save_state(pci_dev);
1298 			pci_priv->saved_state = pci_store_saved_state(pci_dev);
1299 		}
1300 	} else {
1301 		if (link_down_or_recovery) {
1302 			pci_load_saved_state(pci_dev, pci_priv->default_state);
1303 			pci_restore_state(pci_dev);
1304 		} else if (pci_priv->saved_state) {
1305 			pci_load_and_free_saved_state(pci_dev,
1306 						      &pci_priv->saved_state);
1307 			pci_restore_state(pci_dev);
1308 		}
1309 	}
1310 
1311 	return 0;
1312 }
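/* On link down or recovery the saved config space is deliberately dropped
 * and the default_state snapshot from probe is restored instead, presumably
 * because state saved around a link failure cannot be trusted.
 */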
1313 
1314 static int cnss_update_supported_link_info(struct cnss_pci_data *pci_priv)
1315 {
1316 	int ret = 0;
1317 	struct pci_dev *root_port;
1318 	struct device_node *root_of_node;
1319 	struct cnss_plat_data *plat_priv;
1320 
1321 	if (!pci_priv)
1322 		return -EINVAL;
1323 
1324 	if (pci_priv->device_id != KIWI_DEVICE_ID)
1325 		return ret;
1326 
1327 	plat_priv = pci_priv->plat_priv;
1328 	root_port = pcie_find_root_port(pci_priv->pci_dev);
1329 
1330 	if (!root_port) {
1331 		cnss_pr_err("PCIe root port is null\n");
1332 		return -EINVAL;
1333 	}
1334 
1335 	root_of_node = root_port->dev.of_node;
1336 	if (root_of_node && root_of_node->parent) {
1337 		ret = of_property_read_u32(root_of_node->parent,
1338 					   "qcom,target-link-speed",
1339 					   &plat_priv->supported_link_speed);
1340 		if (!ret)
1341 			cnss_pr_dbg("Supported PCIe Link Speed: %d\n",
1342 				    plat_priv->supported_link_speed);
1343 		else
1344 			plat_priv->supported_link_speed = 0;
1345 	}
1346 
1347 	return ret;
1348 }
1349 
1350 static int cnss_pci_get_link_status(struct cnss_pci_data *pci_priv)
1351 {
1352 	u16 link_status;
1353 	int ret;
1354 
1355 	ret = pcie_capability_read_word(pci_priv->pci_dev, PCI_EXP_LNKSTA,
1356 					&link_status);
1357 	if (ret)
1358 		return ret;
1359 
1360 	cnss_pr_dbg("Get PCI link status register: %u\n", link_status);
1361 
1362 	pci_priv->def_link_speed = link_status & PCI_EXP_LNKSTA_CLS;
1363 	pci_priv->def_link_width =
1364 		(link_status & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
1365 	pci_priv->cur_link_speed = pci_priv->def_link_speed;
1366 
1367 	cnss_pr_dbg("Default PCI link speed is 0x%x, link width is 0x%x\n",
1368 		    pci_priv->def_link_speed, pci_priv->def_link_width);
1369 
1370 	return 0;
1371 }
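/* PCI_EXP_LNKSTA_CLS encodes the current link speed as a PCIe generation
 * (1 = 2.5 GT/s, 2 = 5 GT/s, 3 = 8 GT/s, ...), and PCI_EXP_LNKSTA_NLW the
 * negotiated lane count.
 */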
1372 
1373 static void cnss_pci_soc_scratch_reg_dump(struct cnss_pci_data *pci_priv)
1374 {
1375 	u32 reg_offset, val;
1376 	int i;
1377 
1378 	switch (pci_priv->device_id) {
1379 	case QCA6390_DEVICE_ID:
1380 	case QCA6490_DEVICE_ID:
1381 	case KIWI_DEVICE_ID:
1382 	case MANGO_DEVICE_ID:
1383 	case PEACH_DEVICE_ID:
1384 		break;
1385 	default:
1386 		return;
1387 	}
1388 
1389 	if (in_interrupt() || irqs_disabled())
1390 		return;
1391 
1392 	if (cnss_pci_check_link_status(pci_priv))
1393 		return;
1394 
1395 	cnss_pr_dbg("Start to dump SOC Scratch registers\n");
1396 
1397 	for (i = 0; pci_scratch[i].name; i++) {
1398 		reg_offset = pci_scratch[i].offset;
1399 		if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
1400 			return;
1401 		cnss_pr_dbg("PCIE_SOC_REG_%s = 0x%x\n",
1402 			    pci_scratch[i].name, val);
1403 	}
1404 }
1405 
1406 int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
1407 {
1408 	int ret = 0;
1409 
1410 	if (!pci_priv)
1411 		return -ENODEV;
1412 
1413 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
1414 		cnss_pr_info("PCI link is already suspended\n");
1415 		goto out;
1416 	}
1417 
1418 	pci_clear_master(pci_priv->pci_dev);
1419 
1420 	ret = cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
1421 	if (ret)
1422 		goto out;
1423 
1424 	pci_disable_device(pci_priv->pci_dev);
1425 
1426 	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
1427 		if (pci_set_power_state(pci_priv->pci_dev, PCI_D3hot))
1428 			cnss_pr_err("Failed to set D3hot\n");
1429 	}
1430 
1431 	/* Always do PCIe L2 suspend during power off/PCIe link recovery */
1432 	pci_priv->drv_connected_last = 0;
1433 
1434 	ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
1435 	if (ret)
1436 		goto out;
1437 
1438 	pci_priv->pci_link_state = PCI_LINK_DOWN;
1439 
1440 	return 0;
1441 out:
1442 	return ret;
1443 }
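/* cnss_resume_pci_link() below mirrors the suspend sequence in reverse:
 * link up, D0, restore config space, enable the device and bus mastering.
 */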
1444 
1445 int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
1446 {
1447 	int ret = 0;
1448 
1449 	if (!pci_priv)
1450 		return -ENODEV;
1451 
1452 	if (pci_priv->pci_link_state == PCI_LINK_UP) {
1453 		cnss_pr_info("PCI link is already resumed\n");
1454 		goto out;
1455 	}
1456 
1457 	ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
1458 	if (ret) {
1459 		ret = -EAGAIN;
1460 		goto out;
1461 	}
1462 
1463 	pci_priv->pci_link_state = PCI_LINK_UP;
1464 
1465 	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
1466 		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D0);
1467 		if (ret) {
1468 			cnss_pr_err("Failed to set D0, err = %d\n", ret);
1469 			goto out;
1470 		}
1471 	}
1472 
1473 	ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
1474 	if (ret)
1475 		goto out;
1476 
1477 	ret = pci_enable_device(pci_priv->pci_dev);
1478 	if (ret) {
1479 		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
1480 		goto out;
1481 	}
1482 
1483 	pci_set_master(pci_priv->pci_dev);
1484 
1485 	if (pci_priv->pci_link_down_ind)
1486 		pci_priv->pci_link_down_ind = false;
1487 
1488 	return 0;
1489 out:
1490 	return ret;
1491 }
1492 
1493 int cnss_pci_recover_link_down(struct cnss_pci_data *pci_priv)
1494 {
1495 	int ret;
1496 
1497 	switch (pci_priv->device_id) {
1498 	case QCA6390_DEVICE_ID:
1499 	case QCA6490_DEVICE_ID:
1500 	case KIWI_DEVICE_ID:
1501 	case MANGO_DEVICE_ID:
1502 	case PEACH_DEVICE_ID:
1503 		break;
1504 	default:
1505 		return -EOPNOTSUPP;
1506 	}
1507 
1508 	/* Always wait here to avoid missing WAKE assert for RDDM
1509 	 * before link recovery
1510 	 */
1511 	msleep(WAKE_EVENT_TIMEOUT);
1512 
1513 	ret = cnss_suspend_pci_link(pci_priv);
1514 	if (ret)
1515 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
1516 
1517 	ret = cnss_resume_pci_link(pci_priv);
1518 	if (ret) {
1519 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
1520 		del_timer(&pci_priv->dev_rddm_timer);
1521 		return ret;
1522 	}
1523 
1524 	mod_timer(&pci_priv->dev_rddm_timer,
1525 		  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
1526 
1527 	cnss_mhi_debug_reg_dump(pci_priv);
1528 	cnss_pci_soc_scratch_reg_dump(pci_priv);
1529 
1530 	return 0;
1531 }
1532 
1533 static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
1534 				       enum cnss_bus_event_type type,
1535 				       void *data)
1536 {
1537 	struct cnss_bus_event bus_event;
1538 
1539 	bus_event.etype = type;
1540 	bus_event.event_data = data;
1541 	cnss_pci_call_driver_uevent(pci_priv, CNSS_BUS_EVENT, &bus_event);
1542 }
1543 
1544 void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv)
1545 {
1546 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1547 	struct pci_dev *pci_dev = pci_priv->pci_dev;
1548 	unsigned long flags;
1549 
1550 	if (test_bit(ENABLE_PCI_LINK_DOWN_PANIC,
1551 		     &plat_priv->ctrl_params.quirks))
1552 		panic("cnss: PCI link is down\n");
1553 
1554 	spin_lock_irqsave(&pci_link_down_lock, flags);
1555 	if (pci_priv->pci_link_down_ind) {
1556 		cnss_pr_dbg("PCI link down recovery is in progress, ignore\n");
1557 		spin_unlock_irqrestore(&pci_link_down_lock, flags);
1558 		return;
1559 	}
1560 	pci_priv->pci_link_down_ind = true;
1561 	spin_unlock_irqrestore(&pci_link_down_lock, flags);
1562 
1563 	if (pci_priv->mhi_ctrl) {
1564 		/* Notify MHI about link down */
1565 		mhi_report_error(pci_priv->mhi_ctrl);
1566 	}
1567 
1568 	if (pci_dev->device == QCA6174_DEVICE_ID)
1569 		disable_irq(pci_dev->irq);
1570 
1571 	/* Notify the bus-related event. For now this covers all supported
1572 	 * chips and handles the PCIe LINK_DOWN notification. The uevent
1573 	 * buffer can be extended later to cover more bus info.
1574 	 */
1575 	cnss_pci_update_link_event(pci_priv, BUS_EVENT_PCI_LINK_DOWN, NULL);
1576 
1577 	cnss_fatal_err("PCI link down, schedule recovery\n");
1578 	cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
1579 }
1580 
1581 int cnss_pci_link_down(struct device *dev)
1582 {
1583 	struct pci_dev *pci_dev = to_pci_dev(dev);
1584 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1585 	struct cnss_plat_data *plat_priv = NULL;
1586 	int ret;
1587 
1588 	if (!pci_priv) {
1589 		cnss_pr_err("pci_priv is NULL\n");
1590 		return -EINVAL;
1591 	}
1592 
1593 	plat_priv = pci_priv->plat_priv;
1594 	if (!plat_priv) {
1595 		cnss_pr_err("plat_priv is NULL\n");
1596 		return -ENODEV;
1597 	}
1598 
1599 	if (pci_priv->pci_link_down_ind) {
1600 		cnss_pr_dbg("PCI link down recovery is already in progress\n");
1601 		return -EBUSY;
1602 	}
1603 
1604 	if (pci_priv->drv_connected_last &&
1605 	    of_property_read_bool(plat_priv->plat_dev->dev.of_node,
1606 				  "cnss-enable-self-recovery"))
1607 		plat_priv->ctrl_params.quirks |= BIT(LINK_DOWN_SELF_RECOVERY);
1608 
1609 	cnss_pr_err("PCI link down detected by host driver\n");
1610 
1611 	ret = cnss_pci_assert_perst(pci_priv);
1612 	if (ret)
1613 		cnss_pci_handle_linkdown(pci_priv);
1614 
1615 	return ret;
1616 }
1617 EXPORT_SYMBOL(cnss_pci_link_down);
1618 
1619 int cnss_pci_get_reg_dump(struct device *dev, uint8_t *buffer, uint32_t len)
1620 {
1621 	struct pci_dev *pci_dev = to_pci_dev(dev);
1622 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1623 
1624 	if (!pci_priv) {
1625 		cnss_pr_err("pci_priv is NULL\n");
1626 		return -ENODEV;
1627 	}
1628 
1629 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
1630 		cnss_pr_dbg("No PCIe reg dump since PCIe is suspended (D3)\n");
1631 		return -EACCES;
1632 	}
1633 
1634 	cnss_pr_dbg("Start to get PCIe reg dump\n");
1635 
1636 	return _cnss_pci_get_reg_dump(pci_priv, buffer, len);
1637 }
1638 EXPORT_SYMBOL(cnss_pci_get_reg_dump);
1639 
1640 int cnss_pcie_is_device_down(struct cnss_pci_data *pci_priv)
1641 {
1642 	struct cnss_plat_data *plat_priv;
1643 
1644 	if (!pci_priv) {
1645 		cnss_pr_err("pci_priv is NULL\n");
1646 		return -ENODEV;
1647 	}
1648 
1649 	plat_priv = pci_priv->plat_priv;
1650 	if (!plat_priv) {
1651 		cnss_pr_err("plat_priv is NULL\n");
1652 		return -ENODEV;
1653 	}
1654 
1655 	return test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) ||
1656 		pci_priv->pci_link_down_ind;
1657 }
1658 
1659 int cnss_pci_is_device_down(struct device *dev)
1660 {
1661 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
1662 
1663 	return cnss_pcie_is_device_down(pci_priv);
1664 }
1665 EXPORT_SYMBOL(cnss_pci_is_device_down);
1666 
1667 void cnss_pci_lock_reg_window(struct device *dev, unsigned long *flags)
1668 {
1669 	spin_lock_bh(&pci_reg_window_lock);
1670 }
1671 EXPORT_SYMBOL(cnss_pci_lock_reg_window);
1672 
1673 void cnss_pci_unlock_reg_window(struct device *dev, unsigned long *flags)
1674 {
1675 	spin_unlock_bh(&pci_reg_window_lock);
1676 }
1677 EXPORT_SYMBOL(cnss_pci_unlock_reg_window);
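/* The flags argument in the two helpers above is unused; both take the same
 * BH-disabling spinlock used internally by the windowed register accessors.
 * A sketch of a hypothetical caller pairing them:
 *
 *	unsigned long flags;
 *
 *	cnss_pci_lock_reg_window(dev, &flags);
 *	... access windowed registers ...
 *	cnss_pci_unlock_reg_window(dev, &flags);
 */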
1678 
1679 int cnss_get_pci_slot(struct device *dev)
1680 {
1681 	struct pci_dev *pci_dev = to_pci_dev(dev);
1682 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1683 	struct cnss_plat_data *plat_priv = NULL;
1684 
1685 	if (!pci_priv) {
1686 		cnss_pr_err("pci_priv is NULL\n");
1687 		return -EINVAL;
1688 	}
1689 
1690 	plat_priv = pci_priv->plat_priv;
1691 	if (!plat_priv) {
1692 		cnss_pr_err("plat_priv is NULL\n");
1693 		return -ENODEV;
1694 	}
1695 
1696 	return plat_priv->rc_num;
1697 }
1698 EXPORT_SYMBOL(cnss_get_pci_slot);
1699 
1700 /**
1701  * cnss_pci_dump_bl_sram_mem - Dump WLAN device bootloader debug log
1702  * @pci_priv: driver PCI bus context pointer
1703  *
1704  * Dump primary and secondary bootloader debug log data. For SBL, validate
1705  * the log start address and size before dumping.
1706  *
1707  * Return: None
1708  */
1709 static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
1710 {
1711 	enum mhi_ee_type ee;
1712 	u32 mem_addr, val, pbl_log_max_size, sbl_log_max_size;
1713 	u32 pbl_log_sram_start;
1714 	u32 pbl_stage, sbl_log_start, sbl_log_size;
1715 	u32 pbl_wlan_boot_cfg, pbl_bootstrap_status;
1716 	u32 pbl_bootstrap_status_reg = PBL_BOOTSTRAP_STATUS;
1717 	u32 sbl_log_def_start = SRAM_START;
1718 	u32 sbl_log_def_end = SRAM_END;
1719 	int i;
1720 
1721 	switch (pci_priv->device_id) {
1722 	case QCA6390_DEVICE_ID:
1723 		pbl_log_sram_start = QCA6390_DEBUG_PBL_LOG_SRAM_START;
1724 		pbl_log_max_size = QCA6390_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1725 		sbl_log_max_size = QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1726 		break;
1727 	case QCA6490_DEVICE_ID:
1728 		pbl_log_sram_start = QCA6490_DEBUG_PBL_LOG_SRAM_START;
1729 		pbl_log_max_size = QCA6490_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1730 		sbl_log_max_size = QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1731 		break;
1732 	case KIWI_DEVICE_ID:
1733 		pbl_bootstrap_status_reg = KIWI_PBL_BOOTSTRAP_STATUS;
1734 		pbl_log_sram_start = KIWI_DEBUG_PBL_LOG_SRAM_START;
1735 		pbl_log_max_size = KIWI_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1736 		sbl_log_max_size = KIWI_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1737 		break;
1738 	case MANGO_DEVICE_ID:
1739 		pbl_bootstrap_status_reg = MANGO_PBL_BOOTSTRAP_STATUS;
1740 		pbl_log_sram_start = MANGO_DEBUG_PBL_LOG_SRAM_START;
1741 		pbl_log_max_size = MANGO_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1742 		sbl_log_max_size = MANGO_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1743 		break;
1744 	case PEACH_DEVICE_ID:
1745 		pbl_bootstrap_status_reg = PEACH_PBL_BOOTSTRAP_STATUS;
1746 		pbl_log_sram_start = PEACH_DEBUG_PBL_LOG_SRAM_START;
1747 		pbl_log_max_size = PEACH_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1748 		sbl_log_max_size = PEACH_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1749 		break;
1750 	default:
1751 		return;
1752 	}
1753 
1754 	if (cnss_pci_check_link_status(pci_priv))
1755 		return;
1756 
1757 	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
1758 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
1759 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
1760 	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
1761 	cnss_pci_reg_read(pci_priv, pbl_bootstrap_status_reg,
1762 			  &pbl_bootstrap_status);
1763 	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size: 0x%08x\n",
1764 		    pbl_stage, sbl_log_start, sbl_log_size);
1765 	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x PBL_BOOTSTRAP_STATUS: 0x%08x\n",
1766 		    pbl_wlan_boot_cfg, pbl_bootstrap_status);
1767 
1768 	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
1769 	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
1770 		cnss_pr_dbg("Avoid dumping PBL log data in mission mode\n");
1771 		return;
1772 	}
1773 
1774 	cnss_pr_dbg("Dumping PBL log data\n");
1775 	for (i = 0; i < pbl_log_max_size; i += sizeof(val)) {
1776 		mem_addr = pbl_log_sram_start + i;
1777 		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
1778 			break;
1779 		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
1780 	}
1781 
1782 	sbl_log_size = (sbl_log_size > sbl_log_max_size ?
1783 			sbl_log_max_size : sbl_log_size);
1784 	if (sbl_log_start < sbl_log_def_start ||
1785 	    sbl_log_start > sbl_log_def_end ||
1786 	    (sbl_log_start + sbl_log_size) > sbl_log_def_end) {
1787 		cnss_pr_err("Invalid SBL log data\n");
1788 		return;
1789 	}
1790 
1791 	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
1792 	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
1793 		cnss_pr_dbg("Avoid dumping SBL log data in mission mode\n");
1794 		return;
1795 	}
1796 
1797 	cnss_pr_dbg("Dumping SBL log data\n");
1798 	for (i = 0; i < sbl_log_size; i += sizeof(val)) {
1799 		mem_addr = sbl_log_start + i;
1800 		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
1801 			break;
1802 		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
1803 	}
1804 }
1805 
1806 #ifdef CONFIG_DISABLE_CNSS_SRAM_DUMP
1807 static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
1808 {
1809 }
1810 #else
1811 static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
1812 {
1813 	struct cnss_plat_data *plat_priv;
1814 	u32 i, mem_addr;
1815 	u32 *dump_ptr;
1816 
1817 	plat_priv = pci_priv->plat_priv;
1818 
1819 	if (plat_priv->device_id != QCA6490_DEVICE_ID ||
1820 	    cnss_get_host_build_type() != QMI_HOST_BUILD_TYPE_PRIMARY_V01)
1821 		return;
1822 
1823 	if (!plat_priv->sram_dump) {
1824 		cnss_pr_err("SRAM dump memory is not allocated\n");
1825 		return;
1826 	}
1827 
1828 	if (cnss_pci_check_link_status(pci_priv))
1829 		return;
1830 
1831 	cnss_pr_dbg("Dumping SRAM at %pK\n", plat_priv->sram_dump);
1832 
1833 	for (i = 0; i < SRAM_DUMP_SIZE; i += sizeof(u32)) {
1834 		mem_addr = SRAM_START + i;
1835 		dump_ptr = (u32 *)(plat_priv->sram_dump + i);
1836 		if (cnss_pci_reg_read(pci_priv, mem_addr, dump_ptr)) {
1837 			cnss_pr_err("SRAM Dump failed at 0x%x\n", mem_addr);
1838 			break;
1839 		}
1840 		/* Relinquish CPU after dumping each 256KB chunk */
1841 		if (!(i % CNSS_256KB_SIZE))
1842 			cond_resched();
1843 	}
1844 }
1845 #endif
1846 
1847 static int cnss_pci_handle_mhi_poweron_timeout(struct cnss_pci_data *pci_priv)
1848 {
1849 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1850 
1851 	cnss_fatal_err("MHI power up returns timeout\n");
1852 
1853 	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE) ||
1854 	    cnss_get_dev_sol_value(plat_priv) > 0) {
1855 		/* Wait for RDDM if RDDM cookie is set or device SOL GPIO is
1856 		 * high. If RDDM times out, PBL/SBL error region may have been
1857 		 * erased so no need to dump them either.
1858 		 */
1859 		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
1860 		    !pci_priv->pci_link_down_ind) {
1861 			mod_timer(&pci_priv->dev_rddm_timer,
1862 				  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
1863 		}
1864 	} else {
1865 		cnss_pr_dbg("RDDM cookie is not set and device SOL is low\n");
1866 		cnss_mhi_debug_reg_dump(pci_priv);
1867 		cnss_pci_soc_scratch_reg_dump(pci_priv);
1868 		/* Dump PBL/SBL error log if RDDM cookie is not set */
1869 		cnss_pci_dump_bl_sram_mem(pci_priv);
1870 		cnss_pci_dump_sram(pci_priv);
1871 		return -ETIMEDOUT;
1872 	}
1873 
1874 	return 0;
1875 }
1876 
1877 static const char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
1878 {
1879 	switch (mhi_state) {
1880 	case CNSS_MHI_INIT:
1881 		return "INIT";
1882 	case CNSS_MHI_DEINIT:
1883 		return "DEINIT";
1884 	case CNSS_MHI_POWER_ON:
1885 		return "POWER_ON";
1886 	case CNSS_MHI_POWERING_OFF:
1887 		return "POWERING_OFF";
1888 	case CNSS_MHI_POWER_OFF:
1889 		return "POWER_OFF";
1890 	case CNSS_MHI_FORCE_POWER_OFF:
1891 		return "FORCE_POWER_OFF";
1892 	case CNSS_MHI_SUSPEND:
1893 		return "SUSPEND";
1894 	case CNSS_MHI_RESUME:
1895 		return "RESUME";
1896 	case CNSS_MHI_TRIGGER_RDDM:
1897 		return "TRIGGER_RDDM";
1898 	case CNSS_MHI_RDDM_DONE:
1899 		return "RDDM_DONE";
1900 	default:
1901 		return "UNKNOWN";
1902 	}
1903 }
1904 
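/* Validate a requested MHI state transition against the current
 * driver-internal MHI state bits, e.g. POWER_ON requires INIT to be
 * set and POWER_ON to be clear. Returns 0 if the transition is legal;
 * otherwise logs the conflict, asserts (except for TRIGGER_RDDM) and
 * returns -EINVAL.
 */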
1905 static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
1906 					enum cnss_mhi_state mhi_state)
1907 {
1908 	switch (mhi_state) {
1909 	case CNSS_MHI_INIT:
1910 		if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
1911 			return 0;
1912 		break;
1913 	case CNSS_MHI_DEINIT:
1914 	case CNSS_MHI_POWER_ON:
1915 		if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
1916 		    !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
1917 			return 0;
1918 		break;
1919 	case CNSS_MHI_FORCE_POWER_OFF:
1920 		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
1921 			return 0;
1922 		break;
1923 	case CNSS_MHI_POWER_OFF:
1924 	case CNSS_MHI_SUSPEND:
1925 		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
1926 		    !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
1927 			return 0;
1928 		break;
1929 	case CNSS_MHI_RESUME:
1930 		if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
1931 			return 0;
1932 		break;
1933 	case CNSS_MHI_TRIGGER_RDDM:
1934 		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
1935 		    !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
1936 			return 0;
1937 		break;
1938 	case CNSS_MHI_RDDM_DONE:
1939 		return 0;
1940 	default:
1941 		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
1942 			    cnss_mhi_state_to_str(mhi_state), mhi_state);
1943 	}
1944 
1945 	cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
1946 		    cnss_mhi_state_to_str(mhi_state), mhi_state,
1947 		    pci_priv->mhi_state);
1948 	if (mhi_state != CNSS_MHI_TRIGGER_RDDM)
1949 		CNSS_ASSERT(0);
1950 
1951 	return -EINVAL;
1952 }
1953 
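/* QCA6490-only debug aid used before forcing RDDM: write the 0xACE55
 * marker pattern into the GCC spare register and read back the
 * pre-ARES debug timer value so the logs show whether a warm reset is
 * allowed.
 */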
1954 static int cnss_rddm_trigger_debug(struct cnss_pci_data *pci_priv)
1955 {
1956 	u32 read_val;
	int ret;
1957 
1958 	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
1959 		return -EOPNOTSUPP;
1960 
1961 	if (cnss_pci_check_link_status(pci_priv))
1962 		return -EINVAL;
1963 
1964 	cnss_pr_err("Write GCC Spare with ACE55 pattern\n");
1965 	cnss_pci_reg_write(pci_priv, GCC_GCC_SPARE_REG_1, 0xACE55);
1966 	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
1967 	cnss_pr_err("Read back GCC Spare: 0x%x, ret: %d\n", read_val, ret);
1968 	ret = cnss_pci_reg_read(pci_priv, GCC_PRE_ARES_DEBUG_TIMER_VAL,
1969 				&read_val);
1970 	cnss_pr_err("Warm reset allowed check: 0x%x, ret: %d\n", read_val, ret);
1971 	return ret;
1972 }
1973 
1974 static int cnss_rddm_trigger_check(struct cnss_pci_data *pci_priv)
1975 {
1976 	int ret;
1977 	u32 read_val, pbl_stage, sbl_log_start, sbl_log_size, pbl_wlan_boot_cfg;
1978 
1979 	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
1980 		return -EOPNOTSUPP;
1981 
1982 	if (cnss_pci_check_link_status(pci_priv))
1983 		return -EINVAL;
1984 
1985 	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
1986 	cnss_pr_err("Read GCC spare to check reset status: 0x%x, ret: %d\n",
1987 		    read_val, ret);
1988 
1989 	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
1990 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
1991 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
1992 	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
1993 	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size: 0x%08x\n",
1994 		    pbl_stage, sbl_log_start, sbl_log_size);
1995 	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x\n", pbl_wlan_boot_cfg);
1996 
1997 	return ret;
1998 }
1999 
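/* Update the driver-internal MHI state bitmap after a state change.
 * Note that POWER_OFF/FORCE_POWER_OFF clear not only POWER_ON but also
 * POWERING_OFF, TRIGGER_RDDM and RDDM_DONE, as those bits are only
 * meaningful while the device is powered.
 */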
2000 static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
2001 				       enum cnss_mhi_state mhi_state)
2002 {
2003 	switch (mhi_state) {
2004 	case CNSS_MHI_INIT:
2005 		set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
2006 		break;
2007 	case CNSS_MHI_DEINIT:
2008 		clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
2009 		break;
2010 	case CNSS_MHI_POWER_ON:
2011 		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
2012 		break;
2013 	case CNSS_MHI_POWERING_OFF:
2014 		set_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
2015 		break;
2016 	case CNSS_MHI_POWER_OFF:
2017 	case CNSS_MHI_FORCE_POWER_OFF:
2018 		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
2019 		clear_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
2020 		clear_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
2021 		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
2022 		break;
2023 	case CNSS_MHI_SUSPEND:
2024 		set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
2025 		break;
2026 	case CNSS_MHI_RESUME:
2027 		clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
2028 		break;
2029 	case CNSS_MHI_TRIGGER_RDDM:
2030 		set_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
2031 		break;
2032 	case CNSS_MHI_RDDM_DONE:
2033 		set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
2034 		break;
2035 	default:
2036 		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
2037 	}
2038 }
2039 
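/* Per the version guard below, kernels >= 5.15 provide
 * mhi_pm_resume_force(), which resumes MHI without the device-state
 * check done by mhi_pm_resume(); older kernels fall back to the plain
 * mhi_pm_resume() path.
 */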
2040 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
2041 static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
2042 {
2043 	return mhi_pm_resume_force(pci_priv->mhi_ctrl);
2044 }
2045 #else
2046 static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
2047 {
2048 	return mhi_pm_resume(pci_priv->mhi_ctrl);
2049 }
2050 #endif
2051 
2052 static int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
2053 				  enum cnss_mhi_state mhi_state)
2054 {
2055 	int ret = 0, retry = 0;
2056 
2057 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
2058 		return 0;
2059 
2060 	if (mhi_state < 0) {
2061 		cnss_pr_err("Invalid MHI state (%d)\n", mhi_state);
2062 		return -EINVAL;
2063 	}
2064 
2065 	ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state);
2066 	if (ret)
2067 		goto out;
2068 
2069 	cnss_pr_vdbg("Setting MHI state: %s(%d)\n",
2070 		     cnss_mhi_state_to_str(mhi_state), mhi_state);
2071 
2072 	switch (mhi_state) {
2073 	case CNSS_MHI_INIT:
2074 		ret = mhi_prepare_for_power_up(pci_priv->mhi_ctrl);
2075 		break;
2076 	case CNSS_MHI_DEINIT:
2077 		mhi_unprepare_after_power_down(pci_priv->mhi_ctrl);
2078 		ret = 0;
2079 		break;
2080 	case CNSS_MHI_POWER_ON:
2081 		ret = mhi_sync_power_up(pci_priv->mhi_ctrl);
2082 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
2083 		/* Only set img_pre_alloc when power up succeeds */
2084 		if (!ret && !pci_priv->mhi_ctrl->img_pre_alloc) {
2085 			cnss_pr_dbg("Notify MHI to use already allocated images\n");
2086 			pci_priv->mhi_ctrl->img_pre_alloc = true;
2087 		}
2088 #endif
2089 		break;
2090 	case CNSS_MHI_POWER_OFF:
2091 		mhi_power_down(pci_priv->mhi_ctrl, true);
2092 		ret = 0;
2093 		break;
2094 	case CNSS_MHI_FORCE_POWER_OFF:
2095 		mhi_power_down(pci_priv->mhi_ctrl, false);
2096 		ret = 0;
2097 		break;
2098 	case CNSS_MHI_SUSPEND:
2099 retry_mhi_suspend:
2100 		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
2101 		if (pci_priv->drv_connected_last)
2102 			ret = cnss_mhi_pm_fast_suspend(pci_priv, true);
2103 		else
2104 			ret = mhi_pm_suspend(pci_priv->mhi_ctrl);
2105 		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
2106 		if (ret == -EBUSY && retry++ < MHI_SUSPEND_RETRY_MAX_TIMES) {
2107 			cnss_pr_dbg("Retry MHI suspend #%d\n", retry);
2108 			usleep_range(MHI_SUSPEND_RETRY_DELAY_US,
2109 				     MHI_SUSPEND_RETRY_DELAY_US + 1000);
2110 			goto retry_mhi_suspend;
2111 		}
2112 		break;
2113 	case CNSS_MHI_RESUME:
2114 		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
2115 		if (pci_priv->drv_connected_last) {
2116 			ret = cnss_pci_prevent_l1(&pci_priv->pci_dev->dev);
2117 			if (ret) {
2118 				mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
2119 				break;
2120 			}
2121 			ret = cnss_mhi_pm_fast_resume(pci_priv, true);
2122 			cnss_pci_allow_l1(&pci_priv->pci_dev->dev);
2123 		} else {
2124 			if (pci_priv->device_id == QCA6390_DEVICE_ID)
2125 				ret = cnss_mhi_pm_force_resume(pci_priv);
2126 			else
2127 				ret = mhi_pm_resume(pci_priv->mhi_ctrl);
2128 		}
2129 		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
2130 		break;
2131 	case CNSS_MHI_TRIGGER_RDDM:
2132 		cnss_rddm_trigger_debug(pci_priv);
2133 		ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
2134 		if (ret) {
2135 			cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
2136 
2137 			cnss_pr_dbg("Sending host reset req\n");
2138 			ret = cnss_mhi_force_reset(pci_priv);
2139 			cnss_rddm_trigger_check(pci_priv);
2140 		}
2141 		break;
2142 	case CNSS_MHI_RDDM_DONE:
2143 		break;
2144 	default:
2145 		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
2146 		ret = -EINVAL;
2147 	}
2148 
2149 	if (ret)
2150 		goto out;
2151 
2152 	cnss_pci_set_mhi_state_bit(pci_priv, mhi_state);
2153 
2154 	return 0;
2155 
2156 out:
2157 	cnss_pr_err("Failed to set MHI state: %s(%d), err = %d\n",
2158 		    cnss_mhi_state_to_str(mhi_state), mhi_state, ret);
2159 	return ret;
2160 }
2161 
2162 static int cnss_pci_config_msi_addr(struct cnss_pci_data *pci_priv)
2163 {
2164 	int ret = 0;
2165 	struct pci_dev *pci_dev = pci_priv->pci_dev;
2166 	struct cnss_plat_data *plat_priv;
2167 
2168 	if (!pci_dev)
2169 		return -ENODEV;
2170 
2171 	if (!pci_dev->msix_enabled)
2172 		return ret;
2173 
2174 	plat_priv = pci_priv->plat_priv;
2175 	if (!plat_priv) {
2176 		cnss_pr_err("plat_priv is NULL\n");
2177 		return -ENODEV;
2178 	}
2179 
2180 	ret = of_property_read_u32(plat_priv->plat_dev->dev.of_node,
2181 				   "msix-match-addr",
2182 				   &pci_priv->msix_addr);
2183 	cnss_pr_dbg("MSI-X Match address is 0x%X\n",
2184 		    pci_priv->msix_addr);
2185 
2186 	return ret;
2187 }
2188 
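/* Cache the MSI endpoint base data: for MSI-X it comes from the first
 * user's base vector in the static MSI config, while for plain MSI it
 * is read back from the MSI descriptor of the allocated IRQ.
 */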
2189 static int cnss_pci_config_msi_data(struct cnss_pci_data *pci_priv)
2190 {
2191 	struct msi_desc *msi_desc;
2192 	struct cnss_msi_config *msi_config;
2193 	struct pci_dev *pci_dev = pci_priv->pci_dev;
2194 
2195 	msi_config = pci_priv->msi_config;
2196 
2197 	if (pci_dev->msix_enabled) {
2198 		pci_priv->msi_ep_base_data = msi_config->users[0].base_vector;
2199 		cnss_pr_dbg("MSI-X base data is %d\n",
2200 			    pci_priv->msi_ep_base_data);
2201 		return 0;
2202 	}
2203 
2204 	msi_desc = irq_get_msi_desc(pci_dev->irq);
2205 	if (!msi_desc) {
2206 		cnss_pr_err("msi_desc is NULL!\n");
2207 		return -EINVAL;
2208 	}
2209 
2210 	pci_priv->msi_ep_base_data = msi_desc->msg.data;
2211 	cnss_pr_dbg("MSI base data is %d\n", pci_priv->msi_ep_base_data);
2212 
2213 	return 0;
2214 }
2215 
2216 #ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
2217 #define PLC_PCIE_NAME_LEN		14
2218 
2219 static struct cnss_plat_data *
2220 cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
2221 {
2222 	int plat_env_count = cnss_get_plat_env_count();
2223 	struct cnss_plat_data *plat_env;
2224 	struct cnss_pci_data *pci_priv;
2225 	int i = 0;
2226 
2227 	if (!driver_ops) {
2228 		cnss_pr_err("No cnss driver\n");
2229 		return NULL;
2230 	}
2231 
2232 	for (i = 0; i < plat_env_count; i++) {
2233 		plat_env = cnss_get_plat_env(i);
2234 		if (!plat_env)
2235 			continue;
2236 		if (driver_ops->name && plat_env->pld_bus_ops_name) {
2237 			/* driver_ops->name = PLD_PCIE_OPS_NAME
2238 			 * #ifdef MULTI_IF_NAME
2239 			 * #define PLD_PCIE_OPS_NAME "pld_pcie_" MULTI_IF_NAME
2240 			 * #else
2241 			 * #define PLD_PCIE_OPS_NAME "pld_pcie"
2242 			 * #endif
2243 			 */
2244 			if (memcmp(driver_ops->name,
2245 				   plat_env->pld_bus_ops_name,
2246 				   PLC_PCIE_NAME_LEN) == 0)
2247 				return plat_env;
2248 		}
2249 	}
2250 
2251 	cnss_pr_vdbg("Invalid cnss driver name from ko %s\n", driver_ops->name);
2252 	/* In the dual WLAN card case, the pld_bus_ops_name from the DTS
2253 	 * and driver_ops->name from the ko should match; otherwise the
2254 	 * WLAN host driver cannot tell which plat_env instance to use.
2255 	 * If no matching one is found, fall back to the first available
2256 	 * instance instead.
2257 	 */
2258 
2259 	for (i = 0; i < plat_env_count; i++) {
2260 		plat_env = cnss_get_plat_env(i);
2261 
2262 		if (!plat_env)
2263 			continue;
2264 
2265 		pci_priv = plat_env->bus_priv;
2266 		if (!pci_priv) {
2267 			cnss_pr_err("pci_priv is NULL\n");
2268 			continue;
2269 		}
2270 
2271 		if (driver_ops == pci_priv->driver_ops)
2272 			return plat_env;
2273 	}
2274 	/* No existing instance was found, so return the first empty
2275 	 * instance.
2276 	 */
2277 	for (i = 0; i < plat_env_count; i++) {
2278 		plat_env = cnss_get_plat_env(i);
2279 
2280 		if (!plat_env)
2281 			continue;
2282 		pci_priv = plat_env->bus_priv;
2283 		if (!pci_priv) {
2284 			cnss_pr_err("pci_priv is NULL\n");
2285 			continue;
2286 		}
2287 
2288 		if (!pci_priv->driver_ops)
2289 			return plat_env;
2290 	}
2291 
2292 	return NULL;
2293 }
2294 
2295 static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
2296 {
2297 	int ret = 0;
2298 	u32 scratch = QCA6390_PCIE_SOC_PCIE_REG_PCIE_SCRATCH_2_SOC_PCIE_REG;
2299 	struct cnss_plat_data *plat_priv;
2300 
2301 	if (!pci_priv) {
2302 		cnss_pr_err("pci_priv is NULL\n");
2303 		return -ENODEV;
2304 	}
2305 
2306 	plat_priv = pci_priv->plat_priv;
2307 	/* In the single WLAN chipset case, plat_priv->qrtr_node_id is
2308 	 * always 0, and the WLAN firmware uses the hardcoded value 7 as
2309 	 * its QRTR node ID.
2310 	 * In the dual Hastings case, the QRTR node ID is read from the
2311 	 * device tree into plat_priv->qrtr_node_id, which is then
2312 	 * always non-zero. That value is stored in a PCIe scratch
2313 	 * register; the WLAN firmware reads it back from this register
2314 	 * and overrides the hardcoded ID while initializing the IPC
2315 	 * router.
2316 	 * Without this, two Hastings chips would use the same QRTR node
2317 	 * instance ID, which would mess up the QMI message exchange.
2318 	 * According to the QRTR spec, every node must have a unique
2319 	 * node ID.
2320 	 */
2321 	if (plat_priv->device_id == QCA6390_DEVICE_ID &&
2322 	    plat_priv->qrtr_node_id) {
2323 		u32 val;
2324 
2325 		cnss_pr_dbg("write 0x%x to SCRATCH REG\n",
2326 			    plat_priv->qrtr_node_id);
2327 		ret = cnss_pci_reg_write(pci_priv, scratch,
2328 					 plat_priv->qrtr_node_id);
2329 		if (ret) {
2330 			cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
2331 				    scratch, ret);
2332 			goto out;
2333 		}
2334 
2335 		ret = cnss_pci_reg_read(pci_priv, scratch, &val);
2336 		if (ret) {
2337 			cnss_pr_err("Failed to read SCRATCH REG\n");
2338 			goto out;
2339 		}
2340 
2341 		if (val != plat_priv->qrtr_node_id) {
2342 			cnss_pr_err("QRTR node ID written to register does not match readout value\n");
2343 			return -ERANGE;
2344 		}
2345 	}
2346 out:
2347 	return ret;
2348 }
2349 #else
2350 static struct cnss_plat_data *
2351 cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
2352 {
2353 	return cnss_bus_dev_to_plat_priv(NULL);
2354 }
2355 
2356 static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
2357 {
2358 	return 0;
2359 }
2360 #endif
2361 
2362 int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
2363 {
2364 	int ret = 0;
2365 	struct cnss_plat_data *plat_priv;
2366 	unsigned int timeout = 0;
2367 	int retry = 0;
2368 
2369 	if (!pci_priv) {
2370 		cnss_pr_err("pci_priv is NULL\n");
2371 		return -ENODEV;
2372 	}
2373 
2374 	plat_priv = pci_priv->plat_priv;
2375 	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
2376 		return 0;
2377 
2378 	if (MHI_TIMEOUT_OVERWRITE_MS)
2379 		pci_priv->mhi_ctrl->timeout_ms = MHI_TIMEOUT_OVERWRITE_MS;
2380 	cnss_mhi_set_m2_timeout_ms(pci_priv, MHI_M2_TIMEOUT_MS);
2381 
2382 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
2383 	if (ret)
2384 		return ret;
2385 
2386 	timeout = pci_priv->mhi_ctrl->timeout_ms;
2387 	/* For non-perf builds the timeout is the default (10 s) * 6 */
2388 	if (cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
2389 		pci_priv->mhi_ctrl->timeout_ms *= 6;
2390 	else /* For perf builds the timeout is the default (10 s) * 3 */
2391 		pci_priv->mhi_ctrl->timeout_ms *= 3;
2392 
2393 retry:
2394 	ret = cnss_pci_store_qrtr_node_id(pci_priv);
2395 	if (ret) {
2396 		if (retry++ < REG_RETRY_MAX_TIMES)
2397 			goto retry;
2398 		else
2399 			return ret;
2400 	}
2401 
2402 	/* Start the timer to dump MHI/PBL/SBL debug data periodically */
2403 	mod_timer(&pci_priv->boot_debug_timer,
2404 		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
2405 
2406 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON);
2407 	del_timer_sync(&pci_priv->boot_debug_timer);
2408 	if (ret == 0)
2409 		cnss_wlan_adsp_pc_enable(pci_priv, false);
2410 
2411 	pci_priv->mhi_ctrl->timeout_ms = timeout;
2412 
2413 	if (ret == -ETIMEDOUT) {
2414 		/* Special case: if MHI power on returns -ETIMEDOUT, the
2415 		 * controller needs to take care of the cleanup by
2416 		 * calling MHI power down. Force-set the driver-internal
2417 		 * MHI state bit here so that the cleanup can be handled
2418 		 * properly later.
2419 		 */
2420 		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
2421 		ret = cnss_pci_handle_mhi_poweron_timeout(pci_priv);
2422 	} else if (!ret) {
2423 		/* The kernel may allocate a dummy vector before
2424 		 * request_irq and then a real one when it is called, so
2425 		 * get msi_data again here to avoid spurious interrupts,
2426 		 * as msi_data will be configured into the SRNGs.
2427 		 */
2428 		if (cnss_pci_is_one_msi(pci_priv))
2429 			ret = cnss_pci_config_msi_data(pci_priv);
2430 	}
2431 
2432 	return ret;
2433 }
2434 
2435 static void cnss_pci_power_off_mhi(struct cnss_pci_data *pci_priv)
2436 {
2437 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2438 
2439 	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
2440 		return;
2441 
2442 	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state)) {
2443 		cnss_pr_dbg("MHI is already powered off\n");
2444 		return;
2445 	}
2446 	cnss_wlan_adsp_pc_enable(pci_priv, true);
2447 	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
2448 	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_POWERING_OFF);
2449 
2450 	if (!pci_priv->pci_link_down_ind)
2451 		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
2452 	else
2453 		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_FORCE_POWER_OFF);
2454 }
2455 
2456 static void cnss_pci_deinit_mhi(struct cnss_pci_data *pci_priv)
2457 {
2458 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2459 
2460 	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
2461 		return;
2462 
2463 	if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state)) {
2464 		cnss_pr_dbg("MHI is already deinited\n");
2465 		return;
2466 	}
2467 
2468 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
2469 }
2470 
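/* Read-modify-write the WLAON QFPROM power control register.
 * @set_vddd4blow: set or clear the VDD4BLOW software-enable bit.
 * @set_shutdown: set or clear the shutdown-enable bit; when set, also
 *		  wait out the shutdown delay afterwards.
 * @do_force_wake: hold a force-wake vote around the register access.
 * This is a no-op when the platform does not use WLAON power control
 * or when the PCIe link is down.
 */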
2471 static void cnss_pci_set_wlaon_pwr_ctrl(struct cnss_pci_data *pci_priv,
2472 					bool set_vddd4blow, bool set_shutdown,
2473 					bool do_force_wake)
2474 {
2475 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2476 	int ret;
2477 	u32 val;
2478 
2479 	if (!plat_priv->set_wlaon_pwr_ctrl)
2480 		return;
2481 
2482 	if (pci_priv->pci_link_state == PCI_LINK_DOWN ||
2483 	    pci_priv->pci_link_down_ind)
2484 		return;
2485 
2486 	if (do_force_wake)
2487 		if (cnss_pci_force_wake_get(pci_priv))
2488 			return;
2489 
2490 	ret = cnss_pci_reg_read(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, &val);
2491 	if (ret) {
2492 		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
2493 			    WLAON_QFPROM_PWR_CTRL_REG, ret);
2494 		goto force_wake_put;
2495 	}
2496 
2497 	cnss_pr_dbg("Read register offset 0x%x, val = 0x%x\n",
2498 		    WLAON_QFPROM_PWR_CTRL_REG, val);
2499 
2500 	if (set_vddd4blow)
2501 		val |= QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;
2502 	else
2503 		val &= ~QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;
2504 
2505 	if (set_shutdown)
2506 		val |= QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;
2507 	else
2508 		val &= ~QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;
2509 
2510 	ret = cnss_pci_reg_write(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, val);
2511 	if (ret) {
2512 		cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
2513 			    WLAON_QFPROM_PWR_CTRL_REG, ret);
2514 		goto force_wake_put;
2515 	}
2516 
2517 	cnss_pr_dbg("Write val 0x%x to register offset 0x%x\n", val,
2518 		    WLAON_QFPROM_PWR_CTRL_REG);
2519 
2520 	if (set_shutdown)
2521 		usleep_range(WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US,
2522 			     WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US);
2523 
2524 force_wake_put:
2525 	if (do_force_wake)
2526 		cnss_pci_force_wake_put(pci_priv);
2527 }
2528 
2529 static int cnss_pci_get_device_timestamp(struct cnss_pci_data *pci_priv,
2530 					 u64 *time_us)
2531 {
2532 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2533 	u32 low, high;
2534 	u64 device_ticks;
2535 
2536 	if (!plat_priv->device_freq_hz) {
2537 		cnss_pr_err("Device time clock frequency is not valid\n");
2538 		return -EINVAL;
2539 	}
2540 
2541 	switch (pci_priv->device_id) {
2542 	case KIWI_DEVICE_ID:
2543 	case MANGO_DEVICE_ID:
2544 	case PEACH_DEVICE_ID:
2545 		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_LOW, &low);
2546 		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_HIGH, &high);
2547 		break;
2548 	default:
2549 		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL3, &low);
2550 		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL4, &high);
2551 		break;
2552 	}
2553 
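	/* Convert device ticks to microseconds:
	 * time_us = ticks * 10^6 / freq_hz. It is computed as
	 * ticks / (freq_hz / 100000) * 10 because do_div() takes a
	 * 32-bit divisor; the result granularity is therefore 10 us.
	 */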
2554 	device_ticks = (u64)high << 32 | low;
2555 	do_div(device_ticks, plat_priv->device_freq_hz / 100000);
2556 	*time_us = device_ticks * 10;
2557 
2558 	return 0;
2559 }
2560 
2561 static void cnss_pci_enable_time_sync_counter(struct cnss_pci_data *pci_priv)
2562 {
2563 	switch (pci_priv->device_id) {
2564 	case KIWI_DEVICE_ID:
2565 	case MANGO_DEVICE_ID:
2566 	case PEACH_DEVICE_ID:
2567 		return;
2568 	default:
2569 		break;
2570 	}
2571 
2572 	cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
2573 			   TIME_SYNC_ENABLE);
2574 }
2575 
2576 static void cnss_pci_clear_time_sync_counter(struct cnss_pci_data *pci_priv)
2577 {
2578 	switch (pci_priv->device_id) {
2579 	case KIWI_DEVICE_ID:
2580 	case MANGO_DEVICE_ID:
2581 	case PEACH_DEVICE_ID:
2582 		return;
2583 	default:
2584 		break;
2585 	}
2586 
2587 	cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
2588 			   TIME_SYNC_CLEAR);
2589 }
2590 
2591 
2592 static void cnss_pci_time_sync_reg_update(struct cnss_pci_data *pci_priv,
2593 					  u32 low, u32 high)
2594 {
2595 	u32 time_reg_low;
2596 	u32 time_reg_high;
2597 
2598 	switch (pci_priv->device_id) {
2599 	case KIWI_DEVICE_ID:
2600 	case MANGO_DEVICE_ID:
2601 	case PEACH_DEVICE_ID:
2602 		/* Use the next two shadow registers after host's usage */
2603 		time_reg_low = PCIE_SHADOW_REG_VALUE_0 +
2604 				(pci_priv->plat_priv->num_shadow_regs_v3 *
2605 				 SHADOW_REG_LEN_BYTES);
2606 		time_reg_high = time_reg_low + SHADOW_REG_LEN_BYTES;
2607 		break;
2608 	default:
2609 		time_reg_low = PCIE_SHADOW_REG_VALUE_34;
2610 		time_reg_high = PCIE_SHADOW_REG_VALUE_35;
2611 		break;
2612 	}
2613 
2614 	cnss_pci_reg_write(pci_priv, time_reg_low, low);
2615 	cnss_pci_reg_write(pci_priv, time_reg_high, high);
2616 
2617 	cnss_pci_reg_read(pci_priv, time_reg_low, &low);
2618 	cnss_pci_reg_read(pci_priv, time_reg_high, &high);
2619 
2620 	cnss_pr_dbg("Updated time sync regs [0x%x] = 0x%x, [0x%x] = 0x%x\n",
2621 		    time_reg_low, low, time_reg_high, high);
2622 }
2623 
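/* Compute the host/device time offset and publish it via shadow
 * registers: prevent L1 and hold force-wake so the device clock can be
 * sampled, snapshot both clocks under time_sync_lock, then write the
 * offset (host - device, in us) as a low/high 32-bit register pair.
 */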
2624 static int cnss_pci_update_timestamp(struct cnss_pci_data *pci_priv)
2625 {
2626 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2627 	struct device *dev = &pci_priv->pci_dev->dev;
2628 	unsigned long flags = 0;
2629 	u64 host_time_us, device_time_us, offset;
2630 	u32 low, high;
2631 	int ret;
2632 
2633 	ret = cnss_pci_prevent_l1(dev);
2634 	if (ret)
2635 		goto out;
2636 
2637 	ret = cnss_pci_force_wake_get(pci_priv);
2638 	if (ret)
2639 		goto allow_l1;
2640 
2641 	spin_lock_irqsave(&time_sync_lock, flags);
2642 	cnss_pci_clear_time_sync_counter(pci_priv);
2643 	cnss_pci_enable_time_sync_counter(pci_priv);
2644 	host_time_us = cnss_get_host_timestamp(plat_priv);
2645 	ret = cnss_pci_get_device_timestamp(pci_priv, &device_time_us);
2646 	cnss_pci_clear_time_sync_counter(pci_priv);
2647 	spin_unlock_irqrestore(&time_sync_lock, flags);
2648 	if (ret)
2649 		goto force_wake_put;
2650 
2651 	if (host_time_us < device_time_us) {
2652 		cnss_pr_err("Host time (%llu us) is smaller than device time (%llu us), stop\n",
2653 			    host_time_us, device_time_us);
2654 		ret = -EINVAL;
2655 		goto force_wake_put;
2656 	}
2657 
2658 	offset = host_time_us - device_time_us;
2659 	cnss_pr_dbg("Host time = %llu us, device time = %llu us, offset = %llu us\n",
2660 		    host_time_us, device_time_us, offset);
2661 
2662 	low = offset & 0xFFFFFFFF;
2663 	high = offset >> 32;
2664 
2665 	cnss_pci_time_sync_reg_update(pci_priv, low, high);
2666 
2667 force_wake_put:
2668 	cnss_pci_force_wake_put(pci_priv);
2669 allow_l1:
2670 	cnss_pci_allow_l1(dev);
2671 out:
2672 	return ret;
2673 }
2674 
2675 static void cnss_pci_time_sync_work_hdlr(struct work_struct *work)
2676 {
2677 	struct cnss_pci_data *pci_priv =
2678 		container_of(work, struct cnss_pci_data, time_sync_work.work);
2679 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2680 	unsigned int time_sync_period_ms =
2681 		plat_priv->ctrl_params.time_sync_period;
2682 
2683 	if (test_bit(DISABLE_TIME_SYNC, &plat_priv->ctrl_params.quirks)) {
2684 		cnss_pr_dbg("Time sync is disabled\n");
2685 		return;
2686 	}
2687 
2688 	if (!time_sync_period_ms) {
2689 		cnss_pr_dbg("Skip time sync as time period is 0\n");
2690 		return;
2691 	}
2692 
2693 	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
2694 		return;
2695 
2696 	if (cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS) < 0)
2697 		goto runtime_pm_put;
2698 
2699 	mutex_lock(&pci_priv->bus_lock);
2700 	cnss_pci_update_timestamp(pci_priv);
2701 	mutex_unlock(&pci_priv->bus_lock);
2702 	schedule_delayed_work(&pci_priv->time_sync_work,
2703 			      msecs_to_jiffies(time_sync_period_ms));
2704 
2705 runtime_pm_put:
2706 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
2707 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
2708 }
2709 
2710 static int cnss_pci_start_time_sync_update(struct cnss_pci_data *pci_priv)
2711 {
2712 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2713 
2714 	switch (pci_priv->device_id) {
2715 	case QCA6390_DEVICE_ID:
2716 	case QCA6490_DEVICE_ID:
2717 	case KIWI_DEVICE_ID:
2718 	case MANGO_DEVICE_ID:
2719 	case PEACH_DEVICE_ID:
2720 		break;
2721 	default:
2722 		return -EOPNOTSUPP;
2723 	}
2724 
2725 	if (!plat_priv->device_freq_hz) {
2726 		cnss_pr_dbg("Device time clock frequency is not valid, skip time sync\n");
2727 		return -EINVAL;
2728 	}
2729 
2730 	cnss_pci_time_sync_work_hdlr(&pci_priv->time_sync_work.work);
2731 
2732 	return 0;
2733 }
2734 
2735 static void cnss_pci_stop_time_sync_update(struct cnss_pci_data *pci_priv)
2736 {
2737 	switch (pci_priv->device_id) {
2738 	case QCA6390_DEVICE_ID:
2739 	case QCA6490_DEVICE_ID:
2740 	case KIWI_DEVICE_ID:
2741 	case MANGO_DEVICE_ID:
2742 	case PEACH_DEVICE_ID:
2743 		break;
2744 	default:
2745 		return;
2746 	}
2747 
2748 	cancel_delayed_work_sync(&pci_priv->time_sync_work);
2749 }
2750 
2751 int cnss_pci_set_therm_cdev_state(struct cnss_pci_data *pci_priv,
2752 				  unsigned long thermal_state,
2753 				  int tcdev_id)
2754 {
2755 	if (!pci_priv) {
2756 		cnss_pr_err("pci_priv is NULL!\n");
2757 		return -ENODEV;
2758 	}
2759 
2760 	if (!pci_priv->driver_ops || !pci_priv->driver_ops->set_therm_cdev_state) {
2761 		cnss_pr_err("driver_ops or set_therm_cdev_state is NULL\n");
2762 		return -EINVAL;
2763 	}
2764 
2765 	return pci_priv->driver_ops->set_therm_cdev_state(pci_priv->pci_dev,
2766 							 thermal_state,
2767 							 tcdev_id);
2768 }
2769 
2770 int cnss_pci_update_time_sync_period(struct cnss_pci_data *pci_priv,
2771 				     unsigned int time_sync_period)
2772 {
2773 	struct cnss_plat_data *plat_priv;
2774 
2775 	if (!pci_priv)
2776 		return -ENODEV;
2777 
2778 	plat_priv = pci_priv->plat_priv;
2779 
2780 	cnss_pci_stop_time_sync_update(pci_priv);
2781 	plat_priv->ctrl_params.time_sync_period = time_sync_period;
2782 	cnss_pci_start_time_sync_update(pci_priv);
2783 	cnss_pr_dbg("WLAN time sync period %u ms\n",
2784 		    plat_priv->ctrl_params.time_sync_period);
2785 
2786 	return 0;
2787 }
2788 
2789 int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
2790 {
2791 	int ret = 0;
2792 	struct cnss_plat_data *plat_priv;
2793 
2794 	if (!pci_priv)
2795 		return -ENODEV;
2796 
2797 	plat_priv = pci_priv->plat_priv;
2798 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
2799 		cnss_pr_err("Reboot is in progress, skip driver probe\n");
2800 		return -EINVAL;
2801 	}
2802 
2803 	if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
2804 		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2805 		cnss_pr_dbg("Skip driver probe\n");
2806 		goto out;
2807 	}
2808 
2809 	if (!pci_priv->driver_ops) {
2810 		cnss_pr_err("driver_ops is NULL\n");
2811 		ret = -EINVAL;
2812 		goto out;
2813 	}
2814 
2815 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
2816 	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
2817 		ret = pci_priv->driver_ops->reinit(pci_priv->pci_dev,
2818 						   pci_priv->pci_device_id);
2819 		if (ret) {
2820 			cnss_pr_err("Failed to reinit host driver, err = %d\n",
2821 				    ret);
2822 			goto out;
2823 		}
2824 		complete(&plat_priv->recovery_complete);
2825 	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
2826 		ret = pci_priv->driver_ops->probe(pci_priv->pci_dev,
2827 						  pci_priv->pci_device_id);
2828 		if (ret) {
2829 			cnss_pr_err("Failed to probe host driver, err = %d\n",
2830 				    ret);
2831 			complete_all(&plat_priv->power_up_complete);
2832 			goto out;
2833 		}
2834 		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
2835 		set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
2836 		cnss_pci_free_blob_mem(pci_priv);
2837 		complete_all(&plat_priv->power_up_complete);
2838 	} else if (test_bit(CNSS_DRIVER_IDLE_RESTART,
2839 			    &plat_priv->driver_state)) {
2840 		ret = pci_priv->driver_ops->idle_restart(pci_priv->pci_dev,
2841 			pci_priv->pci_device_id);
2842 		if (ret) {
2843 			cnss_pr_err("Failed to idle restart host driver, err = %d\n",
2844 				    ret);
2845 			plat_priv->power_up_error = ret;
2846 			complete_all(&plat_priv->power_up_complete);
2847 			goto out;
2848 		}
2849 		clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
2850 		complete_all(&plat_priv->power_up_complete);
2851 	} else {
2852 		complete(&plat_priv->power_up_complete);
2853 	}
2854 
2855 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
2856 		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2857 		__pm_relax(plat_priv->recovery_ws);
2858 	}
2859 
2860 	cnss_pci_start_time_sync_update(pci_priv);
2861 
2862 	return 0;
2863 
2864 out:
2865 	return ret;
2866 }
2867 
2868 int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
2869 {
2870 	struct cnss_plat_data *plat_priv;
2871 	int ret;
2872 
2873 	if (!pci_priv)
2874 		return -ENODEV;
2875 
2876 	plat_priv = pci_priv->plat_priv;
2877 
2878 	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) ||
2879 	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
2880 	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
2881 		cnss_pr_dbg("Skip driver remove\n");
2882 		return 0;
2883 	}
2884 
2885 	if (!pci_priv->driver_ops) {
2886 		cnss_pr_err("driver_ops is NULL\n");
2887 		return -EINVAL;
2888 	}
2889 
2890 	cnss_pci_stop_time_sync_update(pci_priv);
2891 
2892 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
2893 	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
2894 		pci_priv->driver_ops->shutdown(pci_priv->pci_dev);
2895 	} else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
2896 		pci_priv->driver_ops->remove(pci_priv->pci_dev);
2897 		clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
2898 	} else if (test_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
2899 			    &plat_priv->driver_state)) {
2900 		ret = pci_priv->driver_ops->idle_shutdown(pci_priv->pci_dev);
2901 		if (ret == -EAGAIN) {
2902 			clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
2903 				  &plat_priv->driver_state);
2904 			return ret;
2905 		}
2906 	}
2907 
2908 	plat_priv->get_info_cb_ctx = NULL;
2909 	plat_priv->get_info_cb = NULL;
2910 
2911 	return 0;
2912 }
2913 
2914 int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv,
2915 				      int modem_current_status)
2916 {
2917 	struct cnss_wlan_driver *driver_ops;
2918 
2919 	if (!pci_priv)
2920 		return -ENODEV;
2921 
2922 	driver_ops = pci_priv->driver_ops;
2923 	if (!driver_ops || !driver_ops->modem_status)
2924 		return -EINVAL;
2925 
2926 	driver_ops->modem_status(pci_priv->pci_dev, modem_current_status);
2927 
2928 	return 0;
2929 }
2930 
2931 int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
2932 			   enum cnss_driver_status status)
2933 {
2934 	struct cnss_wlan_driver *driver_ops;
2935 
2936 	if (!pci_priv)
2937 		return -ENODEV;
2938 
2939 	driver_ops = pci_priv->driver_ops;
2940 	if (!driver_ops || !driver_ops->update_status)
2941 		return -EINVAL;
2942 
2943 	cnss_pr_dbg("Update driver status: %d\n", status);
2944 
2945 	driver_ops->update_status(pci_priv->pci_dev, status);
2946 
2947 	return 0;
2948 }
2949 
2950 static void cnss_pci_misc_reg_dump(struct cnss_pci_data *pci_priv,
2951 				   struct cnss_misc_reg *misc_reg,
2952 				   u32 misc_reg_size,
2953 				   char *reg_name)
2954 {
2955 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2956 	bool do_force_wake_put = true;
2957 	int i;
2958 
2959 	if (!misc_reg)
2960 		return;
2961 
2962 	if (in_interrupt() || irqs_disabled())
2963 		return;
2964 
2965 	if (cnss_pci_check_link_status(pci_priv))
2966 		return;
2967 
2968 	if (cnss_pci_force_wake_get(pci_priv)) {
2969 		/* Continue to dump when device has entered RDDM already */
2970 		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
2971 			return;
2972 		do_force_wake_put = false;
2973 	}
2974 
2975 	cnss_pr_dbg("Start to dump %s registers\n", reg_name);
2976 
2977 	for (i = 0; i < misc_reg_size; i++) {
2978 		if (!test_bit(pci_priv->misc_reg_dev_mask,
2979 			      &misc_reg[i].dev_mask))
2980 			continue;
2981 
2982 		if (misc_reg[i].wr) {
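			/* Special case: for the QDSP6SS SAW2 CFG write,
			 * OR the CFG mask into the value read back by
			 * the previous table entry so the register is
			 * updated rather than blindly overwritten.
			 */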
2983 			if (misc_reg[i].offset ==
2984 			    QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG &&
2985 			    i >= 1)
2986 				misc_reg[i].val =
2987 				QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG_MSK |
2988 				misc_reg[i - 1].val;
2989 			if (cnss_pci_reg_write(pci_priv,
2990 					       misc_reg[i].offset,
2991 					       misc_reg[i].val))
2992 				goto force_wake_put;
2993 			cnss_pr_vdbg("Write 0x%X to 0x%X\n",
2994 				     misc_reg[i].val,
2995 				     misc_reg[i].offset);
2996 
2997 		} else {
2998 			if (cnss_pci_reg_read(pci_priv,
2999 					      misc_reg[i].offset,
3000 					      &misc_reg[i].val))
3001 				goto force_wake_put;
3002 		}
3003 	}
3004 
3005 force_wake_put:
3006 	if (do_force_wake_put)
3007 		cnss_pci_force_wake_put(pci_priv);
3008 }
3009 
3010 static void cnss_pci_dump_misc_reg(struct cnss_pci_data *pci_priv)
3011 {
3012 	if (in_interrupt() || irqs_disabled())
3013 		return;
3014 
3015 	if (cnss_pci_check_link_status(pci_priv))
3016 		return;
3017 
3018 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wcss_reg,
3019 			       WCSS_REG_SIZE, "wcss");
3020 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->pcie_reg,
3021 			       PCIE_REG_SIZE, "pcie");
3022 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wlaon_reg,
3023 			       WLAON_REG_SIZE, "wlaon");
3024 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->syspm_reg,
3025 			       SYSPM_REG_SIZE, "syspm");
3026 }
3027 
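/* Snapshot all PCIe shadow registers (plus the "inter" set) into a
 * lazily allocated debug buffer so they can be inspected after a
 * failure. Skipped in atomic context or when the link is down.
 */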
3028 static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv)
3029 {
3030 	int i, j = 0, array_size = SHADOW_REG_COUNT + SHADOW_REG_INTER_COUNT;
3031 	u32 reg_offset;
3032 	bool do_force_wake_put = true;
3033 
3034 	if (in_interrupt() || irqs_disabled())
3035 		return;
3036 
3037 	if (cnss_pci_check_link_status(pci_priv))
3038 		return;
3039 
3040 	if (!pci_priv->debug_reg) {
3041 		pci_priv->debug_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
3042 						   sizeof(*pci_priv->debug_reg)
3043 						   * array_size, GFP_KERNEL);
3044 		if (!pci_priv->debug_reg)
3045 			return;
3046 	}
3047 
3048 	if (cnss_pci_force_wake_get(pci_priv))
3049 		do_force_wake_put = false;
3050 
3051 	cnss_pr_dbg("Start to dump shadow registers\n");
3052 
3053 	for (i = 0; i < SHADOW_REG_COUNT; i++, j++) {
3054 		reg_offset = PCIE_SHADOW_REG_VALUE_0 + i * 4;
3055 		pci_priv->debug_reg[j].offset = reg_offset;
3056 		if (cnss_pci_reg_read(pci_priv, reg_offset,
3057 				      &pci_priv->debug_reg[j].val))
3058 			goto force_wake_put;
3059 	}
3060 
3061 	for (i = 0; i < SHADOW_REG_INTER_COUNT; i++, j++) {
3062 		reg_offset = PCIE_SHADOW_REG_INTER_0 + i * 4;
3063 		pci_priv->debug_reg[j].offset = reg_offset;
3064 		if (cnss_pci_reg_read(pci_priv, reg_offset,
3065 				      &pci_priv->debug_reg[j].val))
3066 			goto force_wake_put;
3067 	}
3068 
3069 force_wake_put:
3070 	if (do_force_wake_put)
3071 		cnss_pci_force_wake_put(pci_priv);
3072 }
3073 
3074 static int cnss_qca6174_powerup(struct cnss_pci_data *pci_priv)
3075 {
3076 	int ret = 0;
3077 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3078 
3079 	ret = cnss_power_on_device(plat_priv, false);
3080 	if (ret) {
3081 		cnss_pr_err("Failed to power on device, err = %d\n", ret);
3082 		goto out;
3083 	}
3084 
3085 	ret = cnss_resume_pci_link(pci_priv);
3086 	if (ret) {
3087 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
3088 		goto power_off;
3089 	}
3090 
3091 	ret = cnss_pci_call_driver_probe(pci_priv);
3092 	if (ret)
3093 		goto suspend_link;
3094 
3095 	return 0;
3096 suspend_link:
3097 	cnss_suspend_pci_link(pci_priv);
3098 power_off:
3099 	cnss_power_off_device(plat_priv);
3100 out:
3101 	return ret;
3102 }
3103 
3104 static int cnss_qca6174_shutdown(struct cnss_pci_data *pci_priv)
3105 {
3106 	int ret = 0;
3107 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3108 
3109 	cnss_pci_pm_runtime_resume(pci_priv);
3110 
3111 	ret = cnss_pci_call_driver_remove(pci_priv);
3112 	if (ret == -EAGAIN)
3113 		goto out;
3114 
3115 	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
3116 				   CNSS_BUS_WIDTH_NONE);
3117 	cnss_pci_set_monitor_wake_intr(pci_priv, false);
3118 	cnss_pci_set_auto_suspended(pci_priv, 0);
3119 
3120 	ret = cnss_suspend_pci_link(pci_priv);
3121 	if (ret)
3122 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
3123 
3124 	cnss_power_off_device(plat_priv);
3125 
3126 	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
3127 	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);
3128 
3129 out:
3130 	return ret;
3131 }
3132 
3133 static void cnss_qca6174_crash_shutdown(struct cnss_pci_data *pci_priv)
3134 {
3135 	if (pci_priv->driver_ops && pci_priv->driver_ops->crash_shutdown)
3136 		pci_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
3137 }
3138 
3139 static int cnss_qca6174_ramdump(struct cnss_pci_data *pci_priv)
3140 {
3141 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3142 	struct cnss_ramdump_info *ramdump_info;
3143 
3144 	ramdump_info = &plat_priv->ramdump_info;
3145 	if (!ramdump_info->ramdump_size)
3146 		return -EINVAL;
3147 
3148 	return cnss_do_ramdump(plat_priv);
3149 }
3150 
3151 static void cnss_get_driver_mode_update_fw_name(struct cnss_plat_data *plat_priv)
3152 {
3153 	struct cnss_pci_data *pci_priv;
3154 	struct cnss_wlan_driver *driver_ops;
3155 
3156 	pci_priv = plat_priv->bus_priv;
3157 	driver_ops = pci_priv->driver_ops;
3158 
3159 	if (driver_ops && driver_ops->get_driver_mode) {
3160 		plat_priv->driver_mode = driver_ops->get_driver_mode();
3161 		cnss_pci_update_fw_name(pci_priv);
3162 		cnss_pr_dbg("New driver mode is %d\n", plat_priv->driver_mode);
3163 	}
3164 }
3165 
3166 static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
3167 {
3168 	int ret = 0;
3169 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3170 	unsigned int timeout;
3171 	int retry = 0, bt_en_gpio = plat_priv->pinctrl_info.bt_en_gpio;
3172 	int sw_ctrl_gpio = plat_priv->pinctrl_info.sw_ctrl_gpio;
3173 
3174 	if (plat_priv->ramdump_info_v2.dump_data_valid) {
3175 		cnss_pci_clear_dump_info(pci_priv);
3176 		cnss_pci_power_off_mhi(pci_priv);
3177 		cnss_suspend_pci_link(pci_priv);
3178 		cnss_pci_deinit_mhi(pci_priv);
3179 		cnss_power_off_device(plat_priv);
3180 	}
3181 
3182 	/* Clear QMI send usage count during every power up */
3183 	pci_priv->qmi_send_usage_count = 0;
3184 
3185 	plat_priv->power_up_error = 0;
3186 
3187 	cnss_get_driver_mode_update_fw_name(plat_priv);
3188 retry:
3189 	ret = cnss_power_on_device(plat_priv, false);
3190 	if (ret) {
3191 		cnss_pr_err("Failed to power on device, err = %d\n", ret);
3192 		goto out;
3193 	}
3194 
3195 	ret = cnss_resume_pci_link(pci_priv);
3196 	if (ret) {
3197 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
3198 		cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
3199 			    cnss_get_input_gpio_value(plat_priv, sw_ctrl_gpio));
3200 		if (test_bit(IGNORE_PCI_LINK_FAILURE,
3201 			     &plat_priv->ctrl_params.quirks)) {
3202 			cnss_pr_dbg("Ignore PCI link resume failure\n");
3203 			ret = 0;
3204 			goto out;
3205 		}
3206 		if (ret == -EAGAIN && retry++ < POWER_ON_RETRY_MAX_TIMES) {
3207 			cnss_power_off_device(plat_priv);
3208 			/* Force toggle BT_EN GPIO low */
3209 			if (retry == POWER_ON_RETRY_MAX_TIMES) {
3210 				cnss_pr_dbg("Retry #%d. Set BT_EN GPIO(%u) low\n",
3211 					    retry, bt_en_gpio);
3212 				if (bt_en_gpio >= 0)
3213 					gpio_direction_output(bt_en_gpio, 0);
3214 				cnss_pr_dbg("BT_EN GPIO val: %d\n",
3215 					    gpio_get_value(bt_en_gpio));
3216 			}
3217 			cnss_pr_dbg("Retry to resume PCI link #%d\n", retry);
3218 			cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
3219 				    cnss_get_input_gpio_value(plat_priv,
3220 							      sw_ctrl_gpio));
3221 			msleep(POWER_ON_RETRY_DELAY_MS * retry);
3222 			goto retry;
3223 		}
3224 		/* Assert when it reaches maximum retries */
3225 		CNSS_ASSERT(0);
3226 		goto power_off;
3227 	}
3228 
3229 	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false, false);
3230 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI);
3231 
3232 	ret = cnss_pci_start_mhi(pci_priv);
3233 	if (ret) {
3234 		cnss_fatal_err("Failed to start MHI, err = %d\n", ret);
3235 		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
3236 		    !pci_priv->pci_link_down_ind && timeout) {
3237 			/* Start recovery directly for MHI start failures */
3238 			cnss_schedule_recovery(&pci_priv->pci_dev->dev,
3239 					       CNSS_REASON_DEFAULT);
3240 		}
3241 		return 0;
3242 	}
3243 
3244 	if (test_bit(USE_CORE_ONLY_FW, &plat_priv->ctrl_params.quirks)) {
3245 		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
3246 		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
3247 		return 0;
3248 	}
3249 
3250 	cnss_set_pin_connect_status(plat_priv);
3251 
3252 	if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks)) {
3253 		ret = cnss_pci_call_driver_probe(pci_priv);
3254 		if (ret)
3255 			goto stop_mhi;
3256 	} else if (timeout) {
3257 		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state))
3258 			timeout += WLAN_COLD_BOOT_CAL_TIMEOUT;
3259 		else
3260 			timeout += WLAN_MISSION_MODE_TIMEOUT;
3261 		mod_timer(&plat_priv->fw_boot_timer,
3262 			  jiffies + msecs_to_jiffies(timeout));
3263 	}
3264 
3265 	return 0;
3266 
3267 stop_mhi:
3268 	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, true);
3269 	cnss_pci_power_off_mhi(pci_priv);
3270 	cnss_suspend_pci_link(pci_priv);
3271 	cnss_pci_deinit_mhi(pci_priv);
3272 power_off:
3273 	cnss_power_off_device(plat_priv);
3274 out:
3275 	return ret;
3276 }
3277 
3278 static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv)
3279 {
3280 	int ret = 0;
3281 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3282 	int do_force_wake = true;
3283 
3284 	cnss_pci_pm_runtime_resume(pci_priv);
3285 
3286 	ret = cnss_pci_call_driver_remove(pci_priv);
3287 	if (ret == -EAGAIN)
3288 		goto out;
3289 
3290 	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
3291 				   CNSS_BUS_WIDTH_NONE);
3292 	cnss_pci_set_monitor_wake_intr(pci_priv, false);
3293 	cnss_pci_set_auto_suspended(pci_priv, 0);
3294 
3295 	if ((test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
3296 	     test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
3297 	     test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) ||
3298 	     test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state) ||
3299 	     test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) &&
3300 	    test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
3301 		del_timer(&pci_priv->dev_rddm_timer);
3302 		cnss_pci_collect_dump_info(pci_priv, false);
3303 
3304 		if (!plat_priv->recovery_enabled)
3305 			CNSS_ASSERT(0);
3306 	}
3307 
3308 	if (!cnss_is_device_powered_on(plat_priv)) {
3309 		cnss_pr_dbg("Device is already powered off, ignore\n");
3310 		goto skip_power_off;
3311 	}
3312 
3313 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
3314 		do_force_wake = false;
3315 	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, do_force_wake);
3316 
3317 	/* The FBC image is freed when MHI is powered off, so skip
3318 	 * power-off if RAM dump data is still valid.
3319 	 */
3320 	if (plat_priv->ramdump_info_v2.dump_data_valid)
3321 		goto skip_power_off;
3322 
3323 	cnss_pci_power_off_mhi(pci_priv);
3324 	ret = cnss_suspend_pci_link(pci_priv);
3325 	if (ret)
3326 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
3327 	cnss_pci_deinit_mhi(pci_priv);
3328 	cnss_power_off_device(plat_priv);
3329 
3330 skip_power_off:
3331 	pci_priv->remap_window = 0;
3332 
3333 	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
3334 	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
3335 	if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
3336 	    test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
3337 		clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
3338 		pci_priv->pci_link_down_ind = false;
3339 	}
3340 	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
3341 	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);
3342 	memset(&print_optimize, 0, sizeof(print_optimize));
3343 
3344 out:
3345 	return ret;
3346 }
3347 
3348 static void cnss_qca6290_crash_shutdown(struct cnss_pci_data *pci_priv)
3349 {
3350 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3351 
3352 	set_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
3353 	cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
3354 		    plat_priv->driver_state);
3355 
3356 	cnss_pci_collect_dump_info(pci_priv, true);
3357 	clear_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
3358 }
3359 
3360 static int cnss_qca6290_ramdump(struct cnss_pci_data *pci_priv)
3361 {
3362 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3363 	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
3364 	struct cnss_dump_data *dump_data = &info_v2->dump_data;
3365 	struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
3366 	int ret = 0;
3367 
3368 	if (!info_v2->dump_data_valid || !dump_seg ||
3369 	    dump_data->nentries == 0)
3370 		return 0;
3371 
3372 	ret = cnss_do_elf_ramdump(plat_priv);
3373 
3374 	cnss_pci_clear_dump_info(pci_priv);
3375 	cnss_pci_power_off_mhi(pci_priv);
3376 	cnss_suspend_pci_link(pci_priv);
3377 	cnss_pci_deinit_mhi(pci_priv);
3378 	cnss_power_off_device(plat_priv);
3379 
3380 	return ret;
3381 }
3382 
3383 int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv)
3384 {
3385 	int ret = 0;
3386 
3387 	if (!pci_priv) {
3388 		cnss_pr_err("pci_priv is NULL\n");
3389 		return -ENODEV;
3390 	}
3391 
3392 	switch (pci_priv->device_id) {
3393 	case QCA6174_DEVICE_ID:
3394 		ret = cnss_qca6174_powerup(pci_priv);
3395 		break;
3396 	case QCA6290_DEVICE_ID:
3397 	case QCA6390_DEVICE_ID:
3398 	case QCN7605_DEVICE_ID:
3399 	case QCA6490_DEVICE_ID:
3400 	case KIWI_DEVICE_ID:
3401 	case MANGO_DEVICE_ID:
3402 	case PEACH_DEVICE_ID:
3403 		ret = cnss_qca6290_powerup(pci_priv);
3404 		break;
3405 	default:
3406 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3407 			    pci_priv->device_id);
3408 		ret = -ENODEV;
3409 	}
3410 
3411 	return ret;
3412 }
3413 
3414 int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv)
3415 {
3416 	int ret = 0;
3417 
3418 	if (!pci_priv) {
3419 		cnss_pr_err("pci_priv is NULL\n");
3420 		return -ENODEV;
3421 	}
3422 
3423 	switch (pci_priv->device_id) {
3424 	case QCA6174_DEVICE_ID:
3425 		ret = cnss_qca6174_shutdown(pci_priv);
3426 		break;
3427 	case QCA6290_DEVICE_ID:
3428 	case QCA6390_DEVICE_ID:
3429 	case QCN7605_DEVICE_ID:
3430 	case QCA6490_DEVICE_ID:
3431 	case KIWI_DEVICE_ID:
3432 	case MANGO_DEVICE_ID:
3433 	case PEACH_DEVICE_ID:
3434 		ret = cnss_qca6290_shutdown(pci_priv);
3435 		break;
3436 	default:
3437 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3438 			    pci_priv->device_id);
3439 		ret = -ENODEV;
3440 	}
3441 
3442 	return ret;
3443 }
3444 
3445 int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv)
3446 {
3447 	int ret = 0;
3448 
3449 	if (!pci_priv) {
3450 		cnss_pr_err("pci_priv is NULL\n");
3451 		return -ENODEV;
3452 	}
3453 
3454 	switch (pci_priv->device_id) {
3455 	case QCA6174_DEVICE_ID:
3456 		cnss_qca6174_crash_shutdown(pci_priv);
3457 		break;
3458 	case QCA6290_DEVICE_ID:
3459 	case QCA6390_DEVICE_ID:
3460 	case QCN7605_DEVICE_ID:
3461 	case QCA6490_DEVICE_ID:
3462 	case KIWI_DEVICE_ID:
3463 	case MANGO_DEVICE_ID:
3464 	case PEACH_DEVICE_ID:
3465 		cnss_qca6290_crash_shutdown(pci_priv);
3466 		break;
3467 	default:
3468 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3469 			    pci_priv->device_id);
3470 		ret = -ENODEV;
3471 	}
3472 
3473 	return ret;
3474 }
3475 
3476 int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv)
3477 {
3478 	int ret = 0;
3479 
3480 	if (!pci_priv) {
3481 		cnss_pr_err("pci_priv is NULL\n");
3482 		return -ENODEV;
3483 	}
3484 
3485 	switch (pci_priv->device_id) {
3486 	case QCA6174_DEVICE_ID:
3487 		ret = cnss_qca6174_ramdump(pci_priv);
3488 		break;
3489 	case QCA6290_DEVICE_ID:
3490 	case QCA6390_DEVICE_ID:
3491 	case QCN7605_DEVICE_ID:
3492 	case QCA6490_DEVICE_ID:
3493 	case KIWI_DEVICE_ID:
3494 	case MANGO_DEVICE_ID:
3495 	case PEACH_DEVICE_ID:
3496 		ret = cnss_qca6290_ramdump(pci_priv);
3497 		break;
3498 	default:
3499 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3500 			    pci_priv->device_id);
3501 		ret = -ENODEV;
3502 	}
3503 
3504 	return ret;
3505 }
3506 
3507 int cnss_pci_is_drv_connected(struct device *dev)
3508 {
3509 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
3510 
3511 	if (!pci_priv)
3512 		return -ENODEV;
3513 
3514 	return pci_priv->drv_connected_last;
3515 }
3516 EXPORT_SYMBOL(cnss_pci_is_drv_connected);
3517 
3518 static void cnss_wlan_reg_driver_work(struct work_struct *work)
3519 {
3520 	struct cnss_plat_data *plat_priv =
3521 	container_of(work, struct cnss_plat_data, wlan_reg_driver_work.work);
3522 	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
3523 	struct cnss_cal_info *cal_info;
3524 	unsigned int timeout;
3525 
3526 	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state))
3527 		return;
3528 
3529 	if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) {
3530 		goto reg_driver;
3531 	} else {
3532 		if (plat_priv->charger_mode) {
3533 			cnss_pr_err("Ignore calibration timeout in charger mode\n");
3534 			return;
3535 		}
3536 		if (!test_bit(CNSS_IN_COLD_BOOT_CAL,
3537 			      &plat_priv->driver_state)) {
3538 			timeout = cnss_get_timeout(plat_priv,
3539 						   CNSS_TIMEOUT_CALIBRATION);
3540 			cnss_pr_dbg("File system not ready to start calibration. Wait for %ds...\n",
3541 				    timeout / 1000);
3542 			schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
3543 					      msecs_to_jiffies(timeout));
3544 			return;
3545 		}
3546 
3547 		del_timer(&plat_priv->fw_boot_timer);
3548 		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) &&
3549 		    !test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3550 			cnss_pr_err("Timeout waiting for calibration to complete\n");
3551 			CNSS_ASSERT(0);
3552 		}
3553 		cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
3554 		if (!cal_info)
3555 			return;
3556 		cal_info->cal_status = CNSS_CAL_TIMEOUT;
3557 		cnss_driver_event_post(plat_priv,
3558 				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
3559 				       0, cal_info);
3560 	}
3561 reg_driver:
3562 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3563 		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
3564 		return;
3565 	}
3566 	reinit_completion(&plat_priv->power_up_complete);
3567 	cnss_driver_event_post(plat_priv,
3568 			       CNSS_DRIVER_EVENT_REGISTER_DRIVER,
3569 			       CNSS_EVENT_SYNC_UNKILLABLE,
3570 			       pci_priv->driver_ops);
3571 }
3572 
3573 int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
3574 {
3575 	int ret = 0;
3576 	struct cnss_plat_data *plat_priv;
3577 	struct cnss_pci_data *pci_priv;
3578 	const struct pci_device_id *id_table = driver_ops->id_table;
3579 	unsigned int timeout;
3580 
3581 	if (!cnss_check_driver_loading_allowed()) {
3582 		cnss_pr_info("No cnss2 dtsi entry present\n");
3583 		return -ENODEV;
3584 	}
3585 
3586 	plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops);
3587 
3588 	if (!plat_priv) {
3589 		cnss_pr_buf("plat_priv is not ready for register driver\n");
3590 		return -EAGAIN;
3591 	}
3592 
3593 	pci_priv = plat_priv->bus_priv;
3594 	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state)) {
3595 		while (id_table && id_table->device) {
3596 			if (plat_priv->device_id == id_table->device) {
3597 				if (plat_priv->device_id == KIWI_DEVICE_ID &&
3598 				    driver_ops->chip_version != 2) {
3599 					cnss_pr_err("WLAN HW disabled. kiwi_v2 only supported\n");
3600 					return -ENODEV;
3601 				}
3602 				cnss_pr_info("WLAN register driver deferred for device ID: 0x%x due to HW disable\n",
3603 					     id_table->device);
3604 				plat_priv->driver_ops = driver_ops;
3605 				return 0;
3606 			}
3607 			id_table++;
3608 		}
3609 		return -ENODEV;
3610 	}
3611 
3612 	if (!test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state)) {
3613 		cnss_pr_info("pci probe not yet done for register driver\n");
3614 		return -EAGAIN;
3615 	}
3616 
3617 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state)) {
3618 		cnss_pr_err("Driver has already registered\n");
3619 		return -EEXIST;
3620 	}
3621 
3622 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3623 		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
3624 		return -EINVAL;
3625 	}
3626 
3627 	if (!id_table || !pci_dev_present(id_table)) {
3628 		/* pci_dev_present() may advance the id_table pointer,
3629 		 * so reset the local pointer before iterating again.
3630 		 */
3631 		id_table = driver_ops->id_table;
3632 		while (id_table && id_table->vendor) {
3633 			cnss_pr_info("Host driver is built for PCIe device ID 0x%x\n",
3634 				     id_table->device);
3635 			id_table++;
3636 		}
3637 		cnss_pr_err("Enumerated PCIe device id is 0x%x, reject unsupported driver\n",
3638 			    pci_priv->device_id);
3639 		return -ENODEV;
3640 	}
3641 
3642 	if (driver_ops->chip_version != CNSS_CHIP_VER_ANY &&
3643 	    driver_ops->chip_version != plat_priv->device_version.major_version) {
3644 		cnss_pr_err("Driver built for chip ver 0x%x, enumerated ver 0x%x, reject unsupported driver\n",
3645 			    driver_ops->chip_version,
3646 			    plat_priv->device_version.major_version);
3647 		return -ENODEV;
3648 	}
3649 
3650 	cnss_get_driver_mode_update_fw_name(plat_priv);
3651 	set_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state);
3652 
3653 	if (!plat_priv->cbc_enabled ||
3654 	    test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state))
3655 		goto register_driver;
3656 
3657 	pci_priv->driver_ops = driver_ops;
3658 	/* If Cold Boot Calibration (CBC) is enabled, it is the first step
3659 	 * in the init sequence. CBC runs on the file-system-ready trigger.
3660 	 * qcacld is loaded from vendor_modprobe.sh at early boot and must
3661 	 * be deferred until CBC is complete.
3662 	 */
3663 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_CALIBRATION);
3664 	INIT_DELAYED_WORK(&plat_priv->wlan_reg_driver_work,
3665 			  cnss_wlan_reg_driver_work);
3666 	schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
3667 			      msecs_to_jiffies(timeout));
3668 	cnss_pr_info("WLAN register driver deferred for Calibration\n");
3669 	return 0;
3670 register_driver:
3671 	reinit_completion(&plat_priv->power_up_complete);
3672 	ret = cnss_driver_event_post(plat_priv,
3673 				     CNSS_DRIVER_EVENT_REGISTER_DRIVER,
3674 				     CNSS_EVENT_SYNC_UNKILLABLE,
3675 				     driver_ops);
3676 
3677 	return ret;
3678 }
3679 EXPORT_SYMBOL(cnss_wlan_register_driver);
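
/*
 * Illustrative caller sketch (editorial addition, not part of this
 * driver): how a WLAN host driver might register with the API above.
 * The fields follow struct cnss_wlan_driver as used in this file; the
 * device table and the probe/remove callbacks are hypothetical.
 *
 *	static const struct pci_device_id wlan_id_table[] = {
 *		{ PCI_DEVICE(0x17cb, QCA6390_DEVICE_ID) },
 *		{ 0 }
 *	};
 *
 *	static struct cnss_wlan_driver wlan_driver_ops = {
 *		.name = "wlan",
 *		.id_table = wlan_id_table,
 *		.chip_version = CNSS_CHIP_VER_ANY,
 *		.probe = wlan_probe,
 *		.remove = wlan_remove,
 *	};
 *
 *	ret = cnss_wlan_register_driver(&wlan_driver_ops);
 *
 * A -EAGAIN return means the platform is not ready yet and the caller
 * may retry; registration itself is posted as a synchronous, unkillable
 * driver event, so a zero return means power-up has been kicked off (or
 * deferred for cold boot calibration as above).
 */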
3680 
3681 void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
3682 {
3683 	struct cnss_plat_data *plat_priv;
3684 	int ret = 0;
3685 	unsigned int timeout;
3686 
3687 	plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops);
3688 	if (!plat_priv) {
3689 		cnss_pr_err("plat_priv is NULL\n");
3690 		return;
3691 	}
3692 
3693 	mutex_lock(&plat_priv->driver_ops_lock);
3694 
3695 	if (plat_priv->device_id == QCA6174_DEVICE_ID)
3696 		goto skip_wait_power_up;
3697 
3698 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_WLAN_WATCHDOG);
3699 	ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
3700 					  msecs_to_jiffies(timeout));
3701 	if (!ret) {
3702 		cnss_pr_err("Timeout (%ums) waiting for driver power up to complete\n",
3703 			    timeout);
3704 		CNSS_ASSERT(0);
3705 	}
3706 
3707 skip_wait_power_up:
3708 	if (!test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
3709 	    !test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
3710 		goto skip_wait_recovery;
3711 
3712 	reinit_completion(&plat_priv->recovery_complete);
3713 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY);
3714 	ret = wait_for_completion_timeout(&plat_priv->recovery_complete,
3715 					  msecs_to_jiffies(timeout));
3716 	if (!ret) {
3717 		cnss_pr_err("Timeout (%ums) waiting for recovery to complete\n",
3718 			    timeout);
3719 		CNSS_ASSERT(0);
3720 	}
3721 
3722 skip_wait_recovery:
3723 	cnss_driver_event_post(plat_priv,
3724 			       CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
3725 			       CNSS_EVENT_SYNC_UNKILLABLE, NULL);
3726 
3727 	mutex_unlock(&plat_priv->driver_ops_lock);
3728 }
3729 EXPORT_SYMBOL(cnss_wlan_unregister_driver);
3730 
3731 int cnss_pci_register_driver_hdlr(struct cnss_pci_data *pci_priv,
3732 				  void *data)
3733 {
3734 	int ret = 0;
3735 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3736 
3737 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3738 		cnss_pr_dbg("Reboot or shutdown is in progress, ignore register driver\n");
3739 		return -EINVAL;
3740 	}
3741 
3742 	set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
3743 	pci_priv->driver_ops = data;
3744 
3745 	ret = cnss_pci_dev_powerup(pci_priv);
3746 	if (ret) {
3747 		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
3748 		pci_priv->driver_ops = NULL;
3749 	} else {
3750 		set_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);
3751 	}
3752 
3753 	return ret;
3754 }
3755 
3756 int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
3757 {
3758 	struct cnss_plat_data *plat_priv;
3759 
3760 	if (!pci_priv)
3761 		return -EINVAL;
3762 
3763 	plat_priv = pci_priv->plat_priv;
3764 	set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
3765 	cnss_pci_dev_shutdown(pci_priv);
3766 	pci_priv->driver_ops = NULL;
3767 	clear_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);
3768 
3769 	return 0;
3770 }
3771 
3772 static int cnss_pci_suspend_driver(struct cnss_pci_data *pci_priv)
3773 {
3774 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3775 	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
3776 	int ret = 0;
3777 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3778 
3779 	pm_message_t state = { .event = PM_EVENT_SUSPEND };
3780 
3781 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
3782 	    driver_ops && driver_ops->suspend) {
3783 		ret = driver_ops->suspend(pci_dev, state);
3784 		if (ret) {
3785 			cnss_pr_err("Failed to suspend host driver, err = %d\n",
3786 				    ret);
3787 			ret = -EAGAIN;
3788 		}
3789 	}
3790 
3791 	return ret;
3792 }
3793 
3794 static int cnss_pci_resume_driver(struct cnss_pci_data *pci_priv)
3795 {
3796 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3797 	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
3798 	int ret = 0;
3799 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3800 
3801 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
3802 	    driver_ops && driver_ops->resume) {
3803 		ret = driver_ops->resume(pci_dev);
3804 		if (ret)
3805 			cnss_pr_err("Failed to resume host driver, err = %d\n",
3806 				    ret);
3807 	}
3808 
3809 	return ret;
3810 }
3811 
3812 int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv)
3813 {
3814 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3815 	int ret = 0;
3816 
3817 	if (pci_priv->pci_link_state == PCI_LINK_DOWN)
3818 		goto out;
3819 
3820 	if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) {
3821 		ret = -EAGAIN;
3822 		goto out;
3823 	}
3824 
3825 	if (pci_priv->drv_connected_last)
3826 		goto skip_disable_pci;
3827 
3828 	pci_clear_master(pci_dev);
3829 	cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
3830 	pci_disable_device(pci_dev);
3831 
3832 	ret = pci_set_power_state(pci_dev, PCI_D3hot);
3833 	if (ret)
3834 		cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
3835 
3836 skip_disable_pci:
3837 	if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
3838 		ret = -EAGAIN;
3839 		goto resume_mhi;
3840 	}
3841 	pci_priv->pci_link_state = PCI_LINK_DOWN;
3842 
3843 	return 0;
3844 
3845 resume_mhi:
3846 	if (!pci_is_enabled(pci_dev))
3847 		if (pci_enable_device(pci_dev))
3848 			cnss_pr_err("Failed to enable PCI device\n");
3849 	if (pci_priv->saved_state)
3850 		cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
3851 	pci_set_master(pci_dev);
3852 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
3853 out:
3854 	return ret;
3855 }
3856 
3857 int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv)
3858 {
3859 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3860 	int ret = 0;
3861 
3862 	if (pci_priv->pci_link_state == PCI_LINK_UP)
3863 		goto out;
3864 
3865 	if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
3866 		cnss_fatal_err("Failed to resume PCI link from suspend\n");
3867 		cnss_pci_link_down(&pci_dev->dev);
3868 		ret = -EAGAIN;
3869 		goto out;
3870 	}
3871 
3872 	pci_priv->pci_link_state = PCI_LINK_UP;
3873 
3874 	if (pci_priv->drv_connected_last)
3875 		goto skip_enable_pci;
3876 
3877 	ret = pci_enable_device(pci_dev);
3878 	if (ret) {
3879 		cnss_pr_err("Failed to enable PCI device, err = %d\n",
3880 			    ret);
3881 		goto out;
3882 	}
3883 
3884 	if (pci_priv->saved_state)
3885 		cnss_set_pci_config_space(pci_priv,
3886 					  RESTORE_PCI_CONFIG_SPACE);
3887 	pci_set_master(pci_dev);
3888 
3889 skip_enable_pci:
3890 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
3891 out:
3892 	return ret;
3893 }
3894 
3895 static int cnss_pci_suspend(struct device *dev)
3896 {
3897 	int ret = 0;
3898 	struct pci_dev *pci_dev = to_pci_dev(dev);
3899 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
3900 	struct cnss_plat_data *plat_priv;
3901 
3902 	if (!pci_priv)
3903 		goto out;
3904 
3905 	plat_priv = pci_priv->plat_priv;
3906 	if (!plat_priv)
3907 		goto out;
3908 
3909 	if (!cnss_is_device_powered_on(plat_priv))
3910 		goto out;
3911 
3912 	/* No MHI state bit is set if only PCIe enumeration has finished,
3913 	 * so test_bit() is not applicable to check for the INIT state.
3914 	 */
3915 	if (pci_priv->mhi_state == CNSS_MHI_INIT) {
3916 		bool suspend = cnss_should_suspend_pwroff(pci_dev);
3917 
3918 		/* Do PCI link suspend and power off in the LPM case
3919 		 * if the chipset didn't do so after PCIe enumeration.
3920 		 */
3921 		if (!suspend) {
3922 			ret = cnss_suspend_pci_link(pci_priv);
3923 			if (ret)
3924 				cnss_pr_err("Failed to suspend PCI link, err = %d\n",
3925 					    ret);
3926 			cnss_power_off_device(plat_priv);
3927 			goto out;
3928 		}
3929 	}
3930 
3931 	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
3932 	    pci_priv->drv_supported) {
3933 		pci_priv->drv_connected_last =
3934 			cnss_pci_get_drv_connected(pci_priv);
3935 		if (!pci_priv->drv_connected_last) {
3936 			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
3937 			ret = -EAGAIN;
3938 			goto out;
3939 		}
3940 	}
3941 
3942 	set_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
3943 
3944 	ret = cnss_pci_suspend_driver(pci_priv);
3945 	if (ret)
3946 		goto clear_flag;
3947 
3948 	if (!pci_priv->disable_pc) {
3949 		mutex_lock(&pci_priv->bus_lock);
3950 		ret = cnss_pci_suspend_bus(pci_priv);
3951 		mutex_unlock(&pci_priv->bus_lock);
3952 		if (ret)
3953 			goto resume_driver;
3954 	}
3955 
3956 	cnss_pci_set_monitor_wake_intr(pci_priv, false);
3957 
3958 	return 0;
3959 
3960 resume_driver:
3961 	cnss_pci_resume_driver(pci_priv);
3962 clear_flag:
3963 	pci_priv->drv_connected_last = 0;
3964 	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
3965 out:
3966 	return ret;
3967 }
3968 
3969 static int cnss_pci_resume(struct device *dev)
3970 {
3971 	int ret = 0;
3972 	struct pci_dev *pci_dev = to_pci_dev(dev);
3973 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
3974 	struct cnss_plat_data *plat_priv;
3975 
3976 	if (!pci_priv)
3977 		goto out;
3978 
3979 	plat_priv = pci_priv->plat_priv;
3980 	if (!plat_priv)
3981 		goto out;
3982 
3983 	if (pci_priv->pci_link_down_ind)
3984 		goto out;
3985 
3986 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
3987 		goto out;
3988 
3989 	if (!pci_priv->disable_pc) {
3990 		ret = cnss_pci_resume_bus(pci_priv);
3991 		if (ret)
3992 			goto out;
3993 	}
3994 
3995 	ret = cnss_pci_resume_driver(pci_priv);
3996 
3997 	pci_priv->drv_connected_last = 0;
3998 	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
3999 
4000 out:
4001 	return ret;
4002 }
4003 
4004 static int cnss_pci_suspend_noirq(struct device *dev)
4005 {
4006 	int ret = 0;
4007 	struct pci_dev *pci_dev = to_pci_dev(dev);
4008 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4009 	struct cnss_wlan_driver *driver_ops;
4010 	struct cnss_plat_data *plat_priv;
4011 
4012 	if (!pci_priv)
4013 		goto out;
4014 
4015 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4016 		goto out;
4017 
4018 	driver_ops = pci_priv->driver_ops;
4019 	plat_priv = pci_priv->plat_priv;
4020 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
4021 	    driver_ops && driver_ops->suspend_noirq)
4022 		ret = driver_ops->suspend_noirq(pci_dev);
4023 
4024 	if (pci_priv->disable_pc && !pci_dev->state_saved &&
4025 	    !pci_priv->plat_priv->use_pm_domain)
4026 		pci_save_state(pci_dev);
4027 
4028 out:
4029 	return ret;
4030 }
4031 
4032 static int cnss_pci_resume_noirq(struct device *dev)
4033 {
4034 	int ret = 0;
4035 	struct pci_dev *pci_dev = to_pci_dev(dev);
4036 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4037 	struct cnss_wlan_driver *driver_ops;
4038 	struct cnss_plat_data *plat_priv;
4039 
4040 	if (!pci_priv)
4041 		goto out;
4042 
4043 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4044 		goto out;
4045 
4046 	plat_priv = pci_priv->plat_priv;
4047 	driver_ops = pci_priv->driver_ops;
4048 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
4049 	    driver_ops && driver_ops->resume_noirq &&
4050 	    !pci_priv->pci_link_down_ind)
4051 		ret = driver_ops->resume_noirq(pci_dev);
4052 
4053 out:
4054 	return ret;
4055 }
4056 
4057 static int cnss_pci_runtime_suspend(struct device *dev)
4058 {
4059 	int ret = 0;
4060 	struct pci_dev *pci_dev = to_pci_dev(dev);
4061 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4062 	struct cnss_plat_data *plat_priv;
4063 	struct cnss_wlan_driver *driver_ops;
4064 
4065 	if (!pci_priv)
4066 		return -EAGAIN;
4067 
4068 	plat_priv = pci_priv->plat_priv;
4069 	if (!plat_priv)
4070 		return -EAGAIN;
4071 
4072 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4073 		return -EAGAIN;
4074 
4075 	if (pci_priv->pci_link_down_ind) {
4076 		cnss_pr_dbg("PCI link down recovery is in progress!\n");
4077 		return -EAGAIN;
4078 	}
4079 
4080 	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
4081 	    pci_priv->drv_supported) {
4082 		pci_priv->drv_connected_last =
4083 			cnss_pci_get_drv_connected(pci_priv);
4084 		if (!pci_priv->drv_connected_last) {
4085 			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
4086 			return -EAGAIN;
4087 		}
4088 	}
4089 
4090 	cnss_pr_vdbg("Runtime suspend start\n");
4091 
4092 	driver_ops = pci_priv->driver_ops;
4093 	if (driver_ops && driver_ops->runtime_ops &&
4094 	    driver_ops->runtime_ops->runtime_suspend)
4095 		ret = driver_ops->runtime_ops->runtime_suspend(pci_dev);
4096 	else
4097 		ret = cnss_auto_suspend(dev);
4098 
4099 	if (ret)
4100 		pci_priv->drv_connected_last = 0;
4101 
4102 	cnss_pr_vdbg("Runtime suspend status: %d\n", ret);
4103 
4104 	return ret;
4105 }
4106 
4107 static int cnss_pci_runtime_resume(struct device *dev)
4108 {
4109 	int ret = 0;
4110 	struct pci_dev *pci_dev = to_pci_dev(dev);
4111 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4112 	struct cnss_wlan_driver *driver_ops;
4113 
4114 	if (!pci_priv)
4115 		return -EAGAIN;
4116 
4117 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4118 		return -EAGAIN;
4119 
4120 	if (pci_priv->pci_link_down_ind) {
4121 		cnss_pr_dbg("PCI link down recovery is in progress!\n");
4122 		return -EAGAIN;
4123 	}
4124 
4125 	cnss_pr_vdbg("Runtime resume start\n");
4126 
4127 	driver_ops = pci_priv->driver_ops;
4128 	if (driver_ops && driver_ops->runtime_ops &&
4129 	    driver_ops->runtime_ops->runtime_resume)
4130 		ret = driver_ops->runtime_ops->runtime_resume(pci_dev);
4131 	else
4132 		ret = cnss_auto_resume(dev);
4133 
4134 	if (!ret)
4135 		pci_priv->drv_connected_last = 0;
4136 
4137 	cnss_pr_vdbg("Runtime resume status: %d\n", ret);
4138 
4139 	return ret;
4140 }
4141 
4142 static int cnss_pci_runtime_idle(struct device *dev)
4143 {
4144 	cnss_pr_vdbg("Runtime idle\n");
4145 
4146 	pm_request_autosuspend(dev);
4147 
4148 	return -EBUSY;
4149 }
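
/*
 * Editorial note: returning -EBUSY from a runtime_idle callback tells
 * the PM core not to suspend the device immediately;
 * pm_request_autosuspend() above has already queued a delayed suspend
 * that honors the device's autosuspend timeout.
 */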
4150 
4151 int cnss_wlan_pm_control(struct device *dev, bool vote)
4152 {
4153 	struct pci_dev *pci_dev = to_pci_dev(dev);
4154 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4155 	int ret = 0;
4156 
4157 	if (!pci_priv)
4158 		return -ENODEV;
4159 
4160 	ret = cnss_pci_disable_pc(pci_priv, vote);
4161 	if (ret)
4162 		return ret;
4163 
4164 	pci_priv->disable_pc = vote;
4165 	cnss_pr_dbg("%s PCIe power collapse\n", vote ? "disable" : "enable");
4166 
4167 	return 0;
4168 }
4169 EXPORT_SYMBOL(cnss_wlan_pm_control);
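
/*
 * Illustrative usage sketch (assumption, not from this file): a caller
 * holding the WLAN PCI device can vote to keep PCIe power collapse
 * disabled around a latency-critical window; the work function is a
 * placeholder.
 *
 *	if (!cnss_wlan_pm_control(dev, true)) {
 *		do_latency_sensitive_io();
 *		cnss_wlan_pm_control(dev, false);
 *	}
 */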
4170 
4171 static void cnss_pci_pm_runtime_get_record(struct cnss_pci_data *pci_priv,
4172 					   enum cnss_rtpm_id id)
4173 {
4174 	if (id >= RTPM_ID_MAX)
4175 		return;
4176 
4177 	atomic_inc(&pci_priv->pm_stats.runtime_get);
4178 	atomic_inc(&pci_priv->pm_stats.runtime_get_id[id]);
4179 	pci_priv->pm_stats.runtime_get_timestamp_id[id] =
4180 		cnss_get_host_timestamp(pci_priv->plat_priv);
4181 }
4182 
4183 static void cnss_pci_pm_runtime_put_record(struct cnss_pci_data *pci_priv,
4184 					   enum cnss_rtpm_id id)
4185 {
4186 	if (id >= RTPM_ID_MAX)
4187 		return;
4188 
4189 	atomic_inc(&pci_priv->pm_stats.runtime_put);
4190 	atomic_inc(&pci_priv->pm_stats.runtime_put_id[id]);
4191 	pci_priv->pm_stats.runtime_put_timestamp_id[id] =
4192 		cnss_get_host_timestamp(pci_priv->plat_priv);
4193 }
4194 
4195 void cnss_pci_pm_runtime_show_usage_count(struct cnss_pci_data *pci_priv)
4196 {
4197 	struct device *dev;
4198 
4199 	if (!pci_priv)
4200 		return;
4201 
4202 	dev = &pci_priv->pci_dev->dev;
4203 
4204 	cnss_pr_dbg("Runtime PM usage count: %d\n",
4205 		    atomic_read(&dev->power.usage_count));
4206 }
4207 
4208 int cnss_pci_pm_request_resume(struct cnss_pci_data *pci_priv)
4209 {
4210 	struct device *dev;
4211 	enum rpm_status status;
4212 
4213 	if (!pci_priv)
4214 		return -ENODEV;
4215 
4216 	dev = &pci_priv->pci_dev->dev;
4217 
4218 	status = dev->power.runtime_status;
4219 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4220 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4221 			     (void *)_RET_IP_);
4222 
4223 	return pm_request_resume(dev);
4224 }
4225 
4226 int cnss_pci_pm_runtime_resume(struct cnss_pci_data *pci_priv)
4227 {
4228 	struct device *dev;
4229 	enum rpm_status status;
4230 
4231 	if (!pci_priv)
4232 		return -ENODEV;
4233 
4234 	dev = &pci_priv->pci_dev->dev;
4235 
4236 	status = dev->power.runtime_status;
4237 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4238 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4239 			     (void *)_RET_IP_);
4240 
4241 	return pm_runtime_resume(dev);
4242 }
4243 
4244 int cnss_pci_pm_runtime_get(struct cnss_pci_data *pci_priv,
4245 			    enum cnss_rtpm_id id)
4246 {
4247 	struct device *dev;
4248 	enum rpm_status status;
4249 
4250 	if (!pci_priv)
4251 		return -ENODEV;
4252 
4253 	dev = &pci_priv->pci_dev->dev;
4254 
4255 	status = dev->power.runtime_status;
4256 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4257 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4258 			     (void *)_RET_IP_);
4259 
4260 	cnss_pci_pm_runtime_get_record(pci_priv, id);
4261 
4262 	return pm_runtime_get(dev);
4263 }
4264 
4265 int cnss_pci_pm_runtime_get_sync(struct cnss_pci_data *pci_priv,
4266 				 enum cnss_rtpm_id id)
4267 {
4268 	struct device *dev;
4269 	enum rpm_status status;
4270 
4271 	if (!pci_priv)
4272 		return -ENODEV;
4273 
4274 	dev = &pci_priv->pci_dev->dev;
4275 
4276 	status = dev->power.runtime_status;
4277 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4278 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4279 			     (void *)_RET_IP_);
4280 
4281 	cnss_pci_pm_runtime_get_record(pci_priv, id);
4282 
4283 	return pm_runtime_get_sync(dev);
4284 }
4285 
4286 void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv,
4287 				      enum cnss_rtpm_id id)
4288 {
4289 	if (!pci_priv)
4290 		return;
4291 
4292 	cnss_pci_pm_runtime_get_record(pci_priv, id);
4293 	pm_runtime_get_noresume(&pci_priv->pci_dev->dev);
4294 }
4295 
4296 int cnss_pci_pm_runtime_put_autosuspend(struct cnss_pci_data *pci_priv,
4297 					enum cnss_rtpm_id id)
4298 {
4299 	struct device *dev;
4300 
4301 	if (!pci_priv)
4302 		return -ENODEV;
4303 
4304 	dev = &pci_priv->pci_dev->dev;
4305 
4306 	if (atomic_read(&dev->power.usage_count) == 0) {
4307 		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
4308 		return -EINVAL;
4309 	}
4310 
4311 	cnss_pci_pm_runtime_put_record(pci_priv, id);
4312 
4313 	return pm_runtime_put_autosuspend(&pci_priv->pci_dev->dev);
4314 }
4315 
4316 void cnss_pci_pm_runtime_put_noidle(struct cnss_pci_data *pci_priv,
4317 				    enum cnss_rtpm_id id)
4318 {
4319 	struct device *dev;
4320 
4321 	if (!pci_priv)
4322 		return;
4323 
4324 	dev = &pci_priv->pci_dev->dev;
4325 
4326 	if (atomic_read(&dev->power.usage_count) == 0) {
4327 		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
4328 		return;
4329 	}
4330 
4331 	cnss_pci_pm_runtime_put_record(pci_priv, id);
4332 	pm_runtime_put_noidle(&pci_priv->pci_dev->dev);
4333 }
4334 
4335 void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv)
4336 {
4337 	if (!pci_priv)
4338 		return;
4339 
4340 	pm_runtime_mark_last_busy(&pci_priv->pci_dev->dev);
4341 }
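
/*
 * Typical pairing of the runtime PM helpers above (sketch; RTPM_ID_CNSS
 * is assumed to be one of the enum cnss_rtpm_id values): each get is
 * tagged with an id so pm_stats can attribute wakeups, and is balanced
 * with an autosuspend put.
 *
 *	cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
 *	... access the bus ...
 *	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
 *	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
 */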
4342 
4343 int cnss_auto_suspend(struct device *dev)
4344 {
4345 	int ret = 0;
4346 	struct pci_dev *pci_dev = to_pci_dev(dev);
4347 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4348 	struct cnss_plat_data *plat_priv;
4349 
4350 	if (!pci_priv)
4351 		return -ENODEV;
4352 
4353 	plat_priv = pci_priv->plat_priv;
4354 	if (!plat_priv)
4355 		return -ENODEV;
4356 
4357 	mutex_lock(&pci_priv->bus_lock);
4358 	if (!pci_priv->qmi_send_usage_count) {
4359 		ret = cnss_pci_suspend_bus(pci_priv);
4360 		if (ret) {
4361 			mutex_unlock(&pci_priv->bus_lock);
4362 			return ret;
4363 		}
4364 	}
4365 
4366 	cnss_pci_set_auto_suspended(pci_priv, 1);
4367 	mutex_unlock(&pci_priv->bus_lock);
4368 
4369 	cnss_pci_set_monitor_wake_intr(pci_priv, true);
4370 
4371 	/* For suspend, temporarily set the bandwidth vote to NONE and don't
4372 	 * save it in current_bw_vote; the resume path should vote for the
4373 	 * last used bandwidth. Ignore errors if bw voting is not set up.
4374 	 */
4375 	cnss_setup_bus_bandwidth(plat_priv, CNSS_BUS_WIDTH_NONE, false);
4376 	return 0;
4377 }
4378 EXPORT_SYMBOL(cnss_auto_suspend);
4379 
4380 int cnss_auto_resume(struct device *dev)
4381 {
4382 	int ret = 0;
4383 	struct pci_dev *pci_dev = to_pci_dev(dev);
4384 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4385 	struct cnss_plat_data *plat_priv;
4386 
4387 	if (!pci_priv)
4388 		return -ENODEV;
4389 
4390 	plat_priv = pci_priv->plat_priv;
4391 	if (!plat_priv)
4392 		return -ENODEV;
4393 
4394 	mutex_lock(&pci_priv->bus_lock);
4395 	ret = cnss_pci_resume_bus(pci_priv);
4396 	if (ret) {
4397 		mutex_unlock(&pci_priv->bus_lock);
4398 		return ret;
4399 	}
4400 
4401 	cnss_pci_set_auto_suspended(pci_priv, 0);
4402 	mutex_unlock(&pci_priv->bus_lock);
4403 
4404 	cnss_request_bus_bandwidth(dev, plat_priv->icc.current_bw_vote);
4405 
4406 	return 0;
4407 }
4408 EXPORT_SYMBOL(cnss_auto_resume);
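
/*
 * Illustrative pairing sketch (assumption): cnss_auto_suspend() and
 * cnss_auto_resume() are what the runtime PM callbacks above fall back
 * to when a host driver supplies no runtime_ops. A host driver's own
 * runtime ops could simply delegate to them:
 *
 *	static int wlan_runtime_suspend(struct pci_dev *pdev)
 *	{
 *		return cnss_auto_suspend(&pdev->dev);
 *	}
 *
 *	static int wlan_runtime_resume(struct pci_dev *pdev)
 *	{
 *		return cnss_auto_resume(&pdev->dev);
 *	}
 */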
4409 
4410 int cnss_pci_force_wake_request_sync(struct device *dev, int timeout_us)
4411 {
4412 	struct pci_dev *pci_dev = to_pci_dev(dev);
4413 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4414 	struct cnss_plat_data *plat_priv;
4415 	struct mhi_controller *mhi_ctrl;
4416 
4417 	if (!pci_priv)
4418 		return -ENODEV;
4419 
4420 	switch (pci_priv->device_id) {
4421 	case QCA6390_DEVICE_ID:
4422 	case QCA6490_DEVICE_ID:
4423 	case KIWI_DEVICE_ID:
4424 	case MANGO_DEVICE_ID:
4425 	case PEACH_DEVICE_ID:
4426 		break;
4427 	default:
4428 		return 0;
4429 	}
4430 
4431 	mhi_ctrl = pci_priv->mhi_ctrl;
4432 	if (!mhi_ctrl)
4433 		return -EINVAL;
4434 
4435 	plat_priv = pci_priv->plat_priv;
4436 	if (!plat_priv)
4437 		return -ENODEV;
4438 
4439 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
4440 		return -EAGAIN;
4441 
4442 	if (timeout_us) {
4443 		/* Busy wait for timeout_us */
4444 		return cnss_mhi_device_get_sync_atomic(pci_priv,
4445 						       timeout_us, false);
4446 	} else {
4447 		/* Sleep wait for mhi_ctrl->timeout_ms */
4448 		return mhi_device_get_sync(mhi_ctrl->mhi_dev);
4449 	}
4450 }
4451 EXPORT_SYMBOL(cnss_pci_force_wake_request_sync);
4452 
4453 int cnss_pci_force_wake_request(struct device *dev)
4454 {
4455 	struct pci_dev *pci_dev = to_pci_dev(dev);
4456 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4457 	struct cnss_plat_data *plat_priv;
4458 	struct mhi_controller *mhi_ctrl;
4459 
4460 	if (!pci_priv)
4461 		return -ENODEV;
4462 
4463 	switch (pci_priv->device_id) {
4464 	case QCA6390_DEVICE_ID:
4465 	case QCA6490_DEVICE_ID:
4466 	case KIWI_DEVICE_ID:
4467 	case MANGO_DEVICE_ID:
4468 	case PEACH_DEVICE_ID:
4469 		break;
4470 	default:
4471 		return 0;
4472 	}
4473 
4474 	mhi_ctrl = pci_priv->mhi_ctrl;
4475 	if (!mhi_ctrl)
4476 		return -EINVAL;
4477 
4478 	plat_priv = pci_priv->plat_priv;
4479 	if (!plat_priv)
4480 		return -ENODEV;
4481 
4482 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
4483 		return -EAGAIN;
4484 
4485 	mhi_device_get(mhi_ctrl->mhi_dev);
4486 
4487 	return 0;
4488 }
4489 EXPORT_SYMBOL(cnss_pci_force_wake_request);
4490 
4491 int cnss_pci_is_device_awake(struct device *dev)
4492 {
4493 	struct pci_dev *pci_dev = to_pci_dev(dev);
4494 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4495 	struct mhi_controller *mhi_ctrl;
4496 
4497 	if (!pci_priv)
4498 		return -ENODEV;
4499 
4500 	switch (pci_priv->device_id) {
4501 	case QCA6390_DEVICE_ID:
4502 	case QCA6490_DEVICE_ID:
4503 	case KIWI_DEVICE_ID:
4504 	case MANGO_DEVICE_ID:
4505 	case PEACH_DEVICE_ID:
4506 		break;
4507 	default:
4508 		return 0;
4509 	}
4510 
4511 	mhi_ctrl = pci_priv->mhi_ctrl;
4512 	if (!mhi_ctrl)
4513 		return -EINVAL;
4514 
4515 	return (mhi_ctrl->dev_state == MHI_STATE_M0);
4516 }
4517 EXPORT_SYMBOL(cnss_pci_is_device_awake);
4518 
4519 int cnss_pci_force_wake_release(struct device *dev)
4520 {
4521 	struct pci_dev *pci_dev = to_pci_dev(dev);
4522 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4523 	struct cnss_plat_data *plat_priv;
4524 	struct mhi_controller *mhi_ctrl;
4525 
4526 	if (!pci_priv)
4527 		return -ENODEV;
4528 
4529 	switch (pci_priv->device_id) {
4530 	case QCA6390_DEVICE_ID:
4531 	case QCA6490_DEVICE_ID:
4532 	case KIWI_DEVICE_ID:
4533 	case MANGO_DEVICE_ID:
4534 	case PEACH_DEVICE_ID:
4535 		break;
4536 	default:
4537 		return 0;
4538 	}
4539 
4540 	mhi_ctrl = pci_priv->mhi_ctrl;
4541 	if (!mhi_ctrl)
4542 		return -EINVAL;
4543 
4544 	plat_priv = pci_priv->plat_priv;
4545 	if (!plat_priv)
4546 		return -ENODEV;
4547 
4548 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
4549 		return -EAGAIN;
4550 
4551 	mhi_device_put(mhi_ctrl->mhi_dev);
4552 
4553 	return 0;
4554 }
4555 EXPORT_SYMBOL(cnss_pci_force_wake_release);
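
/*
 * Illustrative force-wake sequence (editorial sketch): the exports above
 * form a request/check/release triplet around register accesses that
 * need the device in M0; the register access and retry bound are
 * placeholders.
 *
 *	int retry = 100;
 *
 *	if (!cnss_pci_force_wake_request(dev)) {
 *		while (cnss_pci_is_device_awake(dev) != 1 && retry--)
 *			usleep_range(100, 200);
 *		access_device_registers();
 *		cnss_pci_force_wake_release(dev);
 *	}
 *
 * cnss_pci_force_wake_request_sync() combines the first two steps,
 * busy-waiting up to timeout_us, or sleeping on the MHI timeout when
 * timeout_us is 0.
 */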
4556 
4557 int cnss_pci_qmi_send_get(struct cnss_pci_data *pci_priv)
4558 {
4559 	int ret = 0;
4560 
4561 	if (!pci_priv)
4562 		return -ENODEV;
4563 
4564 	mutex_lock(&pci_priv->bus_lock);
4565 	if (cnss_pci_get_auto_suspended(pci_priv) &&
4566 	    !pci_priv->qmi_send_usage_count)
4567 		ret = cnss_pci_resume_bus(pci_priv);
4568 	pci_priv->qmi_send_usage_count++;
4569 	cnss_pr_buf("Increased QMI send usage count to %d\n",
4570 		    pci_priv->qmi_send_usage_count);
4571 	mutex_unlock(&pci_priv->bus_lock);
4572 
4573 	return ret;
4574 }
4575 
4576 int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv)
4577 {
4578 	int ret = 0;
4579 
4580 	if (!pci_priv)
4581 		return -ENODEV;
4582 
4583 	mutex_lock(&pci_priv->bus_lock);
4584 	if (pci_priv->qmi_send_usage_count)
4585 		pci_priv->qmi_send_usage_count--;
4586 	cnss_pr_buf("Decreased QMI send usage count to %d\n",
4587 		    pci_priv->qmi_send_usage_count);
4588 	if (cnss_pci_get_auto_suspended(pci_priv) &&
4589 	    !pci_priv->qmi_send_usage_count &&
4590 	    !cnss_pcie_is_device_down(pci_priv))
4591 		ret = cnss_pci_suspend_bus(pci_priv);
4592 	mutex_unlock(&pci_priv->bus_lock);
4593 
4594 	return ret;
4595 }
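
/*
 * The two helpers above form a refcounted bus-access guard for QMI
 * traffic: _get resumes the bus on the 0 -> 1 count transition while
 * auto-suspended, and _put suspends it again on 1 -> 0 unless the
 * device is down. Sketch (the send function is a placeholder):
 *
 *	cnss_pci_qmi_send_get(pci_priv);
 *	send_qmi_message(pci_priv);
 *	cnss_pci_qmi_send_put(pci_priv);
 */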
4596 
4597 int cnss_send_buffer_to_afcmem(struct device *dev, const uint8_t *afcdb,
4598 			       uint32_t len, uint8_t slotid)
4599 {
4600 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4601 	struct cnss_fw_mem *fw_mem;
4602 	void *mem = NULL;
4603 	int i, ret;
4604 	u32 *status;
4605 
4606 	if (!plat_priv)
4607 		return -EINVAL;
4608 
4609 	fw_mem = plat_priv->fw_mem;
4610 	if (slotid >= AFC_MAX_SLOT) {
4611 		cnss_pr_err("Invalid slot id %d\n", slotid);
4612 		ret = -EINVAL;
4613 		goto err;
4614 	}
4615 	if (len > AFC_SLOT_SIZE) {
4616 		cnss_pr_err("len %u greater than slot size\n", len);
4617 		ret = -EINVAL;
4618 		goto err;
4619 	}
4620 
4621 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4622 		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
4623 			mem = fw_mem[i].va;
4624 			status = mem + (slotid * AFC_SLOT_SIZE);
4625 			break;
4626 		}
4627 	}
4628 
4629 	if (!mem) {
4630 		cnss_pr_err("AFC mem is not available\n");
4631 		ret = -ENOMEM;
4632 		goto err;
4633 	}
4634 
4635 	memcpy(mem + (slotid * AFC_SLOT_SIZE), afcdb, len);
4636 	if (len < AFC_SLOT_SIZE)
4637 		memset(mem + (slotid * AFC_SLOT_SIZE) + len,
4638 		       0, AFC_SLOT_SIZE - len);
4639 	status[AFC_AUTH_STATUS_OFFSET] = cpu_to_le32(AFC_AUTH_SUCCESS);
4640 
4641 	return 0;
4642 err:
4643 	return ret;
4644 }
4645 EXPORT_SYMBOL(cnss_send_buffer_to_afcmem);
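
/*
 * AFC memory layout, for reference: the shared region is AFC_MEM_SIZE
 * bytes split into AFC_MAX_SLOT slots of AFC_SLOT_SIZE each, so slot N
 * starts at byte N * AFC_SLOT_SIZE. Payloads shorter than a slot are
 * zero-padded before the status word is written. Sketch with an assumed
 * 256-byte response:
 *
 *	u8 afc_response[256];
 *
 *	cnss_send_buffer_to_afcmem(dev, afc_response,
 *				   sizeof(afc_response), 0);
 */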
4646 
4647 int cnss_reset_afcmem(struct device *dev, uint8_t slotid)
4648 {
4649 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4650 	struct cnss_fw_mem *fw_mem;
4651 	void *mem = NULL;
4652 	int i, ret;
4653 
4654 	if (!plat_priv)
4655 		return -EINVAL;
4656 
4657 	fw_mem = plat_priv->fw_mem;
4658 	if (slotid >= AFC_MAX_SLOT) {
4659 		cnss_pr_err("Invalid slot id %d\n", slotid);
4660 		ret = -EINVAL;
4661 		goto err;
4662 	}
4663 
4664 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4665 		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
4666 			mem = fw_mem[i].va;
4667 			break;
4668 		}
4669 	}
4670 
4671 	if (!mem) {
4672 		cnss_pr_err("AFC mem is not available\n");
4673 		ret = -ENOMEM;
4674 		goto err;
4675 	}
4676 
4677 	memset(mem + (slotid * AFC_SLOT_SIZE), 0, AFC_SLOT_SIZE);
4678 	return 0;
4679 
4680 err:
4681 	return ret;
4682 }
4683 EXPORT_SYMBOL(cnss_reset_afcmem);
4684 
4685 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
4686 {
4687 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4688 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
4689 	struct device *dev = &pci_priv->pci_dev->dev;
4690 	int i;
4691 
4692 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4693 		if (!fw_mem[i].va && fw_mem[i].size) {
4694 retry:
4695 			fw_mem[i].va =
4696 				dma_alloc_attrs(dev, fw_mem[i].size,
4697 						&fw_mem[i].pa, GFP_KERNEL,
4698 						fw_mem[i].attrs);
4699 
4700 			if (!fw_mem[i].va) {
4701 				if ((fw_mem[i].attrs &
4702 				    DMA_ATTR_FORCE_CONTIGUOUS)) {
4703 					fw_mem[i].attrs &=
4704 						~DMA_ATTR_FORCE_CONTIGUOUS;
4705 
4706 					cnss_pr_dbg("Fallback to non-contiguous memory for FW, Mem type: %u\n",
4707 						    fw_mem[i].type);
4708 					goto retry;
4709 				}
4710 				cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n",
4711 					    fw_mem[i].size, fw_mem[i].type);
4712 				CNSS_ASSERT(0);
4713 				return -ENOMEM;
4714 			}
4715 		}
4716 	}
4717 
4718 	return 0;
4719 }
4720 
4721 static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
4722 {
4723 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4724 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
4725 	struct device *dev = &pci_priv->pci_dev->dev;
4726 	int i;
4727 
4728 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4729 		if (fw_mem[i].va && fw_mem[i].size) {
4730 			cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
4731 				    fw_mem[i].va, &fw_mem[i].pa,
4732 				    fw_mem[i].size, fw_mem[i].type);
4733 			dma_free_attrs(dev, fw_mem[i].size,
4734 				       fw_mem[i].va, fw_mem[i].pa,
4735 				       fw_mem[i].attrs);
4736 			fw_mem[i].va = NULL;
4737 			fw_mem[i].pa = 0;
4738 			fw_mem[i].size = 0;
4739 			fw_mem[i].type = 0;
4740 		}
4741 	}
4742 
4743 	plat_priv->fw_mem_seg_len = 0;
4744 }
4745 
4746 int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv)
4747 {
4748 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4749 	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
4750 	int i, j;
4751 
4752 	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
4753 		if (!qdss_mem[i].va && qdss_mem[i].size) {
4754 			qdss_mem[i].va =
4755 				dma_alloc_coherent(&pci_priv->pci_dev->dev,
4756 						   qdss_mem[i].size,
4757 						   &qdss_mem[i].pa,
4758 						   GFP_KERNEL);
4759 			if (!qdss_mem[i].va) {
4760 				cnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chunk-ID: %d\n",
4761 					    qdss_mem[i].size,
4762 					    qdss_mem[i].type, i);
4763 				break;
4764 			}
4765 		}
4766 	}
4767 
4768 	/* Best-effort allocation for QDSS trace */
4769 	if (i < plat_priv->qdss_mem_seg_len) {
4770 		for (j = i; j < plat_priv->qdss_mem_seg_len; j++) {
4771 			qdss_mem[j].type = 0;
4772 			qdss_mem[j].size = 0;
4773 		}
4774 		plat_priv->qdss_mem_seg_len = i;
4775 	}
4776 
4777 	return 0;
4778 }
4779 
4780 void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv)
4781 {
4782 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4783 	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
4784 	int i;
4785 
4786 	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
4787 		if (qdss_mem[i].va && qdss_mem[i].size) {
4788 			cnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n",
4789 				    &qdss_mem[i].pa, qdss_mem[i].size,
4790 				    qdss_mem[i].type);
4791 			dma_free_coherent(&pci_priv->pci_dev->dev,
4792 					  qdss_mem[i].size, qdss_mem[i].va,
4793 					  qdss_mem[i].pa);
4794 			qdss_mem[i].va = NULL;
4795 			qdss_mem[i].pa = 0;
4796 			qdss_mem[i].size = 0;
4797 			qdss_mem[i].type = 0;
4798 		}
4799 	}
4800 	plat_priv->qdss_mem_seg_len = 0;
4801 }
4802 
4803 int cnss_pci_load_tme_patch(struct cnss_pci_data *pci_priv)
4804 {
4805 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4806 	struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem;
4807 	char filename[MAX_FIRMWARE_NAME_LEN];
4808 	char *tme_patch_filename = NULL;
4809 	const struct firmware *fw_entry;
4810 	int ret = 0;
4811 
4812 	switch (pci_priv->device_id) {
4813 	case PEACH_DEVICE_ID:
4814 		tme_patch_filename = TME_PATCH_FILE_NAME;
4815 		break;
4816 	case QCA6174_DEVICE_ID:
4817 	case QCA6290_DEVICE_ID:
4818 	case QCA6390_DEVICE_ID:
4819 	case QCA6490_DEVICE_ID:
4820 	case KIWI_DEVICE_ID:
4821 	case MANGO_DEVICE_ID:
4822 	default:
4823 		cnss_pr_dbg("TME-L not supported for device ID 0x%x\n",
4824 			    pci_priv->device_id);
4825 		return 0;
4826 	}
4827 
4828 	if (!tme_lite_mem->va && !tme_lite_mem->size) {
4829 		cnss_pci_add_fw_prefix_name(pci_priv, filename,
4830 					    tme_patch_filename);
4831 
4832 		ret = firmware_request_nowarn(&fw_entry, filename,
4833 					      &pci_priv->pci_dev->dev);
4834 		if (ret) {
4835 			cnss_pr_err("Failed to load TME-L patch: %s, ret: %d\n",
4836 				    filename, ret);
4837 			return ret;
4838 		}
4839 
4840 		tme_lite_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
4841 						fw_entry->size, &tme_lite_mem->pa,
4842 						GFP_KERNEL);
4843 		if (!tme_lite_mem->va) {
4844 			cnss_pr_err("Failed to allocate memory for TME-L patch, size: 0x%zx\n",
4845 				    fw_entry->size);
4846 			release_firmware(fw_entry);
4847 			return -ENOMEM;
4848 		}
4849 
4850 		memcpy(tme_lite_mem->va, fw_entry->data, fw_entry->size);
4851 		tme_lite_mem->size = fw_entry->size;
4852 		release_firmware(fw_entry);
4853 	}
4854 
4855 	return 0;
4856 }
4857 
4858 static void cnss_pci_free_tme_lite_mem(struct cnss_pci_data *pci_priv)
4859 {
4860 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4861 	struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem;
4862 
4863 	if (tme_lite_mem->va && tme_lite_mem->size) {
4864 		cnss_pr_dbg("Freeing memory for TME patch, va: 0x%pK, pa: %pa, size: 0x%zx\n",
4865 			    tme_lite_mem->va, &tme_lite_mem->pa, tme_lite_mem->size);
4866 		dma_free_coherent(&pci_priv->pci_dev->dev, tme_lite_mem->size,
4867 				  tme_lite_mem->va, tme_lite_mem->pa);
4868 	}
4869 
4870 	tme_lite_mem->va = NULL;
4871 	tme_lite_mem->pa = 0;
4872 	tme_lite_mem->size = 0;
4873 }
4874 
4875 int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
4876 {
4877 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4878 	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
4879 	char filename[MAX_FIRMWARE_NAME_LEN];
4880 	char *phy_filename = DEFAULT_PHY_UCODE_FILE_NAME;
4881 	const struct firmware *fw_entry;
4882 	int ret = 0;
4883 
4884 	/* Use forward compatibility here, since any recent device should
4885 	 * use DEFAULT_PHY_UCODE_FILE_NAME.
4886 	 */
4887 	switch (pci_priv->device_id) {
4888 	case QCA6174_DEVICE_ID:
4889 		cnss_pr_err("Invalid device ID (0x%x) to load phy image\n",
4890 			    pci_priv->device_id);
4891 		return -EINVAL;
4892 	case QCA6290_DEVICE_ID:
4893 	case QCA6390_DEVICE_ID:
4894 	case QCA6490_DEVICE_ID:
4895 		phy_filename = DEFAULT_PHY_M3_FILE_NAME;
4896 		break;
4897 	case KIWI_DEVICE_ID:
4898 	case MANGO_DEVICE_ID:
4899 	case PEACH_DEVICE_ID:
4900 		switch (plat_priv->device_version.major_version) {
4901 		case FW_V2_NUMBER:
4902 			phy_filename = PHY_UCODE_V2_FILE_NAME;
4903 			break;
4904 		default:
4905 			break;
4906 		}
4907 		break;
4908 	default:
4909 		break;
4910 	}
4911 
4912 	if (!m3_mem->va && !m3_mem->size) {
4913 		cnss_pci_add_fw_prefix_name(pci_priv, filename,
4914 					    phy_filename);
4915 
4916 		ret = firmware_request_nowarn(&fw_entry, filename,
4917 					      &pci_priv->pci_dev->dev);
4918 		if (ret) {
4919 			cnss_pr_err("Failed to load M3 image: %s\n", filename);
4920 			return ret;
4921 		}
4922 
4923 		m3_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
4924 						fw_entry->size, &m3_mem->pa,
4925 						GFP_KERNEL);
4926 		if (!m3_mem->va) {
4927 			cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
4928 				    fw_entry->size);
4929 			release_firmware(fw_entry);
4930 			return -ENOMEM;
4931 		}
4932 
4933 		memcpy(m3_mem->va, fw_entry->data, fw_entry->size);
4934 		m3_mem->size = fw_entry->size;
4935 		release_firmware(fw_entry);
4936 	}
4937 
4938 	return 0;
4939 }
4940 
4941 static void cnss_pci_free_m3_mem(struct cnss_pci_data *pci_priv)
4942 {
4943 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4944 	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
4945 
4946 	if (m3_mem->va && m3_mem->size) {
4947 		cnss_pr_dbg("Freeing memory for M3, va: 0x%pK, pa: %pa, size: 0x%zx\n",
4948 			    m3_mem->va, &m3_mem->pa, m3_mem->size);
4949 		dma_free_coherent(&pci_priv->pci_dev->dev, m3_mem->size,
4950 				  m3_mem->va, m3_mem->pa);
4951 	}
4952 
4953 	m3_mem->va = NULL;
4954 	m3_mem->pa = 0;
4955 	m3_mem->size = 0;
4956 }
4957 
4958 #ifdef CONFIG_FREE_M3_BLOB_MEM
4959 void cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv)
4960 {
4961 	cnss_pci_free_m3_mem(pci_priv);
4962 }
4963 #else
4964 void cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv)
4965 {
4966 }
4967 #endif
4968 
4969 int cnss_pci_load_aux(struct cnss_pci_data *pci_priv)
4970 {
4971 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4972 	struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem;
4973 	char filename[MAX_FIRMWARE_NAME_LEN];
4974 	char *aux_filename = DEFAULT_AUX_FILE_NAME;
4975 	const struct firmware *fw_entry;
4976 	int ret = 0;
4977 
4978 	if (!aux_mem->va && !aux_mem->size) {
4979 		cnss_pci_add_fw_prefix_name(pci_priv, filename,
4980 					    aux_filename);
4981 
4982 		ret = firmware_request_nowarn(&fw_entry, filename,
4983 					      &pci_priv->pci_dev->dev);
4984 		if (ret) {
4985 			cnss_pr_err("Failed to load AUX image: %s\n", filename);
4986 			return ret;
4987 		}
4988 
4989 		aux_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
4990 						fw_entry->size, &aux_mem->pa,
4991 						GFP_KERNEL);
4992 		if (!aux_mem->va) {
4993 			cnss_pr_err("Failed to allocate memory for AUX, size: 0x%zx\n",
4994 				    fw_entry->size);
4995 			release_firmware(fw_entry);
4996 			return -ENOMEM;
4997 		}
4998 
4999 		memcpy(aux_mem->va, fw_entry->data, fw_entry->size);
5000 		aux_mem->size = fw_entry->size;
5001 		release_firmware(fw_entry);
5002 	}
5003 
5004 	return 0;
5005 }
5006 
5007 static void cnss_pci_free_aux_mem(struct cnss_pci_data *pci_priv)
5008 {
5009 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5010 	struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem;
5011 
5012 	if (aux_mem->va && aux_mem->size) {
5013 		cnss_pr_dbg("Freeing memory for AUX, va: 0x%pK, pa: %pa, size: 0x%zx\n",
5014 			    aux_mem->va, &aux_mem->pa, aux_mem->size);
5015 		dma_free_coherent(&pci_priv->pci_dev->dev, aux_mem->size,
5016 				  aux_mem->va, aux_mem->pa);
5017 	}
5018 
5019 	aux_mem->va = NULL;
5020 	aux_mem->pa = 0;
5021 	aux_mem->size = 0;
5022 }
5023 
5024 void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv)
5025 {
5026 	struct cnss_plat_data *plat_priv;
5027 
5028 	if (!pci_priv)
5029 		return;
5030 
5031 	cnss_fatal_err("Timeout waiting for FW ready indication\n");
5032 
5033 	plat_priv = pci_priv->plat_priv;
5034 	if (!plat_priv)
5035 		return;
5036 
5037 	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
5038 		cnss_pr_dbg("Ignore FW ready timeout for calibration mode\n");
5039 		return;
5040 	}
5041 
5042 	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
5043 			       CNSS_REASON_TIMEOUT);
5044 }
5045 
5046 static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
5047 {
5048 	pci_priv->iommu_domain = NULL;
5049 }
5050 
5051 int cnss_pci_get_iova(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
5052 {
5053 	if (!pci_priv)
5054 		return -ENODEV;
5055 
5056 	if (!pci_priv->smmu_iova_len)
5057 		return -EINVAL;
5058 
5059 	*addr = pci_priv->smmu_iova_start;
5060 	*size = pci_priv->smmu_iova_len;
5061 
5062 	return 0;
5063 }
5064 
5065 int cnss_pci_get_iova_ipa(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
5066 {
5067 	if (!pci_priv)
5068 		return -ENODEV;
5069 
5070 	if (!pci_priv->smmu_iova_ipa_len)
5071 		return -EINVAL;
5072 
5073 	*addr = pci_priv->smmu_iova_ipa_start;
5074 	*size = pci_priv->smmu_iova_ipa_len;
5075 
5076 	return 0;
5077 }
5078 
5079 bool cnss_pci_is_smmu_s1_enabled(struct cnss_pci_data *pci_priv)
5080 {
5081 	if (pci_priv)
5082 		return pci_priv->smmu_s1_enable;
5083 
5084 	return false;
5085 }
5086 struct iommu_domain *cnss_smmu_get_domain(struct device *dev)
5087 {
5088 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5089 
5090 	if (!pci_priv)
5091 		return NULL;
5092 
5093 	return pci_priv->iommu_domain;
5094 }
5095 EXPORT_SYMBOL(cnss_smmu_get_domain);
5096 
5097 int cnss_smmu_map(struct device *dev,
5098 		  phys_addr_t paddr, uint32_t *iova_addr, size_t size)
5099 {
5100 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5101 	struct cnss_plat_data *plat_priv;
5102 	unsigned long iova;
5103 	size_t len;
5104 	int ret = 0;
5105 	int flag = IOMMU_READ | IOMMU_WRITE;
5106 	struct pci_dev *root_port;
5107 	struct device_node *root_of_node;
5108 	bool dma_coherent = false;
5109 
5110 	if (!pci_priv)
5111 		return -ENODEV;
5112 
5113 	if (!iova_addr) {
5114 		cnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n",
5115 			    &paddr, size);
5116 		return -EINVAL;
5117 	}
5118 
5119 	plat_priv = pci_priv->plat_priv;
5120 
5121 	len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
5122 	iova = roundup(pci_priv->smmu_iova_ipa_current, PAGE_SIZE);
5123 
5124 	if (pci_priv->iommu_geometry &&
5125 	    iova >= pci_priv->smmu_iova_ipa_start +
5126 		    pci_priv->smmu_iova_ipa_len) {
5127 		cnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
5128 			    iova,
5129 			    &pci_priv->smmu_iova_ipa_start,
5130 			    pci_priv->smmu_iova_ipa_len);
5131 		return -ENOMEM;
5132 	}
5133 
5134 	if (!test_bit(DISABLE_IO_COHERENCY,
5135 		      &plat_priv->ctrl_params.quirks)) {
5136 		root_port = pcie_find_root_port(pci_priv->pci_dev);
5137 		if (!root_port) {
5138 			cnss_pr_err("Root port is null, so dma_coherent is disabled\n");
5139 		} else {
5140 			root_of_node = root_port->dev.of_node;
5141 			if (root_of_node && root_of_node->parent) {
5142 				dma_coherent =
5143 				    of_property_read_bool(root_of_node->parent,
5144 							  "dma-coherent");
5145 				cnss_pr_dbg("dma-coherent is %s\n",
5146 					    dma_coherent ? "enabled" : "disabled");
5147 				if (dma_coherent)
5148 					flag |= IOMMU_CACHE;
5149 			}
5150 		}
5151 	}
5152 
5153 	cnss_pr_dbg("IOMMU map: iova %lx, len %zu\n", iova, len);
5154 
5155 	ret = iommu_map(pci_priv->iommu_domain, iova,
5156 			rounddown(paddr, PAGE_SIZE), len, flag);
5157 	if (ret) {
5158 		cnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
5159 		return ret;
5160 	}
5161 
5162 	pci_priv->smmu_iova_ipa_current = iova + len;
5163 	*iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE));
5164 	cnss_pr_dbg("IOMMU map: iova_addr %x\n", *iova_addr);
5165 
5166 	return 0;
5167 }
5168 EXPORT_SYMBOL(cnss_smmu_map);
5169 
5170 int cnss_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size)
5171 {
5172 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5173 	unsigned long iova;
5174 	size_t unmapped;
5175 	size_t len;
5176 
5177 	if (!pci_priv)
5178 		return -ENODEV;
5179 
5180 	iova = rounddown(iova_addr, PAGE_SIZE);
5181 	len = roundup(size + iova_addr - iova, PAGE_SIZE);
5182 
5183 	if (iova >= pci_priv->smmu_iova_ipa_start +
5184 		    pci_priv->smmu_iova_ipa_len) {
5185 		cnss_pr_err("Out of IOVA space to unmap, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
5186 			    iova,
5187 			    &pci_priv->smmu_iova_ipa_start,
5188 			    pci_priv->smmu_iova_ipa_len);
5189 		return -ENOMEM;
5190 	}
5191 
5192 	cnss_pr_dbg("IOMMU unmap: iova %lx, len %zu\n", iova, len);
5193 
5194 	unmapped = iommu_unmap(pci_priv->iommu_domain, iova, len);
5195 	if (unmapped != len) {
5196 		cnss_pr_err("IOMMU unmap failed, unmapped = %zu, requested = %zu\n",
5197 			    unmapped, len);
5198 		return -EINVAL;
5199 	}
5200 
5201 	pci_priv->smmu_iova_ipa_current = iova;
5202 	return 0;
5203 }
5204 EXPORT_SYMBOL(cnss_smmu_unmap);
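
/*
 * Illustrative map/unmap pairing (editorial sketch): both helpers round
 * the physical address down and the length up to PAGE_SIZE, so the
 * returned IOVA carries the same sub-page offset as paddr.
 *
 *	u32 iova = 0;
 *
 *	if (!cnss_smmu_map(dev, paddr, &iova, len)) {
 *		... device DMA uses iova ...
 *		cnss_smmu_unmap(dev, iova, len);
 *	}
 *
 * Note that unmap rewinds smmu_iova_ipa_current to the freed IOVA, so
 * mappings are effectively released in reverse order of creation.
 */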
5205 
5206 int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
5207 {
5208 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5209 	struct cnss_plat_data *plat_priv;
5210 
5211 	if (!pci_priv)
5212 		return -ENODEV;
5213 
5214 	plat_priv = pci_priv->plat_priv;
5215 	if (!plat_priv)
5216 		return -ENODEV;
5217 
5218 	info->va = pci_priv->bar;
5219 	info->pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
5220 	info->chip_id = plat_priv->chip_info.chip_id;
5221 	info->chip_family = plat_priv->chip_info.chip_family;
5222 	info->board_id = plat_priv->board_info.board_id;
5223 	info->soc_id = plat_priv->soc_info.soc_id;
5224 	info->fw_version = plat_priv->fw_version_info.fw_version;
5225 	strlcpy(info->fw_build_timestamp,
5226 		plat_priv->fw_version_info.fw_build_timestamp,
5227 		sizeof(info->fw_build_timestamp));
5228 	memcpy(&info->device_version, &plat_priv->device_version,
5229 	       sizeof(info->device_version));
5230 	memcpy(&info->dev_mem_info, &plat_priv->dev_mem_info,
5231 	       sizeof(info->dev_mem_info));
5232 	memcpy(&info->fw_build_id, &plat_priv->fw_build_id,
5233 	       sizeof(info->fw_build_id));
5234 
5235 	return 0;
5236 }
5237 EXPORT_SYMBOL(cnss_get_soc_info);
5238 
5239 int cnss_pci_get_user_msi_assignment(struct cnss_pci_data *pci_priv,
5240 				     char *user_name,
5241 				     int *num_vectors,
5242 				     u32 *user_base_data,
5243 				     u32 *base_vector)
5244 {
5245 	return cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
5246 					    user_name,
5247 					    num_vectors,
5248 					    user_base_data,
5249 					    base_vector);
5250 }
5251 
5252 static int cnss_pci_irq_set_affinity_hint(struct cnss_pci_data *pci_priv,
5253 					  unsigned int vec,
5254 					  const struct cpumask *cpumask)
5255 {
5256 	int ret;
5257 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5258 
5259 	ret = irq_set_affinity_hint(pci_irq_vector(pci_dev, vec),
5260 				    cpumask);
5261 
5262 	return ret;
5263 }
5264 
5265 static int cnss_pci_enable_msi(struct cnss_pci_data *pci_priv)
5266 {
5267 	int ret = 0;
5268 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5269 	int num_vectors;
5270 	struct cnss_msi_config *msi_config;
5271 
5272 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
5273 		return 0;
5274 
5275 	if (cnss_pci_is_force_one_msi(pci_priv)) {
5276 		ret = cnss_pci_get_one_msi_assignment(pci_priv);
5277 		cnss_pr_dbg("force one msi\n");
5278 	} else {
5279 		ret = cnss_pci_get_msi_assignment(pci_priv);
5280 	}
5281 	if (ret) {
5282 		cnss_pr_err("Failed to get MSI assignment, err = %d\n", ret);
5283 		goto out;
5284 	}
5285 
5286 	msi_config = pci_priv->msi_config;
5287 	if (!msi_config) {
5288 		cnss_pr_err("msi_config is NULL!\n");
5289 		ret = -EINVAL;
5290 		goto out;
5291 	}
5292 
5293 	num_vectors = pci_alloc_irq_vectors(pci_dev,
5294 					    msi_config->total_vectors,
5295 					    msi_config->total_vectors,
5296 					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
5297 	if ((num_vectors != msi_config->total_vectors) &&
5298 	    !cnss_pci_fallback_one_msi(pci_priv, &num_vectors)) {
5299 		cnss_pr_err("Failed to get enough MSI vectors (%d), available vectors = %d\n",
5300 			    msi_config->total_vectors, num_vectors);
5301 		if (num_vectors >= 0)
5302 			ret = -EINVAL;
5303 		goto reset_msi_config;
5304 	}
5305 
5306 	/* With VT-d disabled on x86 platforms, only one PCI irq vector is
5307 	 * allocated. On suspend, the irq may be migrated to CPU0 if it was
5308 	 * affine to another CPU, with a new MSI vector re-allocated. The
5309 	 * observed result is that no irq handler is registered for the new
5310 	 * vector after resume.
5311 	 * The fix is to set the irq vector affinity to CPU0 before calling
5312 	 * request_irq, to avoid the irq migration.
5313 	 */
5314 	if (cnss_pci_is_one_msi(pci_priv)) {
5315 		ret = cnss_pci_irq_set_affinity_hint(pci_priv,
5316 						     0,
5317 						     cpumask_of(0));
5318 		if (ret) {
5319 			cnss_pr_err("Failed to affinize irq vector to CPU0\n");
5320 			goto free_msi_vector;
5321 		}
5322 	}
5323 
5324 	if (cnss_pci_config_msi_addr(pci_priv)) {
5325 		ret = -EINVAL;
5326 		goto free_msi_vector;
5327 	}
5328 
5329 	if (cnss_pci_config_msi_data(pci_priv)) {
5330 		ret = -EINVAL;
5331 		goto free_msi_vector;
5332 	}
5333 
5334 	return 0;
5335 
5336 free_msi_vector:
5337 	if (cnss_pci_is_one_msi(pci_priv))
5338 		cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);
5339 	pci_free_irq_vectors(pci_priv->pci_dev);
5340 reset_msi_config:
5341 	pci_priv->msi_config = NULL;
5342 out:
5343 	return ret;
5344 }
5345 
5346 static void cnss_pci_disable_msi(struct cnss_pci_data *pci_priv)
5347 {
5348 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
5349 		return;
5350 
5351 	if (cnss_pci_is_one_msi(pci_priv))
5352 		cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);
5353 
5354 	pci_free_irq_vectors(pci_priv->pci_dev);
5355 }
5356 
5357 int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
5358 				 int *num_vectors, u32 *user_base_data,
5359 				 u32 *base_vector)
5360 {
5361 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5362 	struct cnss_msi_config *msi_config;
5363 	int idx;
5364 
5365 	if (!pci_priv)
5366 		return -ENODEV;
5367 
5368 	msi_config = pci_priv->msi_config;
5369 	if (!msi_config) {
5370 		cnss_pr_err("MSI is not supported.\n");
5371 		return -EINVAL;
5372 	}
5373 
5374 	for (idx = 0; idx < msi_config->total_users; idx++) {
5375 		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
5376 			*num_vectors = msi_config->users[idx].num_vectors;
5377 			*user_base_data = msi_config->users[idx].base_vector
5378 				+ pci_priv->msi_ep_base_data;
5379 			*base_vector = msi_config->users[idx].base_vector;
5380 			/*Add only single print for each user*/
5381 			/* Add only a single print for each user */
5382 				goto skip_print;
5383 
5384 			cnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
5385 				    user_name, *num_vectors, *user_base_data,
5386 				    *base_vector);
5387 skip_print:
5388 			return 0;
5389 		}
5390 	}
5391 
5392 	cnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);
5393 
5394 	return -EINVAL;
5395 }
5396 EXPORT_SYMBOL(cnss_get_user_msi_assignment);
5397 
5398 int cnss_get_msi_irq(struct device *dev, unsigned int vector)
5399 {
5400 	struct pci_dev *pci_dev = to_pci_dev(dev);
5401 	int irq_num;
5402 
5403 	irq_num = pci_irq_vector(pci_dev, vector);
5404 	cnss_pr_dbg("Get IRQ number %d for vector index %d\n", irq_num, vector);
5405 
5406 	return irq_num;
5407 }
5408 EXPORT_SYMBOL(cnss_get_msi_irq);
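
/*
 * Illustrative MSI lookup sketch (the "CE" user name is a placeholder;
 * actual names come from the platform's MSI configuration): a named
 * user block resolves to a vector range, and each vector maps to a
 * Linux IRQ number.
 *
 *	int num_vectors, i, irq;
 *	u32 user_base_data, base_vector;
 *
 *	if (!cnss_get_user_msi_assignment(dev, "CE", &num_vectors,
 *					  &user_base_data, &base_vector)) {
 *		for (i = 0; i < num_vectors; i++)
 *			irq = cnss_get_msi_irq(dev, base_vector + i);
 *	}
 *
 * The firmware-facing value is user_base_data (base_vector plus the MSI
 * endpoint base data), while base_vector indexes the host IRQ vectors.
 */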
5409 
5410 bool cnss_is_one_msi(struct device *dev)
5411 {
5412 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5413 
5414 	if (!pci_priv)
5415 		return false;
5416 
5417 	return cnss_pci_is_one_msi(pci_priv);
5418 }
5419 EXPORT_SYMBOL(cnss_is_one_msi);
5420 
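/* Report the MSI/MSI-X doorbell address programmed for the device. For
 * MSI-X the cached address is returned; for MSI it is read back from the
 * MSI capability in config space, with the high word valid only on
 * 64-bit capable devices.
 */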
5421 void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
5422 			  u32 *msi_addr_high)
5423 {
5424 	struct pci_dev *pci_dev = to_pci_dev(dev);
5425 	struct cnss_pci_data *pci_priv;
5426 	u16 control;
5427 
5428 	if (!pci_dev)
5429 		return;
5430 
5431 	pci_priv = cnss_get_pci_priv(pci_dev);
5432 	if (!pci_priv)
5433 		return;
5434 
5435 	if (pci_dev->msix_enabled) {
5436 		*msi_addr_low = pci_priv->msix_addr;
5437 		*msi_addr_high = 0;
5438 		if (!print_optimize.msi_addr_chk++)
5439 			cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
5440 				    *msi_addr_low, *msi_addr_high);
5441 		return;
5442 	}
5443 
5444 	pci_read_config_word(pci_dev, pci_dev->msi_cap + PCI_MSI_FLAGS,
5445 			     &control);
5446 	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
5447 			      msi_addr_low);
5448 	/* Return MSI high address only when device supports 64-bit MSI */
5449 	if (control & PCI_MSI_FLAGS_64BIT)
5450 		pci_read_config_dword(pci_dev,
5451 				      pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
5452 				      msi_addr_high);
5453 	else
5454 		*msi_addr_high = 0;
	/* Print the address only once since it is constant */
	if (!print_optimize.msi_addr_chk++)
5457 		cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
5458 			    *msi_addr_low, *msi_addr_high);
5459 }
5460 EXPORT_SYMBOL(cnss_get_msi_address);
5461 
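/* Return the MSI data value of the wake MSI, or 0 if it is not assigned */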
5462 u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv)
5463 {
5464 	int ret, num_vectors;
5465 	u32 user_base_data, base_vector;
5466 
	/* The u32 return type cannot carry a negative errno;
	 * callers treat 0 as "no valid wake MSI".
	 */
	if (!pci_priv)
		return 0;
5469 
5470 	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
5471 					   WAKE_MSI_NAME, &num_vectors,
5472 					   &user_base_data, &base_vector);
5473 	if (ret) {
5474 		cnss_pr_err("WAKE MSI is not valid\n");
5475 		return 0;
5476 	}
5477 
5478 	return user_base_data;
5479 }
5480 
5481 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
5482 static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
5483 {
5484 	return dma_set_mask(&pci_dev->dev, mask);
5485 }
5486 
5487 static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
5488 	u64 mask)
5489 {
5490 	return dma_set_coherent_mask(&pci_dev->dev, mask);
5491 }
5492 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
5493 static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
5494 {
5495 	return pci_set_dma_mask(pci_dev, mask);
5496 }
5497 
5498 static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
5499 	u64 mask)
5500 {
5501 	return pci_set_consistent_dma_mask(pci_dev, mask);
5502 }
5503 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
5504 
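/* Bring up the PCI device: verify the device ID, assign and map BAR0,
 * pick the DMA mask for the chip, save the default config space (before
 * bus mastering is enabled) and finally enable bus mastering.
 */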
5505 static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
5506 {
5507 	int ret = 0;
5508 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5509 	u16 device_id;
5510 
5511 	pci_read_config_word(pci_dev, PCI_DEVICE_ID, &device_id);
	if (device_id != pci_priv->pci_device_id->device) {
5513 		cnss_pr_err("PCI device ID mismatch, config ID: 0x%x, probe ID: 0x%x\n",
5514 			    device_id, pci_priv->pci_device_id->device);
5515 		ret = -EIO;
5516 		goto out;
5517 	}
5518 
5519 	ret = pci_assign_resource(pci_dev, PCI_BAR_NUM);
5520 	if (ret) {
		cnss_pr_err("Failed to assign PCI resource, err = %d\n", ret);
5522 		goto out;
5523 	}
5524 
5525 	ret = pci_enable_device(pci_dev);
5526 	if (ret) {
5527 		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
5528 		goto out;
5529 	}
5530 
5531 	ret = pci_request_region(pci_dev, PCI_BAR_NUM, "cnss");
5532 	if (ret) {
5533 		cnss_pr_err("Failed to request PCI region, err = %d\n", ret);
5534 		goto disable_device;
5535 	}
5536 
5537 	switch (device_id) {
5538 	case QCA6174_DEVICE_ID:
5539 	case QCN7605_DEVICE_ID:
5540 		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
5541 		break;
5542 	case QCA6390_DEVICE_ID:
5543 	case QCA6490_DEVICE_ID:
5544 	case KIWI_DEVICE_ID:
5545 	case MANGO_DEVICE_ID:
5546 	case PEACH_DEVICE_ID:
5547 		pci_priv->dma_bit_mask = PCI_DMA_MASK_36_BIT;
5548 		break;
5549 	default:
5550 		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
5551 		break;
5552 	}
5553 
	cnss_pr_dbg("Set PCI DMA mask (0x%llx)\n", pci_priv->dma_bit_mask);
5555 
5556 	ret = cnss_pci_set_dma_mask(pci_dev, pci_priv->dma_bit_mask);
5557 	if (ret) {
5558 		cnss_pr_err("Failed to set PCI DMA mask, err = %d\n", ret);
5559 		goto release_region;
5560 	}
5561 
5562 	ret = cnss_pci_set_coherent_dma_mask(pci_dev, pci_priv->dma_bit_mask);
5563 	if (ret) {
5564 		cnss_pr_err("Failed to set PCI coherent DMA mask, err = %d\n",
5565 			    ret);
5566 		goto release_region;
5567 	}
5568 
5569 	pci_priv->bar = pci_iomap(pci_dev, PCI_BAR_NUM, 0);
5570 	if (!pci_priv->bar) {
5571 		cnss_pr_err("Failed to do PCI IO map!\n");
5572 		ret = -EIO;
5573 		goto release_region;
5574 	}
5575 
5576 	/* Save default config space without BME enabled */
5577 	pci_save_state(pci_dev);
5578 	pci_priv->default_state = pci_store_saved_state(pci_dev);
5579 
5580 	pci_set_master(pci_dev);
5581 
5582 	return 0;
5583 
5584 release_region:
5585 	pci_release_region(pci_dev, PCI_BAR_NUM);
5586 disable_device:
5587 	pci_disable_device(pci_dev);
5588 out:
5589 	return ret;
5590 }
5591 
5592 static void cnss_pci_disable_bus(struct cnss_pci_data *pci_priv)
5593 {
5594 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5595 
5596 	pci_clear_master(pci_dev);
5597 	pci_load_and_free_saved_state(pci_dev, &pci_priv->saved_state);
5598 	pci_load_and_free_saved_state(pci_dev, &pci_priv->default_state);
5599 
5600 	if (pci_priv->bar) {
5601 		pci_iounmap(pci_dev, pci_priv->bar);
5602 		pci_priv->bar = NULL;
5603 	}
5604 
5605 	pci_release_region(pci_dev, PCI_BAR_NUM);
5606 	if (pci_is_enabled(pci_dev))
5607 		pci_disable_device(pci_dev);
5608 }
5609 
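/* Dump the QDSS CSR registers into plat_priv->qdss_reg, allocating the
 * buffer on first use (GFP_ATOMIC when called from atomic context).
 */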
5610 static void cnss_pci_dump_qdss_reg(struct cnss_pci_data *pci_priv)
5611 {
5612 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5613 	int i, array_size = ARRAY_SIZE(qdss_csr) - 1;
5614 	gfp_t gfp = GFP_KERNEL;
5615 	u32 reg_offset;
5616 
5617 	if (in_interrupt() || irqs_disabled())
5618 		gfp = GFP_ATOMIC;
5619 
5620 	if (!plat_priv->qdss_reg) {
5621 		plat_priv->qdss_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
5622 						   sizeof(*plat_priv->qdss_reg)
5623 						   * array_size, gfp);
5624 		if (!plat_priv->qdss_reg)
5625 			return;
5626 	}
5627 
5628 	cnss_pr_dbg("Start to dump qdss registers\n");
5629 
5630 	for (i = 0; qdss_csr[i].name; i++) {
5631 		reg_offset = QDSS_APB_DEC_CSR_BASE + qdss_csr[i].offset;
5632 		if (cnss_pci_reg_read(pci_priv, reg_offset,
5633 				      &plat_priv->qdss_reg[i]))
5634 			return;
5635 		cnss_pr_dbg("%s[0x%x] = 0x%x\n", qdss_csr[i].name, reg_offset,
5636 			    plat_priv->qdss_reg[i]);
5637 	}
5638 }
5639 
5640 static void cnss_pci_dump_ce_reg(struct cnss_pci_data *pci_priv,
5641 				 enum cnss_ce_index ce)
5642 {
5643 	int i;
5644 	u32 ce_base = ce * CE_REG_INTERVAL;
5645 	u32 reg_offset, src_ring_base, dst_ring_base, cmn_base, val;
5646 
5647 	switch (pci_priv->device_id) {
5648 	case QCA6390_DEVICE_ID:
5649 		src_ring_base = QCA6390_CE_SRC_RING_REG_BASE;
5650 		dst_ring_base = QCA6390_CE_DST_RING_REG_BASE;
5651 		cmn_base = QCA6390_CE_COMMON_REG_BASE;
5652 		break;
5653 	case QCA6490_DEVICE_ID:
5654 		src_ring_base = QCA6490_CE_SRC_RING_REG_BASE;
5655 		dst_ring_base = QCA6490_CE_DST_RING_REG_BASE;
5656 		cmn_base = QCA6490_CE_COMMON_REG_BASE;
5657 		break;
5658 	default:
5659 		return;
5660 	}
5661 
5662 	switch (ce) {
5663 	case CNSS_CE_09:
5664 	case CNSS_CE_10:
5665 		for (i = 0; ce_src[i].name; i++) {
5666 			reg_offset = src_ring_base + ce_base + ce_src[i].offset;
5667 			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
5668 				return;
5669 			cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
5670 				    ce, ce_src[i].name, reg_offset, val);
5671 		}
5672 
5673 		for (i = 0; ce_dst[i].name; i++) {
5674 			reg_offset = dst_ring_base + ce_base + ce_dst[i].offset;
5675 			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
5676 				return;
5677 			cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
5678 				    ce, ce_dst[i].name, reg_offset, val);
5679 		}
5680 		break;
5681 	case CNSS_CE_COMMON:
5682 		for (i = 0; ce_cmn[i].name; i++) {
			reg_offset = cmn_base + ce_cmn[i].offset;
5684 			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
5685 				return;
5686 			cnss_pr_dbg("CE_COMMON_%s[0x%x] = 0x%x\n",
5687 				    ce_cmn[i].name, reg_offset, val);
5688 		}
5689 		break;
5690 	default:
		cnss_pr_err("Unsupported CE[%d] register dump\n", ce);
5692 	}
5693 }
5694 
5695 static void cnss_pci_dump_debug_reg(struct cnss_pci_data *pci_priv)
5696 {
5697 	if (cnss_pci_check_link_status(pci_priv))
5698 		return;
5699 
5700 	cnss_pr_dbg("Start to dump debug registers\n");
5701 
5702 	cnss_mhi_debug_reg_dump(pci_priv);
5703 	cnss_pci_soc_scratch_reg_dump(pci_priv);
5704 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_COMMON);
5705 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_09);
5706 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_10);
5707 }
5708 
5709 static int cnss_pci_assert_host_sol(struct cnss_pci_data *pci_priv)
5710 {
5711 	if (cnss_get_host_sol_value(pci_priv->plat_priv))
5712 		return -EINVAL;
5713 
5714 	cnss_pr_dbg("Assert host SOL GPIO to retry RDDM, expecting link down\n");
5715 	cnss_set_host_sol_value(pci_priv->plat_priv, 1);
5716 
5717 	return 0;
5718 }
5719 
5720 static void cnss_pci_mhi_reg_dump(struct cnss_pci_data *pci_priv)
5721 {
5722 	if (!cnss_pci_check_link_status(pci_priv))
5723 		cnss_mhi_debug_reg_dump(pci_priv);
5724 
5725 	cnss_pci_soc_scratch_reg_dump(pci_priv);
5726 	cnss_pci_dump_misc_reg(pci_priv);
5727 	cnss_pci_dump_shadow_reg(pci_priv);
5728 }
5729 
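/* Force a firmware assert so RDDM (RAM dump download mode) can be
 * collected: resume the bus, dump MHI/SOC registers, trigger RDDM over
 * MHI and arm dev_rddm_timer to catch a device that never enters RDDM.
 * Falls back to host SOL assertion or direct recovery if the trigger
 * fails.
 */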
5730 int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
5731 {
5732 	int ret;
5733 	struct cnss_plat_data *plat_priv;
5734 
5735 	if (!pci_priv)
5736 		return -ENODEV;
5737 
5738 	plat_priv = pci_priv->plat_priv;
5739 	if (!plat_priv)
5740 		return -ENODEV;
5741 
5742 	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
5743 	    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state))
5744 		return -EINVAL;
	/*
	 * Call pm_runtime_get_sync instead of auto_resume to take a
	 * reference and make sure runtime_suspend won't get called.
	 */
5749 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
5750 	if (ret < 0)
5751 		goto runtime_pm_put;
5752 	/*
5753 	 * In some scenarios, cnss_pci_pm_runtime_get_sync
5754 	 * might not resume PCI bus. For those cases do auto resume.
5755 	 */
5756 	cnss_auto_resume(&pci_priv->pci_dev->dev);
5757 
5758 	if (!pci_priv->is_smmu_fault)
5759 		cnss_pci_mhi_reg_dump(pci_priv);
5760 
5761 	/* If link is still down here, directly trigger link down recovery */
5762 	ret = cnss_pci_check_link_status(pci_priv);
5763 	if (ret) {
5764 		cnss_pci_link_down(&pci_priv->pci_dev->dev);
5765 		cnss_pci_pm_runtime_mark_last_busy(pci_priv);
5766 		cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
5767 		return 0;
5768 	}
5769 
5770 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
5771 	if (ret) {
5772 		if (pci_priv->is_smmu_fault) {
5773 			cnss_pci_mhi_reg_dump(pci_priv);
5774 			pci_priv->is_smmu_fault = false;
5775 		}
5776 		if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
5777 		    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state)) {
5778 			cnss_pr_dbg("MHI is not powered on, ignore RDDM failure\n");
5779 			cnss_pci_pm_runtime_mark_last_busy(pci_priv);
5780 			cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
5781 			return 0;
5782 		}
5783 		cnss_fatal_err("Failed to trigger RDDM, err = %d\n", ret);
5784 		if (!cnss_pci_assert_host_sol(pci_priv)) {
5785 			cnss_pci_pm_runtime_mark_last_busy(pci_priv);
5786 			cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
5787 			return 0;
5788 		}
5789 		cnss_pci_dump_debug_reg(pci_priv);
5790 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
5791 				       CNSS_REASON_DEFAULT);
5792 		goto runtime_pm_put;
5793 	}
5794 
5795 	if (pci_priv->is_smmu_fault) {
5796 		cnss_pci_mhi_reg_dump(pci_priv);
5797 		pci_priv->is_smmu_fault = false;
5798 	}
5799 
5800 	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
5801 		mod_timer(&pci_priv->dev_rddm_timer,
5802 			  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
5803 	}
5804 
5805 runtime_pm_put:
5806 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
5807 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
5808 	return ret;
5809 }
5810 
5811 static void cnss_pci_add_dump_seg(struct cnss_pci_data *pci_priv,
5812 				  struct cnss_dump_seg *dump_seg,
5813 				  enum cnss_fw_dump_type type, int seg_no,
5814 				  void *va, dma_addr_t dma, size_t size)
5815 {
5816 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5817 	struct device *dev = &pci_priv->pci_dev->dev;
5818 	phys_addr_t pa;
5819 
5820 	dump_seg->address = dma;
5821 	dump_seg->v_address = va;
5822 	dump_seg->size = size;
5823 	dump_seg->type = type;
5824 
5825 	cnss_pr_dbg("Seg: %x, va: %pK, dma: %pa, size: 0x%zx\n",
5826 		    seg_no, va, &dma, size);
5827 
5828 	if (cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS))
5829 		return;
5830 
5831 	cnss_minidump_add_region(plat_priv, type, seg_no, va, pa, size);
5832 }
5833 
5834 static void cnss_pci_remove_dump_seg(struct cnss_pci_data *pci_priv,
5835 				     struct cnss_dump_seg *dump_seg,
5836 				     enum cnss_fw_dump_type type, int seg_no,
5837 				     void *va, dma_addr_t dma, size_t size)
5838 {
5839 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5840 	struct device *dev = &pci_priv->pci_dev->dev;
5841 	phys_addr_t pa;
5842 
5843 	cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS);
5844 	cnss_minidump_remove_region(plat_priv, type, seg_no, va, pa, size);
5845 }
5846 
5847 int cnss_pci_call_driver_uevent(struct cnss_pci_data *pci_priv,
5848 				enum cnss_driver_status status, void *data)
5849 {
5850 	struct cnss_uevent_data uevent_data;
5851 	struct cnss_wlan_driver *driver_ops;
5852 
5853 	driver_ops = pci_priv->driver_ops;
5854 	if (!driver_ops || !driver_ops->update_event) {
5855 		cnss_pr_dbg("Hang event driver ops is NULL\n");
5856 		return -EINVAL;
5857 	}
5858 
5859 	cnss_pr_dbg("Calling driver uevent: %d\n", status);
5860 
5861 	uevent_data.status = status;
5862 	uevent_data.data = data;
5863 
5864 	return driver_ops->update_event(pci_priv->pci_dev, &uevent_data);
5865 }
5866 
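/* Snapshot the firmware hang data from the DDR segment and forward it to
 * the registered wlan driver via a CNSS_HANG_EVENT uevent. The offset and
 * length come from QMI where available, otherwise from the per-chip
 * hard-coded defaults above.
 */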
5867 static void cnss_pci_send_hang_event(struct cnss_pci_data *pci_priv)
5868 {
5869 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5870 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
5871 	struct cnss_hang_event hang_event;
5872 	void *hang_data_va = NULL;
5873 	u64 offset = 0;
5874 	u16 length = 0;
5875 	int i = 0;
5876 
5877 	if (!fw_mem || !plat_priv->fw_mem_seg_len)
5878 		return;
5879 
5880 	memset(&hang_event, 0, sizeof(hang_event));
5881 	switch (pci_priv->device_id) {
5882 	case QCA6390_DEVICE_ID:
5883 		offset = HST_HANG_DATA_OFFSET;
5884 		length = HANG_DATA_LENGTH;
5885 		break;
5886 	case QCA6490_DEVICE_ID:
		/* Fall back to hard-coded values if the hang event params are
		 * not present in QMI. Once all firmware branches have the
		 * fix to send the params over QMI, this can be removed.
		 */
5891 		if (plat_priv->hang_event_data_len) {
5892 			offset = plat_priv->hang_data_addr_offset;
5893 			length = plat_priv->hang_event_data_len;
5894 		} else {
5895 			offset = HSP_HANG_DATA_OFFSET;
5896 			length = HANG_DATA_LENGTH;
5897 		}
5898 		break;
5899 	case KIWI_DEVICE_ID:
5900 	case MANGO_DEVICE_ID:
5901 	case PEACH_DEVICE_ID:
5902 		offset = plat_priv->hang_data_addr_offset;
5903 		length = plat_priv->hang_event_data_len;
5904 		break;
5905 	default:
		cnss_pr_err("Skip hang event data, unsupported device ID: 0x%lx\n",
			    pci_priv->device_id);
5908 		return;
5909 	}
5910 
5911 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
5912 		if (fw_mem[i].type == QMI_WLFW_MEM_TYPE_DDR_V01 &&
5913 		    fw_mem[i].va) {
			/* The hang data must fit within the segment:
			 * offset <= fw_mem size - hang data length
			 */
			if (!(offset <= fw_mem[i].size - length))
5916 				goto exit;
5917 
5918 			hang_data_va = fw_mem[i].va + offset;
5919 			hang_event.hang_event_data = kmemdup(hang_data_va,
5920 							     length,
5921 							     GFP_ATOMIC);
5922 			if (!hang_event.hang_event_data) {
5923 				cnss_pr_dbg("Hang data memory alloc failed\n");
5924 				return;
5925 			}
5926 			hang_event.hang_event_data_len = length;
5927 			break;
5928 		}
5929 	}
5930 
5931 	cnss_pci_call_driver_uevent(pci_priv, CNSS_HANG_EVENT, &hang_event);
5932 
5933 	kfree(hang_event.hang_event_data);
5934 	hang_event.hang_event_data = NULL;
5935 	return;
5936 exit:
5937 	cnss_pr_dbg("Invalid hang event params, offset:0x%x, length:0x%x\n",
5938 		    plat_priv->hang_data_addr_offset,
5939 		    plat_priv->hang_event_data_len);
5940 }
5941 
5942 #ifdef CONFIG_CNSS2_SSR_DRIVER_DUMP
5943 void cnss_pci_collect_host_dump_info(struct cnss_pci_data *pci_priv)
5944 {
5945 	struct cnss_ssr_driver_dump_entry ssr_entry[CNSS_HOST_DUMP_TYPE_MAX] = {0};
5946 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5947 	size_t num_entries_loaded = 0;
5948 	int x;
5949 	int ret = -1;
5950 
5951 	if (pci_priv->driver_ops &&
5952 	    pci_priv->driver_ops->collect_driver_dump) {
5953 		ret = pci_priv->driver_ops->collect_driver_dump(pci_priv->pci_dev,
5954 								ssr_entry,
5955 								&num_entries_loaded);
5956 	}
5957 
5958 	if (!ret) {
5959 		for (x = 0; x < num_entries_loaded; x++) {
5960 			cnss_pr_info("Idx:%d, ptr: %p, name: %s, size: %d\n",
5961 				     x, ssr_entry[x].buffer_pointer,
5962 				     ssr_entry[x].region_name,
5963 				     ssr_entry[x].buffer_size);
5964 		}
5965 
5966 		cnss_do_host_ramdump(plat_priv, ssr_entry, num_entries_loaded);
5967 	} else {
5968 		cnss_pr_info("Host SSR elf dump collection feature disabled\n");
5969 	}
5970 }
5971 #endif
5972 
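/* Collect the full crash dump: send the hang event, make sure the link is
 * up (resuming the bus when not in panic), download the RDDM image over
 * MHI, and register the FW image, RDDM and contiguous remote heap
 * segments with the ramdump/minidump infrastructure.
 */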
5973 void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
5974 {
5975 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5976 	struct cnss_dump_data *dump_data =
5977 		&plat_priv->ramdump_info_v2.dump_data;
5978 	struct cnss_dump_seg *dump_seg =
5979 		plat_priv->ramdump_info_v2.dump_data_vaddr;
5980 	struct image_info *fw_image, *rddm_image;
5981 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
5982 	int ret, i, j;
5983 
5984 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
5985 	    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
5986 		cnss_pci_send_hang_event(pci_priv);
5987 
5988 	if (test_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state)) {
5989 		cnss_pr_dbg("RAM dump is already collected, skip\n");
5990 		return;
5991 	}
5992 
5993 	if (!cnss_is_device_powered_on(plat_priv)) {
5994 		cnss_pr_dbg("Device is already powered off, skip\n");
5995 		return;
5996 	}
5997 
5998 	if (!in_panic) {
5999 		mutex_lock(&pci_priv->bus_lock);
6000 		ret = cnss_pci_check_link_status(pci_priv);
6001 		if (ret) {
6002 			if (ret != -EACCES) {
6003 				mutex_unlock(&pci_priv->bus_lock);
6004 				return;
6005 			}
6006 			if (cnss_pci_resume_bus(pci_priv)) {
6007 				mutex_unlock(&pci_priv->bus_lock);
6008 				return;
6009 			}
6010 		}
6011 		mutex_unlock(&pci_priv->bus_lock);
6012 	} else {
6013 		if (cnss_pci_check_link_status(pci_priv))
6014 			return;
6015 		/* Inside panic handler, reduce timeout for RDDM to avoid
6016 		 * unnecessary hypervisor watchdog bite.
6017 		 */
6018 		pci_priv->mhi_ctrl->timeout_ms /= 2;
6019 	}
6020 
6021 	cnss_mhi_debug_reg_dump(pci_priv);
6022 	cnss_pci_soc_scratch_reg_dump(pci_priv);
6023 	cnss_pci_dump_misc_reg(pci_priv);
6024 
6025 	cnss_rddm_trigger_debug(pci_priv);
6026 	ret = mhi_download_rddm_image(pci_priv->mhi_ctrl, in_panic);
6027 	if (ret) {
6028 		cnss_fatal_err("Failed to download RDDM image, err = %d\n",
6029 			       ret);
6030 		if (!cnss_pci_assert_host_sol(pci_priv))
6031 			return;
6032 		cnss_rddm_trigger_check(pci_priv);
6033 		cnss_pci_dump_debug_reg(pci_priv);
6034 		return;
6035 	}
6036 	cnss_rddm_trigger_check(pci_priv);
6037 	fw_image = pci_priv->mhi_ctrl->fbc_image;
6038 	rddm_image = pci_priv->mhi_ctrl->rddm_image;
6039 	dump_data->nentries = 0;
6040 
6041 	if (plat_priv->qdss_mem_seg_len)
6042 		cnss_pci_dump_qdss_reg(pci_priv);
6043 	cnss_mhi_dump_sfr(pci_priv);
6044 
6045 	if (!dump_seg) {
		cnss_pr_warn("FW image dump collection not set up\n");
6047 		goto skip_dump;
6048 	}
6049 
6050 	cnss_pr_dbg("Collect FW image dump segment, nentries %d\n",
6051 		    fw_image->entries);
6052 
6053 	for (i = 0; i < fw_image->entries; i++) {
6054 		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
6055 				      fw_image->mhi_buf[i].buf,
6056 				      fw_image->mhi_buf[i].dma_addr,
6057 				      fw_image->mhi_buf[i].len);
6058 		dump_seg++;
6059 	}
6060 
6061 	dump_data->nentries += fw_image->entries;
6062 
6063 	cnss_pr_dbg("Collect RDDM image dump segment, nentries %d\n",
6064 		    rddm_image->entries);
6065 
6066 	for (i = 0; i < rddm_image->entries; i++) {
6067 		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
6068 				      rddm_image->mhi_buf[i].buf,
6069 				      rddm_image->mhi_buf[i].dma_addr,
6070 				      rddm_image->mhi_buf[i].len);
6071 		dump_seg++;
6072 	}
6073 
6074 	dump_data->nentries += rddm_image->entries;
6075 
6076 	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
6077 		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
6078 			if (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
6079 				cnss_pr_dbg("Collect remote heap dump segment\n");
6080 				cnss_pci_add_dump_seg(pci_priv, dump_seg,
6081 						      CNSS_FW_REMOTE_HEAP, j,
6082 						      fw_mem[i].va,
6083 						      fw_mem[i].pa,
6084 						      fw_mem[i].size);
6085 				dump_seg++;
6086 				dump_data->nentries++;
6087 				j++;
6088 			} else {
				cnss_pr_dbg("Skip remote heap dump as it is non-contiguous\n");
6090 			}
6091 		}
6092 	}
6093 
6094 	if (dump_data->nentries > 0)
6095 		plat_priv->ramdump_info_v2.dump_data_valid = true;
6096 
6097 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_DONE);
6098 
6099 skip_dump:
6100 	complete(&plat_priv->rddm_complete);
6101 }
6102 
6103 void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
6104 {
6105 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6106 	struct cnss_dump_seg *dump_seg =
6107 		plat_priv->ramdump_info_v2.dump_data_vaddr;
6108 	struct image_info *fw_image, *rddm_image;
6109 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
6110 	int i, j;
6111 
6112 	if (!dump_seg)
6113 		return;
6114 
6115 	fw_image = pci_priv->mhi_ctrl->fbc_image;
6116 	rddm_image = pci_priv->mhi_ctrl->rddm_image;
6117 
6118 	for (i = 0; i < fw_image->entries; i++) {
6119 		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
6120 					 fw_image->mhi_buf[i].buf,
6121 					 fw_image->mhi_buf[i].dma_addr,
6122 					 fw_image->mhi_buf[i].len);
6123 		dump_seg++;
6124 	}
6125 
6126 	for (i = 0; i < rddm_image->entries; i++) {
6127 		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
6128 					 rddm_image->mhi_buf[i].buf,
6129 					 rddm_image->mhi_buf[i].dma_addr,
6130 					 rddm_image->mhi_buf[i].len);
6131 		dump_seg++;
6132 	}
6133 
6134 	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
6135 		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR &&
6136 		    (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
6137 			cnss_pci_remove_dump_seg(pci_priv, dump_seg,
6138 						 CNSS_FW_REMOTE_HEAP, j,
6139 						 fw_mem[i].va, fw_mem[i].pa,
6140 						 fw_mem[i].size);
6141 			dump_seg++;
6142 			j++;
6143 		}
6144 	}
6145 
6146 	plat_priv->ramdump_info_v2.dump_data.nentries = 0;
6147 	plat_priv->ramdump_info_v2.dump_data_valid = false;
6148 }
6149 
6150 void cnss_pci_device_crashed(struct cnss_pci_data *pci_priv)
6151 {
6152 	struct cnss_plat_data *plat_priv;
6153 
6154 	if (!pci_priv) {
6155 		cnss_pr_err("pci_priv is NULL\n");
6156 		return;
6157 	}
6158 
6159 	plat_priv = pci_priv->plat_priv;
6160 	if (!plat_priv) {
6161 		cnss_pr_err("plat_priv is NULL\n");
6162 		return;
6163 	}
6164 
6165 	if (plat_priv->recovery_enabled)
6166 		cnss_pci_collect_host_dump_info(pci_priv);
6167 
	/* Call the recovery handler in the DRIVER_RECOVERY event context
	 * instead of scheduling work. That way, the complete recovery is
	 * done as part of the DRIVER_RECOVERY event and gets serialized
	 * with other events.
	 */
6173 	cnss_recovery_handler(plat_priv);
6174 }
6175 
6176 static int cnss_mhi_pm_runtime_get(struct mhi_controller *mhi_ctrl)
6177 {
6178 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6179 
6180 	return cnss_pci_pm_runtime_get(pci_priv, RTPM_ID_MHI);
6181 }
6182 
6183 static void cnss_mhi_pm_runtime_put_noidle(struct mhi_controller *mhi_ctrl)
6184 {
6185 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6186 
6187 	cnss_pci_pm_runtime_put_noidle(pci_priv, RTPM_ID_MHI);
6188 }
6189 
6190 void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv,
6191 				 char *prefix_name, char *name)
6192 {
6193 	struct cnss_plat_data *plat_priv;
6194 
6195 	if (!pci_priv)
6196 		return;
6197 
6198 	plat_priv = pci_priv->plat_priv;
6199 
6200 	if (!plat_priv->use_fw_path_with_prefix) {
6201 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
6202 		return;
6203 	}
6204 
6205 	switch (pci_priv->device_id) {
6206 	case QCN7605_DEVICE_ID:
6207 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6208 			  QCN7605_PATH_PREFIX "%s", name);
6209 		break;
6210 	case QCA6390_DEVICE_ID:
6211 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6212 			  QCA6390_PATH_PREFIX "%s", name);
6213 		break;
6214 	case QCA6490_DEVICE_ID:
6215 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6216 			  QCA6490_PATH_PREFIX "%s", name);
6217 		break;
6218 	case KIWI_DEVICE_ID:
6219 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6220 			  KIWI_PATH_PREFIX "%s", name);
6221 		break;
6222 	case MANGO_DEVICE_ID:
6223 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6224 			  MANGO_PATH_PREFIX "%s", name);
6225 		break;
6226 	case PEACH_DEVICE_ID:
6227 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6228 			  PEACH_PATH_PREFIX "%s", name);
6229 		break;
6230 	default:
6231 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
6232 		break;
6233 	}
6234 
6235 	cnss_pr_dbg("FW name added with prefix: %s\n", prefix_name);
6236 }
6237 
6238 static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
6239 {
6240 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6241 
6242 	switch (pci_priv->device_id) {
6243 	case QCA6390_DEVICE_ID:
6244 		if (plat_priv->device_version.major_version < FW_V2_NUMBER) {
6245 			cnss_pr_dbg("Device ID:version (0x%lx:%d) is not supported\n",
6246 				    pci_priv->device_id,
6247 				    plat_priv->device_version.major_version);
6248 			return -EINVAL;
6249 		}
6250 		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
6251 					    FW_V2_FILE_NAME);
6252 		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
6253 			 FW_V2_FILE_NAME);
6254 		break;
6255 	case QCA6490_DEVICE_ID:
6256 		switch (plat_priv->device_version.major_version) {
6257 		case FW_V2_NUMBER:
			cnss_pci_add_fw_prefix_name(pci_priv,
						    plat_priv->firmware_name,
						    FW_V2_FILE_NAME);
			snprintf(plat_priv->fw_fallback_name,
				 MAX_FIRMWARE_NAME_LEN,
				 FW_V2_FILE_NAME);
6264 			break;
6265 		default:
6266 			cnss_pci_add_fw_prefix_name(pci_priv,
6267 						    plat_priv->firmware_name,
6268 						    DEFAULT_FW_FILE_NAME);
6269 			snprintf(plat_priv->fw_fallback_name,
6270 				 MAX_FIRMWARE_NAME_LEN,
6271 				 DEFAULT_FW_FILE_NAME);
6272 			break;
6273 		}
6274 		break;
6275 	case KIWI_DEVICE_ID:
6276 	case MANGO_DEVICE_ID:
6277 	case PEACH_DEVICE_ID:
6278 		switch (plat_priv->device_version.major_version) {
6279 		case FW_V2_NUMBER:
			/*
			 * KIWI v2 uses separate FW binaries for mission mode
			 * and FTM mode; the platform driver loads the binary
			 * matching the current mode indicated by the wlan
			 * driver, otherwise the default binary is used.
			 * Mission mode keeps the same binary name as before;
			 * if a separate binary is not there, fall back to
			 * the default.
			 */
6288 			if (plat_priv->driver_mode == CNSS_MISSION) {
6289 				cnss_pci_add_fw_prefix_name(pci_priv,
6290 							    plat_priv->firmware_name,
6291 							    FW_V2_FILE_NAME);
6292 				cnss_pci_add_fw_prefix_name(pci_priv,
6293 							    plat_priv->fw_fallback_name,
6294 							    FW_V2_FILE_NAME);
6295 			} else if (plat_priv->driver_mode == CNSS_FTM) {
6296 				cnss_pci_add_fw_prefix_name(pci_priv,
6297 							    plat_priv->firmware_name,
6298 							    FW_V2_FTM_FILE_NAME);
6299 				cnss_pci_add_fw_prefix_name(pci_priv,
6300 							    plat_priv->fw_fallback_name,
6301 							    FW_V2_FILE_NAME);
6302 			} else {
				/*
				 * During the cold boot calibration phase the
				 * wlan driver has not registered yet, so the
				 * default fw binary will be used.
				 */
6308 				cnss_pci_add_fw_prefix_name(pci_priv,
6309 							    plat_priv->firmware_name,
6310 							    FW_V2_FILE_NAME);
6311 				snprintf(plat_priv->fw_fallback_name,
6312 					 MAX_FIRMWARE_NAME_LEN,
6313 					 FW_V2_FILE_NAME);
6314 			}
6315 			break;
6316 		default:
6317 			cnss_pci_add_fw_prefix_name(pci_priv,
6318 						    plat_priv->firmware_name,
6319 						    DEFAULT_FW_FILE_NAME);
6320 			snprintf(plat_priv->fw_fallback_name,
6321 				 MAX_FIRMWARE_NAME_LEN,
6322 				 DEFAULT_FW_FILE_NAME);
6323 			break;
6324 		}
6325 		break;
6326 	default:
6327 		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
6328 					    DEFAULT_FW_FILE_NAME);
6329 		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
6330 			 DEFAULT_FW_FILE_NAME);
6331 		break;
6332 	}
6333 
6334 	cnss_pr_dbg("FW name is %s, FW fallback name is %s\n",
6335 		    plat_priv->firmware_name, plat_priv->fw_fallback_name);
6336 
6337 	return 0;
6338 }
6339 
6340 static char *cnss_mhi_notify_status_to_str(enum mhi_callback status)
6341 {
6342 	switch (status) {
6343 	case MHI_CB_IDLE:
6344 		return "IDLE";
6345 	case MHI_CB_EE_RDDM:
6346 		return "RDDM";
6347 	case MHI_CB_SYS_ERROR:
6348 		return "SYS_ERROR";
6349 	case MHI_CB_FATAL_ERROR:
6350 		return "FATAL_ERROR";
6351 	case MHI_CB_EE_MISSION_MODE:
6352 		return "MISSION_MODE";
6353 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
6354 	case MHI_CB_FALLBACK_IMG:
6355 		return "FW_FALLBACK";
6356 #endif
6357 	default:
6358 		return "UNKNOWN";
6359 	}
}
6361 
6362 static void cnss_dev_rddm_timeout_hdlr(struct timer_list *t)
6363 {
6364 	struct cnss_pci_data *pci_priv =
6365 		from_timer(pci_priv, t, dev_rddm_timer);
6366 	enum mhi_ee_type mhi_ee;
6367 
6368 	if (!pci_priv)
6369 		return;
6370 
6371 	cnss_fatal_err("Timeout waiting for RDDM notification\n");
6372 
6373 	if (!cnss_pci_assert_host_sol(pci_priv))
6374 		return;
6375 
6376 	mhi_ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
6377 	if (mhi_ee == MHI_EE_PBL)
6378 		cnss_pr_err("Unable to collect ramdumps due to abrupt reset\n");
6379 
6380 	if (mhi_ee == MHI_EE_RDDM) {
6381 		cnss_pr_info("Device MHI EE is RDDM, try to collect dump\n");
6382 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6383 				       CNSS_REASON_RDDM);
6384 	} else {
6385 		cnss_mhi_debug_reg_dump(pci_priv);
6386 		cnss_pci_soc_scratch_reg_dump(pci_priv);
6387 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6388 				       CNSS_REASON_TIMEOUT);
6389 	}
6390 }
6391 
6392 static void cnss_boot_debug_timeout_hdlr(struct timer_list *t)
6393 {
6394 	struct cnss_pci_data *pci_priv =
6395 		from_timer(pci_priv, t, boot_debug_timer);
6396 
6397 	if (!pci_priv)
6398 		return;
6399 
6400 	if (cnss_pci_check_link_status(pci_priv))
6401 		return;
6402 
6403 	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
6404 		return;
6405 
6406 	if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
6407 		return;
6408 
6409 	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE))
6410 		return;
6411 
6412 	cnss_pr_dbg("Dump MHI/PBL/SBL debug data every %ds during MHI power on\n",
6413 		    BOOT_DEBUG_TIMEOUT_MS / 1000);
6414 	cnss_mhi_debug_reg_dump(pci_priv);
6415 	cnss_pci_soc_scratch_reg_dump(pci_priv);
6416 	cnss_pci_dump_bl_sram_mem(pci_priv);
6417 
6418 	mod_timer(&pci_priv->boot_debug_timer,
6419 		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
6420 }
6421 
6422 static int cnss_pci_handle_mhi_sys_err(struct cnss_pci_data *pci_priv)
6423 {
6424 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6425 
6426 	cnss_ignore_qmi_failure(true);
6427 	set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
6428 	del_timer(&plat_priv->fw_boot_timer);
6429 	mod_timer(&pci_priv->dev_rddm_timer,
6430 		  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
6431 	cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
6432 
6433 	return 0;
6434 }
6435 
6436 int cnss_pci_handle_dev_sol_irq(struct cnss_pci_data *pci_priv)
6437 {
6438 	return cnss_pci_handle_mhi_sys_err(pci_priv);
6439 }
6440 
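/* MHI status callback: map MHI error/RDDM callbacks to CNSS recovery
 * reasons and kick off recovery, handle firmware image fallback, and
 * ignore benign notifications such as IDLE and MISSION_MODE.
 */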
6441 static void cnss_mhi_notify_status(struct mhi_controller *mhi_ctrl,
6442 				   enum mhi_callback reason)
6443 {
6444 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6445 	struct cnss_plat_data *plat_priv;
6446 	enum cnss_recovery_reason cnss_reason;
6447 
6448 	if (!pci_priv) {
6449 		cnss_pr_err("pci_priv is NULL");
6450 		return;
6451 	}
6452 
6453 	plat_priv = pci_priv->plat_priv;
6454 
6455 	if (reason != MHI_CB_IDLE)
6456 		cnss_pr_dbg("MHI status cb is called with reason %s(%d)\n",
6457 			    cnss_mhi_notify_status_to_str(reason), reason);
6458 
6459 	switch (reason) {
6460 	case MHI_CB_IDLE:
6461 	case MHI_CB_EE_MISSION_MODE:
6462 		return;
6463 	case MHI_CB_FATAL_ERROR:
6464 		cnss_ignore_qmi_failure(true);
6465 		set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
6466 		del_timer(&plat_priv->fw_boot_timer);
6467 		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
6468 		cnss_reason = CNSS_REASON_DEFAULT;
6469 		break;
6470 	case MHI_CB_SYS_ERROR:
6471 		cnss_pci_handle_mhi_sys_err(pci_priv);
6472 		return;
6473 	case MHI_CB_EE_RDDM:
6474 		cnss_ignore_qmi_failure(true);
6475 		set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
6476 		del_timer(&plat_priv->fw_boot_timer);
6477 		del_timer(&pci_priv->dev_rddm_timer);
6478 		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
6479 		cnss_reason = CNSS_REASON_RDDM;
6480 		break;
6481 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
6482 	case MHI_CB_FALLBACK_IMG:
		/* For KIWI v2 the binary fallback is used, so skip the path fallback here */
6484 		if (!(pci_priv->device_id == KIWI_DEVICE_ID &&
6485 		      plat_priv->device_version.major_version == FW_V2_NUMBER)) {
6486 			plat_priv->use_fw_path_with_prefix = false;
6487 			cnss_pci_update_fw_name(pci_priv);
6488 		}
6489 		return;
6490 #endif
6491 	default:
6492 		cnss_pr_err("Unsupported MHI status cb reason: %d\n", reason);
6493 		return;
6494 	}
6495 
6496 	cnss_schedule_recovery(&pci_priv->pci_dev->dev, cnss_reason);
6497 }
6498 
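/* Build the IRQ array for the MHI controller from the "MHI" MSI user
 * assignment. In one-MSI mode every MHI event ring shares the single
 * vector, so the same IRQ is replicated across the array.
 */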
6499 static int cnss_pci_get_mhi_msi(struct cnss_pci_data *pci_priv)
6500 {
6501 	int ret, num_vectors, i;
6502 	u32 user_base_data, base_vector;
6503 	int *irq;
6504 	unsigned int msi_data;
6505 	bool is_one_msi = false;
6506 
6507 	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
6508 					   MHI_MSI_NAME, &num_vectors,
6509 					   &user_base_data, &base_vector);
6510 	if (ret)
6511 		return ret;
6512 
6513 	if (cnss_pci_is_one_msi(pci_priv)) {
6514 		is_one_msi = true;
6515 		num_vectors = cnss_pci_get_one_msi_mhi_irq_array_size(pci_priv);
6516 	}
6517 	cnss_pr_dbg("Number of assigned MSI for MHI is %d, base vector is %d\n",
6518 		    num_vectors, base_vector);
6519 
6520 	irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
6521 	if (!irq)
6522 		return -ENOMEM;
6523 
6524 	for (i = 0; i < num_vectors; i++) {
6525 		msi_data = base_vector;
6526 		if (!is_one_msi)
6527 			msi_data += i;
6528 		irq[i] = cnss_get_msi_irq(&pci_priv->pci_dev->dev, msi_data);
6529 	}
6530 
6531 	pci_priv->mhi_ctrl->irq = irq;
6532 	pci_priv->mhi_ctrl->nr_irqs = num_vectors;
6533 
6534 	return 0;
6535 }
6536 
6537 static int cnss_mhi_bw_scale(struct mhi_controller *mhi_ctrl,
6538 			     struct mhi_link_info *link_info)
6539 {
6540 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6541 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6542 	int ret = 0;
6543 
6544 	cnss_pr_dbg("Setting link speed:0x%x, width:0x%x\n",
6545 		    link_info->target_link_speed,
6546 		    link_info->target_link_width);
6547 
	/* The target link speed has to be set here, before setting the link
	 * bandwidth, when the device requests a link speed change. This
	 * avoids the link bandwidth request being rejected when the
	 * requested link speed is higher than the current one.
	 */
6553 	ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num,
6554 					  link_info->target_link_speed);
6555 	if (ret)
6556 		cnss_pr_err("Failed to set target link speed to 0x%x, err = %d\n",
6557 			    link_info->target_link_speed, ret);
6558 
6559 	ret = cnss_pci_set_link_bandwidth(pci_priv,
6560 					  link_info->target_link_speed,
6561 					  link_info->target_link_width);
6562 
6563 	if (ret) {
6564 		cnss_pr_err("Failed to set link bandwidth, err = %d\n", ret);
6565 		return ret;
6566 	}
6567 
6568 	pci_priv->def_link_speed = link_info->target_link_speed;
6569 	pci_priv->def_link_width = link_info->target_link_width;
6570 
6571 	return 0;
6572 }
6573 
6574 static int cnss_mhi_read_reg(struct mhi_controller *mhi_ctrl,
6575 			     void __iomem *addr, u32 *out)
6576 {
6577 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6578 
6579 	u32 tmp = readl_relaxed(addr);
6580 
6581 	/* Unexpected value, query the link status */
6582 	if (PCI_INVALID_READ(tmp) &&
6583 	    cnss_pci_check_link_status(pci_priv))
6584 		return -EIO;
6585 
6586 	*out = tmp;
6587 
6588 	return 0;
6589 }
6590 
6591 static void cnss_mhi_write_reg(struct mhi_controller *mhi_ctrl,
6592 			       void __iomem *addr, u32 val)
6593 {
6594 	writel_relaxed(val, addr);
6595 }
6596 
6597 static int cnss_get_mhi_soc_info(struct cnss_plat_data *plat_priv,
6598 				 struct mhi_controller *mhi_ctrl)
6599 {
6600 	int ret = 0;
6601 
6602 	ret = mhi_get_soc_info(mhi_ctrl);
6603 	if (ret)
6604 		goto exit;
6605 
6606 	plat_priv->device_version.family_number = mhi_ctrl->family_number;
6607 	plat_priv->device_version.device_number = mhi_ctrl->device_number;
6608 	plat_priv->device_version.major_version = mhi_ctrl->major_version;
6609 	plat_priv->device_version.minor_version = mhi_ctrl->minor_version;
6610 
6611 	cnss_pr_dbg("Get device version info, family number: 0x%x, device number: 0x%x, major version: 0x%x, minor version: 0x%x\n",
6612 		    plat_priv->device_version.family_number,
6613 		    plat_priv->device_version.device_number,
6614 		    plat_priv->device_version.major_version,
6615 		    plat_priv->device_version.minor_version);
6616 
6617 	/* Only keep lower 4 bits as real device major version */
6618 	plat_priv->device_version.major_version &= DEVICE_MAJOR_VERSION_MASK;
6619 
6620 exit:
6621 	return ret;
6622 }
6623 
6624 static bool cnss_is_tme_supported(struct cnss_pci_data *pci_priv)
6625 {
6626 	if (!pci_priv) {
6627 		cnss_pr_dbg("pci_priv is NULL");
6628 		return false;
6629 	}
6630 
6631 	switch (pci_priv->device_id) {
6632 	case PEACH_DEVICE_ID:
6633 		return true;
6634 	default:
6635 		return false;
6636 	}
6637 }
6638 
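/* Allocate and register the MHI controller for the device: point it at
 * BAR0, hand over the MSI IRQ array, set the IOVA range based on SMMU
 * S1 availability, size the RDDM/SBL segments, and hook the status,
 * runtime PM and register access callbacks before registering with the
 * MHI bus. QCA6174 does not use MHI and is skipped.
 */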
6639 static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
6640 {
6641 	int ret = 0;
6642 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6643 	struct pci_dev *pci_dev = pci_priv->pci_dev;
6644 	struct mhi_controller *mhi_ctrl;
6645 	phys_addr_t bar_start;
6646 	const struct mhi_controller_config *cnss_mhi_config =
6647 						&cnss_mhi_config_default;
6648 
6649 	ret = cnss_qmi_init(plat_priv);
6650 	if (ret)
6651 		return -EINVAL;
6652 
6653 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
6654 		return 0;
6655 
6656 	mhi_ctrl = mhi_alloc_controller();
6657 	if (!mhi_ctrl) {
		cnss_pr_err("Failed to allocate MHI controller\n");
		return -ENOMEM;
6660 	}
6661 
6662 	pci_priv->mhi_ctrl = mhi_ctrl;
6663 	mhi_ctrl->cntrl_dev = &pci_dev->dev;
6664 
6665 	mhi_ctrl->fw_image = plat_priv->firmware_name;
6666 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
6667 	mhi_ctrl->fallback_fw_image = plat_priv->fw_fallback_name;
6668 #endif
6669 
6670 	mhi_ctrl->regs = pci_priv->bar;
6671 	mhi_ctrl->reg_len = pci_resource_len(pci_priv->pci_dev, PCI_BAR_NUM);
6672 	bar_start = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
	cnss_pr_dbg("BAR starts at %pa, length is %zx\n",
6674 		    &bar_start, mhi_ctrl->reg_len);
6675 
6676 	ret = cnss_pci_get_mhi_msi(pci_priv);
6677 	if (ret) {
6678 		cnss_pr_err("Failed to get MSI for MHI, err = %d\n", ret);
6679 		goto free_mhi_ctrl;
6680 	}
6681 
6682 	if (cnss_pci_is_one_msi(pci_priv))
6683 		mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
6684 
6685 	if (pci_priv->smmu_s1_enable) {
6686 		mhi_ctrl->iova_start = pci_priv->smmu_iova_start;
6687 		mhi_ctrl->iova_stop = pci_priv->smmu_iova_start +
6688 					pci_priv->smmu_iova_len;
6689 	} else {
6690 		mhi_ctrl->iova_start = 0;
6691 		mhi_ctrl->iova_stop = pci_priv->dma_bit_mask;
6692 	}
6693 
6694 	mhi_ctrl->status_cb = cnss_mhi_notify_status;
6695 	mhi_ctrl->runtime_get = cnss_mhi_pm_runtime_get;
6696 	mhi_ctrl->runtime_put = cnss_mhi_pm_runtime_put_noidle;
6697 	mhi_ctrl->read_reg = cnss_mhi_read_reg;
6698 	mhi_ctrl->write_reg = cnss_mhi_write_reg;
6699 
6700 	mhi_ctrl->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size;
6701 	if (!mhi_ctrl->rddm_size)
6702 		mhi_ctrl->rddm_size = RAMDUMP_SIZE_DEFAULT;
6703 
6704 	if (plat_priv->device_id == QCN7605_DEVICE_ID)
6705 		mhi_ctrl->sbl_size = SZ_256K;
6706 	else
6707 		mhi_ctrl->sbl_size = SZ_512K;
6708 
6709 	mhi_ctrl->seg_len = SZ_512K;
6710 	mhi_ctrl->fbc_download = true;
6711 
6712 	ret = cnss_get_mhi_soc_info(plat_priv, mhi_ctrl);
6713 	if (ret)
6714 		goto free_mhi_irq;
6715 
	/* Satellite config is only supported on KIWI V2 and later chipsets */
	if (plat_priv->device_id <= QCA6490_DEVICE_ID ||
	    (plat_priv->device_id == KIWI_DEVICE_ID &&
	     plat_priv->device_version.major_version == 1)) {
6720 		if (plat_priv->device_id == QCN7605_DEVICE_ID)
6721 			cnss_mhi_config = &cnss_mhi_config_genoa;
6722 		else
6723 			cnss_mhi_config = &cnss_mhi_config_no_satellite;
6724 	}
6725 
6726 	mhi_ctrl->tme_supported_image = cnss_is_tme_supported(pci_priv);
6727 
6728 	ret = mhi_register_controller(mhi_ctrl, cnss_mhi_config);
6729 	if (ret) {
6730 		cnss_pr_err("Failed to register to MHI bus, err = %d\n", ret);
6731 		goto free_mhi_irq;
6732 	}
6733 
6734 	/* MHI satellite driver only needs to connect when DRV is supported */
6735 	if (cnss_pci_get_drv_supported(pci_priv))
6736 		cnss_mhi_controller_set_base(pci_priv, bar_start);
6737 
6738 	cnss_get_bwscal_info(plat_priv);
6739 	cnss_pr_dbg("no_bwscale: %d\n", plat_priv->no_bwscale);
6740 
6741 	/* BW scale CB needs to be set after registering MHI per requirement */
6742 	if (!plat_priv->no_bwscale)
6743 		cnss_mhi_controller_set_bw_scale_cb(pci_priv,
6744 						    cnss_mhi_bw_scale);
6745 
6746 	ret = cnss_pci_update_fw_name(pci_priv);
6747 	if (ret)
6748 		goto unreg_mhi;
6749 
6750 	return 0;
6751 
6752 unreg_mhi:
6753 	mhi_unregister_controller(mhi_ctrl);
6754 free_mhi_irq:
6755 	kfree(mhi_ctrl->irq);
6756 free_mhi_ctrl:
6757 	mhi_free_controller(mhi_ctrl);
6758 
6759 	return ret;
6760 }
6761 
6762 static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
6763 {
6764 	struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;
6765 
6766 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
6767 		return;
6768 
6769 	mhi_unregister_controller(mhi_ctrl);
6770 	kfree(mhi_ctrl->irq);
6771 	mhi_ctrl->irq = NULL;
6772 	mhi_free_controller(mhi_ctrl);
6773 	pci_priv->mhi_ctrl = NULL;
6774 }
6775 
6776 static void cnss_pci_config_regs(struct cnss_pci_data *pci_priv)
6777 {
6778 	switch (pci_priv->device_id) {
6779 	case QCA6390_DEVICE_ID:
6780 		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6390;
6781 		pci_priv->wcss_reg = wcss_reg_access_seq;
6782 		pci_priv->pcie_reg = pcie_reg_access_seq;
6783 		pci_priv->wlaon_reg = wlaon_reg_access_seq;
6784 		pci_priv->syspm_reg = syspm_reg_access_seq;
6785 
6786 		/* Configure WDOG register with specific value so that we can
6787 		 * know if HW is in the process of WDOG reset recovery or not
6788 		 * when reading the registers.
6789 		 */
6790 		cnss_pci_reg_write
6791 		(pci_priv,
6792 		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG,
6793 		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL);
6794 		break;
6795 	case QCA6490_DEVICE_ID:
6796 		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6490;
6797 		pci_priv->wlaon_reg = wlaon_reg_access_seq;
6798 		break;
6799 	default:
6800 		return;
6801 	}
6802 }
6803 
6804 #if !IS_ENABLED(CONFIG_ARCH_QCOM)
6805 static int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
6806 {
6807 	return 0;
6808 }
6809 
6810 static irqreturn_t cnss_pci_wake_handler(int irq, void *data)
6811 {
6812 	struct cnss_pci_data *pci_priv = data;
6813 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6814 	enum rpm_status status;
6815 	struct device *dev;
6816 
6817 	pci_priv->wake_counter++;
6818 	cnss_pr_dbg("WLAN PCI wake IRQ (%u) is asserted #%u\n",
6819 		    pci_priv->wake_irq, pci_priv->wake_counter);
6820 
	/* Make sure any suspend in progress is aborted */
6822 	cnss_pm_stay_awake(plat_priv);
6823 	cnss_pm_relax(plat_priv);
	/* The two pm* API calls above abort system suspend only when
	 * plat_dev->dev->ws has been initialized by the device_init_wakeup()
	 * API; calling pm_system_wakeup() guarantees that system suspend
	 * can be aborted even if it has not been initialized.
	 */
6829 	pm_system_wakeup();
6830 
6831 	dev = &pci_priv->pci_dev->dev;
6832 	status = dev->power.runtime_status;
6833 
6834 	if ((cnss_pci_get_monitor_wake_intr(pci_priv) &&
6835 	     cnss_pci_get_auto_suspended(pci_priv)) ||
6836 	    (status == RPM_SUSPENDING || status == RPM_SUSPENDED)) {
6837 		cnss_pci_set_monitor_wake_intr(pci_priv, false);
6838 		cnss_pci_pm_request_resume(pci_priv);
6839 	}
6840 
6841 	return IRQ_HANDLED;
6842 }
6843 
6844 /**
6845  * cnss_pci_wake_gpio_init() - Setup PCI wake GPIO for WLAN
6846  * @pci_priv: driver PCI bus context pointer
6847  *
6848  * This function initializes WLAN PCI wake GPIO and corresponding
6849  * interrupt. It should be used in non-MSM platforms whose PCIe
6850  * root complex driver doesn't handle the GPIO.
6851  *
6852  * Return: 0 for success or skip, negative value for error
6853  */
6854 static int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
6855 {
6856 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6857 	struct device *dev = &plat_priv->plat_dev->dev;
6858 	int ret = 0;
6859 
6860 	pci_priv->wake_gpio = of_get_named_gpio(dev->of_node,
6861 						"wlan-pci-wake-gpio", 0);
6862 	if (pci_priv->wake_gpio < 0)
6863 		goto out;
6864 
6865 	cnss_pr_dbg("Get PCI wake GPIO (%d) from device node\n",
6866 		    pci_priv->wake_gpio);
6867 
6868 	ret = gpio_request(pci_priv->wake_gpio, "wlan_pci_wake_gpio");
6869 	if (ret) {
6870 		cnss_pr_err("Failed to request PCI wake GPIO, err = %d\n",
6871 			    ret);
6872 		goto out;
6873 	}
6874 
6875 	gpio_direction_input(pci_priv->wake_gpio);
6876 	pci_priv->wake_irq = gpio_to_irq(pci_priv->wake_gpio);
6877 
6878 	ret = request_irq(pci_priv->wake_irq, cnss_pci_wake_handler,
6879 			  IRQF_TRIGGER_FALLING, "wlan_pci_wake_irq", pci_priv);
6880 	if (ret) {
6881 		cnss_pr_err("Failed to request PCI wake IRQ, err = %d\n", ret);
6882 		goto free_gpio;
6883 	}
6884 
6885 	ret = enable_irq_wake(pci_priv->wake_irq);
6886 	if (ret) {
6887 		cnss_pr_err("Failed to enable PCI wake IRQ, err = %d\n", ret);
6888 		goto free_irq;
6889 	}
6890 
6891 	return 0;
6892 
6893 free_irq:
6894 	free_irq(pci_priv->wake_irq, pci_priv);
6895 free_gpio:
6896 	gpio_free(pci_priv->wake_gpio);
6897 out:
6898 	return ret;
6899 }
6900 
6901 static void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
6902 {
6903 	if (pci_priv->wake_gpio < 0)
6904 		return;
6905 
6906 	disable_irq_wake(pci_priv->wake_irq);
6907 	free_irq(pci_priv->wake_irq, pci_priv);
6908 	gpio_free(pci_priv->wake_gpio);
6909 }
6910 #endif
6911 
6912 #ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
6913 static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
6914 {
6915 	int ret = 0;
6916 
	/* In the dual wlan card case, calling pci_register_driver after
	 * the first PCIe device enumeration has finished causes
	 * cnss_pci_probe to be called early for the second wlan card,
	 * giving this sequence:
	 * enter msm_pcie_enumerate -> pci_bus_add_devices -> cnss_pci_probe
	 * -> exit msm_pcie_enumerate.
	 * The correct sequence we expect is:
	 * enter msm_pcie_enumerate -> pci_bus_add_devices ->
	 * exit msm_pcie_enumerate -> cnss_pci_probe.
	 * The unexpected sequence makes the second wlan card suspend the
	 * PCIe link while enumeration has not finished, so the logic below
	 * avoids suspending the PCIe link until enumeration is done.
	 */
6931 	plat_priv->enumerate_done = true;
6932 
6933 	/* Now enumeration is finished, try to suspend PCIe link */
6934 	if (plat_priv->bus_priv) {
6935 		struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
6936 		struct pci_dev *pci_dev = pci_priv->pci_dev;
6937 
6938 		switch (pci_dev->device) {
6939 		case QCA6390_DEVICE_ID:
6940 			cnss_pci_set_wlaon_pwr_ctrl(pci_priv,
6941 						    false,
6942 						    true,
6943 						    false);
6944 
6945 			cnss_pci_suspend_pwroff(pci_dev);
6946 			break;
6947 		default:
6948 			cnss_pr_err("Unknown PCI device found: 0x%x\n",
6949 				    pci_dev->device);
6950 			ret = -ENODEV;
6951 		}
6952 	}
6953 
6954 	return ret;
6955 }
6956 #else
6957 static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
6958 {
6959 	return 0;
6960 }
6961 #endif
6962 
/* Using these cnss_pm_domain ops lets the PM framework override the ops
 * from dev->bus->pm, which is pci_dev_pm_ops from pci-driver.c. These ops
 * have to take care of everything the device driver needs, which is
 * currently done by pci_dev_pm_ops.
 */
6968 static struct dev_pm_domain cnss_pm_domain = {
6969 	.ops = {
6970 		SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
6971 		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
6972 					      cnss_pci_resume_noirq)
6973 		SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend,
6974 				   cnss_pci_runtime_resume,
6975 				   cnss_pci_runtime_idle)
6976 	}
6977 };
6978 
6979 static int cnss_pci_get_dev_cfg_node(struct cnss_plat_data *plat_priv)
6980 {
6981 	struct device_node *child;
6982 	u32 id, i;
6983 	int id_n, ret;
6984 
6985 	if (plat_priv->dt_type != CNSS_DTT_MULTIEXCHG)
6986 		return 0;
6987 
6988 	if (!plat_priv->device_id) {
6989 		cnss_pr_err("Invalid device id\n");
6990 		return -EINVAL;
6991 	}
6992 
6993 	for_each_available_child_of_node(plat_priv->plat_dev->dev.of_node,
6994 					 child) {
6995 		if (strcmp(child->name, "chip_cfg"))
6996 			continue;
6997 
6998 		id_n = of_property_count_u32_elems(child, "supported-ids");
6999 		if (id_n <= 0) {
			cnss_pr_err("Device ID is not set\n");
7001 			return -EINVAL;
7002 		}
7003 
7004 		for (i = 0; i < id_n; i++) {
7005 			ret = of_property_read_u32_index(child,
7006 							 "supported-ids",
7007 							 i, &id);
7008 			if (ret) {
7009 				cnss_pr_err("Failed to read supported ids\n");
7010 				return -EINVAL;
7011 			}
7012 
7013 			if (id == plat_priv->device_id) {
7014 				plat_priv->dev_node = child;
7015 				cnss_pr_dbg("got node[%s@%d] for device[0x%x]\n",
7016 					    child->name, i, id);
7017 				return 0;
7018 			}
7019 		}
7020 	}
7021 
7022 	return -EINVAL;
7023 }
7024 
7025 #ifdef CONFIG_CNSS2_CONDITIONAL_POWEROFF
7026 static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
7027 {
7028 	bool suspend_pwroff;
7029 
7030 	switch (pci_dev->device) {
7031 	case QCA6390_DEVICE_ID:
7032 	case QCA6490_DEVICE_ID:
7033 		suspend_pwroff = false;
7034 		break;
7035 	default:
7036 		suspend_pwroff = true;
7037 	}
7038 
7039 	return suspend_pwroff;
7040 }
7041 #else
7042 static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
7043 {
7044 	return true;
7045 }
7046 #endif
7047 
7048 #ifdef CONFIG_CNSS2_ENUM_WITH_LOW_SPEED
7049 static void
7050 cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
7051 {
7052 	int ret;
7053 
7054 	ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
7055 					  PCI_EXP_LNKSTA_CLS_2_5GB);
7056 	if (ret)
7057 		cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen1, err = %d\n",
7058 			     rc_num, ret);
7059 }
7060 
7061 static void
7062 cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
7063 {
7064 	int ret;
7065 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
7066 
	/* Genoa (QCN7605) keeps the downgraded speed; restore RC speed otherwise */
7068 	if (pci_priv->device_id != QCN7605_DEVICE_ID) {
7069 		/* The request 0 will reset maximum GEN speed to default */
7070 		ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num, 0);
7071 		if (ret)
7072 			cnss_pr_err("Failed to reset max PCIe RC%x link speed to default, err = %d\n",
7073 				     plat_priv->rc_num, ret);
7074 	}
7075 }
7076 
7077 static void
7078 cnss_pci_link_retrain_trigger(struct cnss_pci_data *pci_priv)
7079 {
7080 	int ret;
7081 
	/* suspend/resume will trigger link retraining to re-establish link speed */
7083 	ret = cnss_suspend_pci_link(pci_priv);
7084 	if (ret)
7085 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
7086 
7087 	ret = cnss_resume_pci_link(pci_priv);
7088 	if (ret)
7089 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
7090 
7091 	cnss_pci_get_link_status(pci_priv);
7092 }
7093 #else
7094 static void
7095 cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
7096 {
7097 }
7098 
7099 static void
7100 cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
7101 {
7102 }
7103 
7104 static void
7105 cnss_pci_link_retrain_trigger(struct cnss_pci_data *pci_priv)
7106 {
7107 }
7108 #endif
7109 
7110 static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev)
7111 {
7112 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
7113 	int rc_num = pci_dev->bus->domain_nr;
7114 	struct cnss_plat_data *plat_priv;
7115 	int ret = 0;
7116 	bool suspend_pwroff = cnss_should_suspend_pwroff(pci_dev);
7117 
7118 	plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);
7119 
7120 	if (suspend_pwroff) {
7121 		ret = cnss_suspend_pci_link(pci_priv);
7122 		if (ret)
7123 			cnss_pr_err("Failed to suspend PCI link, err = %d\n",
7124 				    ret);
7125 		cnss_power_off_device(plat_priv);
7126 	} else {
7127 		cnss_pr_dbg("bus suspend and dev power off disabled for device [0x%x]\n",
7128 			    pci_dev->device);
7129 		cnss_pci_link_retrain_trigger(pci_priv);
7130 	}
7131 }
7132 
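/* PCI probe: bind the endpoint to its plat_priv by RC number, then bring
 * up SMMU, the PCI event notifier, bus, MSI and MHI in that order.
 * Per-chip setup arms the RDDM/boot-debug timers and time sync work,
 * after which the link may be suspended and the device powered off until
 * the wlan driver registers.
 */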
7133 static int cnss_pci_probe(struct pci_dev *pci_dev,
7134 			  const struct pci_device_id *id)
7135 {
7136 	int ret = 0;
7137 	struct cnss_pci_data *pci_priv;
7138 	struct device *dev = &pci_dev->dev;
7139 	int rc_num = pci_dev->bus->domain_nr;
7140 	struct cnss_plat_data *plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);
7141 
	cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x, rc_num: %d\n",
7143 		    id->vendor, pci_dev->device, rc_num);
7144 	if (!plat_priv) {
		cnss_pr_err("Failed to find matching plat_priv for RC number\n");
7146 		ret = -ENODEV;
7147 		goto out;
7148 	}
7149 
7150 	pci_priv = devm_kzalloc(dev, sizeof(*pci_priv), GFP_KERNEL);
7151 	if (!pci_priv) {
7152 		ret = -ENOMEM;
7153 		goto out;
7154 	}
7155 
7156 	pci_priv->pci_link_state = PCI_LINK_UP;
7157 	pci_priv->plat_priv = plat_priv;
7158 	pci_priv->pci_dev = pci_dev;
7159 	pci_priv->pci_device_id = id;
7160 	pci_priv->device_id = pci_dev->device;
7161 	cnss_set_pci_priv(pci_dev, pci_priv);
7162 	plat_priv->device_id = pci_dev->device;
7163 	plat_priv->bus_priv = pci_priv;
7164 	mutex_init(&pci_priv->bus_lock);
7165 	if (plat_priv->use_pm_domain)
7166 		dev->pm_domain = &cnss_pm_domain;
7167 
7168 	cnss_pci_restore_rc_speed(pci_priv);
7169 
7170 	ret = cnss_pci_get_dev_cfg_node(plat_priv);
7171 	if (ret) {
7172 		cnss_pr_err("Failed to get device cfg node, err = %d\n", ret);
7173 		goto reset_ctx;
7174 	}
7175 
7176 	cnss_get_sleep_clk_supported(plat_priv);
7177 
7178 	ret = cnss_dev_specific_power_on(plat_priv);
7179 	if (ret < 0)
7180 		goto reset_ctx;
7181 
7182 	cnss_pci_of_reserved_mem_device_init(pci_priv);
7183 
7184 	ret = cnss_register_subsys(plat_priv);
7185 	if (ret)
7186 		goto reset_ctx;
7187 
7188 	ret = cnss_register_ramdump(plat_priv);
7189 	if (ret)
7190 		goto unregister_subsys;
7191 
7192 	ret = cnss_pci_init_smmu(pci_priv);
7193 	if (ret)
7194 		goto unregister_ramdump;
7195 
7196 	/* update drv support flag */
7197 	cnss_pci_update_drv_supported(pci_priv);
7198 
7199 	cnss_update_supported_link_info(pci_priv);
7200 
7201 	ret = cnss_reg_pci_event(pci_priv);
7202 	if (ret) {
7203 		cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
7204 		goto deinit_smmu;
7205 	}
7206 
7207 	ret = cnss_pci_enable_bus(pci_priv);
7208 	if (ret)
7209 		goto dereg_pci_event;
7210 
7211 	ret = cnss_pci_enable_msi(pci_priv);
7212 	if (ret)
7213 		goto disable_bus;
7214 
7215 	ret = cnss_pci_register_mhi(pci_priv);
7216 	if (ret)
7217 		goto disable_msi;
7218 
7219 	switch (pci_dev->device) {
7220 	case QCA6174_DEVICE_ID:
7221 		pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET,
7222 				     &pci_priv->revision_id);
7223 		break;
7224 	case QCA6290_DEVICE_ID:
7225 	case QCA6390_DEVICE_ID:
7226 	case QCN7605_DEVICE_ID:
7227 	case QCA6490_DEVICE_ID:
7228 	case KIWI_DEVICE_ID:
7229 	case MANGO_DEVICE_ID:
7230 	case PEACH_DEVICE_ID:
7231 		if (!cnss_is_dual_wlan_enabled() ||
7232 		    plat_priv->enumerate_done)
7233 			cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false,
7234 						    false);
7235 
7236 		timer_setup(&pci_priv->dev_rddm_timer,
7237 			    cnss_dev_rddm_timeout_hdlr, 0);
7238 		timer_setup(&pci_priv->boot_debug_timer,
7239 			    cnss_boot_debug_timeout_hdlr, 0);
7240 		INIT_DELAYED_WORK(&pci_priv->time_sync_work,
7241 				  cnss_pci_time_sync_work_hdlr);
7242 		cnss_pci_get_link_status(pci_priv);
7243 		cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, false);
7244 		cnss_pci_wake_gpio_init(pci_priv);
7245 		break;
7246 	default:
7247 		cnss_pr_err("Unknown PCI device found: 0x%x\n",
7248 			    pci_dev->device);
7249 		ret = -ENODEV;
7250 		goto unreg_mhi;
7251 	}
7252 
7253 	cnss_pci_config_regs(pci_priv);
7254 	if (EMULATION_HW)
7255 		goto out;
7256 	if (cnss_is_dual_wlan_enabled() && !plat_priv->enumerate_done)
7257 		goto probe_done;
7258 	cnss_pci_suspend_pwroff(pci_dev);
7259 
7260 probe_done:
7261 	set_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);
7262 
7263 	return 0;
7264 
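/* Error unwind: release resources in the reverse order of setup */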
7265 unreg_mhi:
7266 	cnss_pci_unregister_mhi(pci_priv);
7267 disable_msi:
7268 	cnss_pci_disable_msi(pci_priv);
7269 disable_bus:
7270 	cnss_pci_disable_bus(pci_priv);
7271 dereg_pci_event:
7272 	cnss_dereg_pci_event(pci_priv);
7273 deinit_smmu:
7274 	cnss_pci_deinit_smmu(pci_priv);
7275 unregister_ramdump:
7276 	cnss_unregister_ramdump(plat_priv);
7277 unregister_subsys:
7278 	cnss_unregister_subsys(plat_priv);
7279 reset_ctx:
7280 	plat_priv->bus_priv = NULL;
7281 out:
7282 	return ret;
7283 }
7284 
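/* PCI remove: frees the firmware-related memory regions first, then tears
 * down everything cnss_pci_probe set up, in reverse order.
 */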
7285 static void cnss_pci_remove(struct pci_dev *pci_dev)
7286 {
7287 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
7288 	struct cnss_plat_data *plat_priv =
7289 		cnss_bus_dev_to_plat_priv(&pci_dev->dev);
7290 
7291 	clear_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);
7292 	cnss_pci_unregister_driver_hdlr(pci_priv);
7293 	cnss_pci_free_aux_mem(pci_priv);
7294 	cnss_pci_free_tme_lite_mem(pci_priv);
7295 	cnss_pci_free_m3_mem(pci_priv);
7296 	cnss_pci_free_fw_mem(pci_priv);
7297 	cnss_pci_free_qdss_mem(pci_priv);
7298 
7299 	switch (pci_dev->device) {
7300 	case QCA6290_DEVICE_ID:
7301 	case QCA6390_DEVICE_ID:
7302 	case QCN7605_DEVICE_ID:
7303 	case QCA6490_DEVICE_ID:
7304 	case KIWI_DEVICE_ID:
7305 	case MANGO_DEVICE_ID:
7306 	case PEACH_DEVICE_ID:
7307 		cnss_pci_wake_gpio_deinit(pci_priv);
7308 		del_timer(&pci_priv->boot_debug_timer);
7309 		del_timer(&pci_priv->dev_rddm_timer);
7310 		break;
7311 	default:
7312 		break;
7313 	}
7314 
7315 	cnss_pci_unregister_mhi(pci_priv);
7316 	cnss_pci_disable_msi(pci_priv);
7317 	cnss_pci_disable_bus(pci_priv);
7318 	cnss_dereg_pci_event(pci_priv);
7319 	cnss_pci_deinit_smmu(pci_priv);
7320 	if (plat_priv) {
7321 		cnss_unregister_ramdump(plat_priv);
7322 		cnss_unregister_subsys(plat_priv);
7323 		plat_priv->bus_priv = NULL;
7324 	} else {
7325 		cnss_pr_err("plat_priv is NULL, unable to unregister ramdump and subsys\n");
7326 	}
7327 }
7328 
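/* PCI IDs of all supported WLAN chips, used for driver matching */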
7329 static const struct pci_device_id cnss_pci_id_table[] = {
7330 	{ QCA6174_VENDOR_ID, QCA6174_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7331 	{ QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7332 	{ QCA6390_VENDOR_ID, QCA6390_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7333 	{ QCN7605_VENDOR_ID, QCN7605_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7334 	{ QCA6490_VENDOR_ID, QCA6490_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7335 	{ KIWI_VENDOR_ID, KIWI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7336 	{ MANGO_VENDOR_ID, MANGO_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7337 	{ PEACH_VENDOR_ID, PEACH_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7338 	{ 0 }
7339 };
7340 MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);
7341 
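/* Wire up system sleep (including the noirq phase) and runtime PM callbacks */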
7342 static const struct dev_pm_ops cnss_pm_ops = {
7343 	SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
7344 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
7345 				      cnss_pci_resume_noirq)
7346 	SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend, cnss_pci_runtime_resume,
7347 			   cnss_pci_runtime_idle)
7348 };
7349 
7350 static struct pci_driver cnss_pci_driver = {
7351 	.name     = "cnss_pci",
7352 	.id_table = cnss_pci_id_table,
7353 	.probe    = cnss_pci_probe,
7354 	.remove   = cnss_pci_remove,
7355 	.driver = {
7356 		.pm = &cnss_pm_ops,
7357 	},
7358 };
7359 
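/* Enumerate one PCIe root complex: optionally cap the link speed first,
 * then retry link training up to LINK_TRAINING_RETRY_MAX_TIMES before
 * giving up. -EPROBE_DEFER from the RC driver is passed through as-is.
 */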
7360 static int cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
7361 {
7362 	int ret, retry = 0;
7363 
7364 	/* Always set the initial target PCIe link speed to Gen2 for the
7365 	 * QCA6490 device, since there may be link issues if it boots up at
7366 	 * Gen3 speed. The device can change the speed later at any time; a
7367 	 * requested speed higher than the one in the PCIe DT is rejected.
7368 	 */
7369 	if (plat_priv->device_id == QCA6490_DEVICE_ID) {
7370 		ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
7371 						  PCI_EXP_LNKSTA_CLS_5_0GB);
7372 		if (ret && ret != -EPROBE_DEFER)
7373 			cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen2, err = %d\n",
7374 				    rc_num, ret);
7375 	} else {
7376 		cnss_pci_downgrade_rc_speed(plat_priv, rc_num);
7377 	}
7378 
7379 	cnss_pr_dbg("Trying to enumerate with PCIe RC%x\n", rc_num);
7380 retry:
7381 	ret = _cnss_pci_enumerate(plat_priv, rc_num);
7382 	if (ret) {
7383 		if (ret == -EPROBE_DEFER) {
7384 			cnss_pr_dbg("PCIe RC driver is not ready, defer probe\n");
7385 			goto out;
7386 		}
7387 		cnss_pr_err("Failed to enable PCIe RC%x, err = %d\n",
7388 			    rc_num, ret);
7389 		if (retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
7390 			cnss_pr_dbg("Retry PCI link training #%d\n", retry);
7391 			goto retry;
7392 		} else {
7393 			goto out;
7394 		}
7395 	}
7396 
7397 	plat_priv->rc_num = rc_num;
7398 
7399 out:
7400 	return ret;
7401 }
7402 
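/* Bus init entry point: read the candidate RC numbers from the
 * "qcom,wlan-rc-num" DT property, enumerate the first RC that links up,
 * and register the PCI driver once across all plat_priv instances.
 */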
7403 int cnss_pci_init(struct cnss_plat_data *plat_priv)
7404 {
7405 	struct device *dev = &plat_priv->plat_dev->dev;
7406 	const __be32 *prop;
7407 	int ret = 0, prop_len = 0, rc_count, i;
7408 
7409 	prop = of_get_property(dev->of_node, "qcom,wlan-rc-num", &prop_len);
7410 	if (!prop || !prop_len) {
7411 		cnss_pr_err("Failed to get PCIe RC number from DT\n");
		ret = -EINVAL;
7412 		goto out;
7413 	}
7414 
7415 	rc_count = prop_len / sizeof(__be32);
7416 	for (i = 0; i < rc_count; i++) {
7417 		ret = cnss_pci_enumerate(plat_priv, be32_to_cpup(&prop[i]));
7418 		if (!ret)
7419 			break;
7420 		else if (ret == -EPROBE_DEFER || i == rc_count - 1)
7421 			goto out;
7422 	}
7423 
7424 	ret = cnss_try_suspend(plat_priv);
7425 	if (ret) {
7426 		cnss_pr_err("Failed to suspend, ret: %d\n", ret);
7427 		goto out;
7428 	}
7429 
7430 	if (!cnss_driver_registered) {
7431 		ret = pci_register_driver(&cnss_pci_driver);
7432 		if (ret) {
7433 			cnss_pr_err("Failed to register to PCI framework, err = %d\n",
7434 				    ret);
7435 			goto out;
7436 		}
7437 		if (!plat_priv->bus_priv) {
7438 			cnss_pr_err("Failed to probe PCI driver\n");
7439 			ret = -ENODEV;
7440 			goto unreg_pci;
7441 		}
7442 		cnss_driver_registered = true;
7443 	}
7444 
7445 	return 0;
7446 
7447 unreg_pci:
7448 	pci_unregister_driver(&cnss_pci_driver);
7449 out:
7450 	return ret;
7451 }
7452 
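/* Bus deinit: unregister the PCI driver if it was previously registered */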
7453 void cnss_pci_deinit(struct cnss_plat_data *plat_priv)
7454 {
7455 	if (cnss_driver_registered) {
7456 		pci_unregister_driver(&cnss_pci_driver);
7457 		cnss_driver_registered = false;
7458 	}
7459 }
7460