xref: /wlan-dirver/platform/cnss2/pci.c (revision 600b1dfc1ce589d1d11800b90ea52dfbd5c412a5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/completion.h>
8 #include <linux/io.h>
9 #include <linux/irq.h>
10 #include <linux/memblock.h>
11 #include <linux/module.h>
12 #include <linux/msi.h>
13 #include <linux/of.h>
14 #include <linux/of_gpio.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/suspend.h>
17 #include <linux/version.h>
18 #include <linux/sched.h>
19 #include "main.h"
20 #include "bus.h"
21 #include "debug.h"
22 #include "pci.h"
23 #include "pci_platform.h"
24 #include "reg.h"
25 
26 #define PCI_LINK_UP			1
27 #define PCI_LINK_DOWN			0
28 
29 #define SAVE_PCI_CONFIG_SPACE		1
30 #define RESTORE_PCI_CONFIG_SPACE	0
31 
32 #define PCI_BAR_NUM			0
33 #define PCI_INVALID_READ(val)		((val) == U32_MAX)
34 
35 #define PCI_DMA_MASK_32_BIT		DMA_BIT_MASK(32)
36 #define PCI_DMA_MASK_36_BIT		DMA_BIT_MASK(36)
37 #define PCI_DMA_MASK_64_BIT		DMA_BIT_MASK(64)
38 
39 #define MHI_NODE_NAME			"qcom,mhi"
40 #define MHI_MSI_NAME			"MHI"
41 
42 #define QCA6390_PATH_PREFIX		"qca6390/"
43 #define QCA6490_PATH_PREFIX		"qca6490/"
44 #define QCN7605_PATH_PREFIX             "qcn7605/"
45 #define KIWI_PATH_PREFIX		"kiwi/"
46 #define MANGO_PATH_PREFIX		"mango/"
47 #define PEACH_PATH_PREFIX		"peach/"
48 #define DEFAULT_PHY_M3_FILE_NAME	"m3.bin"
49 #define DEFAULT_AUX_FILE_NAME		"aux_ucode.elf"
50 #define DEFAULT_PHY_UCODE_FILE_NAME	"phy_ucode.elf"
51 #define TME_PATCH_FILE_NAME		"tmel_patch.elf"
52 #define PHY_UCODE_V2_FILE_NAME		"phy_ucode20.elf"
53 #define DEFAULT_FW_FILE_NAME		"amss.bin"
54 #define FW_V2_FILE_NAME			"amss20.bin"
55 #define FW_V2_FTM_FILE_NAME		"amss20_ftm.bin"
56 #define DEVICE_MAJOR_VERSION_MASK	0xF
57 
58 #define WAKE_MSI_NAME			"WAKE"
59 
60 #define DEV_RDDM_TIMEOUT		5000
61 #define WAKE_EVENT_TIMEOUT		5000
62 
63 #ifdef CONFIG_CNSS_EMULATION
64 #define EMULATION_HW			1
65 #else
66 #define EMULATION_HW			0
67 #endif
68 
69 #define RAMDUMP_SIZE_DEFAULT		0x420000
70 #define CNSS_256KB_SIZE			0x40000
71 #define DEVICE_RDDM_COOKIE		0xCAFECACE
72 
73 static bool cnss_driver_registered;
74 
75 static DEFINE_SPINLOCK(pci_link_down_lock);
76 static DEFINE_SPINLOCK(pci_reg_window_lock);
77 static DEFINE_SPINLOCK(time_sync_lock);
78 
79 #define MHI_TIMEOUT_OVERWRITE_MS	(plat_priv->ctrl_params.mhi_timeout)
80 #define MHI_M2_TIMEOUT_MS		(plat_priv->ctrl_params.mhi_m2_timeout)
81 
82 #define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US	1000
83 #define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US	2000
84 
85 #define FORCE_WAKE_DELAY_MIN_US			4000
86 #define FORCE_WAKE_DELAY_MAX_US			6000
87 #define FORCE_WAKE_DELAY_TIMEOUT_US		60000
88 
89 #define REG_RETRY_MAX_TIMES		3
90 
91 #define MHI_SUSPEND_RETRY_MAX_TIMES		3
92 #define MHI_SUSPEND_RETRY_DELAY_US		5000
93 
94 #define BOOT_DEBUG_TIMEOUT_MS			7000
95 
96 #define HANG_DATA_LENGTH		384
97 #define HST_HANG_DATA_OFFSET		((3 * 1024 * 1024) - HANG_DATA_LENGTH)
98 #define HSP_HANG_DATA_OFFSET		((2 * 1024 * 1024) - HANG_DATA_LENGTH)
99 
100 #define AFC_SLOT_SIZE                   0x1000
101 #define AFC_MAX_SLOT                    2
102 #define AFC_MEM_SIZE                    (AFC_SLOT_SIZE * AFC_MAX_SLOT)
103 #define AFC_AUTH_STATUS_OFFSET          1
104 #define AFC_AUTH_SUCCESS                1
105 #define AFC_AUTH_ERROR                  0
106 
107 static const struct mhi_channel_config cnss_mhi_channels[] = {
108 	{
109 		.num = 0,
110 		.name = "LOOPBACK",
111 		.num_elements = 32,
112 		.event_ring = 1,
113 		.dir = DMA_TO_DEVICE,
114 		.ee_mask = 0x4,
115 		.pollcfg = 0,
116 		.doorbell = MHI_DB_BRST_DISABLE,
117 		.lpm_notify = false,
118 		.offload_channel = false,
119 		.doorbell_mode_switch = false,
120 		.auto_queue = false,
121 	},
122 	{
123 		.num = 1,
124 		.name = "LOOPBACK",
125 		.num_elements = 32,
126 		.event_ring = 1,
127 		.dir = DMA_FROM_DEVICE,
128 		.ee_mask = 0x4,
129 		.pollcfg = 0,
130 		.doorbell = MHI_DB_BRST_DISABLE,
131 		.lpm_notify = false,
132 		.offload_channel = false,
133 		.doorbell_mode_switch = false,
134 		.auto_queue = false,
135 	},
136 	{
137 		.num = 4,
138 		.name = "DIAG",
139 		.num_elements = 64,
140 		.event_ring = 1,
141 		.dir = DMA_TO_DEVICE,
142 		.ee_mask = 0x4,
143 		.pollcfg = 0,
144 		.doorbell = MHI_DB_BRST_DISABLE,
145 		.lpm_notify = false,
146 		.offload_channel = false,
147 		.doorbell_mode_switch = false,
148 		.auto_queue = false,
149 	},
150 	{
151 		.num = 5,
152 		.name = "DIAG",
153 		.num_elements = 64,
154 		.event_ring = 1,
155 		.dir = DMA_FROM_DEVICE,
156 		.ee_mask = 0x4,
157 		.pollcfg = 0,
158 		.doorbell = MHI_DB_BRST_DISABLE,
159 		.lpm_notify = false,
160 		.offload_channel = false,
161 		.doorbell_mode_switch = false,
162 		.auto_queue = false,
163 	},
164 	{
165 		.num = 20,
166 		.name = "IPCR",
167 		.num_elements = 64,
168 		.event_ring = 1,
169 		.dir = DMA_TO_DEVICE,
170 		.ee_mask = 0x4,
171 		.pollcfg = 0,
172 		.doorbell = MHI_DB_BRST_DISABLE,
173 		.lpm_notify = false,
174 		.offload_channel = false,
175 		.doorbell_mode_switch = false,
176 		.auto_queue = false,
177 	},
178 	{
179 		.num = 21,
180 		.name = "IPCR",
181 		.num_elements = 64,
182 		.event_ring = 1,
183 		.dir = DMA_FROM_DEVICE,
184 		.ee_mask = 0x4,
185 		.pollcfg = 0,
186 		.doorbell = MHI_DB_BRST_DISABLE,
187 		.lpm_notify = false,
188 		.offload_channel = false,
189 		.doorbell_mode_switch = false,
190 		.auto_queue = true,
191 	},
192 /* All MHI satellite config to be at the end of data struct */
193 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
194 	{
195 		.num = 50,
196 		.name = "ADSP_0",
197 		.num_elements = 64,
198 		.event_ring = 3,
199 		.dir = DMA_BIDIRECTIONAL,
200 		.ee_mask = 0x4,
201 		.pollcfg = 0,
202 		.doorbell = MHI_DB_BRST_DISABLE,
203 		.lpm_notify = false,
204 		.offload_channel = true,
205 		.doorbell_mode_switch = false,
206 		.auto_queue = false,
207 	},
208 	{
209 		.num = 51,
210 		.name = "ADSP_1",
211 		.num_elements = 64,
212 		.event_ring = 3,
213 		.dir = DMA_BIDIRECTIONAL,
214 		.ee_mask = 0x4,
215 		.pollcfg = 0,
216 		.doorbell = MHI_DB_BRST_DISABLE,
217 		.lpm_notify = false,
218 		.offload_channel = true,
219 		.doorbell_mode_switch = false,
220 		.auto_queue = false,
221 	},
222 	{
223 		.num = 70,
224 		.name = "ADSP_2",
225 		.num_elements = 64,
226 		.event_ring = 3,
227 		.dir = DMA_BIDIRECTIONAL,
228 		.ee_mask = 0x4,
229 		.pollcfg = 0,
230 		.doorbell = MHI_DB_BRST_DISABLE,
231 		.lpm_notify = false,
232 		.offload_channel = true,
233 		.doorbell_mode_switch = false,
234 		.auto_queue = false,
235 	},
236 	{
237 		.num = 71,
238 		.name = "ADSP_3",
239 		.num_elements = 64,
240 		.event_ring = 3,
241 		.dir = DMA_BIDIRECTIONAL,
242 		.ee_mask = 0x4,
243 		.pollcfg = 0,
244 		.doorbell = MHI_DB_BRST_DISABLE,
245 		.lpm_notify = false,
246 		.offload_channel = true,
247 		.doorbell_mode_switch = false,
248 		.auto_queue = false,
249 	},
250 #endif
251 };
252 
253 static const struct mhi_channel_config cnss_mhi_channels_genoa[] = {
254 	{
255 		.num = 0,
256 		.name = "LOOPBACK",
257 		.num_elements = 32,
258 		.event_ring = 1,
259 		.dir = DMA_TO_DEVICE,
260 		.ee_mask = 0x4,
261 		.pollcfg = 0,
262 		.doorbell = MHI_DB_BRST_DISABLE,
263 		.lpm_notify = false,
264 		.offload_channel = false,
265 		.doorbell_mode_switch = false,
266 		.auto_queue = false,
267 	},
268 	{
269 		.num = 1,
270 		.name = "LOOPBACK",
271 		.num_elements = 32,
272 		.event_ring = 1,
273 		.dir = DMA_FROM_DEVICE,
274 		.ee_mask = 0x4,
275 		.pollcfg = 0,
276 		.doorbell = MHI_DB_BRST_DISABLE,
277 		.lpm_notify = false,
278 		.offload_channel = false,
279 		.doorbell_mode_switch = false,
280 		.auto_queue = false,
281 	},
282 	{
283 		.num = 4,
284 		.name = "DIAG",
285 		.num_elements = 64,
286 		.event_ring = 1,
287 		.dir = DMA_TO_DEVICE,
288 		.ee_mask = 0x4,
289 		.pollcfg = 0,
290 		.doorbell = MHI_DB_BRST_DISABLE,
291 		.lpm_notify = false,
292 		.offload_channel = false,
293 		.doorbell_mode_switch = false,
294 		.auto_queue = false,
295 	},
296 	{
297 		.num = 5,
298 		.name = "DIAG",
299 		.num_elements = 64,
300 		.event_ring = 1,
301 		.dir = DMA_FROM_DEVICE,
302 		.ee_mask = 0x4,
303 		.pollcfg = 0,
304 		.doorbell = MHI_DB_BRST_DISABLE,
305 		.lpm_notify = false,
306 		.offload_channel = false,
307 		.doorbell_mode_switch = false,
308 		.auto_queue = false,
309 	},
310 	{
311 		.num = 16,
312 		.name = "IPCR",
313 		.num_elements = 64,
314 		.event_ring = 1,
315 		.dir = DMA_TO_DEVICE,
316 		.ee_mask = 0x4,
317 		.pollcfg = 0,
318 		.doorbell = MHI_DB_BRST_DISABLE,
319 		.lpm_notify = false,
320 		.offload_channel = false,
321 		.doorbell_mode_switch = false,
322 		.auto_queue = false,
323 	},
324 	{
325 		.num = 17,
326 		.name = "IPCR",
327 		.num_elements = 64,
328 		.event_ring = 1,
329 		.dir = DMA_FROM_DEVICE,
330 		.ee_mask = 0x4,
331 		.pollcfg = 0,
332 		.doorbell = MHI_DB_BRST_DISABLE,
333 		.lpm_notify = false,
334 		.offload_channel = false,
335 		.doorbell_mode_switch = false,
336 		.auto_queue = true,
337 	},
338 };
339 
340 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0))
341 static struct mhi_event_config cnss_mhi_events[] = {
342 #else
343 static const struct mhi_event_config cnss_mhi_events[] = {
344 #endif
345 	{
346 		.num_elements = 32,
347 		.irq_moderation_ms = 0,
348 		.irq = 1,
349 		.mode = MHI_DB_BRST_DISABLE,
350 		.data_type = MHI_ER_CTRL,
351 		.priority = 0,
352 		.hardware_event = false,
353 		.client_managed = false,
354 		.offload_channel = false,
355 	},
356 	{
357 		.num_elements = 256,
358 		.irq_moderation_ms = 0,
359 		.irq = 2,
360 		.mode = MHI_DB_BRST_DISABLE,
361 		.priority = 1,
362 		.hardware_event = false,
363 		.client_managed = false,
364 		.offload_channel = false,
365 	},
366 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
367 	{
368 		.num_elements = 32,
369 		.irq_moderation_ms = 0,
370 		.irq = 1,
371 		.mode = MHI_DB_BRST_DISABLE,
372 		.data_type = MHI_ER_BW_SCALE,
373 		.priority = 2,
374 		.hardware_event = false,
375 		.client_managed = false,
376 		.offload_channel = false,
377 	},
378 #endif
379 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
380 	{
381 		.num_elements = 256,
382 		.irq_moderation_ms = 0,
383 		.irq = 2,
384 		.mode = MHI_DB_BRST_DISABLE,
385 		.data_type = MHI_ER_DATA,
386 		.priority = 1,
387 		.hardware_event = false,
388 		.client_managed = true,
389 		.offload_channel = true,
390 	},
391 #endif
392 };
393 
394 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
395 #define CNSS_MHI_SATELLITE_CH_CFG_COUNT 4
396 #define CNSS_MHI_SATELLITE_EVT_COUNT 1
397 #else
398 #define CNSS_MHI_SATELLITE_CH_CFG_COUNT 0
399 #define CNSS_MHI_SATELLITE_EVT_COUNT 0
400 #endif
401 
402 static const struct mhi_controller_config cnss_mhi_config_default = {
403 #if IS_ENABLED(CONFIG_MHI_SATELLITE)
404 	.max_channels = 72,
405 #else
406 	.max_channels = 32,
407 #endif
408 	.timeout_ms = 10000,
409 	.use_bounce_buf = false,
410 	.buf_len = 0x8000,
411 	.num_channels = ARRAY_SIZE(cnss_mhi_channels),
412 	.ch_cfg = cnss_mhi_channels,
413 	.num_events = ARRAY_SIZE(cnss_mhi_events),
414 	.event_cfg = cnss_mhi_events,
415 	.m2_no_db = true,
416 };
417 
418 static const struct mhi_controller_config cnss_mhi_config_genoa = {
419 	.max_channels = 32,
420 	.timeout_ms = 10000,
421 	.use_bounce_buf = false,
422 	.buf_len = 0x8000,
423 	.num_channels = ARRAY_SIZE(cnss_mhi_channels_genoa),
424 	.ch_cfg = cnss_mhi_channels_genoa,
425 	.num_events = ARRAY_SIZE(cnss_mhi_events) -
426 		CNSS_MHI_SATELLITE_EVT_COUNT,
427 	.event_cfg = cnss_mhi_events,
428 	.m2_no_db = true,
429 #if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
430 	.bhie_offset = 0x0324,
431 #endif
432 };
433 
434 static const struct mhi_controller_config cnss_mhi_config_no_satellite = {
435 	.max_channels = 32,
436 	.timeout_ms = 10000,
437 	.use_bounce_buf = false,
438 	.buf_len = 0x8000,
439 	.num_channels = ARRAY_SIZE(cnss_mhi_channels) -
440 			CNSS_MHI_SATELLITE_CH_CFG_COUNT,
441 	.ch_cfg = cnss_mhi_channels,
442 	.num_events = ARRAY_SIZE(cnss_mhi_events) -
443 			CNSS_MHI_SATELLITE_EVT_COUNT,
444 	.event_cfg = cnss_mhi_events,
445 	.m2_no_db = true,
446 };
447 
448 static struct cnss_pci_reg ce_src[] = {
449 	{ "SRC_RING_BASE_LSB", CE_SRC_RING_BASE_LSB_OFFSET },
450 	{ "SRC_RING_BASE_MSB", CE_SRC_RING_BASE_MSB_OFFSET },
451 	{ "SRC_RING_ID", CE_SRC_RING_ID_OFFSET },
452 	{ "SRC_RING_MISC", CE_SRC_RING_MISC_OFFSET },
453 	{ "SRC_CTRL", CE_SRC_CTRL_OFFSET },
454 	{ "SRC_R0_CE_CH_SRC_IS", CE_SRC_R0_CE_CH_SRC_IS_OFFSET },
455 	{ "SRC_RING_HP", CE_SRC_RING_HP_OFFSET },
456 	{ "SRC_RING_TP", CE_SRC_RING_TP_OFFSET },
457 	{ NULL },
458 };
459 
460 static struct cnss_pci_reg ce_dst[] = {
461 	{ "DEST_RING_BASE_LSB", CE_DEST_RING_BASE_LSB_OFFSET },
462 	{ "DEST_RING_BASE_MSB", CE_DEST_RING_BASE_MSB_OFFSET },
463 	{ "DEST_RING_ID", CE_DEST_RING_ID_OFFSET },
464 	{ "DEST_RING_MISC", CE_DEST_RING_MISC_OFFSET },
465 	{ "DEST_CTRL", CE_DEST_CTRL_OFFSET },
466 	{ "CE_CH_DST_IS", CE_CH_DST_IS_OFFSET },
467 	{ "CE_CH_DEST_CTRL2", CE_CH_DEST_CTRL2_OFFSET },
468 	{ "DEST_RING_HP", CE_DEST_RING_HP_OFFSET },
469 	{ "DEST_RING_TP", CE_DEST_RING_TP_OFFSET },
470 	{ "STATUS_RING_BASE_LSB", CE_STATUS_RING_BASE_LSB_OFFSET },
471 	{ "STATUS_RING_BASE_MSB", CE_STATUS_RING_BASE_MSB_OFFSET },
472 	{ "STATUS_RING_ID", CE_STATUS_RING_ID_OFFSET },
473 	{ "STATUS_RING_MISC", CE_STATUS_RING_MISC_OFFSET },
474 	{ "STATUS_RING_HP", CE_STATUS_RING_HP_OFFSET },
475 	{ "STATUS_RING_TP", CE_STATUS_RING_TP_OFFSET },
476 	{ NULL },
477 };
478 
479 static struct cnss_pci_reg ce_cmn[] = {
480 	{ "GXI_ERR_INTS", CE_COMMON_GXI_ERR_INTS },
481 	{ "GXI_ERR_STATS", CE_COMMON_GXI_ERR_STATS },
482 	{ "GXI_WDOG_STATUS", CE_COMMON_GXI_WDOG_STATUS },
483 	{ "TARGET_IE_0", CE_COMMON_TARGET_IE_0 },
484 	{ "TARGET_IE_1", CE_COMMON_TARGET_IE_1 },
485 	{ NULL },
486 };
487 
488 static struct cnss_pci_reg qdss_csr[] = {
489 	{ "QDSSCSR_ETRIRQCTRL", QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET },
490 	{ "QDSSCSR_PRESERVEETF", QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET },
491 	{ "QDSSCSR_PRESERVEETR0", QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET },
492 	{ "QDSSCSR_PRESERVEETR1", QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET },
493 	{ NULL },
494 };
495 
496 static struct cnss_pci_reg pci_scratch[] = {
497 	{ "PCIE_SCRATCH_0", PCIE_SCRATCH_0_SOC_PCIE_REG },
498 	{ "PCIE_SCRATCH_1", PCIE_SCRATCH_1_SOC_PCIE_REG },
499 	{ "PCIE_SCRATCH_2", PCIE_SCRATCH_2_SOC_PCIE_REG },
500 	{ NULL },
501 };
502 
503 /* First field of the structure is the device bit mask. Use
504  * enum cnss_pci_reg_mask as reference for the value.
505  */
506 static struct cnss_misc_reg wcss_reg_access_seq[] = {
507 	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
508 	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x802},
509 	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
510 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_PLL_MODE, 0},
511 	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x805},
512 	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
513 	{1, 0, QCA6390_WCSS_WFSS_PMM_WFSS_PMM_R0_PMM_CTRL, 0},
514 	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_CX_CSR, 0},
515 	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_RAW_STAT, 0},
516 	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_EN, 0},
517 	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_STS, 0},
518 	{1, 1, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_CTL, 0xD},
519 	{1, 0, QCA6390_WCSS_PMM_TOP_TESTBUS_STS, 0},
520 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
521 	{1, 1, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
522 	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x8},
523 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
524 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_STS, 0},
525 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_CTL, 0},
526 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_0, 0},
527 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_9, 0},
528 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS0, 0},
529 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS1, 0},
530 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS2, 0},
531 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS3, 0},
532 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS4, 0},
533 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS5, 0},
534 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS6, 0},
535 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE0, 0},
536 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE1, 0},
537 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE2, 0},
538 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE3, 0},
539 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE4, 0},
540 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE5, 0},
541 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE6, 0},
542 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING0, 0},
543 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING1, 0},
544 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING2, 0},
545 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING3, 0},
546 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING4, 0},
547 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING5, 0},
548 	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING6, 0},
549 	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30040},
550 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
551 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
552 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
553 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
554 	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30105},
555 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
556 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
557 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
558 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
559 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
560 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
561 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
562 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
563 	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
564 	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_NOC_CBCR, 0},
565 	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_AHB_CBCR, 0},
566 	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_GDSCR, 0},
567 	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN1_GDSCR, 0},
568 	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN2_GDSCR, 0},
569 	{1, 0, QCA6390_WCSS_PMM_TOP_PMM_INT_CLR, 0},
570 	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_STICKY_EN, 0},
571 };
572 
573 static struct cnss_misc_reg pcie_reg_access_seq[] = {
574 	{1, 0, QCA6390_PCIE_PCIE_WCSS_STATUS_FOR_DEBUG_LOW_PCIE_LOCAL_REG, 0},
575 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
576 	{1, 1, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0x18},
577 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
578 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
579 	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_STATUS_SOC_PCIE_REG, 0},
580 	{1, 0, QCA6390_PCIE_SOC_COMMIT_REPLAY_SOC_PCIE_REG, 0},
581 	{1, 0, QCA6390_TLMM_GPIO_IN_OUT57, 0},
582 	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG57, 0},
583 	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS57, 0},
584 	{1, 0, QCA6390_TLMM_GPIO_IN_OUT59, 0},
585 	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG59, 0},
586 	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS59, 0},
587 	{1, 0, QCA6390_PCIE_PCIE_PARF_LTSSM, 0},
588 	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS, 0},
589 	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS_1, 0},
590 	{1, 0, QCA6390_PCIE_PCIE_PARF_INT_STATUS, 0},
591 	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_STATUS, 0},
592 	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_MASK, 0},
593 	{1, 0, QCA6390_PCIE_PCIE_PARF_BDF_TO_SID_CFG, 0},
594 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
595 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_3, 0},
596 	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_CLOCK_RESET_CTRL, 0},
597 	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_BASE_ADDR_LOWER, 0},
598 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS, 0},
599 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_CFG, 0},
600 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
601 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1SUB, 0},
602 	{1, 0, QCA6390_PCIE_PCIE_CORE_CONFIG, 0},
603 	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
604 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L2, 0},
605 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1, 0},
606 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1, 0},
607 	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
608 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_HIGH, 0},
609 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_LOW, 0},
610 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_HIGH, 0},
611 	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_LOW, 0},
612 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_STATUS_REG2, 0},
613 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_STATUS_REG2, 0},
614 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN2_CFG_REG1, 0},
615 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN1_CFG_REG1, 0},
616 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_APS_STATUS_REG1, 0},
617 	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_APS_STATUS_REG1, 0},
618 	{1, 0, QCA6390_PCIE_PCIE_BHI_EXECENV_REG, 0},
619 };
620 
621 static struct cnss_misc_reg wlaon_reg_access_seq[] = {
622 	{3, 0, WLAON_SOC_POWER_CTRL, 0},
623 	{3, 0, WLAON_SOC_PWR_WDG_BARK_THRSHD, 0},
624 	{3, 0, WLAON_SOC_PWR_WDG_BITE_THRSHD, 0},
625 	{3, 0, WLAON_SW_COLD_RESET, 0},
626 	{3, 0, WLAON_RFA_MEM_SLP_NRET_N_OVERRIDE, 0},
627 	{3, 0, WLAON_GDSC_DELAY_SETTING, 0},
628 	{3, 0, WLAON_GDSC_DELAY_SETTING2, 0},
629 	{3, 0, WLAON_WL_PWR_STATUS_REG, 0},
630 	{3, 0, WLAON_WL_AON_DBG_CFG_REG, 0},
631 	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP0_REG, 0},
632 	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP1_REG, 0},
633 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL0, 0},
634 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL1, 0},
635 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL2, 0},
636 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL3, 0},
637 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL4, 0},
638 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5, 0},
639 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5_1, 0},
640 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6, 0},
641 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6_1, 0},
642 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL7, 0},
643 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8, 0},
644 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8_1, 0},
645 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9, 0},
646 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9_1, 0},
647 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL10, 0},
648 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL11, 0},
649 	{2, 0, WLAON_WL_AON_APM_CFG_CTRL12, 0},
650 	{2, 0, WLAON_WL_AON_APM_OVERRIDE_REG, 0},
651 	{2, 0, WLAON_WL_AON_CXPC_REG, 0},
652 	{2, 0, WLAON_WL_AON_APM_STATUS0, 0},
653 	{2, 0, WLAON_WL_AON_APM_STATUS1, 0},
654 	{2, 0, WLAON_WL_AON_APM_STATUS2, 0},
655 	{2, 0, WLAON_WL_AON_APM_STATUS3, 0},
656 	{2, 0, WLAON_WL_AON_APM_STATUS4, 0},
657 	{2, 0, WLAON_WL_AON_APM_STATUS5, 0},
658 	{2, 0, WLAON_WL_AON_APM_STATUS6, 0},
659 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL1, 0},
660 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL6, 0},
661 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL7, 0},
662 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL3, 0},
663 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL4, 0},
664 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL5, 0},
665 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL8, 0},
666 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL2, 0},
667 	{3, 0, WLAON_GLOBAL_COUNTER_CTRL9, 0},
668 	{3, 0, WLAON_RTC_CLK_CAL_CTRL1, 0},
669 	{3, 0, WLAON_RTC_CLK_CAL_CTRL2, 0},
670 	{3, 0, WLAON_RTC_CLK_CAL_CTRL3, 0},
671 	{3, 0, WLAON_RTC_CLK_CAL_CTRL4, 0},
672 	{3, 0, WLAON_RTC_CLK_CAL_CTRL5, 0},
673 	{3, 0, WLAON_RTC_CLK_CAL_CTRL6, 0},
674 	{3, 0, WLAON_RTC_CLK_CAL_CTRL7, 0},
675 	{3, 0, WLAON_RTC_CLK_CAL_CTRL8, 0},
676 	{3, 0, WLAON_RTC_CLK_CAL_CTRL9, 0},
677 	{3, 0, WLAON_WCSSAON_CONFIG_REG, 0},
678 	{3, 0, WLAON_WLAN_OEM_DEBUG_REG, 0},
679 	{3, 0, WLAON_WLAN_RAM_DUMP_REG, 0},
680 	{3, 0, WLAON_QDSS_WCSS_REG, 0},
681 	{3, 0, WLAON_QDSS_WCSS_ACK, 0},
682 	{3, 0, WLAON_WL_CLK_CNTL_KDF_REG, 0},
683 	{3, 0, WLAON_WL_CLK_CNTL_PMU_HFRC_REG, 0},
684 	{3, 0, WLAON_QFPROM_PWR_CTRL_REG, 0},
685 	{3, 0, WLAON_DLY_CONFIG, 0},
686 	{3, 0, WLAON_WLAON_Q6_IRQ_REG, 0},
687 	{3, 0, WLAON_PCIE_INTF_SW_CFG_REG, 0},
688 	{3, 0, WLAON_PCIE_INTF_STICKY_SW_CFG_REG, 0},
689 	{3, 0, WLAON_PCIE_INTF_PHY_SW_CFG_REG, 0},
690 	{3, 0, WLAON_PCIE_INTF_PHY_NOCSR_SW_CFG_REG, 0},
691 	{3, 0, WLAON_Q6_COOKIE_BIT, 0},
692 	{3, 0, WLAON_WARM_SW_ENTRY, 0},
693 	{3, 0, WLAON_RESET_DBG_SW_ENTRY, 0},
694 	{3, 0, WLAON_WL_PMUNOC_CFG_REG, 0},
695 	{3, 0, WLAON_RESET_CAUSE_CFG_REG, 0},
696 	{3, 0, WLAON_SOC_WCSSAON_WAKEUP_IRQ_7_EN_REG, 0},
697 	{3, 0, WLAON_DEBUG, 0},
698 	{3, 0, WLAON_SOC_PARAMETERS, 0},
699 	{3, 0, WLAON_WLPM_SIGNAL, 0},
700 	{3, 0, WLAON_SOC_RESET_CAUSE_REG, 0},
701 	{3, 0, WLAON_WAKEUP_PCIE_SOC_REG, 0},
702 	{3, 0, WLAON_PBL_STACK_CANARY, 0},
703 	{3, 0, WLAON_MEM_TOT_NUM_GRP_REG, 0},
704 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP0_REG, 0},
705 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP1_REG, 0},
706 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP2_REG, 0},
707 	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP3_REG, 0},
708 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP0_REG, 0},
709 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP1_REG, 0},
710 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP2_REG, 0},
711 	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP3_REG, 0},
712 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP0_REG, 0},
713 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP1_REG, 0},
714 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP2_REG, 0},
715 	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP3_REG, 0},
716 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP0_REG, 0},
717 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP1_REG, 0},
718 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP2_REG, 0},
719 	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP3_REG, 0},
720 	{3, 0, WLAON_MEM_CNT_SEL_REG, 0},
721 	{3, 0, WLAON_MEM_NO_EXTBHS_REG, 0},
722 	{3, 0, WLAON_MEM_DEBUG_REG, 0},
723 	{3, 0, WLAON_MEM_DEBUG_BUS_REG, 0},
724 	{3, 0, WLAON_MEM_REDUN_CFG_REG, 0},
725 	{3, 0, WLAON_WL_AON_SPARE2, 0},
726 	{3, 0, WLAON_VSEL_CFG_FOR_WL_RET_DISABLE_REG, 0},
727 	{3, 0, WLAON_BTFM_WLAN_IPC_STATUS_REG, 0},
728 	{3, 0, WLAON_MPM_COUNTER_CHICKEN_BITS, 0},
729 	{3, 0, WLAON_WLPM_CHICKEN_BITS, 0},
730 	{3, 0, WLAON_PCIE_PHY_PWR_REG, 0},
731 	{3, 0, WLAON_WL_CLK_CNTL_PMU_LPO2M_REG, 0},
732 	{3, 0, WLAON_WL_SS_ROOT_CLK_SWITCH_REG, 0},
733 	{3, 0, WLAON_POWERCTRL_PMU_REG, 0},
734 	{3, 0, WLAON_POWERCTRL_MEM_REG, 0},
735 	{3, 0, WLAON_PCIE_PWR_CTRL_REG, 0},
736 	{3, 0, WLAON_SOC_PWR_PROFILE_REG, 0},
737 	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG, 0},
738 	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG, 0},
739 	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG, 0},
740 	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG, 0},
741 	{3, 0, WLAON_MEM_SVS_CFG_REG, 0},
742 	{3, 0, WLAON_CMN_AON_MISC_REG, 0},
743 	{3, 0, WLAON_INTR_STATUS, 0},
744 	{2, 0, WLAON_INTR_ENABLE, 0},
745 	{2, 0, WLAON_NOC_DBG_BUS_SEL_REG, 0},
746 	{2, 0, WLAON_NOC_DBG_BUS_REG, 0},
747 	{2, 0, WLAON_WL_CTRL_MISC_REG, 0},
748 	{2, 0, WLAON_DBG_STATUS0, 0},
749 	{2, 0, WLAON_DBG_STATUS1, 0},
750 	{2, 0, WLAON_TIMERSYNC_OFFSET_L, 0},
751 	{2, 0, WLAON_TIMERSYNC_OFFSET_H, 0},
752 	{2, 0, WLAON_PMU_LDO_SETTLE_REG, 0},
753 };
754 
755 static struct cnss_misc_reg syspm_reg_access_seq[] = {
756 	{1, 0, QCA6390_SYSPM_SYSPM_PWR_STATUS, 0},
757 	{1, 0, QCA6390_SYSPM_DBG_BTFM_AON_REG, 0},
758 	{1, 0, QCA6390_SYSPM_DBG_BUS_SEL_REG, 0},
759 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
760 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
761 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
762 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
763 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
764 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
765 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
766 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
767 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
768 	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
769 };
770 
771 static struct cnss_print_optimize print_optimize;
772 
773 #define WCSS_REG_SIZE ARRAY_SIZE(wcss_reg_access_seq)
774 #define PCIE_REG_SIZE ARRAY_SIZE(pcie_reg_access_seq)
775 #define WLAON_REG_SIZE ARRAY_SIZE(wlaon_reg_access_seq)
776 #define SYSPM_REG_SIZE ARRAY_SIZE(syspm_reg_access_seq)
777 
778 static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv);
779 static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev);
780 static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev);
781 
782 
783 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
784 static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
785 {
786 	mhi_debug_reg_dump(pci_priv->mhi_ctrl);
787 }
788 
789 static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
790 {
791 	mhi_dump_sfr(pci_priv->mhi_ctrl);
792 }
793 
794 static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
795 				      u32 cookie)
796 {
797 	return mhi_scan_rddm_cookie(pci_priv->mhi_ctrl, cookie);
798 }
799 
800 static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
801 				    bool notify_clients)
802 {
803 	return mhi_pm_fast_suspend(pci_priv->mhi_ctrl, notify_clients);
804 }
805 
806 static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
807 				   bool notify_clients)
808 {
809 	return mhi_pm_fast_resume(pci_priv->mhi_ctrl, notify_clients);
810 }
811 
812 static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
813 				       u32 timeout)
814 {
815 	return mhi_set_m2_timeout_ms(pci_priv->mhi_ctrl, timeout);
816 }
817 
818 static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
819 					   int timeout_us, bool in_panic)
820 {
821 	return mhi_device_get_sync_atomic(pci_priv->mhi_ctrl->mhi_dev,
822 					  timeout_us, in_panic);
823 }
824 
825 #ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
826 static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv)
827 {
828 	return mhi_host_notify_db_disable_trace(pci_priv->mhi_ctrl);
829 }
830 #endif
831 
832 static void
833 cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
834 				    int (*cb)(struct mhi_controller *mhi_ctrl,
835 					      struct mhi_link_info *link_info))
836 {
837 	mhi_controller_set_bw_scale_cb(pci_priv->mhi_ctrl, cb);
838 }
839 
840 static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
841 {
842 	return mhi_force_reset(pci_priv->mhi_ctrl);
843 }
844 
845 void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
846 				  phys_addr_t base)
847 {
848 	return mhi_controller_set_base(pci_priv->mhi_ctrl, base);
849 }
850 #else
851 static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
852 {
853 }
854 
855 static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
856 {
857 }
858 
859 static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
860 				      u32 cookie)
861 {
862 	return false;
863 }
864 
865 static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
866 				    bool notify_clients)
867 {
868 	return -EOPNOTSUPP;
869 }
870 
871 static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
872 				   bool notify_clients)
873 {
874 	return -EOPNOTSUPP;
875 }
876 
877 static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
878 				       u32 timeout)
879 {
880 }
881 
882 static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
883 					   int timeout_us, bool in_panic)
884 {
885 	return -EOPNOTSUPP;
886 }
887 
888 #ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
889 static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv)
890 {
891 	return -EOPNOTSUPP;
892 }
893 #endif
894 
895 static void
896 cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
897 				    int (*cb)(struct mhi_controller *mhi_ctrl,
898 					      struct mhi_link_info *link_info))
899 {
900 }
901 
902 static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
903 {
904 	return -EOPNOTSUPP;
905 }
906 
907 void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
908 				  phys_addr_t base)
909 {
910 }
911 #endif /* CONFIG_MHI_BUS_MISC */
912 
913 #ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
914 #define CNSS_MHI_WAKE_TIMEOUT		500000
915 
916 static void cnss_record_smmu_fault_timestamp(struct cnss_pci_data *pci_priv,
917 					     enum cnss_smmu_fault_time id)
918 {
919 	if (id >= SMMU_CB_MAX)
920 		return;
921 
922 	pci_priv->smmu_fault_timestamp[id] = sched_clock();
923 }
924 
925 static void cnss_pci_smmu_fault_handler_irq(struct iommu_domain *domain,
926 					    void *handler_token)
927 {
928 	struct cnss_pci_data *pci_priv = handler_token;
929 	int ret = 0;
930 
931 	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_ENTRY);
932 	ret = cnss_mhi_device_get_sync_atomic(pci_priv,
933 					      CNSS_MHI_WAKE_TIMEOUT, true);
934 	if (ret < 0) {
935 		cnss_pr_err("Failed to bring mhi in M0 state, ret %d\n", ret);
936 		return;
937 	}
938 
939 	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_DOORBELL_RING);
940 	ret = cnss_mhi_host_notify_db_disable_trace(pci_priv);
941 	if (ret < 0)
942 		cnss_pr_err("Fail to notify wlan fw to stop trace collection, ret %d\n", ret);
943 
944 	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_EXIT);
945 }
946 
947 void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
948 {
949 	qcom_iommu_set_fault_handler_irq(pci_priv->iommu_domain,
950 					 cnss_pci_smmu_fault_handler_irq, pci_priv);
951 }
952 #else
953 void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
954 {
955 }
956 #endif
957 
958 int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv)
959 {
960 	u16 device_id;
961 
962 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
963 		cnss_pr_dbg("%ps: PCIe link is in suspend state\n",
964 			    (void *)_RET_IP_);
965 		return -EACCES;
966 	}
967 
968 	if (pci_priv->pci_link_down_ind) {
969 		cnss_pr_err("%ps: PCIe link is down\n", (void *)_RET_IP_);
970 		return -EIO;
971 	}
972 
973 	pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id);
974 	if (device_id != pci_priv->device_id)  {
975 		cnss_fatal_err("%ps: PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
976 			       (void *)_RET_IP_, device_id,
977 			       pci_priv->device_id);
978 		return -EIO;
979 	}
980 
981 	return 0;
982 }
983 
984 static void cnss_pci_select_window(struct cnss_pci_data *pci_priv, u32 offset)
985 {
986 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
987 
988 	u32 window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
989 	u32 window_enable = WINDOW_ENABLE_BIT | window;
990 	u32 val;
991 
992 	if (plat_priv->device_id == PEACH_DEVICE_ID) {
993 		writel_relaxed(window_enable, pci_priv->bar +
994 			       PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
995 	} else {
996 		writel_relaxed(window_enable, pci_priv->bar +
997 			       QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
998 	}
999 
1000 	if (plat_priv->device_id == QCN7605_DEVICE_ID)
1001 		window_enable = QCN7605_WINDOW_ENABLE_BIT | window;
1002 
1003 	if (window != pci_priv->remap_window) {
1004 		pci_priv->remap_window = window;
1005 		cnss_pr_dbg("Config PCIe remap window register to 0x%x\n",
1006 			    window_enable);
1007 	}
1008 
1009 	/* Read it back to make sure the write has taken effect */
1010 	if (plat_priv->device_id == PEACH_DEVICE_ID) {
1011 		val = readl_relaxed(pci_priv->bar +
1012 			PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
1013 	} else {
1014 		val = readl_relaxed(pci_priv->bar +
1015 			QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
1016 	}
1017 	if (val != window_enable) {
1018 		cnss_pr_err("Failed to config window register to 0x%x, current value: 0x%x\n",
1019 			    window_enable, val);
1020 		if (!cnss_pci_check_link_status(pci_priv) &&
1021 		    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
1022 			CNSS_ASSERT(0);
1023 	}
1024 }
1025 
1026 static int cnss_pci_reg_read(struct cnss_pci_data *pci_priv,
1027 			     u32 offset, u32 *val)
1028 {
1029 	int ret;
1030 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1031 
1032 	if (!in_interrupt() && !irqs_disabled()) {
1033 		ret = cnss_pci_check_link_status(pci_priv);
1034 		if (ret)
1035 			return ret;
1036 	}
1037 
1038 	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
1039 	    offset < MAX_UNWINDOWED_ADDRESS) {
1040 		*val = readl_relaxed(pci_priv->bar + offset);
1041 		return 0;
1042 	}
1043 
1044 	/* If in panic, assumption is kernel panic handler will hold all threads
1045 	 * and interrupts. Further pci_reg_window_lock could be held before
1046 	 * panic. So only lock during normal operation.
1047 	 */
1048 	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
1049 		cnss_pci_select_window(pci_priv, offset);
1050 		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
1051 				     (offset & WINDOW_RANGE_MASK));
1052 	} else {
1053 		spin_lock_bh(&pci_reg_window_lock);
1054 		cnss_pci_select_window(pci_priv, offset);
1055 		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
1056 				     (offset & WINDOW_RANGE_MASK));
1057 		spin_unlock_bh(&pci_reg_window_lock);
1058 	}
1059 
1060 	return 0;
1061 }
1062 
1063 static int cnss_pci_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
1064 			      u32 val)
1065 {
1066 	int ret;
1067 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1068 
1069 	if (!in_interrupt() && !irqs_disabled()) {
1070 		ret = cnss_pci_check_link_status(pci_priv);
1071 		if (ret)
1072 			return ret;
1073 	}
1074 
1075 	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
1076 	    offset < MAX_UNWINDOWED_ADDRESS) {
1077 		writel_relaxed(val, pci_priv->bar + offset);
1078 		return 0;
1079 	}
1080 
1081 	/* Same constraint as PCI register read in panic */
1082 	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
1083 		cnss_pci_select_window(pci_priv, offset);
1084 		writel_relaxed(val, pci_priv->bar + WINDOW_START +
1085 			  (offset & WINDOW_RANGE_MASK));
1086 	} else {
1087 		spin_lock_bh(&pci_reg_window_lock);
1088 		cnss_pci_select_window(pci_priv, offset);
1089 		writel_relaxed(val, pci_priv->bar + WINDOW_START +
1090 			  (offset & WINDOW_RANGE_MASK));
1091 		spin_unlock_bh(&pci_reg_window_lock);
1092 	}
1093 
1094 	return 0;
1095 }
1096 
1097 static int cnss_pci_force_wake_get(struct cnss_pci_data *pci_priv)
1098 {
1099 	struct device *dev = &pci_priv->pci_dev->dev;
1100 	int ret;
1101 
1102 	ret = cnss_pci_force_wake_request_sync(dev,
1103 					       FORCE_WAKE_DELAY_TIMEOUT_US);
1104 	if (ret) {
1105 		if (ret != -EAGAIN)
1106 			cnss_pr_err("Failed to request force wake\n");
1107 		return ret;
1108 	}
1109 
1110 	/* If device's M1 state-change event races here, it can be ignored,
1111 	 * as the device is expected to immediately move from M2 to M0
1112 	 * without entering low power state.
1113 	 */
1114 	if (cnss_pci_is_device_awake(dev) != true)
1115 		cnss_pr_warn("MHI not in M0, while reg still accessible\n");
1116 
1117 	return 0;
1118 }
1119 
1120 static int cnss_pci_force_wake_put(struct cnss_pci_data *pci_priv)
1121 {
1122 	struct device *dev = &pci_priv->pci_dev->dev;
1123 	int ret;
1124 
1125 	ret = cnss_pci_force_wake_release(dev);
1126 	if (ret && ret != -EAGAIN)
1127 		cnss_pr_err("Failed to release force wake\n");
1128 
1129 	return ret;
1130 }
1131 
1132 #if IS_ENABLED(CONFIG_INTERCONNECT)
1133 /**
1134  * cnss_setup_bus_bandwidth() - Setup interconnect vote for given bandwidth
1135  * @plat_priv: Platform private data struct
1136  * @bw: bandwidth
1137  * @save: toggle flag to save bandwidth to current_bw_vote
1138  *
1139  * Setup bandwidth votes for configured interconnect paths
1140  *
1141  * Return: 0 for success
1142  */
1143 static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
1144 				    u32 bw, bool save)
1145 {
1146 	int ret = 0;
1147 	struct cnss_bus_bw_info *bus_bw_info;
1148 
1149 	if (!plat_priv->icc.path_count)
1150 		return -EOPNOTSUPP;
1151 
1152 	if (bw >= plat_priv->icc.bus_bw_cfg_count) {
1153 		cnss_pr_err("Invalid bus bandwidth Type: %d", bw);
1154 		return -EINVAL;
1155 	}
1156 
1157 	cnss_pr_buf("Bandwidth vote to %d, save %d\n", bw, save);
1158 
1159 	list_for_each_entry(bus_bw_info, &plat_priv->icc.list_head, list) {
1160 		ret = icc_set_bw(bus_bw_info->icc_path,
1161 				 bus_bw_info->cfg_table[bw].avg_bw,
1162 				 bus_bw_info->cfg_table[bw].peak_bw);
1163 		if (ret) {
1164 			cnss_pr_err("Could not set BW Cfg: %d, err = %d ICC Path: %s Val: %d %d\n",
1165 				    bw, ret, bus_bw_info->icc_name,
1166 				    bus_bw_info->cfg_table[bw].avg_bw,
1167 				    bus_bw_info->cfg_table[bw].peak_bw);
1168 			break;
1169 		}
1170 	}
1171 	if (ret == 0 && save)
1172 		plat_priv->icc.current_bw_vote = bw;
1173 	return ret;
1174 }
1175 
1176 int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
1177 {
1178 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1179 
1180 	if (!plat_priv)
1181 		return -ENODEV;
1182 
1183 	if (bandwidth < 0)
1184 		return -EINVAL;
1185 
1186 	return cnss_setup_bus_bandwidth(plat_priv, (u32)bandwidth, true);
1187 }
1188 #else
1189 static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
1190 				    u32 bw, bool save)
1191 {
1192 	return 0;
1193 }
1194 
1195 int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
1196 {
1197 	return 0;
1198 }
1199 #endif
1200 EXPORT_SYMBOL(cnss_request_bus_bandwidth);
1201 
1202 int cnss_pci_debug_reg_read(struct cnss_pci_data *pci_priv, u32 offset,
1203 			    u32 *val, bool raw_access)
1204 {
1205 	int ret = 0;
1206 	bool do_force_wake_put = true;
1207 
1208 	if (raw_access) {
1209 		ret = cnss_pci_reg_read(pci_priv, offset, val);
1210 		goto out;
1211 	}
1212 
1213 	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
1214 	if (ret)
1215 		goto out;
1216 
1217 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
1218 	if (ret < 0)
1219 		goto runtime_pm_put;
1220 
1221 	ret = cnss_pci_force_wake_get(pci_priv);
1222 	if (ret)
1223 		do_force_wake_put = false;
1224 
1225 	ret = cnss_pci_reg_read(pci_priv, offset, val);
1226 	if (ret) {
1227 		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
1228 			    offset, ret);
1229 		goto force_wake_put;
1230 	}
1231 
1232 force_wake_put:
1233 	if (do_force_wake_put)
1234 		cnss_pci_force_wake_put(pci_priv);
1235 runtime_pm_put:
1236 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
1237 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
1238 out:
1239 	return ret;
1240 }
1241 
1242 int cnss_pci_debug_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
1243 			     u32 val, bool raw_access)
1244 {
1245 	int ret = 0;
1246 	bool do_force_wake_put = true;
1247 
1248 	if (raw_access) {
1249 		ret = cnss_pci_reg_write(pci_priv, offset, val);
1250 		goto out;
1251 	}
1252 
1253 	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
1254 	if (ret)
1255 		goto out;
1256 
1257 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
1258 	if (ret < 0)
1259 		goto runtime_pm_put;
1260 
1261 	ret = cnss_pci_force_wake_get(pci_priv);
1262 	if (ret)
1263 		do_force_wake_put = false;
1264 
1265 	ret = cnss_pci_reg_write(pci_priv, offset, val);
1266 	if (ret) {
1267 		cnss_pr_err("Failed to write 0x%x to register offset 0x%x, err = %d\n",
1268 			    val, offset, ret);
1269 		goto force_wake_put;
1270 	}
1271 
1272 force_wake_put:
1273 	if (do_force_wake_put)
1274 		cnss_pci_force_wake_put(pci_priv);
1275 runtime_pm_put:
1276 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
1277 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
1278 out:
1279 	return ret;
1280 }
1281 
1282 static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
1283 {
1284 	struct pci_dev *pci_dev = pci_priv->pci_dev;
1285 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1286 	bool link_down_or_recovery;
1287 
1288 	if (!plat_priv)
1289 		return -ENODEV;
1290 
1291 	link_down_or_recovery = pci_priv->pci_link_down_ind ||
1292 		(test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));
1293 
1294 	if (save) {
1295 		if (link_down_or_recovery) {
1296 			pci_priv->saved_state = NULL;
1297 		} else {
1298 			pci_save_state(pci_dev);
1299 			pci_priv->saved_state = pci_store_saved_state(pci_dev);
1300 		}
1301 	} else {
1302 		if (link_down_or_recovery) {
1303 			pci_load_saved_state(pci_dev, pci_priv->default_state);
1304 			pci_restore_state(pci_dev);
1305 		} else if (pci_priv->saved_state) {
1306 			pci_load_and_free_saved_state(pci_dev,
1307 						      &pci_priv->saved_state);
1308 			pci_restore_state(pci_dev);
1309 		}
1310 	}
1311 
1312 	return 0;
1313 }
1314 
1315 static int cnss_update_supported_link_info(struct cnss_pci_data *pci_priv)
1316 {
1317 	int ret = 0;
1318 	struct pci_dev *root_port;
1319 	struct device_node *root_of_node;
1320 	struct cnss_plat_data *plat_priv;
1321 
1322 	if (!pci_priv)
1323 		return -EINVAL;
1324 
1325 	if (pci_priv->device_id != KIWI_DEVICE_ID)
1326 		return ret;
1327 
1328 	plat_priv = pci_priv->plat_priv;
1329 	root_port = pcie_find_root_port(pci_priv->pci_dev);
1330 
1331 	if (!root_port) {
1332 		cnss_pr_err("PCIe root port is null\n");
1333 		return -EINVAL;
1334 	}
1335 
1336 	root_of_node = root_port->dev.of_node;
1337 	if (root_of_node && root_of_node->parent) {
1338 		ret = of_property_read_u32(root_of_node->parent,
1339 					   "qcom,target-link-speed",
1340 					   &plat_priv->supported_link_speed);
1341 		if (!ret)
1342 			cnss_pr_dbg("Supported PCIe Link Speed: %d\n",
1343 				    plat_priv->supported_link_speed);
1344 		else
1345 			plat_priv->supported_link_speed = 0;
1346 	}
1347 
1348 	return ret;
1349 }
1350 
1351 static int cnss_pci_get_link_status(struct cnss_pci_data *pci_priv)
1352 {
1353 	u16 link_status;
1354 	int ret;
1355 
1356 	ret = pcie_capability_read_word(pci_priv->pci_dev, PCI_EXP_LNKSTA,
1357 					&link_status);
1358 	if (ret)
1359 		return ret;
1360 
1361 	cnss_pr_dbg("Get PCI link status register: %u\n", link_status);
1362 
1363 	pci_priv->def_link_speed = link_status & PCI_EXP_LNKSTA_CLS;
1364 	pci_priv->def_link_width =
1365 		(link_status & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
1366 	pci_priv->cur_link_speed = pci_priv->def_link_speed;
1367 
1368 	cnss_pr_dbg("Default PCI link speed is 0x%x, link width is 0x%x\n",
1369 		    pci_priv->def_link_speed, pci_priv->def_link_width);
1370 
1371 	return 0;
1372 }
1373 
1374 static void cnss_pci_soc_scratch_reg_dump(struct cnss_pci_data *pci_priv)
1375 {
1376 	u32 reg_offset, val;
1377 	int i;
1378 
1379 	switch (pci_priv->device_id) {
1380 	case QCA6390_DEVICE_ID:
1381 	case QCA6490_DEVICE_ID:
1382 	case KIWI_DEVICE_ID:
1383 	case MANGO_DEVICE_ID:
1384 	case PEACH_DEVICE_ID:
1385 		break;
1386 	default:
1387 		return;
1388 	}
1389 
1390 	if (in_interrupt() || irqs_disabled())
1391 		return;
1392 
1393 	if (cnss_pci_check_link_status(pci_priv))
1394 		return;
1395 
1396 	cnss_pr_dbg("Start to dump SOC Scratch registers\n");
1397 
1398 	for (i = 0; pci_scratch[i].name; i++) {
1399 		reg_offset = pci_scratch[i].offset;
1400 		if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
1401 			return;
1402 		cnss_pr_dbg("PCIE_SOC_REG_%s = 0x%x\n",
1403 			    pci_scratch[i].name, val);
1404 	}
1405 }
1406 
1407 int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
1408 {
1409 	int ret = 0;
1410 
1411 	if (!pci_priv)
1412 		return -ENODEV;
1413 
1414 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
1415 		cnss_pr_info("PCI link is already suspended\n");
1416 		goto out;
1417 	}
1418 
1419 	pci_clear_master(pci_priv->pci_dev);
1420 
1421 	ret = cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
1422 	if (ret)
1423 		goto out;
1424 
1425 	pci_disable_device(pci_priv->pci_dev);
1426 
1427 	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
1428 		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D3hot);
1429 		if (ret)
1430 			cnss_pr_err("Failed to set D3Hot, err =  %d\n", ret);
1431 	}
1432 
1433 	/* Always do PCIe L2 suspend during power off/PCIe link recovery */
1434 	pci_priv->drv_connected_last = 0;
1435 
1436 	ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
1437 	if (ret)
1438 		goto out;
1439 
1440 	pci_priv->pci_link_state = PCI_LINK_DOWN;
1441 
1442 	return 0;
1443 out:
1444 	return ret;
1445 }
1446 
1447 int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
1448 {
1449 	int ret = 0;
1450 
1451 	if (!pci_priv)
1452 		return -ENODEV;
1453 
1454 	if (pci_priv->pci_link_state == PCI_LINK_UP) {
1455 		cnss_pr_info("PCI link is already resumed\n");
1456 		goto out;
1457 	}
1458 
1459 	ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
1460 	if (ret) {
1461 		ret = -EAGAIN;
1462 		goto out;
1463 	}
1464 
1465 	pci_priv->pci_link_state = PCI_LINK_UP;
1466 
1467 	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
1468 		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D0);
1469 		if (ret) {
1470 			cnss_pr_err("Failed to set D0, err = %d\n", ret);
1471 			goto out;
1472 		}
1473 	}
1474 
1475 	ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
1476 	if (ret)
1477 		goto out;
1478 
1479 	ret = pci_enable_device(pci_priv->pci_dev);
1480 	if (ret) {
1481 		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
1482 		goto out;
1483 	}
1484 
1485 	pci_set_master(pci_priv->pci_dev);
1486 
1487 	if (pci_priv->pci_link_down_ind)
1488 		pci_priv->pci_link_down_ind = false;
1489 
1490 	return 0;
1491 out:
1492 	return ret;
1493 }
1494 
1495 int cnss_pci_recover_link_down(struct cnss_pci_data *pci_priv)
1496 {
1497 	int ret;
1498 
1499 	switch (pci_priv->device_id) {
1500 	case QCA6390_DEVICE_ID:
1501 	case QCA6490_DEVICE_ID:
1502 	case KIWI_DEVICE_ID:
1503 	case MANGO_DEVICE_ID:
1504 	case PEACH_DEVICE_ID:
1505 		break;
1506 	default:
1507 		return -EOPNOTSUPP;
1508 	}
1509 
1510 	/* Always wait here to avoid missing WAKE assert for RDDM
1511 	 * before link recovery
1512 	 */
1513 	msleep(WAKE_EVENT_TIMEOUT);
1514 
1515 	ret = cnss_suspend_pci_link(pci_priv);
1516 	if (ret)
1517 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
1518 
1519 	ret = cnss_resume_pci_link(pci_priv);
1520 	if (ret) {
1521 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
1522 		del_timer(&pci_priv->dev_rddm_timer);
1523 		return ret;
1524 	}
1525 
1526 	mod_timer(&pci_priv->dev_rddm_timer,
1527 		  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
1528 
1529 	cnss_mhi_debug_reg_dump(pci_priv);
1530 	cnss_pci_soc_scratch_reg_dump(pci_priv);
1531 
1532 	return 0;
1533 }
1534 
1535 static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
1536 				       enum cnss_bus_event_type type,
1537 				       void *data)
1538 {
1539 	struct cnss_bus_event bus_event;
1540 
1541 	bus_event.etype = type;
1542 	bus_event.event_data = data;
1543 	cnss_pci_call_driver_uevent(pci_priv, CNSS_BUS_EVENT, &bus_event);
1544 }
1545 
1546 void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv)
1547 {
1548 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1549 	struct pci_dev *pci_dev = pci_priv->pci_dev;
1550 	unsigned long flags;
1551 
1552 	if (test_bit(ENABLE_PCI_LINK_DOWN_PANIC,
1553 		     &plat_priv->ctrl_params.quirks))
1554 		panic("cnss: PCI link is down\n");
1555 
1556 	spin_lock_irqsave(&pci_link_down_lock, flags);
1557 	if (pci_priv->pci_link_down_ind) {
1558 		cnss_pr_dbg("PCI link down recovery is in progress, ignore\n");
1559 		spin_unlock_irqrestore(&pci_link_down_lock, flags);
1560 		return;
1561 	}
1562 	pci_priv->pci_link_down_ind = true;
1563 	spin_unlock_irqrestore(&pci_link_down_lock, flags);
1564 
1565 	if (pci_priv->mhi_ctrl) {
1566 		/* Notify MHI about link down*/
1567 		mhi_report_error(pci_priv->mhi_ctrl);
1568 	}
1569 
1570 	if (pci_dev->device == QCA6174_DEVICE_ID)
1571 		disable_irq_nosync(pci_dev->irq);
1572 
1573 	/* Notify bus related event. Now for all supported chips.
1574 	 * Here PCIe LINK_DOWN notification taken care.
1575 	 * uevent buffer can be extended later, to cover more bus info.
1576 	 */
1577 	cnss_pci_update_link_event(pci_priv, BUS_EVENT_PCI_LINK_DOWN, NULL);
1578 
1579 	cnss_fatal_err("PCI link down, schedule recovery\n");
1580 	cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
1581 }
1582 
1583 int cnss_pci_link_down(struct device *dev)
1584 {
1585 	struct pci_dev *pci_dev = to_pci_dev(dev);
1586 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1587 	struct cnss_plat_data *plat_priv = NULL;
1588 	int ret;
1589 
1590 	if (!pci_priv) {
1591 		cnss_pr_err("pci_priv is NULL\n");
1592 		return -EINVAL;
1593 	}
1594 
1595 	plat_priv = pci_priv->plat_priv;
1596 	if (!plat_priv) {
1597 		cnss_pr_err("plat_priv is NULL\n");
1598 		return -ENODEV;
1599 	}
1600 
1601 	if (pci_priv->pci_link_down_ind) {
1602 		cnss_pr_dbg("PCI link down recovery is already in progress\n");
1603 		return -EBUSY;
1604 	}
1605 
1606 	if (pci_priv->drv_connected_last &&
1607 	    of_property_read_bool(plat_priv->plat_dev->dev.of_node,
1608 				  "cnss-enable-self-recovery"))
1609 		plat_priv->ctrl_params.quirks |= BIT(LINK_DOWN_SELF_RECOVERY);
1610 
1611 	cnss_pr_err("PCI link down is detected by drivers\n");
1612 
1613 	ret = cnss_pci_assert_perst(pci_priv);
1614 	if (ret)
1615 		cnss_pci_handle_linkdown(pci_priv);
1616 
1617 	return ret;
1618 }
1619 EXPORT_SYMBOL(cnss_pci_link_down);
1620 
1621 int cnss_pci_get_reg_dump(struct device *dev, uint8_t *buffer, uint32_t len)
1622 {
1623 	struct pci_dev *pci_dev = to_pci_dev(dev);
1624 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1625 
1626 	if (!pci_priv) {
1627 		cnss_pr_err("pci_priv is NULL\n");
1628 		return -ENODEV;
1629 	}
1630 
1631 	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
1632 		cnss_pr_dbg("No PCIe reg dump since PCIe is suspended(D3)\n");
1633 		return -EACCES;
1634 	}
1635 
1636 	cnss_pr_dbg("Start to get PCIe reg dump\n");
1637 
1638 	return _cnss_pci_get_reg_dump(pci_priv, buffer, len);
1639 }
1640 EXPORT_SYMBOL(cnss_pci_get_reg_dump);
1641 
1642 int cnss_pcie_is_device_down(struct cnss_pci_data *pci_priv)
1643 {
1644 	struct cnss_plat_data *plat_priv;
1645 
1646 	if (!pci_priv) {
1647 		cnss_pr_err("pci_priv is NULL\n");
1648 		return -ENODEV;
1649 	}
1650 
1651 	plat_priv = pci_priv->plat_priv;
1652 	if (!plat_priv) {
1653 		cnss_pr_err("plat_priv is NULL\n");
1654 		return -ENODEV;
1655 	}
1656 
1657 	return test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) |
1658 		pci_priv->pci_link_down_ind;
1659 }
1660 
1661 int cnss_pci_is_device_down(struct device *dev)
1662 {
1663 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
1664 
1665 	return cnss_pcie_is_device_down(pci_priv);
1666 }
1667 EXPORT_SYMBOL(cnss_pci_is_device_down);
1668 
1669 void cnss_pci_lock_reg_window(struct device *dev, unsigned long *flags)
1670 {
1671 	spin_lock_bh(&pci_reg_window_lock);
1672 }
1673 EXPORT_SYMBOL(cnss_pci_lock_reg_window);
1674 
1675 void cnss_pci_unlock_reg_window(struct device *dev, unsigned long *flags)
1676 {
1677 	spin_unlock_bh(&pci_reg_window_lock);
1678 }
1679 EXPORT_SYMBOL(cnss_pci_unlock_reg_window);
1680 
1681 int cnss_get_pci_slot(struct device *dev)
1682 {
1683 	struct pci_dev *pci_dev = to_pci_dev(dev);
1684 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
1685 	struct cnss_plat_data *plat_priv = NULL;
1686 
1687 	if (!pci_priv) {
1688 		cnss_pr_err("pci_priv is NULL\n");
1689 		return -EINVAL;
1690 	}
1691 
1692 	plat_priv = pci_priv->plat_priv;
1693 	if (!plat_priv) {
1694 		cnss_pr_err("plat_priv is NULL\n");
1695 		return -ENODEV;
1696 	}
1697 
1698 	return plat_priv->rc_num;
1699 }
1700 EXPORT_SYMBOL(cnss_get_pci_slot);
1701 
1702 /**
1703  * cnss_pci_dump_bl_sram_mem - Dump WLAN device bootloader debug log
1704  * @pci_priv: driver PCI bus context pointer
1705  *
1706  * Dump primary and secondary bootloader debug log data. For SBL check the
1707  * log struct address and size for validity.
1708  *
1709  * Return: None
1710  */
1711 static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
1712 {
1713 	enum mhi_ee_type ee;
1714 	u32 mem_addr, val, pbl_log_max_size, sbl_log_max_size;
1715 	u32 pbl_log_sram_start;
1716 	u32 pbl_stage, sbl_log_start, sbl_log_size;
1717 	u32 pbl_wlan_boot_cfg, pbl_bootstrap_status;
1718 	u32 pbl_bootstrap_status_reg = PBL_BOOTSTRAP_STATUS;
1719 	u32 sbl_log_def_start = SRAM_START;
1720 	u32 sbl_log_def_end = SRAM_END;
1721 	int i;
1722 
1723 	switch (pci_priv->device_id) {
1724 	case QCA6390_DEVICE_ID:
1725 		pbl_log_sram_start = QCA6390_DEBUG_PBL_LOG_SRAM_START;
1726 		pbl_log_max_size = QCA6390_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1727 		sbl_log_max_size = QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1728 		break;
1729 	case QCA6490_DEVICE_ID:
1730 		pbl_log_sram_start = QCA6490_DEBUG_PBL_LOG_SRAM_START;
1731 		pbl_log_max_size = QCA6490_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1732 		sbl_log_max_size = QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1733 		break;
1734 	case KIWI_DEVICE_ID:
1735 		pbl_bootstrap_status_reg = KIWI_PBL_BOOTSTRAP_STATUS;
1736 		pbl_log_sram_start = KIWI_DEBUG_PBL_LOG_SRAM_START;
1737 		pbl_log_max_size = KIWI_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1738 		sbl_log_max_size = KIWI_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1739 		break;
1740 	case MANGO_DEVICE_ID:
1741 		pbl_bootstrap_status_reg = MANGO_PBL_BOOTSTRAP_STATUS;
1742 		pbl_log_sram_start = MANGO_DEBUG_PBL_LOG_SRAM_START;
1743 		pbl_log_max_size = MANGO_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1744 		sbl_log_max_size = MANGO_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1745 		break;
1746 	case PEACH_DEVICE_ID:
1747 		pbl_bootstrap_status_reg = PEACH_PBL_BOOTSTRAP_STATUS;
1748 		pbl_log_sram_start = PEACH_DEBUG_PBL_LOG_SRAM_START;
1749 		pbl_log_max_size = PEACH_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
1750 		sbl_log_max_size = PEACH_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
1751 		break;
1752 	default:
1753 		return;
1754 	}
1755 
1756 	if (cnss_pci_check_link_status(pci_priv))
1757 		return;
1758 
1759 	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
1760 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
1761 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
1762 	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
1763 	cnss_pci_reg_read(pci_priv, pbl_bootstrap_status_reg,
1764 			  &pbl_bootstrap_status);
1765 	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size:0x%08x\n",
1766 		    pbl_stage, sbl_log_start, sbl_log_size);
1767 	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x PBL_BOOTSTRAP_STATUS: 0x%08x\n",
1768 		    pbl_wlan_boot_cfg, pbl_bootstrap_status);
1769 
1770 	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
1771 	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
1772 		cnss_pr_dbg("Avoid Dumping PBL log data in Mission mode\n");
1773 		return;
1774 	}
1775 
1776 	cnss_pr_dbg("Dumping PBL log data\n");
1777 	for (i = 0; i < pbl_log_max_size; i += sizeof(val)) {
1778 		mem_addr = pbl_log_sram_start + i;
1779 		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
1780 			break;
1781 		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
1782 	}
1783 
1784 	sbl_log_size = (sbl_log_size > sbl_log_max_size ?
1785 			sbl_log_max_size : sbl_log_size);
1786 	if (sbl_log_start < sbl_log_def_start ||
1787 	    sbl_log_start > sbl_log_def_end ||
1788 	    (sbl_log_start + sbl_log_size) > sbl_log_def_end) {
1789 		cnss_pr_err("Invalid SBL log data\n");
1790 		return;
1791 	}
1792 
1793 	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
1794 	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
1795 		cnss_pr_dbg("Avoid Dumping SBL log data in Mission mode\n");
1796 		return;
1797 	}
1798 
1799 	cnss_pr_dbg("Dumping SBL log data\n");
1800 	for (i = 0; i < sbl_log_size; i += sizeof(val)) {
1801 		mem_addr = sbl_log_start + i;
1802 		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
1803 			break;
1804 		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
1805 	}
1806 }
1807 
1808 #ifdef CONFIG_DISABLE_CNSS_SRAM_DUMP
1809 static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
1810 {
1811 }
1812 #else
1813 static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
1814 {
1815 	struct cnss_plat_data *plat_priv;
1816 	u32 i, mem_addr;
1817 	u32 *dump_ptr;
1818 
1819 	plat_priv = pci_priv->plat_priv;
1820 
1821 	if (plat_priv->device_id != QCA6490_DEVICE_ID ||
1822 	    cnss_get_host_build_type() != QMI_HOST_BUILD_TYPE_PRIMARY_V01)
1823 		return;
1824 
1825 	if (!plat_priv->sram_dump) {
1826 		cnss_pr_err("SRAM dump memory is not allocated\n");
1827 		return;
1828 	}
1829 
1830 	if (cnss_pci_check_link_status(pci_priv))
1831 		return;
1832 
1833 	cnss_pr_dbg("Dumping SRAM at 0x%lx\n", plat_priv->sram_dump);
1834 
1835 	for (i = 0; i < SRAM_DUMP_SIZE; i += sizeof(u32)) {
1836 		mem_addr = SRAM_START + i;
1837 		dump_ptr = (u32 *)(plat_priv->sram_dump + i);
1838 		if (cnss_pci_reg_read(pci_priv, mem_addr, dump_ptr)) {
1839 			cnss_pr_err("SRAM Dump failed at 0x%x\n", mem_addr);
1840 			break;
1841 		}
1842 		/* Relinquish CPU after dumping 256KB chunks*/
1843 		if (!(i % CNSS_256KB_SIZE))
1844 			cond_resched();
1845 	}
1846 }
1847 #endif
1848 
1849 static int cnss_pci_handle_mhi_poweron_timeout(struct cnss_pci_data *pci_priv)
1850 {
1851 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
1852 
1853 	cnss_fatal_err("MHI power up returns timeout\n");
1854 
1855 	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE) ||
1856 	    cnss_get_dev_sol_value(plat_priv) > 0) {
1857 		/* Wait for RDDM if RDDM cookie is set or device SOL GPIO is
1858 		 * high. If RDDM times out, PBL/SBL error region may have been
1859 		 * erased so no need to dump them either.
1860 		 */
1861 		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
1862 		    !pci_priv->pci_link_down_ind) {
1863 			mod_timer(&pci_priv->dev_rddm_timer,
1864 				  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
1865 		}
1866 	} else {
1867 		cnss_pr_dbg("RDDM cookie is not set and device SOL is low\n");
1868 		cnss_mhi_debug_reg_dump(pci_priv);
1869 		cnss_pci_soc_scratch_reg_dump(pci_priv);
1870 		/* Dump PBL/SBL error log if RDDM cookie is not set */
1871 		cnss_pci_dump_bl_sram_mem(pci_priv);
1872 		cnss_pci_dump_sram(pci_priv);
1873 		return -ETIMEDOUT;
1874 	}
1875 
1876 	return 0;
1877 }
1878 
1879 static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
1880 {
1881 	switch (mhi_state) {
1882 	case CNSS_MHI_INIT:
1883 		return "INIT";
1884 	case CNSS_MHI_DEINIT:
1885 		return "DEINIT";
1886 	case CNSS_MHI_POWER_ON:
1887 		return "POWER_ON";
1888 	case CNSS_MHI_POWERING_OFF:
1889 		return "POWERING_OFF";
1890 	case CNSS_MHI_POWER_OFF:
1891 		return "POWER_OFF";
1892 	case CNSS_MHI_FORCE_POWER_OFF:
1893 		return "FORCE_POWER_OFF";
1894 	case CNSS_MHI_SUSPEND:
1895 		return "SUSPEND";
1896 	case CNSS_MHI_RESUME:
1897 		return "RESUME";
1898 	case CNSS_MHI_TRIGGER_RDDM:
1899 		return "TRIGGER_RDDM";
1900 	case CNSS_MHI_RDDM_DONE:
1901 		return "RDDM_DONE";
1902 	default:
1903 		return "UNKNOWN";
1904 	}
1905 };
1906 
1907 static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
1908 					enum cnss_mhi_state mhi_state)
1909 {
1910 	switch (mhi_state) {
1911 	case CNSS_MHI_INIT:
1912 		if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
1913 			return 0;
1914 		break;
1915 	case CNSS_MHI_DEINIT:
1916 	case CNSS_MHI_POWER_ON:
1917 		if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
1918 		    !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
1919 			return 0;
1920 		break;
1921 	case CNSS_MHI_FORCE_POWER_OFF:
1922 		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
1923 			return 0;
1924 		break;
1925 	case CNSS_MHI_POWER_OFF:
1926 	case CNSS_MHI_SUSPEND:
1927 		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
1928 		    !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
1929 			return 0;
1930 		break;
1931 	case CNSS_MHI_RESUME:
1932 		if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
1933 			return 0;
1934 		break;
1935 	case CNSS_MHI_TRIGGER_RDDM:
1936 		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
1937 		    !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
1938 			return 0;
1939 		break;
1940 	case CNSS_MHI_RDDM_DONE:
1941 		return 0;
1942 	default:
1943 		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
1944 			    cnss_mhi_state_to_str(mhi_state), mhi_state);
1945 	}
1946 
1947 	cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
1948 		    cnss_mhi_state_to_str(mhi_state), mhi_state,
1949 		    pci_priv->mhi_state);
1950 	if (mhi_state != CNSS_MHI_TRIGGER_RDDM)
1951 		CNSS_ASSERT(0);
1952 
1953 	return -EINVAL;
1954 }
1955 
1956 static int cnss_rddm_trigger_debug(struct cnss_pci_data *pci_priv)
1957 {
1958 	int read_val, ret;
1959 
1960 	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
1961 		return -EOPNOTSUPP;
1962 
1963 	if (cnss_pci_check_link_status(pci_priv))
1964 		return -EINVAL;
1965 
1966 	cnss_pr_err("Write GCC Spare with ACE55 Pattern");
1967 	cnss_pci_reg_write(pci_priv, GCC_GCC_SPARE_REG_1, 0xACE55);
1968 	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
1969 	cnss_pr_err("Read back GCC Spare: 0x%x, ret: %d", read_val, ret);
1970 	ret = cnss_pci_reg_read(pci_priv, GCC_PRE_ARES_DEBUG_TIMER_VAL,
1971 				&read_val);
1972 	cnss_pr_err("Warm reset allowed check: 0x%x, ret: %d", read_val, ret);
1973 	return ret;
1974 }
1975 
1976 static int cnss_rddm_trigger_check(struct cnss_pci_data *pci_priv)
1977 {
1978 	int read_val, ret;
1979 	u32 pbl_stage, sbl_log_start, sbl_log_size, pbl_wlan_boot_cfg;
1980 
1981 	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
1982 		return -EOPNOTSUPP;
1983 
1984 	if (cnss_pci_check_link_status(pci_priv))
1985 		return -EINVAL;
1986 
1987 	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
1988 	cnss_pr_err("Read GCC spare to check reset status: 0x%x, ret: %d",
1989 		    read_val, ret);
1990 
1991 	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
1992 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
1993 	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
1994 	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
1995 	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size:0x%08x \n",
1996 		    pbl_stage, sbl_log_start, sbl_log_size);
1997 	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x\n", pbl_wlan_boot_cfg);
1998 
1999 	return ret;
2000 }
2001 
2002 static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
2003 				       enum cnss_mhi_state mhi_state)
2004 {
2005 	switch (mhi_state) {
2006 	case CNSS_MHI_INIT:
2007 		set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
2008 		break;
2009 	case CNSS_MHI_DEINIT:
2010 		clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
2011 		break;
2012 	case CNSS_MHI_POWER_ON:
2013 		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
2014 		break;
2015 	case CNSS_MHI_POWERING_OFF:
2016 		set_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
2017 		break;
2018 	case CNSS_MHI_POWER_OFF:
2019 	case CNSS_MHI_FORCE_POWER_OFF:
2020 		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
2021 		clear_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
2022 		clear_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
2023 		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
2024 		break;
2025 	case CNSS_MHI_SUSPEND:
2026 		set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
2027 		break;
2028 	case CNSS_MHI_RESUME:
2029 		clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
2030 		break;
2031 	case CNSS_MHI_TRIGGER_RDDM:
2032 		set_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
2033 		break;
2034 	case CNSS_MHI_RDDM_DONE:
2035 		set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
2036 		break;
2037 	default:
2038 		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
2039 	}
2040 }
2041 
2042 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
2043 static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
2044 {
2045 	return mhi_pm_resume_force(pci_priv->mhi_ctrl);
2046 }
2047 #else
2048 static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
2049 {
2050 	return mhi_pm_resume(pci_priv->mhi_ctrl);
2051 }
2052 #endif
2053 
2054 static int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
2055 				  enum cnss_mhi_state mhi_state)
2056 {
2057 	int ret = 0, retry = 0;
2058 
2059 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
2060 		return 0;
2061 
2062 	if (mhi_state < 0) {
2063 		cnss_pr_err("Invalid MHI state (%d)\n", mhi_state);
2064 		return -EINVAL;
2065 	}
2066 
2067 	ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state);
2068 	if (ret)
2069 		goto out;
2070 
2071 	cnss_pr_vdbg("Setting MHI state: %s(%d)\n",
2072 		     cnss_mhi_state_to_str(mhi_state), mhi_state);
2073 
2074 	switch (mhi_state) {
2075 	case CNSS_MHI_INIT:
2076 		ret = mhi_prepare_for_power_up(pci_priv->mhi_ctrl);
2077 		break;
2078 	case CNSS_MHI_DEINIT:
2079 		mhi_unprepare_after_power_down(pci_priv->mhi_ctrl);
2080 		ret = 0;
2081 		break;
2082 	case CNSS_MHI_POWER_ON:
2083 		ret = mhi_sync_power_up(pci_priv->mhi_ctrl);
2084 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
2085 		/* Only set img_pre_alloc when power up succeeds */
2086 		if (!ret && !pci_priv->mhi_ctrl->img_pre_alloc) {
2087 			cnss_pr_dbg("Notify MHI to use already allocated images\n");
2088 			pci_priv->mhi_ctrl->img_pre_alloc = true;
2089 		}
2090 #endif
2091 		break;
2092 	case CNSS_MHI_POWER_OFF:
2093 		mhi_power_down(pci_priv->mhi_ctrl, true);
2094 		ret = 0;
2095 		break;
2096 	case CNSS_MHI_FORCE_POWER_OFF:
2097 		mhi_power_down(pci_priv->mhi_ctrl, false);
2098 		ret = 0;
2099 		break;
2100 	case CNSS_MHI_SUSPEND:
2101 retry_mhi_suspend:
2102 		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
2103 		if (pci_priv->drv_connected_last)
2104 			ret = cnss_mhi_pm_fast_suspend(pci_priv, true);
2105 		else
2106 			ret = mhi_pm_suspend(pci_priv->mhi_ctrl);
2107 		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
2108 		if (ret == -EBUSY && retry++ < MHI_SUSPEND_RETRY_MAX_TIMES) {
2109 			cnss_pr_vdbg("Retry MHI suspend #%d\n", retry);
2110 			usleep_range(MHI_SUSPEND_RETRY_DELAY_US,
2111 				     MHI_SUSPEND_RETRY_DELAY_US + 1000);
2112 			goto retry_mhi_suspend;
2113 		}
2114 		break;
2115 	case CNSS_MHI_RESUME:
2116 		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
2117 		if (pci_priv->drv_connected_last) {
2118 			ret = cnss_pci_prevent_l1(&pci_priv->pci_dev->dev);
2119 			if (ret) {
2120 				mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
2121 				break;
2122 			}
2123 			ret = cnss_mhi_pm_fast_resume(pci_priv, true);
2124 			cnss_pci_allow_l1(&pci_priv->pci_dev->dev);
2125 		} else {
2126 			if (pci_priv->device_id == QCA6390_DEVICE_ID)
2127 				ret = cnss_mhi_pm_force_resume(pci_priv);
2128 			else
2129 				ret = mhi_pm_resume(pci_priv->mhi_ctrl);
2130 		}
2131 		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
2132 		break;
2133 	case CNSS_MHI_TRIGGER_RDDM:
2134 		cnss_rddm_trigger_debug(pci_priv);
2135 		ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
2136 		if (ret) {
2137 			cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
2138 
2139 			cnss_pr_dbg("Sending host reset req\n");
2140 			ret = cnss_mhi_force_reset(pci_priv);
2141 			cnss_rddm_trigger_check(pci_priv);
2142 		}
2143 		break;
2144 	case CNSS_MHI_RDDM_DONE:
2145 		break;
2146 	default:
2147 		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
2148 		ret = -EINVAL;
2149 	}
2150 
2151 	if (ret)
2152 		goto out;
2153 
2154 	cnss_pci_set_mhi_state_bit(pci_priv, mhi_state);
2155 
2156 	return 0;
2157 
2158 out:
2159 	cnss_pr_err("Failed to set MHI state: %s(%d), err = %d\n",
2160 		    cnss_mhi_state_to_str(mhi_state), mhi_state, ret);
2161 	return ret;
2162 }
2163 
2164 static int cnss_pci_config_msi_addr(struct cnss_pci_data *pci_priv)
2165 {
2166 	int ret = 0;
2167 	struct pci_dev *pci_dev = pci_priv->pci_dev;
2168 	struct cnss_plat_data *plat_priv;
2169 
2170 	if (!pci_dev)
2171 		return -ENODEV;
2172 
2173 	if (!pci_dev->msix_enabled)
2174 		return ret;
2175 
2176 	plat_priv = pci_priv->plat_priv;
2177 	if (!plat_priv) {
2178 		cnss_pr_err("plat_priv is NULL\n");
2179 		return -ENODEV;
2180 	}
2181 
2182 	ret = of_property_read_u32(plat_priv->plat_dev->dev.of_node,
2183 				   "msix-match-addr",
2184 				   &pci_priv->msix_addr);
2185 	cnss_pr_dbg("MSI-X Match address is 0x%X\n",
2186 		    pci_priv->msix_addr);
2187 
2188 	return ret;
2189 }
2190 
2191 static int cnss_pci_config_msi_data(struct cnss_pci_data *pci_priv)
2192 {
2193 	struct msi_desc *msi_desc;
2194 	struct cnss_msi_config *msi_config;
2195 	struct pci_dev *pci_dev = pci_priv->pci_dev;
2196 
2197 	msi_config = pci_priv->msi_config;
2198 
2199 	if (pci_dev->msix_enabled) {
2200 		pci_priv->msi_ep_base_data = msi_config->users[0].base_vector;
2201 		cnss_pr_dbg("MSI-X base data is %d\n",
2202 			    pci_priv->msi_ep_base_data);
2203 		return 0;
2204 	}
2205 
2206 	msi_desc = irq_get_msi_desc(pci_dev->irq);
2207 	if (!msi_desc) {
2208 		cnss_pr_err("msi_desc is NULL!\n");
2209 		return -EINVAL;
2210 	}
2211 
2212 	pci_priv->msi_ep_base_data = msi_desc->msg.data;
2213 	cnss_pr_dbg("MSI base data is %d\n", pci_priv->msi_ep_base_data);
2214 
2215 	return 0;
2216 }
2217 
2218 #ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
2219 #define PLC_PCIE_NAME_LEN		14
2220 
2221 static struct cnss_plat_data *
2222 cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
2223 {
2224 	int plat_env_count = cnss_get_plat_env_count();
2225 	struct cnss_plat_data *plat_env;
2226 	struct cnss_pci_data *pci_priv;
2227 	int i = 0;
2228 
2229 	if (!driver_ops) {
2230 		cnss_pr_err("No cnss driver\n");
2231 		return NULL;
2232 	}
2233 
2234 	for (i = 0; i < plat_env_count; i++) {
2235 		plat_env = cnss_get_plat_env(i);
2236 		if (!plat_env)
2237 			continue;
2238 		if (driver_ops->name && plat_env->pld_bus_ops_name) {
2239 			/* driver_ops->name = PLD_PCIE_OPS_NAME
2240 			 * #ifdef MULTI_IF_NAME
2241 			 * #define PLD_PCIE_OPS_NAME "pld_pcie_" MULTI_IF_NAME
2242 			 * #else
2243 			 * #define PLD_PCIE_OPS_NAME "pld_pcie"
2244 			 * #endif
2245 			 */
2246 			if (memcmp(driver_ops->name,
2247 				   plat_env->pld_bus_ops_name,
2248 				   PLC_PCIE_NAME_LEN) == 0)
2249 				return plat_env;
2250 		}
2251 	}
2252 
2253 	cnss_pr_vdbg("Invalid cnss driver name from ko %s\n", driver_ops->name);
2254 	/* in the dual wlan card case, the pld_bus_ops_name from dts
2255 	 * and driver_ops-> name from ko should match, otherwise
2256 	 * wlanhost driver don't know which plat_env it can use;
2257 	 * if doesn't find the match one, then get first available
2258 	 * instance insteadly.
2259 	 */
2260 
2261 	for (i = 0; i < plat_env_count; i++) {
2262 		plat_env = cnss_get_plat_env(i);
2263 
2264 		if (!plat_env)
2265 			continue;
2266 
2267 		pci_priv = plat_env->bus_priv;
2268 		if (!pci_priv) {
2269 			cnss_pr_err("pci_priv is NULL\n");
2270 			continue;
2271 		}
2272 
2273 		if (driver_ops == pci_priv->driver_ops)
2274 			return plat_env;
2275 	}
2276 	/* Doesn't find the existing instance,
2277 	 * so return the fist empty instance
2278 	 */
2279 	for (i = 0; i < plat_env_count; i++) {
2280 		plat_env = cnss_get_plat_env(i);
2281 
2282 		if (!plat_env)
2283 			continue;
2284 		pci_priv = plat_env->bus_priv;
2285 		if (!pci_priv) {
2286 			cnss_pr_err("pci_priv is NULL\n");
2287 			continue;
2288 		}
2289 
2290 		if (!pci_priv->driver_ops)
2291 			return plat_env;
2292 	}
2293 
2294 	return NULL;
2295 }
2296 
2297 static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
2298 {
2299 	int ret = 0;
2300 	u32 scratch = QCA6390_PCIE_SOC_PCIE_REG_PCIE_SCRATCH_2_SOC_PCIE_REG;
2301 	struct cnss_plat_data *plat_priv;
2302 
2303 	if (!pci_priv) {
2304 		cnss_pr_err("pci_priv is NULL\n");
2305 		return -ENODEV;
2306 	}
2307 
2308 	plat_priv = pci_priv->plat_priv;
2309 	/**
2310 	 * in the single wlan chipset case, plat_priv->qrtr_node_id always is 0,
2311 	 * wlan fw will use the hardcode 7 as the qrtr node id.
2312 	 * in the dual Hastings case, we will read qrtr node id
2313 	 * from device tree and pass to get plat_priv->qrtr_node_id,
2314 	 * which always is not zero. And then store this new value
2315 	 * to pcie register, wlan fw will read out this qrtr node id
2316 	 * from this register and overwrite to the hardcode one
2317 	 * while do initialization for ipc router.
2318 	 * without this change, two Hastings will use the same
2319 	 * qrtr node instance id, which will mess up qmi message
2320 	 * exchange. According to qrtr spec, every node should
2321 	 * have unique qrtr node id
2322 	 */
2323 	if (plat_priv->device_id == QCA6390_DEVICE_ID &&
2324 	    plat_priv->qrtr_node_id) {
2325 		u32 val;
2326 
2327 		cnss_pr_dbg("write 0x%x to SCRATCH REG\n",
2328 			    plat_priv->qrtr_node_id);
2329 		ret = cnss_pci_reg_write(pci_priv, scratch,
2330 					 plat_priv->qrtr_node_id);
2331 		if (ret) {
2332 			cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
2333 				    scratch, ret);
2334 			goto out;
2335 		}
2336 
2337 		ret = cnss_pci_reg_read(pci_priv, scratch, &val);
2338 		if (ret) {
2339 			cnss_pr_err("Failed to read SCRATCH REG");
2340 			goto out;
2341 		}
2342 
2343 		if (val != plat_priv->qrtr_node_id) {
2344 			cnss_pr_err("qrtr node id write to register doesn't match with readout value");
2345 			return -ERANGE;
2346 		}
2347 	}
2348 out:
2349 	return ret;
2350 }
2351 #else
2352 static struct cnss_plat_data *
2353 cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
2354 {
2355 	return cnss_bus_dev_to_plat_priv(NULL);
2356 }
2357 
2358 static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
2359 {
2360 	return 0;
2361 }
2362 #endif
2363 
2364 int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
2365 {
2366 	int ret = 0;
2367 	struct cnss_plat_data *plat_priv;
2368 	unsigned int timeout = 0;
2369 	int retry = 0;
2370 
2371 	if (!pci_priv) {
2372 		cnss_pr_err("pci_priv is NULL\n");
2373 		return -ENODEV;
2374 	}
2375 
2376 	plat_priv = pci_priv->plat_priv;
2377 	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
2378 		return 0;
2379 
2380 	if (MHI_TIMEOUT_OVERWRITE_MS)
2381 		pci_priv->mhi_ctrl->timeout_ms = MHI_TIMEOUT_OVERWRITE_MS;
2382 	cnss_mhi_set_m2_timeout_ms(pci_priv, MHI_M2_TIMEOUT_MS);
2383 
2384 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
2385 	if (ret)
2386 		return ret;
2387 
2388 	timeout = pci_priv->mhi_ctrl->timeout_ms;
2389 	/* For non-perf builds the timeout is 10 (default) * 6 seconds */
2390 	if (cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
2391 		pci_priv->mhi_ctrl->timeout_ms *= 6;
2392 	else /* For perf builds the timeout is 10 (default) * 3 seconds */
2393 		pci_priv->mhi_ctrl->timeout_ms *= 3;
2394 
2395 retry:
2396 	ret = cnss_pci_store_qrtr_node_id(pci_priv);
2397 	if (ret) {
2398 		if (retry++ < REG_RETRY_MAX_TIMES)
2399 			goto retry;
2400 		else
2401 			return ret;
2402 	}
2403 
2404 	/* Start the timer to dump MHI/PBL/SBL debug data periodically */
2405 	mod_timer(&pci_priv->boot_debug_timer,
2406 		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
2407 
2408 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON);
2409 	del_timer_sync(&pci_priv->boot_debug_timer);
2410 	if (ret == 0)
2411 		cnss_wlan_adsp_pc_enable(pci_priv, false);
2412 
2413 	pci_priv->mhi_ctrl->timeout_ms = timeout;
2414 
2415 	if (ret == -ETIMEDOUT) {
2416 		/* This is a special case needs to be handled that if MHI
2417 		 * power on returns -ETIMEDOUT, controller needs to take care
2418 		 * the cleanup by calling MHI power down. Force to set the bit
2419 		 * for driver internal MHI state to make sure it can be handled
2420 		 * properly later.
2421 		 */
2422 		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
2423 		ret = cnss_pci_handle_mhi_poweron_timeout(pci_priv);
2424 	} else if (!ret) {
2425 		/* kernel may allocate a dummy vector before request_irq and
2426 		 * then allocate a real vector when request_irq is called.
2427 		 * So get msi_data here again to avoid spurious interrupt
2428 		 * as msi_data will configured to srngs.
2429 		 */
2430 		if (cnss_pci_is_one_msi(pci_priv))
2431 			ret = cnss_pci_config_msi_data(pci_priv);
2432 	}
2433 
2434 	return ret;
2435 }
2436 
2437 static void cnss_pci_power_off_mhi(struct cnss_pci_data *pci_priv)
2438 {
2439 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2440 
2441 	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
2442 		return;
2443 
2444 	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state)) {
2445 		cnss_pr_dbg("MHI is already powered off\n");
2446 		return;
2447 	}
2448 	cnss_wlan_adsp_pc_enable(pci_priv, true);
2449 	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
2450 	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_POWERING_OFF);
2451 
2452 	if (!pci_priv->pci_link_down_ind)
2453 		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
2454 	else
2455 		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_FORCE_POWER_OFF);
2456 }
2457 
2458 static void cnss_pci_deinit_mhi(struct cnss_pci_data *pci_priv)
2459 {
2460 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2461 
2462 	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
2463 		return;
2464 
2465 	if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state)) {
2466 		cnss_pr_dbg("MHI is already deinited\n");
2467 		return;
2468 	}
2469 
2470 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
2471 }
2472 
2473 static void cnss_pci_set_wlaon_pwr_ctrl(struct cnss_pci_data *pci_priv,
2474 					bool set_vddd4blow, bool set_shutdown,
2475 					bool do_force_wake)
2476 {
2477 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2478 	int ret;
2479 	u32 val;
2480 
2481 	if (!plat_priv->set_wlaon_pwr_ctrl)
2482 		return;
2483 
2484 	if (pci_priv->pci_link_state == PCI_LINK_DOWN ||
2485 	    pci_priv->pci_link_down_ind)
2486 		return;
2487 
2488 	if (do_force_wake)
2489 		if (cnss_pci_force_wake_get(pci_priv))
2490 			return;
2491 
2492 	ret = cnss_pci_reg_read(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, &val);
2493 	if (ret) {
2494 		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
2495 			    WLAON_QFPROM_PWR_CTRL_REG, ret);
2496 		goto force_wake_put;
2497 	}
2498 
2499 	cnss_pr_dbg("Read register offset 0x%x, val = 0x%x\n",
2500 		    WLAON_QFPROM_PWR_CTRL_REG, val);
2501 
2502 	if (set_vddd4blow)
2503 		val |= QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;
2504 	else
2505 		val &= ~QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;
2506 
2507 	if (set_shutdown)
2508 		val |= QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;
2509 	else
2510 		val &= ~QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;
2511 
2512 	ret = cnss_pci_reg_write(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, val);
2513 	if (ret) {
2514 		cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
2515 			    WLAON_QFPROM_PWR_CTRL_REG, ret);
2516 		goto force_wake_put;
2517 	}
2518 
2519 	cnss_pr_dbg("Write val 0x%x to register offset 0x%x\n", val,
2520 		    WLAON_QFPROM_PWR_CTRL_REG);
2521 
2522 	if (set_shutdown)
2523 		usleep_range(WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US,
2524 			     WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US);
2525 
2526 force_wake_put:
2527 	if (do_force_wake)
2528 		cnss_pci_force_wake_put(pci_priv);
2529 }
2530 
2531 static int cnss_pci_get_device_timestamp(struct cnss_pci_data *pci_priv,
2532 					 u64 *time_us)
2533 {
2534 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2535 	u32 low, high;
2536 	u64 device_ticks;
2537 
2538 	if (!plat_priv->device_freq_hz) {
2539 		cnss_pr_err("Device time clock frequency is not valid\n");
2540 		return -EINVAL;
2541 	}
2542 
2543 	switch (pci_priv->device_id) {
2544 	case KIWI_DEVICE_ID:
2545 	case MANGO_DEVICE_ID:
2546 	case PEACH_DEVICE_ID:
2547 		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_LOW, &low);
2548 		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_HIGH, &high);
2549 		break;
2550 	default:
2551 		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL3, &low);
2552 		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL4, &high);
2553 		break;
2554 	}
2555 
2556 	device_ticks = (u64)high << 32 | low;
2557 	do_div(device_ticks, plat_priv->device_freq_hz / 100000);
2558 	*time_us = device_ticks * 10;
2559 
2560 	return 0;
2561 }
2562 
2563 static void cnss_pci_enable_time_sync_counter(struct cnss_pci_data *pci_priv)
2564 {
2565 	switch (pci_priv->device_id) {
2566 	case KIWI_DEVICE_ID:
2567 	case MANGO_DEVICE_ID:
2568 	case PEACH_DEVICE_ID:
2569 		return;
2570 	default:
2571 		break;
2572 	}
2573 
2574 	cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
2575 			   TIME_SYNC_ENABLE);
2576 }
2577 
2578 static void cnss_pci_clear_time_sync_counter(struct cnss_pci_data *pci_priv)
2579 {
2580 	switch (pci_priv->device_id) {
2581 	case KIWI_DEVICE_ID:
2582 	case MANGO_DEVICE_ID:
2583 	case PEACH_DEVICE_ID:
2584 		return;
2585 	default:
2586 		break;
2587 	}
2588 
2589 	cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
2590 			   TIME_SYNC_CLEAR);
2591 }
2592 
2593 
2594 static void cnss_pci_time_sync_reg_update(struct cnss_pci_data *pci_priv,
2595 					  u32 low, u32 high)
2596 {
2597 	u32 time_reg_low;
2598 	u32 time_reg_high;
2599 
2600 	switch (pci_priv->device_id) {
2601 	case KIWI_DEVICE_ID:
2602 	case MANGO_DEVICE_ID:
2603 	case PEACH_DEVICE_ID:
2604 		/* Use the next two shadow registers after host's usage */
2605 		time_reg_low = PCIE_SHADOW_REG_VALUE_0 +
2606 				(pci_priv->plat_priv->num_shadow_regs_v3 *
2607 				 SHADOW_REG_LEN_BYTES);
2608 		time_reg_high = time_reg_low + SHADOW_REG_LEN_BYTES;
2609 		break;
2610 	default:
2611 		time_reg_low = PCIE_SHADOW_REG_VALUE_34;
2612 		time_reg_high = PCIE_SHADOW_REG_VALUE_35;
2613 		break;
2614 	}
2615 
2616 	cnss_pci_reg_write(pci_priv, time_reg_low, low);
2617 	cnss_pci_reg_write(pci_priv, time_reg_high, high);
2618 
2619 	cnss_pci_reg_read(pci_priv, time_reg_low, &low);
2620 	cnss_pci_reg_read(pci_priv, time_reg_high, &high);
2621 
2622 	cnss_pr_dbg("Updated time sync regs [0x%x] = 0x%x, [0x%x] = 0x%x\n",
2623 		    time_reg_low, low, time_reg_high, high);
2624 }
2625 
2626 static int cnss_pci_update_timestamp(struct cnss_pci_data *pci_priv)
2627 {
2628 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2629 	struct device *dev = &pci_priv->pci_dev->dev;
2630 	unsigned long flags = 0;
2631 	u64 host_time_us, device_time_us, offset;
2632 	u32 low, high;
2633 	int ret;
2634 
2635 	ret = cnss_pci_prevent_l1(dev);
2636 	if (ret)
2637 		goto out;
2638 
2639 	ret = cnss_pci_force_wake_get(pci_priv);
2640 	if (ret)
2641 		goto allow_l1;
2642 
2643 	spin_lock_irqsave(&time_sync_lock, flags);
2644 	cnss_pci_clear_time_sync_counter(pci_priv);
2645 	cnss_pci_enable_time_sync_counter(pci_priv);
2646 	host_time_us = cnss_get_host_timestamp(plat_priv);
2647 	ret = cnss_pci_get_device_timestamp(pci_priv, &device_time_us);
2648 	cnss_pci_clear_time_sync_counter(pci_priv);
2649 	spin_unlock_irqrestore(&time_sync_lock, flags);
2650 	if (ret)
2651 		goto force_wake_put;
2652 
2653 	if (host_time_us < device_time_us) {
2654 		cnss_pr_err("Host time (%llu us) is smaller than device time (%llu us), stop\n",
2655 			    host_time_us, device_time_us);
2656 		ret = -EINVAL;
2657 		goto force_wake_put;
2658 	}
2659 
2660 	offset = host_time_us - device_time_us;
2661 	cnss_pr_dbg("Host time = %llu us, device time = %llu us, offset = %llu us\n",
2662 		    host_time_us, device_time_us, offset);
2663 
2664 	low = offset & 0xFFFFFFFF;
2665 	high = offset >> 32;
2666 
2667 	cnss_pci_time_sync_reg_update(pci_priv, low, high);
2668 
2669 force_wake_put:
2670 	cnss_pci_force_wake_put(pci_priv);
2671 allow_l1:
2672 	cnss_pci_allow_l1(dev);
2673 out:
2674 	return ret;
2675 }
2676 
2677 static void cnss_pci_time_sync_work_hdlr(struct work_struct *work)
2678 {
2679 	struct cnss_pci_data *pci_priv =
2680 		container_of(work, struct cnss_pci_data, time_sync_work.work);
2681 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2682 	unsigned int time_sync_period_ms =
2683 		plat_priv->ctrl_params.time_sync_period;
2684 
2685 	if (test_bit(DISABLE_TIME_SYNC, &plat_priv->ctrl_params.quirks)) {
2686 		cnss_pr_dbg("Time sync is disabled\n");
2687 		return;
2688 	}
2689 
2690 	if (!time_sync_period_ms) {
2691 		cnss_pr_dbg("Skip time sync as time period is 0\n");
2692 		return;
2693 	}
2694 
2695 	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
2696 		return;
2697 
2698 	if (cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS) < 0)
2699 		goto runtime_pm_put;
2700 
2701 	mutex_lock(&pci_priv->bus_lock);
2702 	cnss_pci_update_timestamp(pci_priv);
2703 	mutex_unlock(&pci_priv->bus_lock);
2704 	schedule_delayed_work(&pci_priv->time_sync_work,
2705 			      msecs_to_jiffies(time_sync_period_ms));
2706 
2707 runtime_pm_put:
2708 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
2709 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
2710 }
2711 
2712 static int cnss_pci_start_time_sync_update(struct cnss_pci_data *pci_priv)
2713 {
2714 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2715 
2716 	switch (pci_priv->device_id) {
2717 	case QCA6390_DEVICE_ID:
2718 	case QCA6490_DEVICE_ID:
2719 	case KIWI_DEVICE_ID:
2720 	case MANGO_DEVICE_ID:
2721 	case PEACH_DEVICE_ID:
2722 		break;
2723 	default:
2724 		return -EOPNOTSUPP;
2725 	}
2726 
2727 	if (!plat_priv->device_freq_hz) {
2728 		cnss_pr_dbg("Device time clock frequency is not valid, skip time sync\n");
2729 		return -EINVAL;
2730 	}
2731 
2732 	cnss_pci_time_sync_work_hdlr(&pci_priv->time_sync_work.work);
2733 
2734 	return 0;
2735 }
2736 
2737 static void cnss_pci_stop_time_sync_update(struct cnss_pci_data *pci_priv)
2738 {
2739 	switch (pci_priv->device_id) {
2740 	case QCA6390_DEVICE_ID:
2741 	case QCA6490_DEVICE_ID:
2742 	case KIWI_DEVICE_ID:
2743 	case MANGO_DEVICE_ID:
2744 	case PEACH_DEVICE_ID:
2745 		break;
2746 	default:
2747 		return;
2748 	}
2749 
2750 	cancel_delayed_work_sync(&pci_priv->time_sync_work);
2751 }
2752 
2753 int cnss_pci_set_therm_cdev_state(struct cnss_pci_data *pci_priv,
2754 				  unsigned long thermal_state,
2755 				  int tcdev_id)
2756 {
2757 	if (!pci_priv) {
2758 		cnss_pr_err("pci_priv is NULL!\n");
2759 		return -ENODEV;
2760 	}
2761 
2762 	if (!pci_priv->driver_ops || !pci_priv->driver_ops->set_therm_cdev_state) {
2763 		cnss_pr_err("driver_ops or set_therm_cdev_state is NULL\n");
2764 		return -EINVAL;
2765 	}
2766 
2767 	return pci_priv->driver_ops->set_therm_cdev_state(pci_priv->pci_dev,
2768 							 thermal_state,
2769 							 tcdev_id);
2770 }
2771 
2772 int cnss_pci_update_time_sync_period(struct cnss_pci_data *pci_priv,
2773 				     unsigned int time_sync_period)
2774 {
2775 	struct cnss_plat_data *plat_priv;
2776 
2777 	if (!pci_priv)
2778 		return -ENODEV;
2779 
2780 	plat_priv = pci_priv->plat_priv;
2781 
2782 	cnss_pci_stop_time_sync_update(pci_priv);
2783 	plat_priv->ctrl_params.time_sync_period = time_sync_period;
2784 	cnss_pci_start_time_sync_update(pci_priv);
2785 	cnss_pr_dbg("WLAN time sync period %u ms\n",
2786 		    plat_priv->ctrl_params.time_sync_period);
2787 
2788 	return 0;
2789 }
2790 
2791 int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
2792 {
2793 	int ret = 0;
2794 	struct cnss_plat_data *plat_priv;
2795 
2796 	if (!pci_priv)
2797 		return -ENODEV;
2798 
2799 	plat_priv = pci_priv->plat_priv;
2800 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
2801 		cnss_pr_err("Reboot is in progress, skip driver probe\n");
2802 		return -EINVAL;
2803 	}
2804 
2805 	if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
2806 		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2807 		cnss_pr_dbg("Skip driver probe\n");
2808 		goto out;
2809 	}
2810 
2811 	if (!pci_priv->driver_ops) {
2812 		cnss_pr_err("driver_ops is NULL\n");
2813 		ret = -EINVAL;
2814 		goto out;
2815 	}
2816 
2817 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
2818 	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
2819 		ret = pci_priv->driver_ops->reinit(pci_priv->pci_dev,
2820 						   pci_priv->pci_device_id);
2821 		if (ret) {
2822 			cnss_pr_err("Failed to reinit host driver, err = %d\n",
2823 				    ret);
2824 			goto out;
2825 		}
2826 		complete(&plat_priv->recovery_complete);
2827 	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
2828 		ret = pci_priv->driver_ops->probe(pci_priv->pci_dev,
2829 						  pci_priv->pci_device_id);
2830 		if (ret) {
2831 			cnss_pr_err("Failed to probe host driver, err = %d\n",
2832 				    ret);
2833 			complete_all(&plat_priv->power_up_complete);
2834 			goto out;
2835 		}
2836 		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
2837 		set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
2838 		cnss_pci_free_blob_mem(pci_priv);
2839 		complete_all(&plat_priv->power_up_complete);
2840 	} else if (test_bit(CNSS_DRIVER_IDLE_RESTART,
2841 			    &plat_priv->driver_state)) {
2842 		ret = pci_priv->driver_ops->idle_restart(pci_priv->pci_dev,
2843 			pci_priv->pci_device_id);
2844 		if (ret) {
2845 			cnss_pr_err("Failed to idle restart host driver, err = %d\n",
2846 				    ret);
2847 			plat_priv->power_up_error = ret;
2848 			complete_all(&plat_priv->power_up_complete);
2849 			goto out;
2850 		}
2851 		clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
2852 		complete_all(&plat_priv->power_up_complete);
2853 	} else {
2854 		complete(&plat_priv->power_up_complete);
2855 	}
2856 
2857 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
2858 		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2859 		__pm_relax(plat_priv->recovery_ws);
2860 	}
2861 
2862 	cnss_pci_start_time_sync_update(pci_priv);
2863 
2864 	return 0;
2865 
2866 out:
2867 	return ret;
2868 }
2869 
2870 int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
2871 {
2872 	struct cnss_plat_data *plat_priv;
2873 	int ret;
2874 
2875 	if (!pci_priv)
2876 		return -ENODEV;
2877 
2878 	plat_priv = pci_priv->plat_priv;
2879 
2880 	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) ||
2881 	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
2882 	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
2883 		cnss_pr_dbg("Skip driver remove\n");
2884 		return 0;
2885 	}
2886 
2887 	if (!pci_priv->driver_ops) {
2888 		cnss_pr_err("driver_ops is NULL\n");
2889 		return -EINVAL;
2890 	}
2891 
2892 	cnss_pci_stop_time_sync_update(pci_priv);
2893 
2894 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
2895 	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
2896 		pci_priv->driver_ops->shutdown(pci_priv->pci_dev);
2897 	} else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
2898 		pci_priv->driver_ops->remove(pci_priv->pci_dev);
2899 		clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
2900 	} else if (test_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
2901 			    &plat_priv->driver_state)) {
2902 		ret = pci_priv->driver_ops->idle_shutdown(pci_priv->pci_dev);
2903 		if (ret == -EAGAIN) {
2904 			clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
2905 				  &plat_priv->driver_state);
2906 			return ret;
2907 		}
2908 	}
2909 
2910 	plat_priv->get_info_cb_ctx = NULL;
2911 	plat_priv->get_info_cb = NULL;
2912 
2913 	return 0;
2914 }
2915 
2916 int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv,
2917 				      int modem_current_status)
2918 {
2919 	struct cnss_wlan_driver *driver_ops;
2920 
2921 	if (!pci_priv)
2922 		return -ENODEV;
2923 
2924 	driver_ops = pci_priv->driver_ops;
2925 	if (!driver_ops || !driver_ops->modem_status)
2926 		return -EINVAL;
2927 
2928 	driver_ops->modem_status(pci_priv->pci_dev, modem_current_status);
2929 
2930 	return 0;
2931 }
2932 
2933 int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
2934 			   enum cnss_driver_status status)
2935 {
2936 	struct cnss_wlan_driver *driver_ops;
2937 
2938 	if (!pci_priv)
2939 		return -ENODEV;
2940 
2941 	driver_ops = pci_priv->driver_ops;
2942 	if (!driver_ops || !driver_ops->update_status)
2943 		return -EINVAL;
2944 
2945 	cnss_pr_dbg("Update driver status: %d\n", status);
2946 
2947 	driver_ops->update_status(pci_priv->pci_dev, status);
2948 
2949 	return 0;
2950 }
2951 
2952 static void cnss_pci_misc_reg_dump(struct cnss_pci_data *pci_priv,
2953 				   struct cnss_misc_reg *misc_reg,
2954 				   u32 misc_reg_size,
2955 				   char *reg_name)
2956 {
2957 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
2958 	bool do_force_wake_put = true;
2959 	int i;
2960 
2961 	if (!misc_reg)
2962 		return;
2963 
2964 	if (in_interrupt() || irqs_disabled())
2965 		return;
2966 
2967 	if (cnss_pci_check_link_status(pci_priv))
2968 		return;
2969 
2970 	if (cnss_pci_force_wake_get(pci_priv)) {
2971 		/* Continue to dump when device has entered RDDM already */
2972 		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
2973 			return;
2974 		do_force_wake_put = false;
2975 	}
2976 
2977 	cnss_pr_dbg("Start to dump %s registers\n", reg_name);
2978 
2979 	for (i = 0; i < misc_reg_size; i++) {
2980 		if (!test_bit(pci_priv->misc_reg_dev_mask,
2981 			      &misc_reg[i].dev_mask))
2982 			continue;
2983 
2984 		if (misc_reg[i].wr) {
2985 			if (misc_reg[i].offset ==
2986 			    QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG &&
2987 			    i >= 1)
2988 				misc_reg[i].val =
2989 				QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG_MSK |
2990 				misc_reg[i - 1].val;
2991 			if (cnss_pci_reg_write(pci_priv,
2992 					       misc_reg[i].offset,
2993 					       misc_reg[i].val))
2994 				goto force_wake_put;
2995 			cnss_pr_vdbg("Write 0x%X to 0x%X\n",
2996 				     misc_reg[i].val,
2997 				     misc_reg[i].offset);
2998 
2999 		} else {
3000 			if (cnss_pci_reg_read(pci_priv,
3001 					      misc_reg[i].offset,
3002 					      &misc_reg[i].val))
3003 				goto force_wake_put;
3004 		}
3005 	}
3006 
3007 force_wake_put:
3008 	if (do_force_wake_put)
3009 		cnss_pci_force_wake_put(pci_priv);
3010 }
3011 
3012 static void cnss_pci_dump_misc_reg(struct cnss_pci_data *pci_priv)
3013 {
3014 	if (in_interrupt() || irqs_disabled())
3015 		return;
3016 
3017 	if (cnss_pci_check_link_status(pci_priv))
3018 		return;
3019 
3020 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wcss_reg,
3021 			       WCSS_REG_SIZE, "wcss");
3022 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->pcie_reg,
3023 			       PCIE_REG_SIZE, "pcie");
3024 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wlaon_reg,
3025 			       WLAON_REG_SIZE, "wlaon");
3026 	cnss_pci_misc_reg_dump(pci_priv, pci_priv->syspm_reg,
3027 			       SYSPM_REG_SIZE, "syspm");
3028 }
3029 
3030 static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv)
3031 {
3032 	int i, j = 0, array_size = SHADOW_REG_COUNT + SHADOW_REG_INTER_COUNT;
3033 	u32 reg_offset;
3034 	bool do_force_wake_put = true;
3035 
3036 	if (in_interrupt() || irqs_disabled())
3037 		return;
3038 
3039 	if (cnss_pci_check_link_status(pci_priv))
3040 		return;
3041 
3042 	if (!pci_priv->debug_reg) {
3043 		pci_priv->debug_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
3044 						   sizeof(*pci_priv->debug_reg)
3045 						   * array_size, GFP_KERNEL);
3046 		if (!pci_priv->debug_reg)
3047 			return;
3048 	}
3049 
3050 	if (cnss_pci_force_wake_get(pci_priv))
3051 		do_force_wake_put = false;
3052 
3053 	cnss_pr_dbg("Start to dump shadow registers\n");
3054 
3055 	for (i = 0; i < SHADOW_REG_COUNT; i++, j++) {
3056 		reg_offset = PCIE_SHADOW_REG_VALUE_0 + i * 4;
3057 		pci_priv->debug_reg[j].offset = reg_offset;
3058 		if (cnss_pci_reg_read(pci_priv, reg_offset,
3059 				      &pci_priv->debug_reg[j].val))
3060 			goto force_wake_put;
3061 	}
3062 
3063 	for (i = 0; i < SHADOW_REG_INTER_COUNT; i++, j++) {
3064 		reg_offset = PCIE_SHADOW_REG_INTER_0 + i * 4;
3065 		pci_priv->debug_reg[j].offset = reg_offset;
3066 		if (cnss_pci_reg_read(pci_priv, reg_offset,
3067 				      &pci_priv->debug_reg[j].val))
3068 			goto force_wake_put;
3069 	}
3070 
3071 force_wake_put:
3072 	if (do_force_wake_put)
3073 		cnss_pci_force_wake_put(pci_priv);
3074 }
3075 
3076 static int cnss_qca6174_powerup(struct cnss_pci_data *pci_priv)
3077 {
3078 	int ret = 0;
3079 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3080 
3081 	ret = cnss_power_on_device(plat_priv, false);
3082 	if (ret) {
3083 		cnss_pr_err("Failed to power on device, err = %d\n", ret);
3084 		goto out;
3085 	}
3086 
3087 	ret = cnss_resume_pci_link(pci_priv);
3088 	if (ret) {
3089 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
3090 		goto power_off;
3091 	}
3092 
3093 	ret = cnss_pci_call_driver_probe(pci_priv);
3094 	if (ret)
3095 		goto suspend_link;
3096 
3097 	return 0;
3098 suspend_link:
3099 	cnss_suspend_pci_link(pci_priv);
3100 power_off:
3101 	cnss_power_off_device(plat_priv);
3102 out:
3103 	return ret;
3104 }
3105 
3106 static int cnss_qca6174_shutdown(struct cnss_pci_data *pci_priv)
3107 {
3108 	int ret = 0;
3109 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3110 
3111 	cnss_pci_pm_runtime_resume(pci_priv);
3112 
3113 	ret = cnss_pci_call_driver_remove(pci_priv);
3114 	if (ret == -EAGAIN)
3115 		goto out;
3116 
3117 	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
3118 				   CNSS_BUS_WIDTH_NONE);
3119 	cnss_pci_set_monitor_wake_intr(pci_priv, false);
3120 	cnss_pci_set_auto_suspended(pci_priv, 0);
3121 
3122 	ret = cnss_suspend_pci_link(pci_priv);
3123 	if (ret)
3124 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
3125 
3126 	cnss_power_off_device(plat_priv);
3127 
3128 	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
3129 	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);
3130 
3131 out:
3132 	return ret;
3133 }
3134 
3135 static void cnss_qca6174_crash_shutdown(struct cnss_pci_data *pci_priv)
3136 {
3137 	if (pci_priv->driver_ops && pci_priv->driver_ops->crash_shutdown)
3138 		pci_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
3139 }
3140 
3141 static int cnss_qca6174_ramdump(struct cnss_pci_data *pci_priv)
3142 {
3143 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3144 	struct cnss_ramdump_info *ramdump_info;
3145 
3146 	ramdump_info = &plat_priv->ramdump_info;
3147 	if (!ramdump_info->ramdump_size)
3148 		return -EINVAL;
3149 
3150 	return cnss_do_ramdump(plat_priv);
3151 }
3152 
3153 static void cnss_get_driver_mode_update_fw_name(struct cnss_plat_data *plat_priv)
3154 {
3155 	struct cnss_pci_data *pci_priv;
3156 	struct cnss_wlan_driver *driver_ops;
3157 
3158 	pci_priv = plat_priv->bus_priv;
3159 	driver_ops = pci_priv->driver_ops;
3160 
3161 	if (driver_ops && driver_ops->get_driver_mode) {
3162 		plat_priv->driver_mode = driver_ops->get_driver_mode();
3163 		cnss_pci_update_fw_name(pci_priv);
3164 		cnss_pr_dbg("New driver mode is %d", plat_priv->driver_mode);
3165 	}
3166 }
3167 
3168 static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
3169 {
3170 	int ret = 0;
3171 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3172 	unsigned int timeout;
3173 	int retry = 0, bt_en_gpio = plat_priv->pinctrl_info.bt_en_gpio;
3174 	int sw_ctrl_gpio = plat_priv->pinctrl_info.sw_ctrl_gpio;
3175 
3176 	if (plat_priv->ramdump_info_v2.dump_data_valid) {
3177 		cnss_pci_clear_dump_info(pci_priv);
3178 		cnss_pci_power_off_mhi(pci_priv);
3179 		cnss_suspend_pci_link(pci_priv);
3180 		cnss_pci_deinit_mhi(pci_priv);
3181 		cnss_power_off_device(plat_priv);
3182 	}
3183 
3184 	/* Clear QMI send usage count during every power up */
3185 	pci_priv->qmi_send_usage_count = 0;
3186 
3187 	plat_priv->power_up_error = 0;
3188 
3189 	cnss_get_driver_mode_update_fw_name(plat_priv);
3190 retry:
3191 	ret = cnss_power_on_device(plat_priv, false);
3192 	if (ret) {
3193 		cnss_pr_err("Failed to power on device, err = %d\n", ret);
3194 		goto out;
3195 	}
3196 
3197 	ret = cnss_resume_pci_link(pci_priv);
3198 	if (ret) {
3199 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
3200 		cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
3201 			    cnss_get_input_gpio_value(plat_priv, sw_ctrl_gpio));
3202 		if (test_bit(IGNORE_PCI_LINK_FAILURE,
3203 			     &plat_priv->ctrl_params.quirks)) {
3204 			cnss_pr_dbg("Ignore PCI link resume failure\n");
3205 			ret = 0;
3206 			goto out;
3207 		}
3208 		if (ret == -EAGAIN && retry++ < POWER_ON_RETRY_MAX_TIMES) {
3209 			cnss_power_off_device(plat_priv);
3210 			/* Force toggle BT_EN GPIO low */
3211 			if (retry == POWER_ON_RETRY_MAX_TIMES) {
3212 				cnss_pr_dbg("Retry #%d. Set BT_EN GPIO(%u) low\n",
3213 					    retry, bt_en_gpio);
3214 				if (bt_en_gpio >= 0)
3215 					gpio_direction_output(bt_en_gpio, 0);
3216 				cnss_pr_dbg("BT_EN GPIO val: %d\n",
3217 					    gpio_get_value(bt_en_gpio));
3218 			}
3219 			cnss_pr_dbg("Retry to resume PCI link #%d\n", retry);
3220 			cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
3221 				    cnss_get_input_gpio_value(plat_priv,
3222 							      sw_ctrl_gpio));
3223 			msleep(POWER_ON_RETRY_DELAY_MS * retry);
3224 			goto retry;
3225 		}
3226 		/* Assert when it reaches maximum retries */
3227 		CNSS_ASSERT(0);
3228 		goto power_off;
3229 	}
3230 
3231 	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false, false);
3232 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI);
3233 
3234 	ret = cnss_pci_start_mhi(pci_priv);
3235 	if (ret) {
3236 		cnss_fatal_err("Failed to start MHI, err = %d\n", ret);
3237 		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
3238 		    !pci_priv->pci_link_down_ind && timeout) {
3239 			/* Start recovery directly for MHI start failures */
3240 			cnss_schedule_recovery(&pci_priv->pci_dev->dev,
3241 					       CNSS_REASON_DEFAULT);
3242 		}
3243 		return 0;
3244 	}
3245 
3246 	if (test_bit(USE_CORE_ONLY_FW, &plat_priv->ctrl_params.quirks)) {
3247 		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
3248 		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
3249 		return 0;
3250 	}
3251 
3252 	cnss_set_pin_connect_status(plat_priv);
3253 
3254 	if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks)) {
3255 		ret = cnss_pci_call_driver_probe(pci_priv);
3256 		if (ret)
3257 			goto stop_mhi;
3258 	} else if (timeout) {
3259 		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state))
3260 			timeout += WLAN_COLD_BOOT_CAL_TIMEOUT;
3261 		else
3262 			timeout += WLAN_MISSION_MODE_TIMEOUT;
3263 		mod_timer(&plat_priv->fw_boot_timer,
3264 			  jiffies + msecs_to_jiffies(timeout));
3265 	}
3266 
3267 	return 0;
3268 
3269 stop_mhi:
3270 	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, true);
3271 	cnss_pci_power_off_mhi(pci_priv);
3272 	cnss_suspend_pci_link(pci_priv);
3273 	cnss_pci_deinit_mhi(pci_priv);
3274 power_off:
3275 	cnss_power_off_device(plat_priv);
3276 out:
3277 	return ret;
3278 }
3279 
3280 static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv)
3281 {
3282 	int ret = 0;
3283 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3284 	int do_force_wake = true;
3285 
3286 	cnss_pci_pm_runtime_resume(pci_priv);
3287 
3288 	ret = cnss_pci_call_driver_remove(pci_priv);
3289 	if (ret == -EAGAIN)
3290 		goto out;
3291 
3292 	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
3293 				   CNSS_BUS_WIDTH_NONE);
3294 	cnss_pci_set_monitor_wake_intr(pci_priv, false);
3295 	cnss_pci_set_auto_suspended(pci_priv, 0);
3296 
3297 	if ((test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
3298 	     test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
3299 	     test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) ||
3300 	     test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state) ||
3301 	     test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) &&
3302 	    test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
3303 		del_timer(&pci_priv->dev_rddm_timer);
3304 		cnss_pci_collect_dump_info(pci_priv, false);
3305 
3306 		if (!plat_priv->recovery_enabled)
3307 			CNSS_ASSERT(0);
3308 	}
3309 
3310 	if (!cnss_is_device_powered_on(plat_priv)) {
3311 		cnss_pr_dbg("Device is already powered off, ignore\n");
3312 		goto skip_power_off;
3313 	}
3314 
3315 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
3316 		do_force_wake = false;
3317 	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, do_force_wake);
3318 
3319 	/* FBC image will be freed after powering off MHI, so skip
3320 	 * if RAM dump data is still valid.
3321 	 */
3322 	if (plat_priv->ramdump_info_v2.dump_data_valid)
3323 		goto skip_power_off;
3324 
3325 	cnss_pci_power_off_mhi(pci_priv);
3326 	ret = cnss_suspend_pci_link(pci_priv);
3327 	if (ret)
3328 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
3329 	cnss_pci_deinit_mhi(pci_priv);
3330 	cnss_power_off_device(plat_priv);
3331 
3332 skip_power_off:
3333 	pci_priv->remap_window = 0;
3334 
3335 	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
3336 	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
3337 	if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
3338 	    test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
3339 		clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
3340 		pci_priv->pci_link_down_ind = false;
3341 	}
3342 	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
3343 	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);
3344 	memset(&print_optimize, 0, sizeof(print_optimize));
3345 
3346 out:
3347 	return ret;
3348 }
3349 
3350 static void cnss_qca6290_crash_shutdown(struct cnss_pci_data *pci_priv)
3351 {
3352 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3353 
3354 	set_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
3355 	cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
3356 		    plat_priv->driver_state);
3357 
3358 	cnss_pci_collect_dump_info(pci_priv, true);
3359 	clear_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
3360 }
3361 
3362 static int cnss_qca6290_ramdump(struct cnss_pci_data *pci_priv)
3363 {
3364 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3365 	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
3366 	struct cnss_dump_data *dump_data = &info_v2->dump_data;
3367 	struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
3368 	int ret = 0;
3369 
3370 	if (!info_v2->dump_data_valid || !dump_seg ||
3371 	    dump_data->nentries == 0)
3372 		return 0;
3373 
3374 	ret = cnss_do_elf_ramdump(plat_priv);
3375 
3376 	cnss_pci_clear_dump_info(pci_priv);
3377 	cnss_pci_power_off_mhi(pci_priv);
3378 	cnss_suspend_pci_link(pci_priv);
3379 	cnss_pci_deinit_mhi(pci_priv);
3380 	cnss_power_off_device(plat_priv);
3381 
3382 	return ret;
3383 }
3384 
3385 int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv)
3386 {
3387 	int ret = 0;
3388 
3389 	if (!pci_priv) {
3390 		cnss_pr_err("pci_priv is NULL\n");
3391 		return -ENODEV;
3392 	}
3393 
3394 	switch (pci_priv->device_id) {
3395 	case QCA6174_DEVICE_ID:
3396 		ret = cnss_qca6174_powerup(pci_priv);
3397 		break;
3398 	case QCA6290_DEVICE_ID:
3399 	case QCA6390_DEVICE_ID:
3400 	case QCN7605_DEVICE_ID:
3401 	case QCA6490_DEVICE_ID:
3402 	case KIWI_DEVICE_ID:
3403 	case MANGO_DEVICE_ID:
3404 	case PEACH_DEVICE_ID:
3405 		ret = cnss_qca6290_powerup(pci_priv);
3406 		break;
3407 	default:
3408 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3409 			    pci_priv->device_id);
3410 		ret = -ENODEV;
3411 	}
3412 
3413 	return ret;
3414 }
3415 
3416 int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv)
3417 {
3418 	int ret = 0;
3419 
3420 	if (!pci_priv) {
3421 		cnss_pr_err("pci_priv is NULL\n");
3422 		return -ENODEV;
3423 	}
3424 
3425 	switch (pci_priv->device_id) {
3426 	case QCA6174_DEVICE_ID:
3427 		ret = cnss_qca6174_shutdown(pci_priv);
3428 		break;
3429 	case QCA6290_DEVICE_ID:
3430 	case QCA6390_DEVICE_ID:
3431 	case QCN7605_DEVICE_ID:
3432 	case QCA6490_DEVICE_ID:
3433 	case KIWI_DEVICE_ID:
3434 	case MANGO_DEVICE_ID:
3435 	case PEACH_DEVICE_ID:
3436 		ret = cnss_qca6290_shutdown(pci_priv);
3437 		break;
3438 	default:
3439 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3440 			    pci_priv->device_id);
3441 		ret = -ENODEV;
3442 	}
3443 
3444 	return ret;
3445 }
3446 
3447 int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv)
3448 {
3449 	int ret = 0;
3450 
3451 	if (!pci_priv) {
3452 		cnss_pr_err("pci_priv is NULL\n");
3453 		return -ENODEV;
3454 	}
3455 
3456 	switch (pci_priv->device_id) {
3457 	case QCA6174_DEVICE_ID:
3458 		cnss_qca6174_crash_shutdown(pci_priv);
3459 		break;
3460 	case QCA6290_DEVICE_ID:
3461 	case QCA6390_DEVICE_ID:
3462 	case QCN7605_DEVICE_ID:
3463 	case QCA6490_DEVICE_ID:
3464 	case KIWI_DEVICE_ID:
3465 	case MANGO_DEVICE_ID:
3466 	case PEACH_DEVICE_ID:
3467 		cnss_qca6290_crash_shutdown(pci_priv);
3468 		break;
3469 	default:
3470 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3471 			    pci_priv->device_id);
3472 		ret = -ENODEV;
3473 	}
3474 
3475 	return ret;
3476 }
3477 
3478 int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv)
3479 {
3480 	int ret = 0;
3481 
3482 	if (!pci_priv) {
3483 		cnss_pr_err("pci_priv is NULL\n");
3484 		return -ENODEV;
3485 	}
3486 
3487 	switch (pci_priv->device_id) {
3488 	case QCA6174_DEVICE_ID:
3489 		ret = cnss_qca6174_ramdump(pci_priv);
3490 		break;
3491 	case QCA6290_DEVICE_ID:
3492 	case QCA6390_DEVICE_ID:
3493 	case QCN7605_DEVICE_ID:
3494 	case QCA6490_DEVICE_ID:
3495 	case KIWI_DEVICE_ID:
3496 	case MANGO_DEVICE_ID:
3497 	case PEACH_DEVICE_ID:
3498 		ret = cnss_qca6290_ramdump(pci_priv);
3499 		break;
3500 	default:
3501 		cnss_pr_err("Unknown device_id found: 0x%x\n",
3502 			    pci_priv->device_id);
3503 		ret = -ENODEV;
3504 	}
3505 
3506 	return ret;
3507 }
3508 
3509 int cnss_pci_is_drv_connected(struct device *dev)
3510 {
3511 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
3512 
3513 	if (!pci_priv)
3514 		return -ENODEV;
3515 
3516 	return pci_priv->drv_connected_last;
3517 }
3518 EXPORT_SYMBOL(cnss_pci_is_drv_connected);
3519 
3520 static void cnss_wlan_reg_driver_work(struct work_struct *work)
3521 {
3522 	struct cnss_plat_data *plat_priv =
3523 	container_of(work, struct cnss_plat_data, wlan_reg_driver_work.work);
3524 	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
3525 	struct cnss_cal_info *cal_info;
3526 	unsigned int timeout;
3527 
3528 	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state))
3529 		return;
3530 
3531 	if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) {
3532 		goto reg_driver;
3533 	} else {
3534 		if (plat_priv->charger_mode) {
3535 			cnss_pr_err("Ignore calibration timeout in charger mode\n");
3536 			return;
3537 		}
3538 		if (!test_bit(CNSS_IN_COLD_BOOT_CAL,
3539 			      &plat_priv->driver_state)) {
3540 			timeout = cnss_get_timeout(plat_priv,
3541 						   CNSS_TIMEOUT_CALIBRATION);
3542 			cnss_pr_dbg("File system not ready to start calibration. Wait for %ds..\n",
3543 				    timeout / 1000);
3544 			schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
3545 					      msecs_to_jiffies(timeout));
3546 			return;
3547 		}
3548 
3549 		del_timer(&plat_priv->fw_boot_timer);
3550 		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) &&
3551 		    !test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3552 			cnss_pr_err("Timeout waiting for calibration to complete\n");
3553 			CNSS_ASSERT(0);
3554 		}
3555 		cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
3556 		if (!cal_info)
3557 			return;
3558 		cal_info->cal_status = CNSS_CAL_TIMEOUT;
3559 		cnss_driver_event_post(plat_priv,
3560 				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
3561 				       0, cal_info);
3562 	}
3563 reg_driver:
3564 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3565 		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
3566 		return;
3567 	}
3568 	reinit_completion(&plat_priv->power_up_complete);
3569 	cnss_driver_event_post(plat_priv,
3570 			       CNSS_DRIVER_EVENT_REGISTER_DRIVER,
3571 			       CNSS_EVENT_SYNC_UNKILLABLE,
3572 			       pci_priv->driver_ops);
3573 }
3574 
3575 int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
3576 {
3577 	int ret = 0;
3578 	struct cnss_plat_data *plat_priv;
3579 	struct cnss_pci_data *pci_priv;
3580 	const struct pci_device_id *id_table = driver_ops->id_table;
3581 	unsigned int timeout;
3582 
3583 	if (!cnss_check_driver_loading_allowed()) {
3584 		cnss_pr_info("No cnss2 dtsi entry present");
3585 		return -ENODEV;
3586 	}
3587 
3588 	plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops);
3589 
3590 	if (!plat_priv) {
3591 		cnss_pr_buf("plat_priv is not ready for register driver\n");
3592 		return -EAGAIN;
3593 	}
3594 
3595 	pci_priv = plat_priv->bus_priv;
3596 	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state)) {
3597 		while (id_table && id_table->device) {
3598 			if (plat_priv->device_id == id_table->device) {
3599 				if (plat_priv->device_id == KIWI_DEVICE_ID &&
3600 				    driver_ops->chip_version != 2) {
3601 					cnss_pr_err("WLAN HW disabled. kiwi_v2 only supported\n");
3602 					return -ENODEV;
3603 				}
3604 				cnss_pr_info("WLAN register driver deferred for device ID: 0x%x due to HW disable\n",
3605 					     id_table->device);
3606 				plat_priv->driver_ops = driver_ops;
3607 				return 0;
3608 			}
3609 			id_table++;
3610 		}
3611 		return -ENODEV;
3612 	}
3613 
3614 	if (!test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state)) {
3615 		cnss_pr_info("pci probe not yet done for register driver\n");
3616 		return -EAGAIN;
3617 	}
3618 
3619 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state)) {
3620 		cnss_pr_err("Driver has already registered\n");
3621 		return -EEXIST;
3622 	}
3623 
3624 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3625 		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
3626 		return -EINVAL;
3627 	}
3628 
3629 	if (!id_table || !pci_dev_present(id_table)) {
3630 		/* id_table pointer will move from pci_dev_present(),
3631 		 * so check again using local pointer.
3632 		 */
3633 		id_table = driver_ops->id_table;
3634 		while (id_table && id_table->vendor) {
3635 			cnss_pr_info("Host driver is built for PCIe device ID 0x%x\n",
3636 				     id_table->device);
3637 			id_table++;
3638 		}
3639 		cnss_pr_err("Enumerated PCIe device id is 0x%x, reject unsupported driver\n",
3640 			    pci_priv->device_id);
3641 		return -ENODEV;
3642 	}
3643 
3644 	if (driver_ops->chip_version != CNSS_CHIP_VER_ANY &&
3645 	    driver_ops->chip_version != plat_priv->device_version.major_version) {
3646 		cnss_pr_err("Driver built for chip ver 0x%x, enumerated ver 0x%x, reject unsupported driver\n",
3647 			    driver_ops->chip_version,
3648 			    plat_priv->device_version.major_version);
3649 		return -ENODEV;
3650 	}
3651 
3652 	cnss_get_driver_mode_update_fw_name(plat_priv);
3653 	set_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state);
3654 
3655 	if (!plat_priv->cbc_enabled ||
3656 	    test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state))
3657 		goto register_driver;
3658 
3659 	pci_priv->driver_ops = driver_ops;
3660 	/* If Cold Boot Calibration is enabled, it is the 1st step in init
3661 	 * sequence.CBC is done on file system_ready trigger. Qcacld will be
3662 	 * loaded from vendor_modprobe.sh at early boot and must be deferred
3663 	 * until CBC is complete
3664 	 */
3665 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_CALIBRATION);
3666 	INIT_DELAYED_WORK(&plat_priv->wlan_reg_driver_work,
3667 			  cnss_wlan_reg_driver_work);
3668 	schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
3669 			      msecs_to_jiffies(timeout));
3670 	cnss_pr_info("WLAN register driver deferred for Calibration\n");
3671 	return 0;
3672 register_driver:
3673 	reinit_completion(&plat_priv->power_up_complete);
3674 	ret = cnss_driver_event_post(plat_priv,
3675 				     CNSS_DRIVER_EVENT_REGISTER_DRIVER,
3676 				     CNSS_EVENT_SYNC_UNKILLABLE,
3677 				     driver_ops);
3678 
3679 	return ret;
3680 }
3681 EXPORT_SYMBOL(cnss_wlan_register_driver);
3682 
3683 void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
3684 {
3685 	struct cnss_plat_data *plat_priv;
3686 	int ret = 0;
3687 	unsigned int timeout;
3688 
3689 	plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops);
3690 	if (!plat_priv) {
3691 		cnss_pr_err("plat_priv is NULL\n");
3692 		return;
3693 	}
3694 
3695 	mutex_lock(&plat_priv->driver_ops_lock);
3696 
3697 	if (plat_priv->device_id == QCA6174_DEVICE_ID)
3698 		goto skip_wait_power_up;
3699 
3700 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_WLAN_WATCHDOG);
3701 	ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
3702 					  msecs_to_jiffies(timeout));
3703 	if (!ret) {
3704 		cnss_pr_err("Timeout (%ums) waiting for driver power up to complete\n",
3705 			    timeout);
3706 		CNSS_ASSERT(0);
3707 	}
3708 
3709 skip_wait_power_up:
3710 	if (!test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
3711 	    !test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
3712 		goto skip_wait_recovery;
3713 
3714 	reinit_completion(&plat_priv->recovery_complete);
3715 	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY);
3716 	ret = wait_for_completion_timeout(&plat_priv->recovery_complete,
3717 					  msecs_to_jiffies(timeout));
3718 	if (!ret) {
3719 		cnss_pr_err("Timeout (%ums) waiting for recovery to complete\n",
3720 			    timeout);
3721 		CNSS_ASSERT(0);
3722 	}
3723 
3724 skip_wait_recovery:
3725 	cnss_driver_event_post(plat_priv,
3726 			       CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
3727 			       CNSS_EVENT_SYNC_UNKILLABLE, NULL);
3728 
3729 	mutex_unlock(&plat_priv->driver_ops_lock);
3730 }
3731 EXPORT_SYMBOL(cnss_wlan_unregister_driver);
3732 
3733 int cnss_pci_register_driver_hdlr(struct cnss_pci_data *pci_priv,
3734 				  void *data)
3735 {
3736 	int ret = 0;
3737 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3738 
3739 	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
3740 		cnss_pr_dbg("Reboot or shutdown is in progress, ignore register driver\n");
3741 		return -EINVAL;
3742 	}
3743 
3744 	set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
3745 	pci_priv->driver_ops = data;
3746 
3747 	ret = cnss_pci_dev_powerup(pci_priv);
3748 	if (ret) {
3749 		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
3750 		pci_priv->driver_ops = NULL;
3751 	} else {
3752 		set_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);
3753 	}
3754 
3755 	return ret;
3756 }
3757 
3758 int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
3759 {
3760 	struct cnss_plat_data *plat_priv;
3761 
3762 	if (!pci_priv)
3763 		return -EINVAL;
3764 
3765 	plat_priv = pci_priv->plat_priv;
3766 	set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
3767 	cnss_pci_dev_shutdown(pci_priv);
3768 	pci_priv->driver_ops = NULL;
3769 	clear_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);
3770 
3771 	return 0;
3772 }
3773 
3774 static int cnss_pci_suspend_driver(struct cnss_pci_data *pci_priv)
3775 {
3776 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3777 	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
3778 	int ret = 0;
3779 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3780 
3781 	pm_message_t state = { .event = PM_EVENT_SUSPEND };
3782 
3783 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
3784 	    driver_ops && driver_ops->suspend) {
3785 		ret = driver_ops->suspend(pci_dev, state);
3786 		if (ret) {
3787 			cnss_pr_err("Failed to suspend host driver, err = %d\n",
3788 				    ret);
3789 			ret = -EAGAIN;
3790 		}
3791 	}
3792 
3793 	return ret;
3794 }
3795 
3796 static int cnss_pci_resume_driver(struct cnss_pci_data *pci_priv)
3797 {
3798 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3799 	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
3800 	int ret = 0;
3801 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
3802 
3803 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
3804 	    driver_ops && driver_ops->resume) {
3805 		ret = driver_ops->resume(pci_dev);
3806 		if (ret)
3807 			cnss_pr_err("Failed to resume host driver, err = %d\n",
3808 				    ret);
3809 	}
3810 
3811 	return ret;
3812 }
3813 
3814 int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv)
3815 {
3816 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3817 	int ret = 0;
3818 
3819 	if (pci_priv->pci_link_state == PCI_LINK_DOWN)
3820 		goto out;
3821 
3822 	if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) {
3823 		ret = -EAGAIN;
3824 		goto out;
3825 	}
3826 
3827 	if (pci_priv->drv_connected_last)
3828 		goto skip_disable_pci;
3829 
3830 	pci_clear_master(pci_dev);
3831 	cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
3832 	pci_disable_device(pci_dev);
3833 
3834 	ret = pci_set_power_state(pci_dev, PCI_D3hot);
3835 	if (ret)
3836 		cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
3837 
3838 skip_disable_pci:
3839 	if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
3840 		ret = -EAGAIN;
3841 		goto resume_mhi;
3842 	}
3843 	pci_priv->pci_link_state = PCI_LINK_DOWN;
3844 
3845 	return 0;
3846 
3847 resume_mhi:
3848 	if (!pci_is_enabled(pci_dev))
3849 		if (pci_enable_device(pci_dev))
3850 			cnss_pr_err("Failed to enable PCI device\n");
3851 	if (pci_priv->saved_state)
3852 		cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
3853 	pci_set_master(pci_dev);
3854 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
3855 out:
3856 	return ret;
3857 }
3858 
3859 int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv)
3860 {
3861 	struct pci_dev *pci_dev = pci_priv->pci_dev;
3862 	int ret = 0;
3863 
3864 	if (pci_priv->pci_link_state == PCI_LINK_UP)
3865 		goto out;
3866 
3867 	if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
3868 		cnss_fatal_err("Failed to resume PCI link from suspend\n");
3869 		cnss_pci_link_down(&pci_dev->dev);
3870 		ret = -EAGAIN;
3871 		goto out;
3872 	}
3873 
3874 	pci_priv->pci_link_state = PCI_LINK_UP;
3875 
3876 	if (pci_priv->drv_connected_last)
3877 		goto skip_enable_pci;
3878 
3879 	ret = pci_enable_device(pci_dev);
3880 	if (ret) {
3881 		cnss_pr_err("Failed to enable PCI device, err = %d\n",
3882 			    ret);
3883 		goto out;
3884 	}
3885 
3886 	if (pci_priv->saved_state)
3887 		cnss_set_pci_config_space(pci_priv,
3888 					  RESTORE_PCI_CONFIG_SPACE);
3889 	pci_set_master(pci_dev);
3890 
3891 skip_enable_pci:
3892 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
3893 out:
3894 	return ret;
3895 }
3896 
3897 static int cnss_pci_suspend(struct device *dev)
3898 {
3899 	int ret = 0;
3900 	struct pci_dev *pci_dev = to_pci_dev(dev);
3901 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
3902 	struct cnss_plat_data *plat_priv;
3903 
3904 	if (!pci_priv)
3905 		goto out;
3906 
3907 	plat_priv = pci_priv->plat_priv;
3908 	if (!plat_priv)
3909 		goto out;
3910 
3911 	if (!cnss_is_device_powered_on(plat_priv))
3912 		goto out;
3913 
3914 	/* No mhi state bit set if only finish pcie enumeration,
3915 	 * so test_bit is not applicable to check if it is INIT state.
3916 	 */
3917 	if (pci_priv->mhi_state == CNSS_MHI_INIT) {
3918 		bool suspend = cnss_should_suspend_pwroff(pci_dev);
3919 
3920 		/* Do PCI link suspend and power off in the LPM case
3921 		 * if chipset didn't do that after pcie enumeration.
3922 		 */
3923 		if (!suspend) {
3924 			ret = cnss_suspend_pci_link(pci_priv);
3925 			if (ret)
3926 				cnss_pr_err("Failed to suspend PCI link, err = %d\n",
3927 					    ret);
3928 			cnss_power_off_device(plat_priv);
3929 			goto out;
3930 		}
3931 	}
3932 
3933 	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
3934 	    pci_priv->drv_supported) {
3935 		pci_priv->drv_connected_last =
3936 			cnss_pci_get_drv_connected(pci_priv);
3937 		if (!pci_priv->drv_connected_last) {
3938 			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
3939 			ret = -EAGAIN;
3940 			goto out;
3941 		}
3942 	}
3943 
3944 	set_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
3945 
3946 	ret = cnss_pci_suspend_driver(pci_priv);
3947 	if (ret)
3948 		goto clear_flag;
3949 
3950 	if (!pci_priv->disable_pc) {
3951 		mutex_lock(&pci_priv->bus_lock);
3952 		ret = cnss_pci_suspend_bus(pci_priv);
3953 		mutex_unlock(&pci_priv->bus_lock);
3954 		if (ret)
3955 			goto resume_driver;
3956 	}
3957 
3958 	cnss_pci_set_monitor_wake_intr(pci_priv, false);
3959 
3960 	return 0;
3961 
3962 resume_driver:
3963 	cnss_pci_resume_driver(pci_priv);
3964 clear_flag:
3965 	pci_priv->drv_connected_last = 0;
3966 	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
3967 out:
3968 	return ret;
3969 }
3970 
3971 static int cnss_pci_resume(struct device *dev)
3972 {
3973 	int ret = 0;
3974 	struct pci_dev *pci_dev = to_pci_dev(dev);
3975 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
3976 	struct cnss_plat_data *plat_priv;
3977 
3978 	if (!pci_priv)
3979 		goto out;
3980 
3981 	plat_priv = pci_priv->plat_priv;
3982 	if (!plat_priv)
3983 		goto out;
3984 
3985 	if (pci_priv->pci_link_down_ind)
3986 		goto out;
3987 
3988 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
3989 		goto out;
3990 
3991 	if (!pci_priv->disable_pc) {
3992 		mutex_lock(&pci_priv->bus_lock);
3993 		ret = cnss_pci_resume_bus(pci_priv);
3994 		mutex_unlock(&pci_priv->bus_lock);
3995 		if (ret)
3996 			goto out;
3997 	}
3998 
3999 	ret = cnss_pci_resume_driver(pci_priv);
4000 
4001 	pci_priv->drv_connected_last = 0;
4002 	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
4003 
4004 out:
4005 	return ret;
4006 }
4007 
4008 static int cnss_pci_suspend_noirq(struct device *dev)
4009 {
4010 	int ret = 0;
4011 	struct pci_dev *pci_dev = to_pci_dev(dev);
4012 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4013 	struct cnss_wlan_driver *driver_ops;
4014 	struct cnss_plat_data *plat_priv;
4015 
4016 	if (!pci_priv)
4017 		goto out;
4018 
4019 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4020 		goto out;
4021 
4022 	driver_ops = pci_priv->driver_ops;
4023 	plat_priv = pci_priv->plat_priv;
4024 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
4025 	    driver_ops && driver_ops->suspend_noirq)
4026 		ret = driver_ops->suspend_noirq(pci_dev);
4027 
4028 	if (pci_priv->disable_pc && !pci_dev->state_saved &&
4029 	    !pci_priv->plat_priv->use_pm_domain)
4030 		pci_save_state(pci_dev);
4031 
4032 out:
4033 	return ret;
4034 }
4035 
4036 static int cnss_pci_resume_noirq(struct device *dev)
4037 {
4038 	int ret = 0;
4039 	struct pci_dev *pci_dev = to_pci_dev(dev);
4040 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4041 	struct cnss_wlan_driver *driver_ops;
4042 	struct cnss_plat_data *plat_priv;
4043 
4044 	if (!pci_priv)
4045 		goto out;
4046 
4047 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4048 		goto out;
4049 
4050 	plat_priv = pci_priv->plat_priv;
4051 	driver_ops = pci_priv->driver_ops;
4052 	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
4053 	    driver_ops && driver_ops->resume_noirq &&
4054 	    !pci_priv->pci_link_down_ind)
4055 		ret = driver_ops->resume_noirq(pci_dev);
4056 
4057 out:
4058 	return ret;
4059 }
4060 
4061 static int cnss_pci_runtime_suspend(struct device *dev)
4062 {
4063 	int ret = 0;
4064 	struct pci_dev *pci_dev = to_pci_dev(dev);
4065 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4066 	struct cnss_plat_data *plat_priv;
4067 	struct cnss_wlan_driver *driver_ops;
4068 
4069 	if (!pci_priv)
4070 		return -EAGAIN;
4071 
4072 	plat_priv = pci_priv->plat_priv;
4073 	if (!plat_priv)
4074 		return -EAGAIN;
4075 
4076 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4077 		return -EAGAIN;
4078 
4079 	if (pci_priv->pci_link_down_ind) {
4080 		cnss_pr_dbg("PCI link down recovery is in progress!\n");
4081 		return -EAGAIN;
4082 	}
4083 
4084 	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
4085 	    pci_priv->drv_supported) {
4086 		pci_priv->drv_connected_last =
4087 			cnss_pci_get_drv_connected(pci_priv);
4088 		if (!pci_priv->drv_connected_last) {
4089 			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
4090 			return -EAGAIN;
4091 		}
4092 	}
4093 
4094 	cnss_pr_vdbg("Runtime suspend start\n");
4095 
4096 	driver_ops = pci_priv->driver_ops;
4097 	if (driver_ops && driver_ops->runtime_ops &&
4098 	    driver_ops->runtime_ops->runtime_suspend)
4099 		ret = driver_ops->runtime_ops->runtime_suspend(pci_dev);
4100 	else
4101 		ret = cnss_auto_suspend(dev);
4102 
4103 	if (ret)
4104 		pci_priv->drv_connected_last = 0;
4105 
4106 	cnss_pr_vdbg("Runtime suspend status: %d\n", ret);
4107 
4108 	return ret;
4109 }
4110 
4111 static int cnss_pci_runtime_resume(struct device *dev)
4112 {
4113 	int ret = 0;
4114 	struct pci_dev *pci_dev = to_pci_dev(dev);
4115 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4116 	struct cnss_wlan_driver *driver_ops;
4117 
4118 	if (!pci_priv)
4119 		return -EAGAIN;
4120 
4121 	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
4122 		return -EAGAIN;
4123 
4124 	if (pci_priv->pci_link_down_ind) {
4125 		cnss_pr_dbg("PCI link down recovery is in progress!\n");
4126 		return -EAGAIN;
4127 	}
4128 
4129 	cnss_pr_vdbg("Runtime resume start\n");
4130 
4131 	driver_ops = pci_priv->driver_ops;
4132 	if (driver_ops && driver_ops->runtime_ops &&
4133 	    driver_ops->runtime_ops->runtime_resume)
4134 		ret = driver_ops->runtime_ops->runtime_resume(pci_dev);
4135 	else
4136 		ret = cnss_auto_resume(dev);
4137 
4138 	if (!ret)
4139 		pci_priv->drv_connected_last = 0;
4140 
4141 	cnss_pr_vdbg("Runtime resume status: %d\n", ret);
4142 
4143 	return ret;
4144 }
4145 
4146 static int cnss_pci_runtime_idle(struct device *dev)
4147 {
4148 	cnss_pr_vdbg("Runtime idle\n");
4149 
4150 	pm_request_autosuspend(dev);
4151 
4152 	return -EBUSY;
4153 }
4154 
4155 int cnss_wlan_pm_control(struct device *dev, bool vote)
4156 {
4157 	struct pci_dev *pci_dev = to_pci_dev(dev);
4158 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4159 	int ret = 0;
4160 
4161 	if (!pci_priv)
4162 		return -ENODEV;
4163 
4164 	ret = cnss_pci_disable_pc(pci_priv, vote);
4165 	if (ret)
4166 		return ret;
4167 
4168 	pci_priv->disable_pc = vote;
4169 	cnss_pr_dbg("%s PCIe power collapse\n", vote ? "disable" : "enable");
4170 
4171 	return 0;
4172 }
4173 EXPORT_SYMBOL(cnss_wlan_pm_control);
4174 
4175 static void cnss_pci_pm_runtime_get_record(struct cnss_pci_data *pci_priv,
4176 					   enum cnss_rtpm_id id)
4177 {
4178 	if (id >= RTPM_ID_MAX)
4179 		return;
4180 
4181 	atomic_inc(&pci_priv->pm_stats.runtime_get);
4182 	atomic_inc(&pci_priv->pm_stats.runtime_get_id[id]);
4183 	pci_priv->pm_stats.runtime_get_timestamp_id[id] =
4184 		cnss_get_host_timestamp(pci_priv->plat_priv);
4185 }
4186 
4187 static void cnss_pci_pm_runtime_put_record(struct cnss_pci_data *pci_priv,
4188 					   enum cnss_rtpm_id id)
4189 {
4190 	if (id >= RTPM_ID_MAX)
4191 		return;
4192 
4193 	atomic_inc(&pci_priv->pm_stats.runtime_put);
4194 	atomic_inc(&pci_priv->pm_stats.runtime_put_id[id]);
4195 	pci_priv->pm_stats.runtime_put_timestamp_id[id] =
4196 		cnss_get_host_timestamp(pci_priv->plat_priv);
4197 }
4198 
4199 void cnss_pci_pm_runtime_show_usage_count(struct cnss_pci_data *pci_priv)
4200 {
4201 	struct device *dev;
4202 
4203 	if (!pci_priv)
4204 		return;
4205 
4206 	dev = &pci_priv->pci_dev->dev;
4207 
4208 	cnss_pr_dbg("Runtime PM usage count: %d\n",
4209 		    atomic_read(&dev->power.usage_count));
4210 }
4211 
4212 int cnss_pci_pm_request_resume(struct cnss_pci_data *pci_priv)
4213 {
4214 	struct device *dev;
4215 	enum rpm_status status;
4216 
4217 	if (!pci_priv)
4218 		return -ENODEV;
4219 
4220 	dev = &pci_priv->pci_dev->dev;
4221 
4222 	status = dev->power.runtime_status;
4223 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4224 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4225 			     (void *)_RET_IP_);
4226 
4227 	return pm_request_resume(dev);
4228 }
4229 
4230 int cnss_pci_pm_runtime_resume(struct cnss_pci_data *pci_priv)
4231 {
4232 	struct device *dev;
4233 	enum rpm_status status;
4234 
4235 	if (!pci_priv)
4236 		return -ENODEV;
4237 
4238 	dev = &pci_priv->pci_dev->dev;
4239 
4240 	status = dev->power.runtime_status;
4241 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4242 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4243 			     (void *)_RET_IP_);
4244 
4245 	return pm_runtime_resume(dev);
4246 }
4247 
4248 int cnss_pci_pm_runtime_get(struct cnss_pci_data *pci_priv,
4249 			    enum cnss_rtpm_id id)
4250 {
4251 	struct device *dev;
4252 	enum rpm_status status;
4253 
4254 	if (!pci_priv)
4255 		return -ENODEV;
4256 
4257 	dev = &pci_priv->pci_dev->dev;
4258 
4259 	status = dev->power.runtime_status;
4260 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4261 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4262 			     (void *)_RET_IP_);
4263 
4264 	cnss_pci_pm_runtime_get_record(pci_priv, id);
4265 
4266 	return pm_runtime_get(dev);
4267 }
4268 
4269 int cnss_pci_pm_runtime_get_sync(struct cnss_pci_data *pci_priv,
4270 				 enum cnss_rtpm_id id)
4271 {
4272 	struct device *dev;
4273 	enum rpm_status status;
4274 
4275 	if (!pci_priv)
4276 		return -ENODEV;
4277 
4278 	dev = &pci_priv->pci_dev->dev;
4279 
4280 	status = dev->power.runtime_status;
4281 	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
4282 		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
4283 			     (void *)_RET_IP_);
4284 
4285 	cnss_pci_pm_runtime_get_record(pci_priv, id);
4286 
4287 	return pm_runtime_get_sync(dev);
4288 }
4289 
4290 void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv,
4291 				      enum cnss_rtpm_id id)
4292 {
4293 	if (!pci_priv)
4294 		return;
4295 
4296 	cnss_pci_pm_runtime_get_record(pci_priv, id);
4297 	pm_runtime_get_noresume(&pci_priv->pci_dev->dev);
4298 }
4299 
4300 int cnss_pci_pm_runtime_put_autosuspend(struct cnss_pci_data *pci_priv,
4301 					enum cnss_rtpm_id id)
4302 {
4303 	struct device *dev;
4304 
4305 	if (!pci_priv)
4306 		return -ENODEV;
4307 
4308 	dev = &pci_priv->pci_dev->dev;
4309 
4310 	if (atomic_read(&dev->power.usage_count) == 0) {
4311 		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
4312 		return -EINVAL;
4313 	}
4314 
4315 	cnss_pci_pm_runtime_put_record(pci_priv, id);
4316 
4317 	return pm_runtime_put_autosuspend(&pci_priv->pci_dev->dev);
4318 }
4319 
4320 void cnss_pci_pm_runtime_put_noidle(struct cnss_pci_data *pci_priv,
4321 				    enum cnss_rtpm_id id)
4322 {
4323 	struct device *dev;
4324 
4325 	if (!pci_priv)
4326 		return;
4327 
4328 	dev = &pci_priv->pci_dev->dev;
4329 
4330 	if (atomic_read(&dev->power.usage_count) == 0) {
4331 		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
4332 		return;
4333 	}
4334 
4335 	cnss_pci_pm_runtime_put_record(pci_priv, id);
4336 	pm_runtime_put_noidle(&pci_priv->pci_dev->dev);
4337 }
4338 
4339 void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv)
4340 {
4341 	if (!pci_priv)
4342 		return;
4343 
4344 	pm_runtime_mark_last_busy(&pci_priv->pci_dev->dev);
4345 }
4346 
4347 int cnss_auto_suspend(struct device *dev)
4348 {
4349 	int ret = 0;
4350 	struct pci_dev *pci_dev = to_pci_dev(dev);
4351 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4352 	struct cnss_plat_data *plat_priv;
4353 
4354 	if (!pci_priv)
4355 		return -ENODEV;
4356 
4357 	plat_priv = pci_priv->plat_priv;
4358 	if (!plat_priv)
4359 		return -ENODEV;
4360 
4361 	mutex_lock(&pci_priv->bus_lock);
4362 	if (!pci_priv->qmi_send_usage_count) {
4363 		ret = cnss_pci_suspend_bus(pci_priv);
4364 		if (ret) {
4365 			mutex_unlock(&pci_priv->bus_lock);
4366 			return ret;
4367 		}
4368 	}
4369 
4370 	cnss_pci_set_auto_suspended(pci_priv, 1);
4371 	mutex_unlock(&pci_priv->bus_lock);
4372 
4373 	cnss_pci_set_monitor_wake_intr(pci_priv, true);
4374 
4375 	/* For suspend temporarily set bandwidth vote to NONE and dont save in
4376 	 * current_bw_vote as in resume path we should vote for last used
4377 	 * bandwidth vote. Also ignore error if bw voting is not setup.
4378 	 */
4379 	cnss_setup_bus_bandwidth(plat_priv, CNSS_BUS_WIDTH_NONE, false);
4380 	return 0;
4381 }
4382 EXPORT_SYMBOL(cnss_auto_suspend);
4383 
4384 int cnss_auto_resume(struct device *dev)
4385 {
4386 	int ret = 0;
4387 	struct pci_dev *pci_dev = to_pci_dev(dev);
4388 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4389 	struct cnss_plat_data *plat_priv;
4390 
4391 	if (!pci_priv)
4392 		return -ENODEV;
4393 
4394 	plat_priv = pci_priv->plat_priv;
4395 	if (!plat_priv)
4396 		return -ENODEV;
4397 
4398 	mutex_lock(&pci_priv->bus_lock);
4399 	ret = cnss_pci_resume_bus(pci_priv);
4400 	if (ret) {
4401 		mutex_unlock(&pci_priv->bus_lock);
4402 		return ret;
4403 	}
4404 
4405 	cnss_pci_set_auto_suspended(pci_priv, 0);
4406 	mutex_unlock(&pci_priv->bus_lock);
4407 
4408 	cnss_request_bus_bandwidth(dev, plat_priv->icc.current_bw_vote);
4409 
4410 	return 0;
4411 }
4412 EXPORT_SYMBOL(cnss_auto_resume);
4413 
4414 int cnss_pci_force_wake_request_sync(struct device *dev, int timeout_us)
4415 {
4416 	struct pci_dev *pci_dev = to_pci_dev(dev);
4417 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4418 	struct cnss_plat_data *plat_priv;
4419 	struct mhi_controller *mhi_ctrl;
4420 
4421 	if (!pci_priv)
4422 		return -ENODEV;
4423 
4424 	switch (pci_priv->device_id) {
4425 	case QCA6390_DEVICE_ID:
4426 	case QCA6490_DEVICE_ID:
4427 	case KIWI_DEVICE_ID:
4428 	case MANGO_DEVICE_ID:
4429 	case PEACH_DEVICE_ID:
4430 		break;
4431 	default:
4432 		return 0;
4433 	}
4434 
4435 	mhi_ctrl = pci_priv->mhi_ctrl;
4436 	if (!mhi_ctrl)
4437 		return -EINVAL;
4438 
4439 	plat_priv = pci_priv->plat_priv;
4440 	if (!plat_priv)
4441 		return -ENODEV;
4442 
4443 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
4444 		return -EAGAIN;
4445 
4446 	if (timeout_us) {
4447 		/* Busy wait for timeout_us */
4448 		return cnss_mhi_device_get_sync_atomic(pci_priv,
4449 						       timeout_us, false);
4450 	} else {
4451 		/* Sleep wait for mhi_ctrl->timeout_ms */
4452 		return mhi_device_get_sync(mhi_ctrl->mhi_dev);
4453 	}
4454 }
4455 EXPORT_SYMBOL(cnss_pci_force_wake_request_sync);
4456 
4457 int cnss_pci_force_wake_request(struct device *dev)
4458 {
4459 	struct pci_dev *pci_dev = to_pci_dev(dev);
4460 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4461 	struct cnss_plat_data *plat_priv;
4462 	struct mhi_controller *mhi_ctrl;
4463 
4464 	if (!pci_priv)
4465 		return -ENODEV;
4466 
4467 	switch (pci_priv->device_id) {
4468 	case QCA6390_DEVICE_ID:
4469 	case QCA6490_DEVICE_ID:
4470 	case KIWI_DEVICE_ID:
4471 	case MANGO_DEVICE_ID:
4472 	case PEACH_DEVICE_ID:
4473 		break;
4474 	default:
4475 		return 0;
4476 	}
4477 
4478 	mhi_ctrl = pci_priv->mhi_ctrl;
4479 	if (!mhi_ctrl)
4480 		return -EINVAL;
4481 
4482 	plat_priv = pci_priv->plat_priv;
4483 	if (!plat_priv)
4484 		return -ENODEV;
4485 
4486 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
4487 		return -EAGAIN;
4488 
4489 	mhi_device_get(mhi_ctrl->mhi_dev);
4490 
4491 	return 0;
4492 }
4493 EXPORT_SYMBOL(cnss_pci_force_wake_request);
4494 
4495 int cnss_pci_is_device_awake(struct device *dev)
4496 {
4497 	struct pci_dev *pci_dev = to_pci_dev(dev);
4498 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4499 	struct mhi_controller *mhi_ctrl;
4500 
4501 	if (!pci_priv)
4502 		return -ENODEV;
4503 
4504 	switch (pci_priv->device_id) {
4505 	case QCA6390_DEVICE_ID:
4506 	case QCA6490_DEVICE_ID:
4507 	case KIWI_DEVICE_ID:
4508 	case MANGO_DEVICE_ID:
4509 	case PEACH_DEVICE_ID:
4510 		break;
4511 	default:
4512 		return 0;
4513 	}
4514 
4515 	mhi_ctrl = pci_priv->mhi_ctrl;
4516 	if (!mhi_ctrl)
4517 		return -EINVAL;
4518 
4519 	return (mhi_ctrl->dev_state == MHI_STATE_M0);
4520 }
4521 EXPORT_SYMBOL(cnss_pci_is_device_awake);
4522 
4523 int cnss_pci_force_wake_release(struct device *dev)
4524 {
4525 	struct pci_dev *pci_dev = to_pci_dev(dev);
4526 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
4527 	struct cnss_plat_data *plat_priv;
4528 	struct mhi_controller *mhi_ctrl;
4529 
4530 	if (!pci_priv)
4531 		return -ENODEV;
4532 
4533 	switch (pci_priv->device_id) {
4534 	case QCA6390_DEVICE_ID:
4535 	case QCA6490_DEVICE_ID:
4536 	case KIWI_DEVICE_ID:
4537 	case MANGO_DEVICE_ID:
4538 	case PEACH_DEVICE_ID:
4539 		break;
4540 	default:
4541 		return 0;
4542 	}
4543 
4544 	mhi_ctrl = pci_priv->mhi_ctrl;
4545 	if (!mhi_ctrl)
4546 		return -EINVAL;
4547 
4548 	plat_priv = pci_priv->plat_priv;
4549 	if (!plat_priv)
4550 		return -ENODEV;
4551 
4552 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
4553 		return -EAGAIN;
4554 
4555 	mhi_device_put(mhi_ctrl->mhi_dev);
4556 
4557 	return 0;
4558 }
4559 EXPORT_SYMBOL(cnss_pci_force_wake_release);
4560 
4561 int cnss_pci_qmi_send_get(struct cnss_pci_data *pci_priv)
4562 {
4563 	int ret = 0;
4564 
4565 	if (!pci_priv)
4566 		return -ENODEV;
4567 
4568 	mutex_lock(&pci_priv->bus_lock);
4569 	if (cnss_pci_get_auto_suspended(pci_priv) &&
4570 	    !pci_priv->qmi_send_usage_count)
4571 		ret = cnss_pci_resume_bus(pci_priv);
4572 	pci_priv->qmi_send_usage_count++;
4573 	cnss_pr_buf("Increased QMI send usage count to %d\n",
4574 		    pci_priv->qmi_send_usage_count);
4575 	mutex_unlock(&pci_priv->bus_lock);
4576 
4577 	return ret;
4578 }
4579 
4580 int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv)
4581 {
4582 	int ret = 0;
4583 
4584 	if (!pci_priv)
4585 		return -ENODEV;
4586 
4587 	mutex_lock(&pci_priv->bus_lock);
4588 	if (pci_priv->qmi_send_usage_count)
4589 		pci_priv->qmi_send_usage_count--;
4590 	cnss_pr_buf("Decreased QMI send usage count to %d\n",
4591 		    pci_priv->qmi_send_usage_count);
4592 	if (cnss_pci_get_auto_suspended(pci_priv) &&
4593 	    !pci_priv->qmi_send_usage_count &&
4594 	    !cnss_pcie_is_device_down(pci_priv))
4595 		ret = cnss_pci_suspend_bus(pci_priv);
4596 	mutex_unlock(&pci_priv->bus_lock);
4597 
4598 	return ret;
4599 }
4600 
4601 int cnss_send_buffer_to_afcmem(struct device *dev, const uint8_t *afcdb,
4602 			       uint32_t len, uint8_t slotid)
4603 {
4604 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4605 	struct cnss_fw_mem *fw_mem;
4606 	void *mem = NULL;
4607 	int i, ret;
4608 	u32 *status;
4609 
4610 	if (!plat_priv)
4611 		return -EINVAL;
4612 
4613 	fw_mem = plat_priv->fw_mem;
4614 	if (slotid >= AFC_MAX_SLOT) {
4615 		cnss_pr_err("Invalid slot id %d\n", slotid);
4616 		ret = -EINVAL;
4617 		goto err;
4618 	}
4619 	if (len > AFC_SLOT_SIZE) {
4620 		cnss_pr_err("len %d greater than slot size", len);
4621 		ret = -EINVAL;
4622 		goto err;
4623 	}
4624 
4625 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4626 		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
4627 			mem = fw_mem[i].va;
4628 			status = mem + (slotid * AFC_SLOT_SIZE);
4629 			break;
4630 		}
4631 	}
4632 
4633 	if (!mem) {
4634 		cnss_pr_err("AFC mem is not available\n");
4635 		ret = -ENOMEM;
4636 		goto err;
4637 	}
4638 
4639 	memcpy(mem + (slotid * AFC_SLOT_SIZE), afcdb, len);
4640 	if (len < AFC_SLOT_SIZE)
4641 		memset(mem + (slotid * AFC_SLOT_SIZE) + len,
4642 		       0, AFC_SLOT_SIZE - len);
4643 	status[AFC_AUTH_STATUS_OFFSET] = cpu_to_le32(AFC_AUTH_SUCCESS);
4644 
4645 	return 0;
4646 err:
4647 	return ret;
4648 }
4649 EXPORT_SYMBOL(cnss_send_buffer_to_afcmem);
4650 
4651 int cnss_reset_afcmem(struct device *dev, uint8_t slotid)
4652 {
4653 	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4654 	struct cnss_fw_mem *fw_mem;
4655 	void *mem = NULL;
4656 	int i, ret;
4657 
4658 	if (!plat_priv)
4659 		return -EINVAL;
4660 
4661 	fw_mem = plat_priv->fw_mem;
4662 	if (slotid >= AFC_MAX_SLOT) {
4663 		cnss_pr_err("Invalid slot id %d\n", slotid);
4664 		ret = -EINVAL;
4665 		goto err;
4666 	}
4667 
4668 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4669 		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
4670 			mem = fw_mem[i].va;
4671 			break;
4672 		}
4673 	}
4674 
4675 	if (!mem) {
4676 		cnss_pr_err("AFC mem is not available\n");
4677 		ret = -ENOMEM;
4678 		goto err;
4679 	}
4680 
4681 	memset(mem + (slotid * AFC_SLOT_SIZE), 0, AFC_SLOT_SIZE);
4682 	return 0;
4683 
4684 err:
4685 	return ret;
4686 }
4687 EXPORT_SYMBOL(cnss_reset_afcmem);
4688 
4689 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
4690 {
4691 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4692 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
4693 	struct device *dev = &pci_priv->pci_dev->dev;
4694 	int i;
4695 
4696 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4697 		if (!fw_mem[i].va && fw_mem[i].size) {
4698 retry:
4699 			fw_mem[i].va =
4700 				dma_alloc_attrs(dev, fw_mem[i].size,
4701 						&fw_mem[i].pa, GFP_KERNEL,
4702 						fw_mem[i].attrs);
4703 
4704 			if (!fw_mem[i].va) {
4705 				if ((fw_mem[i].attrs &
4706 				    DMA_ATTR_FORCE_CONTIGUOUS)) {
4707 					fw_mem[i].attrs &=
4708 						~DMA_ATTR_FORCE_CONTIGUOUS;
4709 
4710 					cnss_pr_dbg("Fallback to non-contiguous memory for FW, Mem type: %u\n",
4711 						    fw_mem[i].type);
4712 					goto retry;
4713 				}
4714 				cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n",
4715 					    fw_mem[i].size, fw_mem[i].type);
4716 				CNSS_ASSERT(0);
4717 				return -ENOMEM;
4718 			}
4719 		}
4720 	}
4721 
4722 	return 0;
4723 }
4724 
4725 static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
4726 {
4727 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4728 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
4729 	struct device *dev = &pci_priv->pci_dev->dev;
4730 	int i;
4731 
4732 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
4733 		if (fw_mem[i].va && fw_mem[i].size) {
4734 			cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
4735 				    fw_mem[i].va, &fw_mem[i].pa,
4736 				    fw_mem[i].size, fw_mem[i].type);
4737 			dma_free_attrs(dev, fw_mem[i].size,
4738 				       fw_mem[i].va, fw_mem[i].pa,
4739 				       fw_mem[i].attrs);
4740 			fw_mem[i].va = NULL;
4741 			fw_mem[i].pa = 0;
4742 			fw_mem[i].size = 0;
4743 			fw_mem[i].type = 0;
4744 		}
4745 	}
4746 
4747 	plat_priv->fw_mem_seg_len = 0;
4748 }
4749 
4750 int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv)
4751 {
4752 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4753 	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
4754 	int i, j;
4755 
4756 	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
4757 		if (!qdss_mem[i].va && qdss_mem[i].size) {
4758 			qdss_mem[i].va =
4759 				dma_alloc_coherent(&pci_priv->pci_dev->dev,
4760 						   qdss_mem[i].size,
4761 						   &qdss_mem[i].pa,
4762 						   GFP_KERNEL);
4763 			if (!qdss_mem[i].va) {
4764 				cnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chuck-ID: %d\n",
4765 					    qdss_mem[i].size,
4766 					    qdss_mem[i].type, i);
4767 				break;
4768 			}
4769 		}
4770 	}
4771 
4772 	/* Best-effort allocation for QDSS trace */
4773 	if (i < plat_priv->qdss_mem_seg_len) {
4774 		for (j = i; j < plat_priv->qdss_mem_seg_len; j++) {
4775 			qdss_mem[j].type = 0;
4776 			qdss_mem[j].size = 0;
4777 		}
4778 		plat_priv->qdss_mem_seg_len = i;
4779 	}
4780 
4781 	return 0;
4782 }
4783 
4784 void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv)
4785 {
4786 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4787 	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
4788 	int i;
4789 
4790 	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
4791 		if (qdss_mem[i].va && qdss_mem[i].size) {
4792 			cnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n",
4793 				    &qdss_mem[i].pa, qdss_mem[i].size,
4794 				    qdss_mem[i].type);
4795 			dma_free_coherent(&pci_priv->pci_dev->dev,
4796 					  qdss_mem[i].size, qdss_mem[i].va,
4797 					  qdss_mem[i].pa);
4798 			qdss_mem[i].va = NULL;
4799 			qdss_mem[i].pa = 0;
4800 			qdss_mem[i].size = 0;
4801 			qdss_mem[i].type = 0;
4802 		}
4803 	}
4804 	plat_priv->qdss_mem_seg_len = 0;
4805 }
4806 
4807 int cnss_pci_load_tme_patch(struct cnss_pci_data *pci_priv)
4808 {
4809 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4810 	struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem;
4811 	char filename[MAX_FIRMWARE_NAME_LEN];
4812 	char *tme_patch_filename = NULL;
4813 	const struct firmware *fw_entry;
4814 	int ret = 0;
4815 
4816 	switch (pci_priv->device_id) {
4817 	case PEACH_DEVICE_ID:
4818 		tme_patch_filename = TME_PATCH_FILE_NAME;
4819 		break;
4820 	case QCA6174_DEVICE_ID:
4821 	case QCA6290_DEVICE_ID:
4822 	case QCA6390_DEVICE_ID:
4823 	case QCA6490_DEVICE_ID:
4824 	case KIWI_DEVICE_ID:
4825 	case MANGO_DEVICE_ID:
4826 	default:
4827 		cnss_pr_dbg("TME-L not supported for device ID: (0x%x)\n",
4828 			    pci_priv->device_id);
4829 		return 0;
4830 	}
4831 
4832 	if (!tme_lite_mem->va && !tme_lite_mem->size) {
4833 		cnss_pci_add_fw_prefix_name(pci_priv, filename,
4834 					    tme_patch_filename);
4835 
4836 		ret = firmware_request_nowarn(&fw_entry, filename,
4837 					      &pci_priv->pci_dev->dev);
4838 		if (ret) {
4839 			cnss_pr_err("Failed to load TME-L patch: %s, ret: %d\n",
4840 				    filename, ret);
4841 			return ret;
4842 		}
4843 
4844 		tme_lite_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
4845 						fw_entry->size, &tme_lite_mem->pa,
4846 						GFP_KERNEL);
4847 		if (!tme_lite_mem->va) {
4848 			cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
4849 				    fw_entry->size);
4850 			release_firmware(fw_entry);
4851 			return -ENOMEM;
4852 		}
4853 
4854 		memcpy(tme_lite_mem->va, fw_entry->data, fw_entry->size);
4855 		tme_lite_mem->size = fw_entry->size;
4856 		release_firmware(fw_entry);
4857 	}
4858 
4859 	return 0;
4860 }
4861 
4862 static void cnss_pci_free_tme_lite_mem(struct cnss_pci_data *pci_priv)
4863 {
4864 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4865 	struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem;
4866 
4867 	if (tme_lite_mem->va && tme_lite_mem->size) {
4868 		cnss_pr_dbg("Freeing memory for TME patch, va: 0x%pK, pa: %pa, size: 0x%zx\n",
4869 			    tme_lite_mem->va, &tme_lite_mem->pa, tme_lite_mem->size);
4870 		dma_free_coherent(&pci_priv->pci_dev->dev, tme_lite_mem->size,
4871 				  tme_lite_mem->va, tme_lite_mem->pa);
4872 	}
4873 
4874 	tme_lite_mem->va = NULL;
4875 	tme_lite_mem->pa = 0;
4876 	tme_lite_mem->size = 0;
4877 }
4878 
4879 int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
4880 {
4881 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4882 	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
4883 	char filename[MAX_FIRMWARE_NAME_LEN];
4884 	char *phy_filename = DEFAULT_PHY_UCODE_FILE_NAME;
4885 	const struct firmware *fw_entry;
4886 	int ret = 0;
4887 
4888 	/* Use forward compatibility here since for any recent device
4889 	 * it should use DEFAULT_PHY_UCODE_FILE_NAME.
4890 	 */
4891 	switch (pci_priv->device_id) {
4892 	case QCA6174_DEVICE_ID:
4893 		cnss_pr_err("Invalid device ID (0x%x) to load phy image\n",
4894 			    pci_priv->device_id);
4895 		return -EINVAL;
4896 	case QCA6290_DEVICE_ID:
4897 	case QCA6390_DEVICE_ID:
4898 	case QCA6490_DEVICE_ID:
4899 		phy_filename = DEFAULT_PHY_M3_FILE_NAME;
4900 		break;
4901 	case KIWI_DEVICE_ID:
4902 	case MANGO_DEVICE_ID:
4903 	case PEACH_DEVICE_ID:
4904 		switch (plat_priv->device_version.major_version) {
4905 		case FW_V2_NUMBER:
4906 			phy_filename = PHY_UCODE_V2_FILE_NAME;
4907 			break;
4908 		default:
4909 			break;
4910 		}
4911 		break;
4912 	default:
4913 		break;
4914 	}
4915 
4916 	if (!m3_mem->va && !m3_mem->size) {
4917 		cnss_pci_add_fw_prefix_name(pci_priv, filename,
4918 					    phy_filename);
4919 
4920 		ret = firmware_request_nowarn(&fw_entry, filename,
4921 					      &pci_priv->pci_dev->dev);
4922 		if (ret) {
4923 			cnss_pr_err("Failed to load M3 image: %s\n", filename);
4924 			return ret;
4925 		}
4926 
4927 		m3_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
4928 						fw_entry->size, &m3_mem->pa,
4929 						GFP_KERNEL);
4930 		if (!m3_mem->va) {
4931 			cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
4932 				    fw_entry->size);
4933 			release_firmware(fw_entry);
4934 			return -ENOMEM;
4935 		}
4936 
4937 		memcpy(m3_mem->va, fw_entry->data, fw_entry->size);
4938 		m3_mem->size = fw_entry->size;
4939 		release_firmware(fw_entry);
4940 	}
4941 
4942 	return 0;
4943 }
4944 
4945 static void cnss_pci_free_m3_mem(struct cnss_pci_data *pci_priv)
4946 {
4947 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4948 	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
4949 
4950 	if (m3_mem->va && m3_mem->size) {
4951 		cnss_pr_dbg("Freeing memory for M3, va: 0x%pK, pa: %pa, size: 0x%zx\n",
4952 			    m3_mem->va, &m3_mem->pa, m3_mem->size);
4953 		dma_free_coherent(&pci_priv->pci_dev->dev, m3_mem->size,
4954 				  m3_mem->va, m3_mem->pa);
4955 	}
4956 
4957 	m3_mem->va = NULL;
4958 	m3_mem->pa = 0;
4959 	m3_mem->size = 0;
4960 }
4961 
4962 #ifdef CONFIG_FREE_M3_BLOB_MEM
4963 void cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv)
4964 {
4965 	cnss_pci_free_m3_mem(pci_priv);
4966 }
4967 #else
4968 void cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv)
4969 {
4970 }
4971 #endif
4972 
4973 int cnss_pci_load_aux(struct cnss_pci_data *pci_priv)
4974 {
4975 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
4976 	struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem;
4977 	char filename[MAX_FIRMWARE_NAME_LEN];
4978 	char *aux_filename = DEFAULT_AUX_FILE_NAME;
4979 	const struct firmware *fw_entry;
4980 	int ret = 0;
4981 
4982 	if (!aux_mem->va && !aux_mem->size) {
4983 		cnss_pci_add_fw_prefix_name(pci_priv, filename,
4984 					    aux_filename);
4985 
4986 		ret = firmware_request_nowarn(&fw_entry, filename,
4987 					      &pci_priv->pci_dev->dev);
4988 		if (ret) {
4989 			cnss_pr_err("Failed to load AUX image: %s\n", filename);
4990 			return ret;
4991 		}
4992 
4993 		aux_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
4994 						fw_entry->size, &aux_mem->pa,
4995 						GFP_KERNEL);
4996 		if (!aux_mem->va) {
4997 			cnss_pr_err("Failed to allocate memory for AUX, size: 0x%zx\n",
4998 				    fw_entry->size);
4999 			release_firmware(fw_entry);
5000 			return -ENOMEM;
5001 		}
5002 
5003 		memcpy(aux_mem->va, fw_entry->data, fw_entry->size);
5004 		aux_mem->size = fw_entry->size;
5005 		release_firmware(fw_entry);
5006 	}
5007 
5008 	return 0;
5009 }
5010 
5011 static void cnss_pci_free_aux_mem(struct cnss_pci_data *pci_priv)
5012 {
5013 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5014 	struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem;
5015 
5016 	if (aux_mem->va && aux_mem->size) {
5017 		cnss_pr_dbg("Freeing memory for AUX, va: 0x%pK, pa: %pa, size: 0x%zx\n",
5018 			    aux_mem->va, &aux_mem->pa, aux_mem->size);
5019 		dma_free_coherent(&pci_priv->pci_dev->dev, aux_mem->size,
5020 				  aux_mem->va, aux_mem->pa);
5021 	}
5022 
5023 	aux_mem->va = NULL;
5024 	aux_mem->pa = 0;
5025 	aux_mem->size = 0;
5026 }
5027 
5028 void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv)
5029 {
5030 	struct cnss_plat_data *plat_priv;
5031 
5032 	if (!pci_priv)
5033 		return;
5034 
5035 	cnss_fatal_err("Timeout waiting for FW ready indication\n");
5036 
5037 	plat_priv = pci_priv->plat_priv;
5038 	if (!plat_priv)
5039 		return;
5040 
5041 	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
5042 		cnss_pr_dbg("Ignore FW ready timeout for calibration mode\n");
5043 		return;
5044 	}
5045 
5046 	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
5047 			       CNSS_REASON_TIMEOUT);
5048 }
5049 
5050 static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
5051 {
5052 	pci_priv->iommu_domain = NULL;
5053 }
5054 
5055 int cnss_pci_get_iova(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
5056 {
5057 	if (!pci_priv)
5058 		return -ENODEV;
5059 
5060 	if (!pci_priv->smmu_iova_len)
5061 		return -EINVAL;
5062 
5063 	*addr = pci_priv->smmu_iova_start;
5064 	*size = pci_priv->smmu_iova_len;
5065 
5066 	return 0;
5067 }
5068 
5069 int cnss_pci_get_iova_ipa(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
5070 {
5071 	if (!pci_priv)
5072 		return -ENODEV;
5073 
5074 	if (!pci_priv->smmu_iova_ipa_len)
5075 		return -EINVAL;
5076 
5077 	*addr = pci_priv->smmu_iova_ipa_start;
5078 	*size = pci_priv->smmu_iova_ipa_len;
5079 
5080 	return 0;
5081 }
5082 
5083 bool cnss_pci_is_smmu_s1_enabled(struct cnss_pci_data *pci_priv)
5084 {
5085 	if (pci_priv)
5086 		return pci_priv->smmu_s1_enable;
5087 
5088 	return false;
5089 }
5090 struct iommu_domain *cnss_smmu_get_domain(struct device *dev)
5091 {
5092 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5093 
5094 	if (!pci_priv)
5095 		return NULL;
5096 
5097 	return pci_priv->iommu_domain;
5098 }
5099 EXPORT_SYMBOL(cnss_smmu_get_domain);
5100 
5101 int cnss_smmu_map(struct device *dev,
5102 		  phys_addr_t paddr, uint32_t *iova_addr, size_t size)
5103 {
5104 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5105 	struct cnss_plat_data *plat_priv;
5106 	unsigned long iova;
5107 	size_t len;
5108 	int ret = 0;
5109 	int flag = IOMMU_READ | IOMMU_WRITE;
5110 	struct pci_dev *root_port;
5111 	struct device_node *root_of_node;
5112 	bool dma_coherent = false;
5113 
5114 	if (!pci_priv)
5115 		return -ENODEV;
5116 
5117 	if (!iova_addr) {
5118 		cnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n",
5119 			    &paddr, size);
5120 		return -EINVAL;
5121 	}
5122 
5123 	plat_priv = pci_priv->plat_priv;
5124 
5125 	len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
5126 	iova = roundup(pci_priv->smmu_iova_ipa_current, PAGE_SIZE);
5127 
5128 	if (pci_priv->iommu_geometry &&
5129 	    iova >= pci_priv->smmu_iova_ipa_start +
5130 		    pci_priv->smmu_iova_ipa_len) {
5131 		cnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
5132 			    iova,
5133 			    &pci_priv->smmu_iova_ipa_start,
5134 			    pci_priv->smmu_iova_ipa_len);
5135 		return -ENOMEM;
5136 	}
5137 
5138 	if (!test_bit(DISABLE_IO_COHERENCY,
5139 		      &plat_priv->ctrl_params.quirks)) {
5140 		root_port = pcie_find_root_port(pci_priv->pci_dev);
5141 		if (!root_port) {
5142 			cnss_pr_err("Root port is null, so dma_coherent is disabled\n");
5143 		} else {
5144 			root_of_node = root_port->dev.of_node;
5145 			if (root_of_node && root_of_node->parent) {
5146 				dma_coherent =
5147 				    of_property_read_bool(root_of_node->parent,
5148 							  "dma-coherent");
5149 			cnss_pr_dbg("dma-coherent is %s\n",
5150 				    dma_coherent ? "enabled" : "disabled");
5151 			if (dma_coherent)
5152 				flag |= IOMMU_CACHE;
5153 			}
5154 		}
5155 	}
5156 
5157 	cnss_pr_dbg("IOMMU map: iova %lx, len %zu\n", iova, len);
5158 
5159 	ret = cnss_iommu_map(pci_priv->iommu_domain, iova,
5160 			     rounddown(paddr, PAGE_SIZE), len, flag);
5161 	if (ret) {
5162 		cnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
5163 		return ret;
5164 	}
5165 
5166 	pci_priv->smmu_iova_ipa_current = iova + len;
5167 	*iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE));
5168 	cnss_pr_dbg("IOMMU map: iova_addr %lx\n", *iova_addr);
5169 
5170 	return 0;
5171 }
5172 EXPORT_SYMBOL(cnss_smmu_map);
5173 
5174 int cnss_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size)
5175 {
5176 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5177 	unsigned long iova;
5178 	size_t unmapped;
5179 	size_t len;
5180 
5181 	if (!pci_priv)
5182 		return -ENODEV;
5183 
5184 	iova = rounddown(iova_addr, PAGE_SIZE);
5185 	len = roundup(size + iova_addr - iova, PAGE_SIZE);
5186 
5187 	if (iova >= pci_priv->smmu_iova_ipa_start +
5188 		    pci_priv->smmu_iova_ipa_len) {
5189 		cnss_pr_err("Out of IOVA space to unmap, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
5190 			    iova,
5191 			    &pci_priv->smmu_iova_ipa_start,
5192 			    pci_priv->smmu_iova_ipa_len);
5193 		return -ENOMEM;
5194 	}
5195 
5196 	cnss_pr_dbg("IOMMU unmap: iova %lx, len %zu\n", iova, len);
5197 
5198 	unmapped = iommu_unmap(pci_priv->iommu_domain, iova, len);
5199 	if (unmapped != len) {
5200 		cnss_pr_err("IOMMU unmap failed, unmapped = %zu, requested = %zu\n",
5201 			    unmapped, len);
5202 		return -EINVAL;
5203 	}
5204 
5205 	pci_priv->smmu_iova_ipa_current = iova;
5206 	return 0;
5207 }
5208 EXPORT_SYMBOL(cnss_smmu_unmap);
5209 
5210 int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
5211 {
5212 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5213 	struct cnss_plat_data *plat_priv;
5214 
5215 	if (!pci_priv)
5216 		return -ENODEV;
5217 
5218 	plat_priv = pci_priv->plat_priv;
5219 	if (!plat_priv)
5220 		return -ENODEV;
5221 
5222 	info->va = pci_priv->bar;
5223 	info->pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
5224 	info->chip_id = plat_priv->chip_info.chip_id;
5225 	info->chip_family = plat_priv->chip_info.chip_family;
5226 	info->board_id = plat_priv->board_info.board_id;
5227 	info->soc_id = plat_priv->soc_info.soc_id;
5228 	info->fw_version = plat_priv->fw_version_info.fw_version;
5229 	strlcpy(info->fw_build_timestamp,
5230 		plat_priv->fw_version_info.fw_build_timestamp,
5231 		sizeof(info->fw_build_timestamp));
5232 	memcpy(&info->device_version, &plat_priv->device_version,
5233 	       sizeof(info->device_version));
5234 	memcpy(&info->dev_mem_info, &plat_priv->dev_mem_info,
5235 	       sizeof(info->dev_mem_info));
5236 	memcpy(&info->fw_build_id, &plat_priv->fw_build_id,
5237 	       sizeof(info->fw_build_id));
5238 
5239 	return 0;
5240 }
5241 EXPORT_SYMBOL(cnss_get_soc_info);
5242 
5243 int cnss_pci_get_user_msi_assignment(struct cnss_pci_data *pci_priv,
5244 				     char *user_name,
5245 				     int *num_vectors,
5246 				     u32 *user_base_data,
5247 				     u32 *base_vector)
5248 {
5249 	return cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
5250 					    user_name,
5251 					    num_vectors,
5252 					    user_base_data,
5253 					    base_vector);
5254 }
5255 
5256 static int cnss_pci_irq_set_affinity_hint(struct cnss_pci_data *pci_priv,
5257 					  unsigned int vec,
5258 					  const struct cpumask *cpumask)
5259 {
5260 	int ret;
5261 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5262 
5263 	ret = irq_set_affinity_hint(pci_irq_vector(pci_dev, vec),
5264 				    cpumask);
5265 
5266 	return ret;
5267 }
5268 
5269 static int cnss_pci_enable_msi(struct cnss_pci_data *pci_priv)
5270 {
5271 	int ret = 0;
5272 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5273 	int num_vectors;
5274 	struct cnss_msi_config *msi_config;
5275 
5276 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
5277 		return 0;
5278 
5279 	if (cnss_pci_is_force_one_msi(pci_priv)) {
5280 		ret = cnss_pci_get_one_msi_assignment(pci_priv);
5281 		cnss_pr_dbg("force one msi\n");
5282 	} else {
5283 		ret = cnss_pci_get_msi_assignment(pci_priv);
5284 	}
5285 	if (ret) {
5286 		cnss_pr_err("Failed to get MSI assignment, err = %d\n", ret);
5287 		goto out;
5288 	}
5289 
5290 	msi_config = pci_priv->msi_config;
5291 	if (!msi_config) {
5292 		cnss_pr_err("msi_config is NULL!\n");
5293 		ret = -EINVAL;
5294 		goto out;
5295 	}
5296 
5297 	num_vectors = pci_alloc_irq_vectors(pci_dev,
5298 					    msi_config->total_vectors,
5299 					    msi_config->total_vectors,
5300 					    PCI_IRQ_MSI | PCI_IRQ_MSIX);
5301 	if ((num_vectors != msi_config->total_vectors) &&
5302 	    !cnss_pci_fallback_one_msi(pci_priv, &num_vectors)) {
5303 		cnss_pr_err("Failed to get enough MSI vectors (%d), available vectors = %d",
5304 			    msi_config->total_vectors, num_vectors);
5305 		if (num_vectors >= 0)
5306 			ret = -EINVAL;
5307 		goto reset_msi_config;
5308 	}
5309 
5310 	/* With VT-d disabled on x86 platform, only one pci irq vector is
5311 	 * allocated. Once suspend the irq may be migrated to CPU0 if it was
5312 	 * affine to other CPU with one new msi vector re-allocated.
5313 	 * The observation cause the issue about no irq handler for vector
5314 	 * once resume.
5315 	 * The fix is to set irq vector affinity to CPU0 before calling
5316 	 * request_irq to avoid the irq migration.
5317 	 */
5318 	if (cnss_pci_is_one_msi(pci_priv)) {
5319 		ret = cnss_pci_irq_set_affinity_hint(pci_priv,
5320 						     0,
5321 						     cpumask_of(0));
5322 		if (ret) {
5323 			cnss_pr_err("Failed to affinize irq vector to CPU0\n");
5324 			goto free_msi_vector;
5325 		}
5326 	}
5327 
5328 	if (cnss_pci_config_msi_addr(pci_priv)) {
5329 		ret = -EINVAL;
5330 		goto free_msi_vector;
5331 	}
5332 
5333 	if (cnss_pci_config_msi_data(pci_priv)) {
5334 		ret = -EINVAL;
5335 		goto free_msi_vector;
5336 	}
5337 
5338 	return 0;
5339 
5340 free_msi_vector:
5341 	if (cnss_pci_is_one_msi(pci_priv))
5342 		cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);
5343 	pci_free_irq_vectors(pci_priv->pci_dev);
5344 reset_msi_config:
5345 	pci_priv->msi_config = NULL;
5346 out:
5347 	return ret;
5348 }
5349 
5350 static void cnss_pci_disable_msi(struct cnss_pci_data *pci_priv)
5351 {
5352 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
5353 		return;
5354 
5355 	if (cnss_pci_is_one_msi(pci_priv))
5356 		cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);
5357 
5358 	pci_free_irq_vectors(pci_priv->pci_dev);
5359 }
5360 
5361 int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
5362 				 int *num_vectors, u32 *user_base_data,
5363 				 u32 *base_vector)
5364 {
5365 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5366 	struct cnss_msi_config *msi_config;
5367 	int idx;
5368 
5369 	if (!pci_priv)
5370 		return -ENODEV;
5371 
5372 	msi_config = pci_priv->msi_config;
5373 	if (!msi_config) {
5374 		cnss_pr_err("MSI is not supported.\n");
5375 		return -EINVAL;
5376 	}
5377 
5378 	for (idx = 0; idx < msi_config->total_users; idx++) {
5379 		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
5380 			*num_vectors = msi_config->users[idx].num_vectors;
5381 			*user_base_data = msi_config->users[idx].base_vector
5382 				+ pci_priv->msi_ep_base_data;
5383 			*base_vector = msi_config->users[idx].base_vector;
5384 			/*Add only single print for each user*/
5385 			if (print_optimize.msi_log_chk[idx]++)
5386 				goto skip_print;
5387 
5388 			cnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
5389 				    user_name, *num_vectors, *user_base_data,
5390 				    *base_vector);
5391 skip_print:
5392 			return 0;
5393 		}
5394 	}
5395 
5396 	cnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);
5397 
5398 	return -EINVAL;
5399 }
5400 EXPORT_SYMBOL(cnss_get_user_msi_assignment);
5401 
5402 int cnss_get_msi_irq(struct device *dev, unsigned int vector)
5403 {
5404 	struct pci_dev *pci_dev = to_pci_dev(dev);
5405 	int irq_num;
5406 
5407 	irq_num = pci_irq_vector(pci_dev, vector);
5408 	cnss_pr_dbg("Get IRQ number %d for vector index %d\n", irq_num, vector);
5409 
5410 	return irq_num;
5411 }
5412 EXPORT_SYMBOL(cnss_get_msi_irq);
5413 
5414 bool cnss_is_one_msi(struct device *dev)
5415 {
5416 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5417 
5418 	if (!pci_priv)
5419 		return false;
5420 
5421 	return cnss_pci_is_one_msi(pci_priv);
5422 }
5423 EXPORT_SYMBOL(cnss_is_one_msi);
5424 
5425 void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
5426 			  u32 *msi_addr_high)
5427 {
5428 	struct pci_dev *pci_dev = to_pci_dev(dev);
5429 	struct cnss_pci_data *pci_priv;
5430 	u16 control;
5431 
5432 	if (!pci_dev)
5433 		return;
5434 
5435 	pci_priv = cnss_get_pci_priv(pci_dev);
5436 	if (!pci_priv)
5437 		return;
5438 
5439 	if (pci_dev->msix_enabled) {
5440 		*msi_addr_low = pci_priv->msix_addr;
5441 		*msi_addr_high = 0;
5442 		if (!print_optimize.msi_addr_chk++)
5443 			cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
5444 				    *msi_addr_low, *msi_addr_high);
5445 		return;
5446 	}
5447 
5448 	pci_read_config_word(pci_dev, pci_dev->msi_cap + PCI_MSI_FLAGS,
5449 			     &control);
5450 	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
5451 			      msi_addr_low);
5452 	/* Return MSI high address only when device supports 64-bit MSI */
5453 	if (control & PCI_MSI_FLAGS_64BIT)
5454 		pci_read_config_dword(pci_dev,
5455 				      pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
5456 				      msi_addr_high);
5457 	else
5458 		*msi_addr_high = 0;
5459 	 /*Add only single print as the address is constant*/
5460 	 if (!print_optimize.msi_addr_chk++)
5461 		cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
5462 			    *msi_addr_low, *msi_addr_high);
5463 }
5464 EXPORT_SYMBOL(cnss_get_msi_address);
5465 
5466 u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv)
5467 {
5468 	int ret, num_vectors;
5469 	u32 user_base_data, base_vector;
5470 
5471 	if (!pci_priv)
5472 		return -ENODEV;
5473 
5474 	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
5475 					   WAKE_MSI_NAME, &num_vectors,
5476 					   &user_base_data, &base_vector);
5477 	if (ret) {
5478 		cnss_pr_err("WAKE MSI is not valid\n");
5479 		return 0;
5480 	}
5481 
5482 	return user_base_data;
5483 }
5484 
5485 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
5486 static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
5487 {
5488 	return dma_set_mask(&pci_dev->dev, mask);
5489 }
5490 
5491 static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
5492 	u64 mask)
5493 {
5494 	return dma_set_coherent_mask(&pci_dev->dev, mask);
5495 }
5496 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
5497 static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
5498 {
5499 	return pci_set_dma_mask(pci_dev, mask);
5500 }
5501 
5502 static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
5503 	u64 mask)
5504 {
5505 	return pci_set_consistent_dma_mask(pci_dev, mask);
5506 }
5507 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
5508 
5509 static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
5510 {
5511 	int ret = 0;
5512 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5513 	u16 device_id;
5514 
5515 	pci_read_config_word(pci_dev, PCI_DEVICE_ID, &device_id);
5516 	if (device_id != pci_priv->pci_device_id->device)  {
5517 		cnss_pr_err("PCI device ID mismatch, config ID: 0x%x, probe ID: 0x%x\n",
5518 			    device_id, pci_priv->pci_device_id->device);
5519 		ret = -EIO;
5520 		goto out;
5521 	}
5522 
5523 	ret = pci_assign_resource(pci_dev, PCI_BAR_NUM);
5524 	if (ret) {
5525 		pr_err("Failed to assign PCI resource, err = %d\n", ret);
5526 		goto out;
5527 	}
5528 
5529 	ret = pci_enable_device(pci_dev);
5530 	if (ret) {
5531 		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
5532 		goto out;
5533 	}
5534 
5535 	ret = pci_request_region(pci_dev, PCI_BAR_NUM, "cnss");
5536 	if (ret) {
5537 		cnss_pr_err("Failed to request PCI region, err = %d\n", ret);
5538 		goto disable_device;
5539 	}
5540 
5541 	switch (device_id) {
5542 	case QCA6174_DEVICE_ID:
5543 	case QCN7605_DEVICE_ID:
5544 		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
5545 		break;
5546 	case QCA6390_DEVICE_ID:
5547 	case QCA6490_DEVICE_ID:
5548 	case KIWI_DEVICE_ID:
5549 	case MANGO_DEVICE_ID:
5550 	case PEACH_DEVICE_ID:
5551 		pci_priv->dma_bit_mask = PCI_DMA_MASK_36_BIT;
5552 		break;
5553 	default:
5554 		pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
5555 		break;
5556 	}
5557 
5558 	cnss_pr_dbg("Set PCI DMA MASK (0x%llx)\n", pci_priv->dma_bit_mask);
5559 
5560 	ret = cnss_pci_set_dma_mask(pci_dev, pci_priv->dma_bit_mask);
5561 	if (ret) {
5562 		cnss_pr_err("Failed to set PCI DMA mask, err = %d\n", ret);
5563 		goto release_region;
5564 	}
5565 
5566 	ret = cnss_pci_set_coherent_dma_mask(pci_dev, pci_priv->dma_bit_mask);
5567 	if (ret) {
5568 		cnss_pr_err("Failed to set PCI coherent DMA mask, err = %d\n",
5569 			    ret);
5570 		goto release_region;
5571 	}
5572 
5573 	pci_priv->bar = pci_iomap(pci_dev, PCI_BAR_NUM, 0);
5574 	if (!pci_priv->bar) {
5575 		cnss_pr_err("Failed to do PCI IO map!\n");
5576 		ret = -EIO;
5577 		goto release_region;
5578 	}
5579 
5580 	/* Save default config space without BME enabled */
5581 	pci_save_state(pci_dev);
5582 	pci_priv->default_state = pci_store_saved_state(pci_dev);
5583 
5584 	pci_set_master(pci_dev);
5585 
5586 	return 0;
5587 
5588 release_region:
5589 	pci_release_region(pci_dev, PCI_BAR_NUM);
5590 disable_device:
5591 	pci_disable_device(pci_dev);
5592 out:
5593 	return ret;
5594 }
5595 
5596 static void cnss_pci_disable_bus(struct cnss_pci_data *pci_priv)
5597 {
5598 	struct pci_dev *pci_dev = pci_priv->pci_dev;
5599 
5600 	pci_clear_master(pci_dev);
5601 	pci_load_and_free_saved_state(pci_dev, &pci_priv->saved_state);
5602 	pci_load_and_free_saved_state(pci_dev, &pci_priv->default_state);
5603 
5604 	if (pci_priv->bar) {
5605 		pci_iounmap(pci_dev, pci_priv->bar);
5606 		pci_priv->bar = NULL;
5607 	}
5608 
5609 	pci_release_region(pci_dev, PCI_BAR_NUM);
5610 	if (pci_is_enabled(pci_dev))
5611 		pci_disable_device(pci_dev);
5612 }
5613 
5614 static void cnss_pci_dump_qdss_reg(struct cnss_pci_data *pci_priv)
5615 {
5616 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5617 	int i, array_size = ARRAY_SIZE(qdss_csr) - 1;
5618 	gfp_t gfp = GFP_KERNEL;
5619 	u32 reg_offset;
5620 
5621 	if (in_interrupt() || irqs_disabled())
5622 		gfp = GFP_ATOMIC;
5623 
5624 	if (!plat_priv->qdss_reg) {
5625 		plat_priv->qdss_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
5626 						   sizeof(*plat_priv->qdss_reg)
5627 						   * array_size, gfp);
5628 		if (!plat_priv->qdss_reg)
5629 			return;
5630 	}
5631 
5632 	cnss_pr_dbg("Start to dump qdss registers\n");
5633 
5634 	for (i = 0; qdss_csr[i].name; i++) {
5635 		reg_offset = QDSS_APB_DEC_CSR_BASE + qdss_csr[i].offset;
5636 		if (cnss_pci_reg_read(pci_priv, reg_offset,
5637 				      &plat_priv->qdss_reg[i]))
5638 			return;
5639 		cnss_pr_dbg("%s[0x%x] = 0x%x\n", qdss_csr[i].name, reg_offset,
5640 			    plat_priv->qdss_reg[i]);
5641 	}
5642 }
5643 
5644 static void cnss_pci_dump_ce_reg(struct cnss_pci_data *pci_priv,
5645 				 enum cnss_ce_index ce)
5646 {
5647 	int i;
5648 	u32 ce_base = ce * CE_REG_INTERVAL;
5649 	u32 reg_offset, src_ring_base, dst_ring_base, cmn_base, val;
5650 
5651 	switch (pci_priv->device_id) {
5652 	case QCA6390_DEVICE_ID:
5653 		src_ring_base = QCA6390_CE_SRC_RING_REG_BASE;
5654 		dst_ring_base = QCA6390_CE_DST_RING_REG_BASE;
5655 		cmn_base = QCA6390_CE_COMMON_REG_BASE;
5656 		break;
5657 	case QCA6490_DEVICE_ID:
5658 		src_ring_base = QCA6490_CE_SRC_RING_REG_BASE;
5659 		dst_ring_base = QCA6490_CE_DST_RING_REG_BASE;
5660 		cmn_base = QCA6490_CE_COMMON_REG_BASE;
5661 		break;
5662 	default:
5663 		return;
5664 	}
5665 
5666 	switch (ce) {
5667 	case CNSS_CE_09:
5668 	case CNSS_CE_10:
5669 		for (i = 0; ce_src[i].name; i++) {
5670 			reg_offset = src_ring_base + ce_base + ce_src[i].offset;
5671 			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
5672 				return;
5673 			cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
5674 				    ce, ce_src[i].name, reg_offset, val);
5675 		}
5676 
5677 		for (i = 0; ce_dst[i].name; i++) {
5678 			reg_offset = dst_ring_base + ce_base + ce_dst[i].offset;
5679 			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
5680 				return;
5681 			cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
5682 				    ce, ce_dst[i].name, reg_offset, val);
5683 		}
5684 		break;
5685 	case CNSS_CE_COMMON:
5686 		for (i = 0; ce_cmn[i].name; i++) {
5687 			reg_offset = cmn_base  + ce_cmn[i].offset;
5688 			if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
5689 				return;
5690 			cnss_pr_dbg("CE_COMMON_%s[0x%x] = 0x%x\n",
5691 				    ce_cmn[i].name, reg_offset, val);
5692 		}
5693 		break;
5694 	default:
5695 		cnss_pr_err("Unsupported CE[%d] registers dump\n", ce);
5696 	}
5697 }
5698 
5699 static void cnss_pci_dump_debug_reg(struct cnss_pci_data *pci_priv)
5700 {
5701 	if (cnss_pci_check_link_status(pci_priv))
5702 		return;
5703 
5704 	cnss_pr_dbg("Start to dump debug registers\n");
5705 
5706 	cnss_mhi_debug_reg_dump(pci_priv);
5707 	cnss_pci_soc_scratch_reg_dump(pci_priv);
5708 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_COMMON);
5709 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_09);
5710 	cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_10);
5711 }
5712 
5713 static int cnss_pci_assert_host_sol(struct cnss_pci_data *pci_priv)
5714 {
5715 	if (cnss_get_host_sol_value(pci_priv->plat_priv))
5716 		return -EINVAL;
5717 
5718 	cnss_pr_dbg("Assert host SOL GPIO to retry RDDM, expecting link down\n");
5719 	cnss_set_host_sol_value(pci_priv->plat_priv, 1);
5720 
5721 	return 0;
5722 }
5723 
5724 static void cnss_pci_mhi_reg_dump(struct cnss_pci_data *pci_priv)
5725 {
5726 	if (!cnss_pci_check_link_status(pci_priv))
5727 		cnss_mhi_debug_reg_dump(pci_priv);
5728 
5729 	cnss_pci_soc_scratch_reg_dump(pci_priv);
5730 	cnss_pci_dump_misc_reg(pci_priv);
5731 	cnss_pci_dump_shadow_reg(pci_priv);
5732 }
5733 
5734 int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
5735 {
5736 	int ret;
5737 	struct cnss_plat_data *plat_priv;
5738 
5739 	if (!pci_priv)
5740 		return -ENODEV;
5741 
5742 	plat_priv = pci_priv->plat_priv;
5743 	if (!plat_priv)
5744 		return -ENODEV;
5745 
5746 	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
5747 	    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state))
5748 		return -EINVAL;
5749 	/*
5750 	 * Call pm_runtime_get_sync insteat of auto_resume to get
5751 	 * reference and make sure runtime_suspend wont get called.
5752 	 */
5753 	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
5754 	if (ret < 0)
5755 		goto runtime_pm_put;
5756 	/*
5757 	 * In some scenarios, cnss_pci_pm_runtime_get_sync
5758 	 * might not resume PCI bus. For those cases do auto resume.
5759 	 */
5760 	cnss_auto_resume(&pci_priv->pci_dev->dev);
5761 
5762 	if (!pci_priv->is_smmu_fault)
5763 		cnss_pci_mhi_reg_dump(pci_priv);
5764 
5765 	/* If link is still down here, directly trigger link down recovery */
5766 	ret = cnss_pci_check_link_status(pci_priv);
5767 	if (ret) {
5768 		cnss_pci_link_down(&pci_priv->pci_dev->dev);
5769 		cnss_pci_pm_runtime_mark_last_busy(pci_priv);
5770 		cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
5771 		return 0;
5772 	}
5773 
5774 	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
5775 	if (ret) {
5776 		if (pci_priv->is_smmu_fault) {
5777 			cnss_pci_mhi_reg_dump(pci_priv);
5778 			pci_priv->is_smmu_fault = false;
5779 		}
5780 		if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
5781 		    test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state)) {
5782 			cnss_pr_dbg("MHI is not powered on, ignore RDDM failure\n");
5783 			cnss_pci_pm_runtime_mark_last_busy(pci_priv);
5784 			cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
5785 			return 0;
5786 		}
5787 		cnss_fatal_err("Failed to trigger RDDM, err = %d\n", ret);
5788 		if (!cnss_pci_assert_host_sol(pci_priv)) {
5789 			cnss_pci_pm_runtime_mark_last_busy(pci_priv);
5790 			cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
5791 			return 0;
5792 		}
5793 		cnss_pci_dump_debug_reg(pci_priv);
5794 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
5795 				       CNSS_REASON_DEFAULT);
5796 		goto runtime_pm_put;
5797 	}
5798 
5799 	if (pci_priv->is_smmu_fault) {
5800 		cnss_pci_mhi_reg_dump(pci_priv);
5801 		pci_priv->is_smmu_fault = false;
5802 	}
5803 
5804 	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
5805 		mod_timer(&pci_priv->dev_rddm_timer,
5806 			  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
5807 	}
5808 
5809 runtime_pm_put:
5810 	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
5811 	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
5812 	return ret;
5813 }
5814 
5815 static void cnss_pci_add_dump_seg(struct cnss_pci_data *pci_priv,
5816 				  struct cnss_dump_seg *dump_seg,
5817 				  enum cnss_fw_dump_type type, int seg_no,
5818 				  void *va, dma_addr_t dma, size_t size)
5819 {
5820 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5821 	struct device *dev = &pci_priv->pci_dev->dev;
5822 	phys_addr_t pa;
5823 
5824 	dump_seg->address = dma;
5825 	dump_seg->v_address = va;
5826 	dump_seg->size = size;
5827 	dump_seg->type = type;
5828 
5829 	cnss_pr_dbg("Seg: %x, va: %pK, dma: %pa, size: 0x%zx\n",
5830 		    seg_no, va, &dma, size);
5831 
5832 	if (cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS))
5833 		return;
5834 
5835 	cnss_minidump_add_region(plat_priv, type, seg_no, va, pa, size);
5836 }
5837 
5838 static void cnss_pci_remove_dump_seg(struct cnss_pci_data *pci_priv,
5839 				     struct cnss_dump_seg *dump_seg,
5840 				     enum cnss_fw_dump_type type, int seg_no,
5841 				     void *va, dma_addr_t dma, size_t size)
5842 {
5843 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5844 	struct device *dev = &pci_priv->pci_dev->dev;
5845 	phys_addr_t pa;
5846 
5847 	cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS);
5848 	cnss_minidump_remove_region(plat_priv, type, seg_no, va, pa, size);
5849 }
5850 
5851 int cnss_pci_call_driver_uevent(struct cnss_pci_data *pci_priv,
5852 				enum cnss_driver_status status, void *data)
5853 {
5854 	struct cnss_uevent_data uevent_data;
5855 	struct cnss_wlan_driver *driver_ops;
5856 
5857 	driver_ops = pci_priv->driver_ops;
5858 	if (!driver_ops || !driver_ops->update_event) {
5859 		cnss_pr_dbg("Hang event driver ops is NULL\n");
5860 		return -EINVAL;
5861 	}
5862 
5863 	cnss_pr_dbg("Calling driver uevent: %d\n", status);
5864 
5865 	uevent_data.status = status;
5866 	uevent_data.data = data;
5867 
5868 	return driver_ops->update_event(pci_priv->pci_dev, &uevent_data);
5869 }
5870 
5871 static void cnss_pci_send_hang_event(struct cnss_pci_data *pci_priv)
5872 {
5873 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5874 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
5875 	struct cnss_hang_event hang_event;
5876 	void *hang_data_va = NULL;
5877 	u64 offset = 0;
5878 	u16 length = 0;
5879 	int i = 0;
5880 
5881 	if (!fw_mem || !plat_priv->fw_mem_seg_len)
5882 		return;
5883 
5884 	memset(&hang_event, 0, sizeof(hang_event));
5885 	switch (pci_priv->device_id) {
5886 	case QCA6390_DEVICE_ID:
5887 		offset = HST_HANG_DATA_OFFSET;
5888 		length = HANG_DATA_LENGTH;
5889 		break;
5890 	case QCA6490_DEVICE_ID:
5891 		/* Fallback to hard-coded values if hang event params not
5892 		 * present in QMI. Once all the firmware branches have the
5893 		 * fix to send params over QMI, this can be removed.
5894 		 */
5895 		if (plat_priv->hang_event_data_len) {
5896 			offset = plat_priv->hang_data_addr_offset;
5897 			length = plat_priv->hang_event_data_len;
5898 		} else {
5899 			offset = HSP_HANG_DATA_OFFSET;
5900 			length = HANG_DATA_LENGTH;
5901 		}
5902 		break;
5903 	case KIWI_DEVICE_ID:
5904 	case MANGO_DEVICE_ID:
5905 	case PEACH_DEVICE_ID:
5906 		offset = plat_priv->hang_data_addr_offset;
5907 		length = plat_priv->hang_event_data_len;
5908 		break;
5909 	default:
5910 		cnss_pr_err("Skip Hang Event Data as unsupported Device ID received: %d\n",
5911 			    pci_priv->device_id);
5912 		return;
5913 	}
5914 
5915 	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
5916 		if (fw_mem[i].type == QMI_WLFW_MEM_TYPE_DDR_V01 &&
5917 		    fw_mem[i].va) {
5918 			/* The offset must be < (fw_mem size- hangdata length) */
5919 			if (!(offset <= fw_mem[i].size - length))
5920 				goto exit;
5921 
5922 			hang_data_va = fw_mem[i].va + offset;
5923 			hang_event.hang_event_data = kmemdup(hang_data_va,
5924 							     length,
5925 							     GFP_ATOMIC);
5926 			if (!hang_event.hang_event_data) {
5927 				cnss_pr_dbg("Hang data memory alloc failed\n");
5928 				return;
5929 			}
5930 			hang_event.hang_event_data_len = length;
5931 			break;
5932 		}
5933 	}
5934 
5935 	cnss_pci_call_driver_uevent(pci_priv, CNSS_HANG_EVENT, &hang_event);
5936 
5937 	kfree(hang_event.hang_event_data);
5938 	hang_event.hang_event_data = NULL;
5939 	return;
5940 exit:
5941 	cnss_pr_dbg("Invalid hang event params, offset:0x%x, length:0x%x\n",
5942 		    plat_priv->hang_data_addr_offset,
5943 		    plat_priv->hang_event_data_len);
5944 }
5945 
5946 #ifdef CONFIG_CNSS2_SSR_DRIVER_DUMP
5947 void cnss_pci_collect_host_dump_info(struct cnss_pci_data *pci_priv)
5948 {
5949 	struct cnss_ssr_driver_dump_entry *ssr_entry;
5950 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5951 	size_t num_entries_loaded = 0;
5952 	int x;
5953 	int ret = -1;
5954 
5955 	ssr_entry = kmalloc(sizeof(*ssr_entry) * CNSS_HOST_DUMP_TYPE_MAX, GFP_KERNEL);
5956 	if (!ssr_entry) {
5957 		cnss_pr_err("ssr_entry malloc failed");
5958 		return;
5959 	}
5960 
5961 	if (pci_priv->driver_ops &&
5962 	    pci_priv->driver_ops->collect_driver_dump) {
5963 		ret = pci_priv->driver_ops->collect_driver_dump(pci_priv->pci_dev,
5964 								ssr_entry,
5965 								&num_entries_loaded);
5966 	}
5967 
5968 	if (!ret) {
5969 		for (x = 0; x < num_entries_loaded; x++) {
5970 			cnss_pr_info("Idx:%d, ptr: %p, name: %s, size: %d\n",
5971 				     x, ssr_entry[x].buffer_pointer,
5972 				     ssr_entry[x].region_name,
5973 				     ssr_entry[x].buffer_size);
5974 		}
5975 
5976 		cnss_do_host_ramdump(plat_priv, ssr_entry, num_entries_loaded);
5977 	} else {
5978 		cnss_pr_info("Host SSR elf dump collection feature disabled\n");
5979 	}
5980 
5981 	kfree(ssr_entry);
5982 }
5983 #endif
5984 
5985 void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
5986 {
5987 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5988 	struct cnss_dump_data *dump_data =
5989 		&plat_priv->ramdump_info_v2.dump_data;
5990 	struct cnss_dump_seg *dump_seg =
5991 		plat_priv->ramdump_info_v2.dump_data_vaddr;
5992 	struct image_info *fw_image, *rddm_image;
5993 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
5994 	int ret, i, j;
5995 
5996 	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
5997 	    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
5998 		cnss_pci_send_hang_event(pci_priv);
5999 
6000 	if (test_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state)) {
6001 		cnss_pr_dbg("RAM dump is already collected, skip\n");
6002 		return;
6003 	}
6004 
6005 	if (!cnss_is_device_powered_on(plat_priv)) {
6006 		cnss_pr_dbg("Device is already powered off, skip\n");
6007 		return;
6008 	}
6009 
6010 	if (!in_panic) {
6011 		mutex_lock(&pci_priv->bus_lock);
6012 		ret = cnss_pci_check_link_status(pci_priv);
6013 		if (ret) {
6014 			if (ret != -EACCES) {
6015 				mutex_unlock(&pci_priv->bus_lock);
6016 				return;
6017 			}
6018 			if (cnss_pci_resume_bus(pci_priv)) {
6019 				mutex_unlock(&pci_priv->bus_lock);
6020 				return;
6021 			}
6022 		}
6023 		mutex_unlock(&pci_priv->bus_lock);
6024 	} else {
6025 		if (cnss_pci_check_link_status(pci_priv))
6026 			return;
6027 		/* Inside panic handler, reduce timeout for RDDM to avoid
6028 		 * unnecessary hypervisor watchdog bite.
6029 		 */
6030 		pci_priv->mhi_ctrl->timeout_ms /= 2;
6031 	}
6032 
6033 	cnss_mhi_debug_reg_dump(pci_priv);
6034 	cnss_pci_soc_scratch_reg_dump(pci_priv);
6035 	cnss_pci_dump_misc_reg(pci_priv);
6036 
6037 	cnss_rddm_trigger_debug(pci_priv);
6038 	ret = mhi_download_rddm_image(pci_priv->mhi_ctrl, in_panic);
6039 	if (ret) {
6040 		cnss_fatal_err("Failed to download RDDM image, err = %d\n",
6041 			       ret);
6042 		if (!cnss_pci_assert_host_sol(pci_priv))
6043 			return;
6044 		cnss_rddm_trigger_check(pci_priv);
6045 		cnss_pci_dump_debug_reg(pci_priv);
6046 		return;
6047 	}
6048 	cnss_rddm_trigger_check(pci_priv);
6049 	fw_image = pci_priv->mhi_ctrl->fbc_image;
6050 	rddm_image = pci_priv->mhi_ctrl->rddm_image;
6051 	dump_data->nentries = 0;
6052 
6053 	if (plat_priv->qdss_mem_seg_len)
6054 		cnss_pci_dump_qdss_reg(pci_priv);
6055 	cnss_mhi_dump_sfr(pci_priv);
6056 
6057 	if (!dump_seg) {
6058 		cnss_pr_warn("FW image dump collection not setup");
6059 		goto skip_dump;
6060 	}
6061 
6062 	cnss_pr_dbg("Collect FW image dump segment, nentries %d\n",
6063 		    fw_image->entries);
6064 
6065 	for (i = 0; i < fw_image->entries; i++) {
6066 		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
6067 				      fw_image->mhi_buf[i].buf,
6068 				      fw_image->mhi_buf[i].dma_addr,
6069 				      fw_image->mhi_buf[i].len);
6070 		dump_seg++;
6071 	}
6072 
6073 	dump_data->nentries += fw_image->entries;
6074 
6075 	cnss_pr_dbg("Collect RDDM image dump segment, nentries %d\n",
6076 		    rddm_image->entries);
6077 
6078 	for (i = 0; i < rddm_image->entries; i++) {
6079 		cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
6080 				      rddm_image->mhi_buf[i].buf,
6081 				      rddm_image->mhi_buf[i].dma_addr,
6082 				      rddm_image->mhi_buf[i].len);
6083 		dump_seg++;
6084 	}
6085 
6086 	dump_data->nentries += rddm_image->entries;
6087 
6088 	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
6089 		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
6090 			if (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
6091 				cnss_pr_dbg("Collect remote heap dump segment\n");
6092 				cnss_pci_add_dump_seg(pci_priv, dump_seg,
6093 						      CNSS_FW_REMOTE_HEAP, j,
6094 						      fw_mem[i].va,
6095 						      fw_mem[i].pa,
6096 						      fw_mem[i].size);
6097 				dump_seg++;
6098 				dump_data->nentries++;
6099 				j++;
6100 			} else {
6101 				cnss_pr_dbg("Skip remote heap dumps as it is non-contiguous\n");
6102 			}
6103 		}
6104 	}
6105 
6106 	if (dump_data->nentries > 0)
6107 		plat_priv->ramdump_info_v2.dump_data_valid = true;
6108 
6109 	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_DONE);
6110 
6111 skip_dump:
6112 	complete(&plat_priv->rddm_complete);
6113 }
6114 
6115 void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
6116 {
6117 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6118 	struct cnss_dump_seg *dump_seg =
6119 		plat_priv->ramdump_info_v2.dump_data_vaddr;
6120 	struct image_info *fw_image, *rddm_image;
6121 	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
6122 	int i, j;
6123 
6124 	if (!dump_seg)
6125 		return;
6126 
6127 	fw_image = pci_priv->mhi_ctrl->fbc_image;
6128 	rddm_image = pci_priv->mhi_ctrl->rddm_image;
6129 
6130 	for (i = 0; i < fw_image->entries; i++) {
6131 		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
6132 					 fw_image->mhi_buf[i].buf,
6133 					 fw_image->mhi_buf[i].dma_addr,
6134 					 fw_image->mhi_buf[i].len);
6135 		dump_seg++;
6136 	}
6137 
6138 	for (i = 0; i < rddm_image->entries; i++) {
6139 		cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
6140 					 rddm_image->mhi_buf[i].buf,
6141 					 rddm_image->mhi_buf[i].dma_addr,
6142 					 rddm_image->mhi_buf[i].len);
6143 		dump_seg++;
6144 	}
6145 
6146 	for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
6147 		if (fw_mem[i].type == CNSS_MEM_TYPE_DDR &&
6148 		    (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
6149 			cnss_pci_remove_dump_seg(pci_priv, dump_seg,
6150 						 CNSS_FW_REMOTE_HEAP, j,
6151 						 fw_mem[i].va, fw_mem[i].pa,
6152 						 fw_mem[i].size);
6153 			dump_seg++;
6154 			j++;
6155 		}
6156 	}
6157 
6158 	plat_priv->ramdump_info_v2.dump_data.nentries = 0;
6159 	plat_priv->ramdump_info_v2.dump_data_valid = false;
6160 }
6161 
6162 void cnss_pci_device_crashed(struct cnss_pci_data *pci_priv)
6163 {
6164 	struct cnss_plat_data *plat_priv;
6165 
6166 	if (!pci_priv) {
6167 		cnss_pr_err("pci_priv is NULL\n");
6168 		return;
6169 	}
6170 
6171 	plat_priv = pci_priv->plat_priv;
6172 	if (!plat_priv) {
6173 		cnss_pr_err("plat_priv is NULL\n");
6174 		return;
6175 	}
6176 
6177 	if (plat_priv->recovery_enabled)
6178 		cnss_pci_collect_host_dump_info(pci_priv);
6179 
6180 	/* Call recovery handler in the DRIVER_RECOVERY event context
6181 	 * instead of scheduling work. In that way complete recovery
6182 	 * will be done as part of DRIVER_RECOVERY event and get
6183 	 * serialized with other events.
6184 	 */
6185 	cnss_recovery_handler(plat_priv);
6186 }
6187 
6188 static int cnss_mhi_pm_runtime_get(struct mhi_controller *mhi_ctrl)
6189 {
6190 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6191 
6192 	return cnss_pci_pm_runtime_get(pci_priv, RTPM_ID_MHI);
6193 }
6194 
6195 static void cnss_mhi_pm_runtime_put_noidle(struct mhi_controller *mhi_ctrl)
6196 {
6197 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6198 
6199 	cnss_pci_pm_runtime_put_noidle(pci_priv, RTPM_ID_MHI);
6200 }
6201 
6202 void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv,
6203 				 char *prefix_name, char *name)
6204 {
6205 	struct cnss_plat_data *plat_priv;
6206 
6207 	if (!pci_priv)
6208 		return;
6209 
6210 	plat_priv = pci_priv->plat_priv;
6211 
6212 	if (!plat_priv->use_fw_path_with_prefix) {
6213 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
6214 		return;
6215 	}
6216 
6217 	switch (pci_priv->device_id) {
6218 	case QCN7605_DEVICE_ID:
6219 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6220 			  QCN7605_PATH_PREFIX "%s", name);
6221 		break;
6222 	case QCA6390_DEVICE_ID:
6223 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6224 			  QCA6390_PATH_PREFIX "%s", name);
6225 		break;
6226 	case QCA6490_DEVICE_ID:
6227 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6228 			  QCA6490_PATH_PREFIX "%s", name);
6229 		break;
6230 	case KIWI_DEVICE_ID:
6231 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6232 			  KIWI_PATH_PREFIX "%s", name);
6233 		break;
6234 	case MANGO_DEVICE_ID:
6235 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6236 			  MANGO_PATH_PREFIX "%s", name);
6237 		break;
6238 	case PEACH_DEVICE_ID:
6239 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6240 			  PEACH_PATH_PREFIX "%s", name);
6241 		break;
6242 	default:
6243 		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
6244 		break;
6245 	}
6246 
6247 	cnss_pr_dbg("FW name added with prefix: %s\n", prefix_name);
6248 }
6249 
6250 static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
6251 {
6252 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6253 
6254 	switch (pci_priv->device_id) {
6255 	case QCA6390_DEVICE_ID:
6256 		if (plat_priv->device_version.major_version < FW_V2_NUMBER) {
6257 			cnss_pr_dbg("Device ID:version (0x%lx:%d) is not supported\n",
6258 				    pci_priv->device_id,
6259 				    plat_priv->device_version.major_version);
6260 			return -EINVAL;
6261 		}
6262 		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
6263 					    FW_V2_FILE_NAME);
6264 		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
6265 			 FW_V2_FILE_NAME);
6266 		break;
6267 	case QCA6490_DEVICE_ID:
6268 		switch (plat_priv->device_version.major_version) {
6269 		case FW_V2_NUMBER:
6270 				cnss_pci_add_fw_prefix_name(pci_priv,
6271 							    plat_priv->firmware_name,
6272 							    FW_V2_FILE_NAME);
6273 				snprintf(plat_priv->fw_fallback_name,
6274 					 MAX_FIRMWARE_NAME_LEN,
6275 					 FW_V2_FILE_NAME);
6276 			break;
6277 		default:
6278 			cnss_pci_add_fw_prefix_name(pci_priv,
6279 						    plat_priv->firmware_name,
6280 						    DEFAULT_FW_FILE_NAME);
6281 			snprintf(plat_priv->fw_fallback_name,
6282 				 MAX_FIRMWARE_NAME_LEN,
6283 				 DEFAULT_FW_FILE_NAME);
6284 			break;
6285 		}
6286 		break;
6287 	case KIWI_DEVICE_ID:
6288 	case MANGO_DEVICE_ID:
6289 	case PEACH_DEVICE_ID:
6290 		switch (plat_priv->device_version.major_version) {
6291 		case FW_V2_NUMBER:
6292 			/*
6293 			 * kiwiv2 using seprate fw binary for MM and FTM mode,
6294 			 * platform driver loads corresponding binary according
6295 			 * to current mode indicated by wlan driver. Otherwise
6296 			 * use default binary.
6297 			 * Mission mode using same binary name as before,
6298 			 * if seprate binary is not there, fall back to default.
6299 			 */
6300 			if (plat_priv->driver_mode == CNSS_MISSION) {
6301 				cnss_pci_add_fw_prefix_name(pci_priv,
6302 							    plat_priv->firmware_name,
6303 							    FW_V2_FILE_NAME);
6304 				cnss_pci_add_fw_prefix_name(pci_priv,
6305 							    plat_priv->fw_fallback_name,
6306 							    FW_V2_FILE_NAME);
6307 			} else if (plat_priv->driver_mode == CNSS_FTM) {
6308 				cnss_pci_add_fw_prefix_name(pci_priv,
6309 							    plat_priv->firmware_name,
6310 							    FW_V2_FTM_FILE_NAME);
6311 				cnss_pci_add_fw_prefix_name(pci_priv,
6312 							    plat_priv->fw_fallback_name,
6313 							    FW_V2_FILE_NAME);
6314 			} else {
6315 				/*
6316 				 * Since during cold boot calibration phase,
6317 				 * wlan driver has not registered, so default
6318 				 * fw binary will be used.
6319 				 */
6320 				cnss_pci_add_fw_prefix_name(pci_priv,
6321 							    plat_priv->firmware_name,
6322 							    FW_V2_FILE_NAME);
6323 				snprintf(plat_priv->fw_fallback_name,
6324 					 MAX_FIRMWARE_NAME_LEN,
6325 					 FW_V2_FILE_NAME);
6326 			}
6327 			break;
6328 		default:
6329 			cnss_pci_add_fw_prefix_name(pci_priv,
6330 						    plat_priv->firmware_name,
6331 						    DEFAULT_FW_FILE_NAME);
6332 			snprintf(plat_priv->fw_fallback_name,
6333 				 MAX_FIRMWARE_NAME_LEN,
6334 				 DEFAULT_FW_FILE_NAME);
6335 			break;
6336 		}
6337 		break;
6338 	default:
6339 		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
6340 					    DEFAULT_FW_FILE_NAME);
6341 		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
6342 			 DEFAULT_FW_FILE_NAME);
6343 		break;
6344 	}
6345 
6346 	cnss_pr_dbg("FW name is %s, FW fallback name is %s\n",
6347 		    plat_priv->firmware_name, plat_priv->fw_fallback_name);
6348 
6349 	return 0;
6350 }
6351 
6352 static char *cnss_mhi_notify_status_to_str(enum mhi_callback status)
6353 {
6354 	switch (status) {
6355 	case MHI_CB_IDLE:
6356 		return "IDLE";
6357 	case MHI_CB_EE_RDDM:
6358 		return "RDDM";
6359 	case MHI_CB_SYS_ERROR:
6360 		return "SYS_ERROR";
6361 	case MHI_CB_FATAL_ERROR:
6362 		return "FATAL_ERROR";
6363 	case MHI_CB_EE_MISSION_MODE:
6364 		return "MISSION_MODE";
6365 #if IS_ENABLED(CONFIG_MHI_BUS_MISC) && \
6366 (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
6367 	case MHI_CB_FALLBACK_IMG:
6368 		return "FW_FALLBACK";
6369 #endif
6370 	default:
6371 		return "UNKNOWN";
6372 	}
6373 };
6374 
6375 static void cnss_dev_rddm_timeout_hdlr(struct timer_list *t)
6376 {
6377 	struct cnss_pci_data *pci_priv =
6378 		from_timer(pci_priv, t, dev_rddm_timer);
6379 	enum mhi_ee_type mhi_ee;
6380 
6381 	if (!pci_priv)
6382 		return;
6383 
6384 	cnss_fatal_err("Timeout waiting for RDDM notification\n");
6385 
6386 	if (!cnss_pci_assert_host_sol(pci_priv))
6387 		return;
6388 
6389 	mhi_ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
6390 	if (mhi_ee == MHI_EE_PBL)
6391 		cnss_pr_err("Unable to collect ramdumps due to abrupt reset\n");
6392 
6393 	if (mhi_ee == MHI_EE_RDDM) {
6394 		cnss_pr_info("Device MHI EE is RDDM, try to collect dump\n");
6395 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6396 				       CNSS_REASON_RDDM);
6397 	} else {
6398 		cnss_mhi_debug_reg_dump(pci_priv);
6399 		cnss_pci_soc_scratch_reg_dump(pci_priv);
6400 		cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6401 				       CNSS_REASON_TIMEOUT);
6402 	}
6403 }
6404 
6405 static void cnss_boot_debug_timeout_hdlr(struct timer_list *t)
6406 {
6407 	struct cnss_pci_data *pci_priv =
6408 		from_timer(pci_priv, t, boot_debug_timer);
6409 
6410 	if (!pci_priv)
6411 		return;
6412 
6413 	if (cnss_pci_check_link_status(pci_priv))
6414 		return;
6415 
6416 	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
6417 		return;
6418 
6419 	if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
6420 		return;
6421 
6422 	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE))
6423 		return;
6424 
6425 	cnss_pr_dbg("Dump MHI/PBL/SBL debug data every %ds during MHI power on\n",
6426 		    BOOT_DEBUG_TIMEOUT_MS / 1000);
6427 	cnss_mhi_debug_reg_dump(pci_priv);
6428 	cnss_pci_soc_scratch_reg_dump(pci_priv);
6429 	cnss_pci_dump_bl_sram_mem(pci_priv);
6430 
6431 	mod_timer(&pci_priv->boot_debug_timer,
6432 		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
6433 }
6434 
6435 static int cnss_pci_handle_mhi_sys_err(struct cnss_pci_data *pci_priv)
6436 {
6437 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6438 
6439 	cnss_ignore_qmi_failure(true);
6440 	set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
6441 	del_timer(&plat_priv->fw_boot_timer);
6442 	mod_timer(&pci_priv->dev_rddm_timer,
6443 		  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
6444 	cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
6445 
6446 	return 0;
6447 }
6448 
6449 int cnss_pci_handle_dev_sol_irq(struct cnss_pci_data *pci_priv)
6450 {
6451 	return cnss_pci_handle_mhi_sys_err(pci_priv);
6452 }
6453 
6454 static void cnss_mhi_notify_status(struct mhi_controller *mhi_ctrl,
6455 				   enum mhi_callback reason)
6456 {
6457 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6458 	struct cnss_plat_data *plat_priv;
6459 	enum cnss_recovery_reason cnss_reason;
6460 
6461 	if (!pci_priv) {
6462 		cnss_pr_err("pci_priv is NULL");
6463 		return;
6464 	}
6465 
6466 	plat_priv = pci_priv->plat_priv;
6467 
6468 	if (reason != MHI_CB_IDLE)
6469 		cnss_pr_dbg("MHI status cb is called with reason %s(%d)\n",
6470 			    cnss_mhi_notify_status_to_str(reason), reason);
6471 
6472 	switch (reason) {
6473 	case MHI_CB_IDLE:
6474 	case MHI_CB_EE_MISSION_MODE:
6475 		return;
6476 	case MHI_CB_FATAL_ERROR:
6477 		cnss_ignore_qmi_failure(true);
6478 		set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
6479 		del_timer(&plat_priv->fw_boot_timer);
6480 		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
6481 		cnss_reason = CNSS_REASON_DEFAULT;
6482 		break;
6483 	case MHI_CB_SYS_ERROR:
6484 		cnss_pci_handle_mhi_sys_err(pci_priv);
6485 		return;
6486 	case MHI_CB_EE_RDDM:
6487 		cnss_ignore_qmi_failure(true);
6488 		set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
6489 		del_timer(&plat_priv->fw_boot_timer);
6490 		del_timer(&pci_priv->dev_rddm_timer);
6491 		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
6492 		cnss_reason = CNSS_REASON_RDDM;
6493 		break;
6494 #if IS_ENABLED(CONFIG_MHI_BUS_MISC) && \
6495 (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
6496 	case MHI_CB_FALLBACK_IMG:
6497 		/* for kiwi_v2 binary fallback is used, skip path fallback here */
6498 		if (!(pci_priv->device_id == KIWI_DEVICE_ID &&
6499 		      plat_priv->device_version.major_version == FW_V2_NUMBER)) {
6500 			plat_priv->use_fw_path_with_prefix = false;
6501 			cnss_pci_update_fw_name(pci_priv);
6502 		}
6503 		return;
6504 #endif
6505 
6506 	default:
6507 		cnss_pr_err("Unsupported MHI status cb reason: %d\n", reason);
6508 		return;
6509 	}
6510 
6511 	cnss_schedule_recovery(&pci_priv->pci_dev->dev, cnss_reason);
6512 }
6513 
6514 static int cnss_pci_get_mhi_msi(struct cnss_pci_data *pci_priv)
6515 {
6516 	int ret, num_vectors, i;
6517 	u32 user_base_data, base_vector;
6518 	int *irq;
6519 	unsigned int msi_data;
6520 	bool is_one_msi = false;
6521 
6522 	ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
6523 					   MHI_MSI_NAME, &num_vectors,
6524 					   &user_base_data, &base_vector);
6525 	if (ret)
6526 		return ret;
6527 
6528 	if (cnss_pci_is_one_msi(pci_priv)) {
6529 		is_one_msi = true;
6530 		num_vectors = cnss_pci_get_one_msi_mhi_irq_array_size(pci_priv);
6531 	}
6532 	cnss_pr_dbg("Number of assigned MSI for MHI is %d, base vector is %d\n",
6533 		    num_vectors, base_vector);
6534 
6535 	irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
6536 	if (!irq)
6537 		return -ENOMEM;
6538 
6539 	for (i = 0; i < num_vectors; i++) {
6540 		msi_data = base_vector;
6541 		if (!is_one_msi)
6542 			msi_data += i;
6543 		irq[i] = cnss_get_msi_irq(&pci_priv->pci_dev->dev, msi_data);
6544 	}
6545 
6546 	pci_priv->mhi_ctrl->irq = irq;
6547 	pci_priv->mhi_ctrl->nr_irqs = num_vectors;
6548 
6549 	return 0;
6550 }
6551 
6552 static int cnss_mhi_bw_scale(struct mhi_controller *mhi_ctrl,
6553 			     struct mhi_link_info *link_info)
6554 {
6555 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6556 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6557 	int ret = 0;
6558 
6559 	cnss_pr_dbg("Setting link speed:0x%x, width:0x%x\n",
6560 		    link_info->target_link_speed,
6561 		    link_info->target_link_width);
6562 
6563 	/* It has to set target link speed here before setting link bandwidth
6564 	 * when device requests link speed change. This can avoid setting link
6565 	 * bandwidth getting rejected if requested link speed is higher than
6566 	 * current one.
6567 	 */
6568 	ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num,
6569 					  link_info->target_link_speed);
6570 	if (ret)
6571 		cnss_pr_err("Failed to set target link speed to 0x%x, err = %d\n",
6572 			    link_info->target_link_speed, ret);
6573 
6574 	ret = cnss_pci_set_link_bandwidth(pci_priv,
6575 					  link_info->target_link_speed,
6576 					  link_info->target_link_width);
6577 
6578 	if (ret) {
6579 		cnss_pr_err("Failed to set link bandwidth, err = %d\n", ret);
6580 		return ret;
6581 	}
6582 
6583 	pci_priv->def_link_speed = link_info->target_link_speed;
6584 	pci_priv->def_link_width = link_info->target_link_width;
6585 
6586 	return 0;
6587 }
6588 
6589 static int cnss_mhi_read_reg(struct mhi_controller *mhi_ctrl,
6590 			     void __iomem *addr, u32 *out)
6591 {
6592 	struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6593 
6594 	u32 tmp = readl_relaxed(addr);
6595 
6596 	/* Unexpected value, query the link status */
6597 	if (PCI_INVALID_READ(tmp) &&
6598 	    cnss_pci_check_link_status(pci_priv))
6599 		return -EIO;
6600 
6601 	*out = tmp;
6602 
6603 	return 0;
6604 }
6605 
6606 static void cnss_mhi_write_reg(struct mhi_controller *mhi_ctrl,
6607 			       void __iomem *addr, u32 val)
6608 {
6609 	writel_relaxed(val, addr);
6610 }
6611 
6612 static int cnss_get_mhi_soc_info(struct cnss_plat_data *plat_priv,
6613 				 struct mhi_controller *mhi_ctrl)
6614 {
6615 	int ret = 0;
6616 
6617 	ret = mhi_get_soc_info(mhi_ctrl);
6618 	if (ret)
6619 		goto exit;
6620 
6621 	plat_priv->device_version.family_number = mhi_ctrl->family_number;
6622 	plat_priv->device_version.device_number = mhi_ctrl->device_number;
6623 	plat_priv->device_version.major_version = mhi_ctrl->major_version;
6624 	plat_priv->device_version.minor_version = mhi_ctrl->minor_version;
6625 
6626 	cnss_pr_dbg("Get device version info, family number: 0x%x, device number: 0x%x, major version: 0x%x, minor version: 0x%x\n",
6627 		    plat_priv->device_version.family_number,
6628 		    plat_priv->device_version.device_number,
6629 		    plat_priv->device_version.major_version,
6630 		    plat_priv->device_version.minor_version);
6631 
6632 	/* Only keep lower 4 bits as real device major version */
6633 	plat_priv->device_version.major_version &= DEVICE_MAJOR_VERSION_MASK;
6634 
6635 exit:
6636 	return ret;
6637 }
6638 
6639 static bool cnss_is_tme_supported(struct cnss_pci_data *pci_priv)
6640 {
6641 	if (!pci_priv) {
6642 		cnss_pr_dbg("pci_priv is NULL");
6643 		return false;
6644 	}
6645 
6646 	switch (pci_priv->device_id) {
6647 	case PEACH_DEVICE_ID:
6648 		return true;
6649 	default:
6650 		return false;
6651 	}
6652 }
6653 
6654 static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
6655 {
6656 	int ret = 0;
6657 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6658 	struct pci_dev *pci_dev = pci_priv->pci_dev;
6659 	struct mhi_controller *mhi_ctrl;
6660 	phys_addr_t bar_start;
6661 	const struct mhi_controller_config *cnss_mhi_config =
6662 						&cnss_mhi_config_default;
6663 
6664 	ret = cnss_qmi_init(plat_priv);
6665 	if (ret)
6666 		return -EINVAL;
6667 
6668 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
6669 		return 0;
6670 
6671 	mhi_ctrl = mhi_alloc_controller();
6672 	if (!mhi_ctrl) {
6673 		cnss_pr_err("Invalid MHI controller context\n");
6674 		return -EINVAL;
6675 	}
6676 
6677 	pci_priv->mhi_ctrl = mhi_ctrl;
6678 	mhi_ctrl->cntrl_dev = &pci_dev->dev;
6679 
6680 	mhi_ctrl->fw_image = plat_priv->firmware_name;
6681 #if IS_ENABLED(CONFIG_MHI_BUS_MISC) && \
6682 (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
6683 	mhi_ctrl->fallback_fw_image = plat_priv->fw_fallback_name;
6684 #endif
6685 
6686 	mhi_ctrl->regs = pci_priv->bar;
6687 	mhi_ctrl->reg_len = pci_resource_len(pci_priv->pci_dev, PCI_BAR_NUM);
6688 	bar_start = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
6689 	cnss_pr_dbg("BAR starts at %pa, length is %x\n",
6690 		    &bar_start, mhi_ctrl->reg_len);
6691 
6692 	ret = cnss_pci_get_mhi_msi(pci_priv);
6693 	if (ret) {
6694 		cnss_pr_err("Failed to get MSI for MHI, err = %d\n", ret);
6695 		goto free_mhi_ctrl;
6696 	}
6697 
6698 	if (cnss_pci_is_one_msi(pci_priv))
6699 		mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
6700 
6701 	if (pci_priv->smmu_s1_enable) {
6702 		mhi_ctrl->iova_start = pci_priv->smmu_iova_start;
6703 		mhi_ctrl->iova_stop = pci_priv->smmu_iova_start +
6704 					pci_priv->smmu_iova_len;
6705 	} else {
6706 		mhi_ctrl->iova_start = 0;
6707 		mhi_ctrl->iova_stop = pci_priv->dma_bit_mask;
6708 	}
6709 
6710 	mhi_ctrl->status_cb = cnss_mhi_notify_status;
6711 	mhi_ctrl->runtime_get = cnss_mhi_pm_runtime_get;
6712 	mhi_ctrl->runtime_put = cnss_mhi_pm_runtime_put_noidle;
6713 	mhi_ctrl->read_reg = cnss_mhi_read_reg;
6714 	mhi_ctrl->write_reg = cnss_mhi_write_reg;
6715 
6716 	mhi_ctrl->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size;
6717 	if (!mhi_ctrl->rddm_size)
6718 		mhi_ctrl->rddm_size = RAMDUMP_SIZE_DEFAULT;
6719 
6720 	if (plat_priv->device_id == QCN7605_DEVICE_ID)
6721 		mhi_ctrl->sbl_size = SZ_256K;
6722 	else
6723 		mhi_ctrl->sbl_size = SZ_512K;
6724 
6725 	mhi_ctrl->seg_len = SZ_512K;
6726 	mhi_ctrl->fbc_download = true;
6727 
6728 	ret = cnss_get_mhi_soc_info(plat_priv, mhi_ctrl);
6729 	if (ret)
6730 		goto free_mhi_irq;
6731 
6732 	/* Satellite config only supported on KIWI V2 and later chipset */
6733 	if (plat_priv->device_id <= QCA6490_DEVICE_ID ||
6734 			(plat_priv->device_id == KIWI_DEVICE_ID &&
6735 			 plat_priv->device_version.major_version == 1)) {
6736 		if (plat_priv->device_id == QCN7605_DEVICE_ID)
6737 			cnss_mhi_config = &cnss_mhi_config_genoa;
6738 		else
6739 			cnss_mhi_config = &cnss_mhi_config_no_satellite;
6740 	}
6741 
6742 	mhi_ctrl->tme_supported_image = cnss_is_tme_supported(pci_priv);
6743 
6744 	ret = mhi_register_controller(mhi_ctrl, cnss_mhi_config);
6745 	if (ret) {
6746 		cnss_pr_err("Failed to register to MHI bus, err = %d\n", ret);
6747 		goto free_mhi_irq;
6748 	}
6749 
6750 	/* MHI satellite driver only needs to connect when DRV is supported */
6751 	if (cnss_pci_get_drv_supported(pci_priv))
6752 		cnss_mhi_controller_set_base(pci_priv, bar_start);
6753 
6754 	cnss_get_bwscal_info(plat_priv);
6755 	cnss_pr_dbg("no_bwscale: %d\n", plat_priv->no_bwscale);
6756 
6757 	/* BW scale CB needs to be set after registering MHI per requirement */
6758 	if (!plat_priv->no_bwscale)
6759 		cnss_mhi_controller_set_bw_scale_cb(pci_priv,
6760 						    cnss_mhi_bw_scale);
6761 
6762 	ret = cnss_pci_update_fw_name(pci_priv);
6763 	if (ret)
6764 		goto unreg_mhi;
6765 
6766 	return 0;
6767 
6768 unreg_mhi:
6769 	mhi_unregister_controller(mhi_ctrl);
6770 free_mhi_irq:
6771 	kfree(mhi_ctrl->irq);
6772 free_mhi_ctrl:
6773 	mhi_free_controller(mhi_ctrl);
6774 
6775 	return ret;
6776 }
6777 
6778 static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
6779 {
6780 	struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;
6781 
6782 	if (pci_priv->device_id == QCA6174_DEVICE_ID)
6783 		return;
6784 
6785 	mhi_unregister_controller(mhi_ctrl);
6786 	kfree(mhi_ctrl->irq);
6787 	mhi_ctrl->irq = NULL;
6788 	mhi_free_controller(mhi_ctrl);
6789 	pci_priv->mhi_ctrl = NULL;
6790 }
6791 
6792 static void cnss_pci_config_regs(struct cnss_pci_data *pci_priv)
6793 {
6794 	switch (pci_priv->device_id) {
6795 	case QCA6390_DEVICE_ID:
6796 		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6390;
6797 		pci_priv->wcss_reg = wcss_reg_access_seq;
6798 		pci_priv->pcie_reg = pcie_reg_access_seq;
6799 		pci_priv->wlaon_reg = wlaon_reg_access_seq;
6800 		pci_priv->syspm_reg = syspm_reg_access_seq;
6801 
6802 		/* Configure WDOG register with specific value so that we can
6803 		 * know if HW is in the process of WDOG reset recovery or not
6804 		 * when reading the registers.
6805 		 */
6806 		cnss_pci_reg_write
6807 		(pci_priv,
6808 		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG,
6809 		QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL);
6810 		break;
6811 	case QCA6490_DEVICE_ID:
6812 		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6490;
6813 		pci_priv->wlaon_reg = wlaon_reg_access_seq;
6814 		break;
6815 	default:
6816 		return;
6817 	}
6818 }
6819 
6820 #if !IS_ENABLED(CONFIG_ARCH_QCOM)
6821 static int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
6822 {
6823 	return 0;
6824 }
6825 
6826 static irqreturn_t cnss_pci_wake_handler(int irq, void *data)
6827 {
6828 	struct cnss_pci_data *pci_priv = data;
6829 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6830 	enum rpm_status status;
6831 	struct device *dev;
6832 
6833 	pci_priv->wake_counter++;
6834 	cnss_pr_dbg("WLAN PCI wake IRQ (%u) is asserted #%u\n",
6835 		    pci_priv->wake_irq, pci_priv->wake_counter);
6836 
6837 	/* Make sure abort current suspend */
6838 	cnss_pm_stay_awake(plat_priv);
6839 	cnss_pm_relax(plat_priv);
6840 	/* Above two pm* API calls will abort system suspend only when
6841 	 * plat_dev->dev->ws is initiated by device_init_wakeup() API, and
6842 	 * calling pm_system_wakeup() is just to guarantee system suspend
6843 	 * can be aborted if it is not initiated in any case.
6844 	 */
6845 	pm_system_wakeup();
6846 
6847 	dev = &pci_priv->pci_dev->dev;
6848 	status = dev->power.runtime_status;
6849 
6850 	if ((cnss_pci_get_monitor_wake_intr(pci_priv) &&
6851 	     cnss_pci_get_auto_suspended(pci_priv)) ||
6852 	    (status == RPM_SUSPENDING || status == RPM_SUSPENDED)) {
6853 		cnss_pci_set_monitor_wake_intr(pci_priv, false);
6854 		cnss_pci_pm_request_resume(pci_priv);
6855 	}
6856 
6857 	return IRQ_HANDLED;
6858 }
6859 
6860 /**
6861  * cnss_pci_wake_gpio_init() - Setup PCI wake GPIO for WLAN
6862  * @pci_priv: driver PCI bus context pointer
6863  *
6864  * This function initializes WLAN PCI wake GPIO and corresponding
6865  * interrupt. It should be used in non-MSM platforms whose PCIe
6866  * root complex driver doesn't handle the GPIO.
6867  *
6868  * Return: 0 for success or skip, negative value for error
6869  */
6870 static int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
6871 {
6872 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6873 	struct device *dev = &plat_priv->plat_dev->dev;
6874 	int ret = 0;
6875 
6876 	pci_priv->wake_gpio = of_get_named_gpio(dev->of_node,
6877 						"wlan-pci-wake-gpio", 0);
6878 	if (pci_priv->wake_gpio < 0)
6879 		goto out;
6880 
6881 	cnss_pr_dbg("Get PCI wake GPIO (%d) from device node\n",
6882 		    pci_priv->wake_gpio);
6883 
6884 	ret = gpio_request(pci_priv->wake_gpio, "wlan_pci_wake_gpio");
6885 	if (ret) {
6886 		cnss_pr_err("Failed to request PCI wake GPIO, err = %d\n",
6887 			    ret);
6888 		goto out;
6889 	}
6890 
6891 	gpio_direction_input(pci_priv->wake_gpio);
6892 	pci_priv->wake_irq = gpio_to_irq(pci_priv->wake_gpio);
6893 
6894 	ret = request_irq(pci_priv->wake_irq, cnss_pci_wake_handler,
6895 			  IRQF_TRIGGER_FALLING, "wlan_pci_wake_irq", pci_priv);
6896 	if (ret) {
6897 		cnss_pr_err("Failed to request PCI wake IRQ, err = %d\n", ret);
6898 		goto free_gpio;
6899 	}
6900 
6901 	ret = enable_irq_wake(pci_priv->wake_irq);
6902 	if (ret) {
6903 		cnss_pr_err("Failed to enable PCI wake IRQ, err = %d\n", ret);
6904 		goto free_irq;
6905 	}
6906 
6907 	return 0;
6908 
6909 free_irq:
6910 	free_irq(pci_priv->wake_irq, pci_priv);
6911 free_gpio:
6912 	gpio_free(pci_priv->wake_gpio);
6913 out:
6914 	return ret;
6915 }
6916 
6917 static void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
6918 {
6919 	if (pci_priv->wake_gpio < 0)
6920 		return;
6921 
6922 	disable_irq_wake(pci_priv->wake_irq);
6923 	free_irq(pci_priv->wake_irq, pci_priv);
6924 	gpio_free(pci_priv->wake_gpio);
6925 }
6926 #endif
6927 
6928 #ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
6929 static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
6930 {
6931 	int ret = 0;
6932 
6933 	/* in the dual wlan card case, if call pci_register_driver after
6934 	 * finishing the first pcie device enumeration, it will cause
6935 	 * the cnss_pci_probe called in advance with the second wlan card,
6936 	 * and the sequence like this:
6937 	 * enter msm_pcie_enumerate -> pci_bus_add_devices -> cnss_pci_probe
6938 	 * -> exit msm_pcie_enumerate.
6939 	 * But the correct sequence we expected is like this:
6940 	 * enter msm_pcie_enumerate -> pci_bus_add_devices  ->
6941 	 * exit msm_pcie_enumerate -> cnss_pci_probe.
6942 	 * And this unexpected sequence will make the second wlan card do
6943 	 * pcie link suspend while the pcie enumeration not finished.
6944 	 * So need to add below logical to avoid doing pcie link suspend
6945 	 * if the enumeration has not finish.
6946 	 */
6947 	plat_priv->enumerate_done = true;
6948 
6949 	/* Now enumeration is finished, try to suspend PCIe link */
6950 	if (plat_priv->bus_priv) {
6951 		struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
6952 		struct pci_dev *pci_dev = pci_priv->pci_dev;
6953 
6954 		switch (pci_dev->device) {
6955 		case QCA6390_DEVICE_ID:
6956 			cnss_pci_set_wlaon_pwr_ctrl(pci_priv,
6957 						    false,
6958 						    true,
6959 						    false);
6960 
6961 			cnss_pci_suspend_pwroff(pci_dev);
6962 			break;
6963 		default:
6964 			cnss_pr_err("Unknown PCI device found: 0x%x\n",
6965 				    pci_dev->device);
6966 			ret = -ENODEV;
6967 		}
6968 	}
6969 
6970 	return ret;
6971 }
6972 #else
6973 static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
6974 {
6975 	return 0;
6976 }
6977 #endif
6978 
6979 /* Setting to use this cnss_pm_domain ops will let PM framework override the
6980  * ops from dev->bus->pm which is pci_dev_pm_ops from pci-driver.c. This ops
6981  * has to take care everything device driver needed which is currently done
6982  * from pci_dev_pm_ops.
6983  */
6984 static struct dev_pm_domain cnss_pm_domain = {
6985 	.ops = {
6986 		SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
6987 		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
6988 					      cnss_pci_resume_noirq)
6989 		SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend,
6990 				   cnss_pci_runtime_resume,
6991 				   cnss_pci_runtime_idle)
6992 	}
6993 };
6994 
6995 static int cnss_pci_get_dev_cfg_node(struct cnss_plat_data *plat_priv)
6996 {
6997 	struct device_node *child;
6998 	u32 id, i;
6999 	int id_n, ret;
7000 
7001 	if (plat_priv->dt_type != CNSS_DTT_MULTIEXCHG)
7002 		return 0;
7003 
7004 	if (!plat_priv->device_id) {
7005 		cnss_pr_err("Invalid device id\n");
7006 		return -EINVAL;
7007 	}
7008 
7009 	for_each_available_child_of_node(plat_priv->plat_dev->dev.of_node,
7010 					 child) {
7011 		if (strcmp(child->name, "chip_cfg"))
7012 			continue;
7013 
7014 		id_n = of_property_count_u32_elems(child, "supported-ids");
7015 		if (id_n <= 0) {
7016 			cnss_pr_err("Device id is NOT set\n");
7017 			return -EINVAL;
7018 		}
7019 
7020 		for (i = 0; i < id_n; i++) {
7021 			ret = of_property_read_u32_index(child,
7022 							 "supported-ids",
7023 							 i, &id);
7024 			if (ret) {
7025 				cnss_pr_err("Failed to read supported ids\n");
7026 				return -EINVAL;
7027 			}
7028 
7029 			if (id == plat_priv->device_id) {
7030 				plat_priv->dev_node = child;
7031 				cnss_pr_dbg("got node[%s@%d] for device[0x%x]\n",
7032 					    child->name, i, id);
7033 				return 0;
7034 			}
7035 		}
7036 	}
7037 
7038 	return -EINVAL;
7039 }
7040 
7041 #ifdef CONFIG_CNSS2_CONDITIONAL_POWEROFF
7042 static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
7043 {
7044 	bool suspend_pwroff;
7045 
7046 	switch (pci_dev->device) {
7047 	case QCA6390_DEVICE_ID:
7048 	case QCA6490_DEVICE_ID:
7049 		suspend_pwroff = false;
7050 		break;
7051 	default:
7052 		suspend_pwroff = true;
7053 	}
7054 
7055 	return suspend_pwroff;
7056 }
7057 #else
7058 static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
7059 {
7060 	return true;
7061 }
7062 #endif
7063 
7064 static int cnss_pci_set_gen2_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
7065 {
7066 	int ret;
7067 
7068 	/* Always set initial target PCIe link speed to Gen2 for QCA6490 device
7069 	 * since there may be link issues if it boots up with Gen3 link speed.
7070 	 * Device is able to change it later at any time. It will be rejected
7071 	 * if requested speed is higher than the one specified in PCIe DT.
7072 	 */
7073 	ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
7074 					  PCI_EXP_LNKSTA_CLS_5_0GB);
7075 	if (ret && ret != -EPROBE_DEFER)
7076 		cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen2, err = %d\n",
7077 				rc_num, ret);
7078 
7079 	return ret;
7080 }
7081 
7082 #ifdef CONFIG_CNSS2_ENUM_WITH_LOW_SPEED
7083 static void
7084 cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
7085 {
7086 	int ret;
7087 
7088 	ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
7089 					  PCI_EXP_LNKSTA_CLS_2_5GB);
7090 	if (ret)
7091 		cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen1, err = %d\n",
7092 			     rc_num, ret);
7093 }
7094 
7095 static void
7096 cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
7097 {
7098 	int ret;
7099 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
7100 
7101 	/* if not Genoa, do not restore rc speed */
7102 	if (pci_priv->device_id == QCA6490_DEVICE_ID) {
7103 		cnss_pci_set_gen2_speed(plat_priv, plat_priv->rc_num);
7104 	} else if (pci_priv->device_id != QCN7605_DEVICE_ID) {
7105 		/* The request 0 will reset maximum GEN speed to default */
7106 		ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num, 0);
7107 		if (ret)
7108 			cnss_pr_err("Failed to reset max PCIe RC%x link speed to default, err = %d\n",
7109 				     plat_priv->rc_num, ret);
7110 	}
7111 }
7112 
7113 static void
7114 cnss_pci_link_retrain_trigger(struct cnss_pci_data *pci_priv)
7115 {
7116 	int ret;
7117 
7118 	/* suspend/resume will trigger retain to re-establish link speed */
7119 	ret = cnss_suspend_pci_link(pci_priv);
7120 	if (ret)
7121 		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
7122 
7123 	ret = cnss_resume_pci_link(pci_priv);
7124 	if (ret)
7125 		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
7126 
7127 	cnss_pci_get_link_status(pci_priv);
7128 }
7129 #else
7130 static void
7131 cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
7132 {
7133 }
7134 
7135 static void
7136 cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
7137 {
7138 }
7139 
7140 static void
7141 cnss_pci_link_retrain_trigger(struct cnss_pci_data *pci_priv)
7142 {
7143 }
7144 #endif
7145 
7146 static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev)
7147 {
7148 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
7149 	int rc_num = pci_dev->bus->domain_nr;
7150 	struct cnss_plat_data *plat_priv;
7151 	int ret = 0;
7152 	bool suspend_pwroff = cnss_should_suspend_pwroff(pci_dev);
7153 
7154 	plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);
7155 
7156 	if (suspend_pwroff) {
7157 		ret = cnss_suspend_pci_link(pci_priv);
7158 		if (ret)
7159 			cnss_pr_err("Failed to suspend PCI link, err = %d\n",
7160 				    ret);
7161 		cnss_power_off_device(plat_priv);
7162 	} else {
7163 		cnss_pr_dbg("bus suspend and dev power off disabled for device [0x%x]\n",
7164 			    pci_dev->device);
7165 		cnss_pci_link_retrain_trigger(pci_priv);
7166 	}
7167 }
7168 
7169 static int cnss_pci_probe(struct pci_dev *pci_dev,
7170 			  const struct pci_device_id *id)
7171 {
7172 	int ret = 0;
7173 	struct cnss_pci_data *pci_priv;
7174 	struct device *dev = &pci_dev->dev;
7175 	int rc_num = pci_dev->bus->domain_nr;
7176 	struct cnss_plat_data *plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);
7177 
7178 	cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x rc_num %d\n",
7179 		    id->vendor, pci_dev->device, rc_num);
7180 	if (!plat_priv) {
7181 		cnss_pr_err("Find match plat_priv with rc number failure\n");
7182 		ret = -ENODEV;
7183 		goto out;
7184 	}
7185 
7186 	pci_priv = devm_kzalloc(dev, sizeof(*pci_priv), GFP_KERNEL);
7187 	if (!pci_priv) {
7188 		ret = -ENOMEM;
7189 		goto out;
7190 	}
7191 
7192 	pci_priv->pci_link_state = PCI_LINK_UP;
7193 	pci_priv->plat_priv = plat_priv;
7194 	pci_priv->pci_dev = pci_dev;
7195 	pci_priv->pci_device_id = id;
7196 	pci_priv->device_id = pci_dev->device;
7197 	cnss_set_pci_priv(pci_dev, pci_priv);
7198 	plat_priv->device_id = pci_dev->device;
7199 	plat_priv->bus_priv = pci_priv;
7200 	mutex_init(&pci_priv->bus_lock);
7201 	if (plat_priv->use_pm_domain)
7202 		dev->pm_domain = &cnss_pm_domain;
7203 
7204 	cnss_pci_restore_rc_speed(pci_priv);
7205 
7206 	ret = cnss_pci_get_dev_cfg_node(plat_priv);
7207 	if (ret) {
7208 		cnss_pr_err("Failed to get device cfg node, err = %d\n", ret);
7209 		goto reset_ctx;
7210 	}
7211 
7212 	cnss_get_sleep_clk_supported(plat_priv);
7213 
7214 	ret = cnss_dev_specific_power_on(plat_priv);
7215 	if (ret < 0)
7216 		goto reset_ctx;
7217 
7218 	cnss_pci_of_reserved_mem_device_init(pci_priv);
7219 
7220 	ret = cnss_register_subsys(plat_priv);
7221 	if (ret)
7222 		goto reset_ctx;
7223 
7224 	ret = cnss_register_ramdump(plat_priv);
7225 	if (ret)
7226 		goto unregister_subsys;
7227 
7228 	ret = cnss_pci_init_smmu(pci_priv);
7229 	if (ret)
7230 		goto unregister_ramdump;
7231 
7232 	/* update drv support flag */
7233 	cnss_pci_update_drv_supported(pci_priv);
7234 
7235 	cnss_update_supported_link_info(pci_priv);
7236 
7237 	ret = cnss_reg_pci_event(pci_priv);
7238 	if (ret) {
7239 		cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
7240 		goto deinit_smmu;
7241 	}
7242 
7243 	ret = cnss_pci_enable_bus(pci_priv);
7244 	if (ret)
7245 		goto dereg_pci_event;
7246 
7247 	ret = cnss_pci_enable_msi(pci_priv);
7248 	if (ret)
7249 		goto disable_bus;
7250 
7251 	ret = cnss_pci_register_mhi(pci_priv);
7252 	if (ret)
7253 		goto disable_msi;
7254 
7255 	switch (pci_dev->device) {
7256 	case QCA6174_DEVICE_ID:
7257 		pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET,
7258 				     &pci_priv->revision_id);
7259 		break;
7260 	case QCA6290_DEVICE_ID:
7261 	case QCA6390_DEVICE_ID:
7262 	case QCN7605_DEVICE_ID:
7263 	case QCA6490_DEVICE_ID:
7264 	case KIWI_DEVICE_ID:
7265 	case MANGO_DEVICE_ID:
7266 	case PEACH_DEVICE_ID:
7267 		if ((cnss_is_dual_wlan_enabled() &&
7268 		     plat_priv->enumerate_done) || !cnss_is_dual_wlan_enabled())
7269 			cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false,
7270 						    false);
7271 
7272 		timer_setup(&pci_priv->dev_rddm_timer,
7273 			    cnss_dev_rddm_timeout_hdlr, 0);
7274 		timer_setup(&pci_priv->boot_debug_timer,
7275 			    cnss_boot_debug_timeout_hdlr, 0);
7276 		INIT_DELAYED_WORK(&pci_priv->time_sync_work,
7277 				  cnss_pci_time_sync_work_hdlr);
7278 		cnss_pci_get_link_status(pci_priv);
7279 		cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, false);
7280 		cnss_pci_wake_gpio_init(pci_priv);
7281 		break;
7282 	default:
7283 		cnss_pr_err("Unknown PCI device found: 0x%x\n",
7284 			    pci_dev->device);
7285 		ret = -ENODEV;
7286 		goto unreg_mhi;
7287 	}
7288 
7289 	cnss_pci_config_regs(pci_priv);
7290 	if (EMULATION_HW)
7291 		goto out;
7292 	if (cnss_is_dual_wlan_enabled() && !plat_priv->enumerate_done)
7293 		goto probe_done;
7294 	cnss_pci_suspend_pwroff(pci_dev);
7295 
7296 probe_done:
7297 	set_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);
7298 
7299 	return 0;
7300 
7301 unreg_mhi:
7302 	cnss_pci_unregister_mhi(pci_priv);
7303 disable_msi:
7304 	cnss_pci_disable_msi(pci_priv);
7305 disable_bus:
7306 	cnss_pci_disable_bus(pci_priv);
7307 dereg_pci_event:
7308 	cnss_dereg_pci_event(pci_priv);
7309 deinit_smmu:
7310 	cnss_pci_deinit_smmu(pci_priv);
7311 unregister_ramdump:
7312 	cnss_unregister_ramdump(plat_priv);
7313 unregister_subsys:
7314 	cnss_unregister_subsys(plat_priv);
7315 reset_ctx:
7316 	plat_priv->bus_priv = NULL;
7317 out:
7318 	return ret;
7319 }
7320 
7321 static void cnss_pci_remove(struct pci_dev *pci_dev)
7322 {
7323 	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
7324 	struct cnss_plat_data *plat_priv =
7325 		cnss_bus_dev_to_plat_priv(&pci_dev->dev);
7326 
7327 	clear_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);
7328 	cnss_pci_unregister_driver_hdlr(pci_priv);
7329 	cnss_pci_free_aux_mem(pci_priv);
7330 	cnss_pci_free_tme_lite_mem(pci_priv);
7331 	cnss_pci_free_m3_mem(pci_priv);
7332 	cnss_pci_free_fw_mem(pci_priv);
7333 	cnss_pci_free_qdss_mem(pci_priv);
7334 
7335 	switch (pci_dev->device) {
7336 	case QCA6290_DEVICE_ID:
7337 	case QCA6390_DEVICE_ID:
7338 	case QCN7605_DEVICE_ID:
7339 	case QCA6490_DEVICE_ID:
7340 	case KIWI_DEVICE_ID:
7341 	case MANGO_DEVICE_ID:
7342 	case PEACH_DEVICE_ID:
7343 		cnss_pci_wake_gpio_deinit(pci_priv);
7344 		del_timer(&pci_priv->boot_debug_timer);
7345 		del_timer(&pci_priv->dev_rddm_timer);
7346 		break;
7347 	default:
7348 		break;
7349 	}
7350 
7351 	cnss_pci_unregister_mhi(pci_priv);
7352 	cnss_pci_disable_msi(pci_priv);
7353 	cnss_pci_disable_bus(pci_priv);
7354 	cnss_dereg_pci_event(pci_priv);
7355 	cnss_pci_deinit_smmu(pci_priv);
7356 	if (plat_priv) {
7357 		cnss_unregister_ramdump(plat_priv);
7358 		cnss_unregister_subsys(plat_priv);
7359 		plat_priv->bus_priv = NULL;
7360 	} else {
7361 		cnss_pr_err("Plat_priv is null, Unable to unregister ramdump,subsys\n");
7362 	}
7363 }
7364 
7365 static const struct pci_device_id cnss_pci_id_table[] = {
7366 	{ QCA6174_VENDOR_ID, QCA6174_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7367 	{ QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7368 	{ QCA6390_VENDOR_ID, QCA6390_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7369 	{ QCN7605_VENDOR_ID, QCN7605_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7370 	{ QCA6490_VENDOR_ID, QCA6490_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7371 	{ KIWI_VENDOR_ID, KIWI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7372 	{ MANGO_VENDOR_ID, MANGO_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7373 	{ PEACH_VENDOR_ID, PEACH_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
7374 	{ 0 }
7375 };
7376 MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);
7377 
7378 static const struct dev_pm_ops cnss_pm_ops = {
7379 	SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
7380 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
7381 				      cnss_pci_resume_noirq)
7382 	SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend, cnss_pci_runtime_resume,
7383 			   cnss_pci_runtime_idle)
7384 };
7385 
7386 static struct pci_driver cnss_pci_driver = {
7387 	.name     = "cnss_pci",
7388 	.id_table = cnss_pci_id_table,
7389 	.probe    = cnss_pci_probe,
7390 	.remove   = cnss_pci_remove,
7391 	.driver = {
7392 		.pm = &cnss_pm_ops,
7393 	},
7394 };
7395 
7396 static int cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
7397 {
7398 	int ret, retry = 0;
7399 
7400 	if (plat_priv->device_id == QCA6490_DEVICE_ID) {
7401 		cnss_pci_set_gen2_speed(plat_priv, rc_num);
7402 	} else {
7403 		cnss_pci_downgrade_rc_speed(plat_priv, rc_num);
7404 	}
7405 
7406 	cnss_pr_dbg("Trying to enumerate with PCIe RC%x\n", rc_num);
7407 retry:
7408 	ret = _cnss_pci_enumerate(plat_priv, rc_num);
7409 	if (ret) {
7410 		if (ret == -EPROBE_DEFER) {
7411 			cnss_pr_dbg("PCIe RC driver is not ready, defer probe\n");
7412 			goto out;
7413 		}
7414 		cnss_pr_err("Failed to enable PCIe RC%x, err = %d\n",
7415 			    rc_num, ret);
7416 		if (retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
7417 			cnss_pr_dbg("Retry PCI link training #%d\n", retry);
7418 			goto retry;
7419 		} else {
7420 			goto out;
7421 		}
7422 	}
7423 
7424 	plat_priv->rc_num = rc_num;
7425 
7426 out:
7427 	return ret;
7428 }
7429 
7430 int cnss_pci_init(struct cnss_plat_data *plat_priv)
7431 {
7432 	struct device *dev = &plat_priv->plat_dev->dev;
7433 	const __be32 *prop;
7434 	int ret = 0, prop_len = 0, rc_count, i;
7435 
7436 	prop = of_get_property(dev->of_node, "qcom,wlan-rc-num", &prop_len);
7437 	if (!prop || !prop_len) {
7438 		cnss_pr_err("Failed to get PCIe RC number from DT\n");
7439 		goto out;
7440 	}
7441 
7442 	rc_count = prop_len / sizeof(__be32);
7443 	for (i = 0; i < rc_count; i++) {
7444 		ret = cnss_pci_enumerate(plat_priv, be32_to_cpup(&prop[i]));
7445 		if (!ret)
7446 			break;
7447 		else if (ret == -EPROBE_DEFER || (ret && i == rc_count - 1))
7448 			goto out;
7449 	}
7450 
7451 	ret = cnss_try_suspend(plat_priv);
7452 	if (ret) {
7453 		cnss_pr_err("Failed to suspend, ret: %d\n", ret);
7454 		goto out;
7455 	}
7456 
7457 	if (!cnss_driver_registered) {
7458 		ret = pci_register_driver(&cnss_pci_driver);
7459 		if (ret) {
7460 			cnss_pr_err("Failed to register to PCI framework, err = %d\n",
7461 				    ret);
7462 			goto out;
7463 		}
7464 		if (!plat_priv->bus_priv) {
7465 			cnss_pr_err("Failed to probe PCI driver\n");
7466 			ret = -ENODEV;
7467 			goto unreg_pci;
7468 		}
7469 		cnss_driver_registered = true;
7470 	}
7471 
7472 	return 0;
7473 
7474 unreg_pci:
7475 	pci_unregister_driver(&cnss_pci_driver);
7476 out:
7477 	return ret;
7478 }
7479 
7480 void cnss_pci_deinit(struct cnss_plat_data *plat_priv)
7481 {
7482 	if (cnss_driver_registered) {
7483 		pci_unregister_driver(&cnss_pci_driver);
7484 		cnss_driver_registered = false;
7485 	}
7486 }
7487